github.com/slayercat/go@v0.0.0-20170428012452-c51559813f61/src/cmd/compile/internal/gc/walk.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package gc 6 7 import ( 8 "cmd/compile/internal/types" 9 "cmd/internal/objabi" 10 "cmd/internal/sys" 11 "fmt" 12 "strings" 13 ) 14 15 // The constant is known to runtime. 16 const ( 17 tmpstringbufsize = 32 18 ) 19 20 func walk(fn *Node) { 21 Curfn = fn 22 23 if Debug['W'] != 0 { 24 s := fmt.Sprintf("\nbefore %v", Curfn.Func.Nname.Sym) 25 dumplist(s, Curfn.Nbody) 26 } 27 28 lno := lineno 29 30 // Final typecheck for any unused variables. 31 for i, ln := range fn.Func.Dcl { 32 if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) { 33 ln = typecheck(ln, Erv|Easgn) 34 fn.Func.Dcl[i] = ln 35 } 36 } 37 38 // Propagate the used flag for typeswitch variables up to the NONAME in it's definition. 39 for _, ln := range fn.Func.Dcl { 40 if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() { 41 ln.Name.Defn.Left.Name.SetUsed(true) 42 } 43 } 44 45 for _, ln := range fn.Func.Dcl { 46 if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() { 47 continue 48 } 49 if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW { 50 if defn.Left.Name.Used() { 51 continue 52 } 53 yyerrorl(defn.Left.Pos, "%v declared and not used", ln.Sym) 54 defn.Left.Name.SetUsed(true) // suppress repeats 55 } else { 56 yyerrorl(ln.Pos, "%v declared and not used", ln.Sym) 57 } 58 } 59 60 lineno = lno 61 if nerrors != 0 { 62 return 63 } 64 walkstmtlist(Curfn.Nbody.Slice()) 65 if Debug['W'] != 0 { 66 s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym) 67 dumplist(s, Curfn.Nbody) 68 } 69 70 zeroResults() 71 heapmoves() 72 if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 { 73 s 
:= fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym) 74 dumplist(s, Curfn.Func.Enter) 75 } 76 } 77 78 func walkstmtlist(s []*Node) { 79 for i := range s { 80 s[i] = walkstmt(s[i]) 81 } 82 } 83 84 func samelist(a, b []*Node) bool { 85 if len(a) != len(b) { 86 return false 87 } 88 for i, n := range a { 89 if n != b[i] { 90 return false 91 } 92 } 93 return true 94 } 95 96 func paramoutheap(fn *Node) bool { 97 for _, ln := range fn.Func.Dcl { 98 switch ln.Class() { 99 case PPARAMOUT: 100 if ln.isParamStackCopy() || ln.Addrtaken() { 101 return true 102 } 103 104 case PAUTO: 105 // stop early - parameters are over 106 return false 107 } 108 } 109 110 return false 111 } 112 113 // adds "adjust" to all the argument locations for the call n. 114 // n must be a defer or go node that has already been walked. 115 func adjustargs(n *Node, adjust int) { 116 var arg *Node 117 var lhs *Node 118 119 callfunc := n.Left 120 for _, arg = range callfunc.List.Slice() { 121 if arg.Op != OAS { 122 Fatalf("call arg not assignment") 123 } 124 lhs = arg.Left 125 if lhs.Op == ONAME { 126 // This is a temporary introduced by reorder1. 127 // The real store to the stack appears later in the arg list. 128 continue 129 } 130 131 if lhs.Op != OINDREGSP { 132 Fatalf("call argument store does not use OINDREGSP") 133 } 134 135 // can't really check this in machine-indep code. 136 //if(lhs->val.u.reg != D_SP) 137 // Fatalf("call arg assign not indreg(SP)") 138 lhs.Xoffset += int64(adjust) 139 } 140 } 141 142 // The result of walkstmt MUST be assigned back to n, e.g. 
143 // n.Left = walkstmt(n.Left) 144 func walkstmt(n *Node) *Node { 145 if n == nil { 146 return n 147 } 148 149 setlineno(n) 150 151 walkstmtlist(n.Ninit.Slice()) 152 153 switch n.Op { 154 default: 155 if n.Op == ONAME { 156 yyerror("%v is not a top level statement", n.Sym) 157 } else { 158 yyerror("%v is not a top level statement", n.Op) 159 } 160 Dump("nottop", n) 161 162 case OAS, 163 OASOP, 164 OAS2, 165 OAS2DOTTYPE, 166 OAS2RECV, 167 OAS2FUNC, 168 OAS2MAPR, 169 OCLOSE, 170 OCOPY, 171 OCALLMETH, 172 OCALLINTER, 173 OCALL, 174 OCALLFUNC, 175 ODELETE, 176 OSEND, 177 OPRINT, 178 OPRINTN, 179 OPANIC, 180 OEMPTY, 181 ORECOVER, 182 OGETG: 183 if n.Typecheck() == 0 { 184 Fatalf("missing typecheck: %+v", n) 185 } 186 wascopy := n.Op == OCOPY 187 init := n.Ninit 188 n.Ninit.Set(nil) 189 n = walkexpr(n, &init) 190 n = addinit(n, init.Slice()) 191 if wascopy && n.Op == OCONVNOP { 192 n.Op = OEMPTY // don't leave plain values as statements. 193 } 194 195 // special case for a receive where we throw away 196 // the value received. 
197 case ORECV: 198 if n.Typecheck() == 0 { 199 Fatalf("missing typecheck: %+v", n) 200 } 201 init := n.Ninit 202 n.Ninit.Set(nil) 203 204 n.Left = walkexpr(n.Left, &init) 205 n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil()) 206 n = walkexpr(n, &init) 207 208 n = addinit(n, init.Slice()) 209 210 case OBREAK, 211 OCONTINUE, 212 OFALL, 213 OGOTO, 214 OLABEL, 215 ODCLCONST, 216 ODCLTYPE, 217 OCHECKNIL, 218 OVARKILL, 219 OVARLIVE: 220 break 221 222 case ODCL: 223 v := n.Left 224 if v.Class() == PAUTOHEAP { 225 if compiling_runtime { 226 yyerror("%v escapes to heap, not allowed in runtime.", v) 227 } 228 if prealloc[v] == nil { 229 prealloc[v] = callnew(v.Type) 230 } 231 nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v]) 232 nn.SetColas(true) 233 nn = typecheck(nn, Etop) 234 return walkstmt(nn) 235 } 236 237 case OBLOCK: 238 walkstmtlist(n.List.Slice()) 239 240 case OXCASE: 241 yyerror("case statement out of place") 242 n.Op = OCASE 243 fallthrough 244 245 case OCASE: 246 n.Right = walkstmt(n.Right) 247 248 case ODEFER: 249 Curfn.Func.SetHasDefer(true) 250 switch n.Left.Op { 251 case OPRINT, OPRINTN: 252 n.Left = walkprintfunc(n.Left, &n.Ninit) 253 254 case OCOPY: 255 n.Left = copyany(n.Left, &n.Ninit, true) 256 257 default: 258 n.Left = walkexpr(n.Left, &n.Ninit) 259 } 260 261 // make room for size & fn arguments. 
262 adjustargs(n, 2*Widthptr) 263 264 case OFOR, OFORUNTIL: 265 if n.Left != nil { 266 walkstmtlist(n.Left.Ninit.Slice()) 267 init := n.Left.Ninit 268 n.Left.Ninit.Set(nil) 269 n.Left = walkexpr(n.Left, &init) 270 n.Left = addinit(n.Left, init.Slice()) 271 } 272 273 n.Right = walkstmt(n.Right) 274 walkstmtlist(n.Nbody.Slice()) 275 276 case OIF: 277 n.Left = walkexpr(n.Left, &n.Ninit) 278 walkstmtlist(n.Nbody.Slice()) 279 walkstmtlist(n.Rlist.Slice()) 280 281 case OPROC: 282 switch n.Left.Op { 283 case OPRINT, OPRINTN: 284 n.Left = walkprintfunc(n.Left, &n.Ninit) 285 286 case OCOPY: 287 n.Left = copyany(n.Left, &n.Ninit, true) 288 289 default: 290 n.Left = walkexpr(n.Left, &n.Ninit) 291 } 292 293 // make room for size & fn arguments. 294 adjustargs(n, 2*Widthptr) 295 296 case ORETURN: 297 walkexprlist(n.List.Slice(), &n.Ninit) 298 if n.List.Len() == 0 { 299 break 300 } 301 if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) { 302 // assign to the function out parameters, 303 // so that reorder3 can fix up conflicts 304 var rl []*Node 305 306 var cl Class 307 for _, ln := range Curfn.Func.Dcl { 308 cl = ln.Class() 309 if cl == PAUTO || cl == PAUTOHEAP { 310 break 311 } 312 if cl == PPARAMOUT { 313 if ln.isParamStackCopy() { 314 ln = walkexpr(typecheck(nod(OIND, ln.Name.Param.Heapaddr, nil), Erv), nil) 315 } 316 rl = append(rl, ln) 317 } 318 } 319 320 if got, want := n.List.Len(), len(rl); got != want { 321 // order should have rewritten multi-value function calls 322 // with explicit OAS2FUNC nodes. 323 Fatalf("expected %v return arguments, have %v", want, got) 324 } 325 326 if samelist(rl, n.List.Slice()) { 327 // special return in disguise 328 n.List.Set(nil) 329 330 break 331 } 332 333 // move function calls out, to make reorder3's job easier. 
334 walkexprlistsafe(n.List.Slice(), &n.Ninit) 335 336 ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit) 337 n.List.Set(reorder3(ll)) 338 break 339 } 340 341 ll := ascompatte(nil, false, Curfn.Type.Results(), n.List.Slice(), 1, &n.Ninit) 342 n.List.Set(ll) 343 344 case ORETJMP: 345 break 346 347 case OSELECT: 348 walkselect(n) 349 350 case OSWITCH: 351 walkswitch(n) 352 353 case ORANGE: 354 n = walkrange(n) 355 356 case OXFALL: 357 yyerror("fallthrough statement out of place") 358 n.Op = OFALL 359 } 360 361 if n.Op == ONAME { 362 Fatalf("walkstmt ended up with name: %+v", n) 363 } 364 return n 365 } 366 367 func isSmallMakeSlice(n *Node) bool { 368 if n.Op != OMAKESLICE { 369 return false 370 } 371 l := n.Left 372 r := n.Right 373 if r == nil { 374 r = l 375 } 376 t := n.Type 377 378 return smallintconst(l) && smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < (1<<16)/t.Elem().Width) 379 } 380 381 // walk the whole tree of the body of an 382 // expression or simple statement. 383 // the types expressions are calculated. 384 // compile-time constants are evaluated. 385 // complex side effects like statements are appended to init 386 func walkexprlist(s []*Node, init *Nodes) { 387 for i := range s { 388 s[i] = walkexpr(s[i], init) 389 } 390 } 391 392 func walkexprlistsafe(s []*Node, init *Nodes) { 393 for i, n := range s { 394 s[i] = safeexpr(n, init) 395 s[i] = walkexpr(s[i], init) 396 } 397 } 398 399 func walkexprlistcheap(s []*Node, init *Nodes) { 400 for i, n := range s { 401 s[i] = cheapexpr(n, init) 402 s[i] = walkexpr(s[i], init) 403 } 404 } 405 406 // Build name of function for interface conversion. 407 // Not all names are possible 408 // (e.g., we'll never generate convE2E or convE2I or convI2E). 
409 func convFuncName(from, to *types.Type) string { 410 tkind := to.Tie() 411 switch from.Tie() { 412 case 'I': 413 switch tkind { 414 case 'I': 415 return "convI2I" 416 } 417 case 'T': 418 switch tkind { 419 case 'E': 420 switch { 421 case from.Size() == 2 && from.Align == 2: 422 return "convT2E16" 423 case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from): 424 return "convT2E32" 425 case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from): 426 return "convT2E64" 427 case from.IsString(): 428 return "convT2Estring" 429 case from.IsSlice(): 430 return "convT2Eslice" 431 case !types.Haspointers(from): 432 return "convT2Enoptr" 433 } 434 return "convT2E" 435 case 'I': 436 switch { 437 case from.Size() == 2 && from.Align == 2: 438 return "convT2I16" 439 case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from): 440 return "convT2I32" 441 case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from): 442 return "convT2I64" 443 case from.IsString(): 444 return "convT2Istring" 445 case from.IsSlice(): 446 return "convT2Islice" 447 case !types.Haspointers(from): 448 return "convT2Inoptr" 449 } 450 return "convT2I" 451 } 452 } 453 Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie()) 454 panic("unreachable") 455 } 456 457 // The result of walkexpr MUST be assigned back to n, e.g. 458 // n.Left = walkexpr(n.Left, init) 459 func walkexpr(n *Node, init *Nodes) *Node { 460 if n == nil { 461 return n 462 } 463 464 if init == &n.Ninit { 465 // not okay to use n->ninit when walking n, 466 // because we might replace n with some other node 467 // and would lose the init list. 
468 Fatalf("walkexpr init == &n->ninit") 469 } 470 471 if n.Ninit.Len() != 0 { 472 walkstmtlist(n.Ninit.Slice()) 473 init.AppendNodes(&n.Ninit) 474 } 475 476 lno := setlineno(n) 477 478 if Debug['w'] > 1 { 479 Dump("walk-before", n) 480 } 481 482 if n.Typecheck() != 1 { 483 Fatalf("missed typecheck: %+v", n) 484 } 485 486 if n.Op == ONAME && n.Class() == PAUTOHEAP { 487 nn := nod(OIND, n.Name.Param.Heapaddr, nil) 488 nn = typecheck(nn, Erv) 489 nn = walkexpr(nn, init) 490 nn.Left.SetNonNil(true) 491 return nn 492 } 493 494 opswitch: 495 switch n.Op { 496 default: 497 Dump("walk", n) 498 Fatalf("walkexpr: switch 1 unknown op %+S", n) 499 500 case ONONAME, OINDREGSP, OEMPTY, OGETG: 501 502 case OTYPE, ONAME, OLITERAL: 503 // TODO(mdempsky): Just return n; see discussion on CL 38655. 504 // Perhaps refactor to use Node.mayBeShared for these instead. 505 // If these return early, make sure to still call 506 // stringsym for constant strings. 507 508 case ONOT, OMINUS, OPLUS, OCOM, OREAL, OIMAG, ODOTMETH, ODOTINTER, 509 OIND, OSPTR, OITAB, OIDATA, OADDR: 510 n.Left = walkexpr(n.Left, init) 511 512 case OEFACE, OAND, OSUB, OMUL, OLT, OLE, OGE, OGT, OADD, OOR, OXOR: 513 n.Left = walkexpr(n.Left, init) 514 n.Right = walkexpr(n.Right, init) 515 516 case ODOT: 517 usefield(n) 518 n.Left = walkexpr(n.Left, init) 519 520 case ODOTTYPE, ODOTTYPE2: 521 n.Left = walkexpr(n.Left, init) 522 // Set up interface type addresses for back end. 523 n.Right = typename(n.Type) 524 if n.Op == ODOTTYPE { 525 n.Right.Right = typename(n.Left.Type) 526 } 527 if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() { 528 n.List.Set1(itabname(n.Type, n.Left.Type)) 529 } 530 531 case ODOTPTR: 532 usefield(n) 533 if n.Op == ODOTPTR && n.Left.Type.Elem().Width == 0 { 534 // No actual copy will be generated, so emit an explicit nil check. 
535 n.Left = cheapexpr(n.Left, init) 536 537 checknil(n.Left, init) 538 } 539 540 n.Left = walkexpr(n.Left, init) 541 542 case OLEN, OCAP: 543 n.Left = walkexpr(n.Left, init) 544 545 // replace len(*[10]int) with 10. 546 // delayed until now to preserve side effects. 547 t := n.Left.Type 548 549 if t.IsPtr() { 550 t = t.Elem() 551 } 552 if t.IsArray() { 553 safeexpr(n.Left, init) 554 nodconst(n, n.Type, t.NumElem()) 555 n.SetTypecheck(1) 556 } 557 558 case OLSH, ORSH: 559 n.Left = walkexpr(n.Left, init) 560 n.Right = walkexpr(n.Right, init) 561 t := n.Left.Type 562 n.SetBounded(bounded(n.Right, 8*t.Width)) 563 if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) { 564 Warn("shift bounds check elided") 565 } 566 567 case OCOMPLEX: 568 // Use results from call expression as arguments for complex. 569 if n.Left == nil && n.Right == nil { 570 n.Left = n.List.First() 571 n.Right = n.List.Second() 572 } 573 n.Left = walkexpr(n.Left, init) 574 n.Right = walkexpr(n.Right, init) 575 576 case OEQ, ONE: 577 n.Left = walkexpr(n.Left, init) 578 n.Right = walkexpr(n.Right, init) 579 580 // Disable safemode while compiling this code: the code we 581 // generate internally can refer to unsafe.Pointer. 582 // In this case it can happen if we need to generate an == 583 // for a struct containing a reflect.Value, which itself has 584 // an unexported field of type unsafe.Pointer. 585 old_safemode := safemode 586 safemode = false 587 n = walkcompare(n, init) 588 safemode = old_safemode 589 590 case OANDAND, OOROR: 591 n.Left = walkexpr(n.Left, init) 592 593 // cannot put side effects from n.Right on init, 594 // because they cannot run before n.Left is checked. 595 // save elsewhere and store on the eventual n.Right. 
596 var ll Nodes 597 598 n.Right = walkexpr(n.Right, &ll) 599 n.Right = addinit(n.Right, ll.Slice()) 600 n = walkinrange(n, init) 601 602 case OPRINT, OPRINTN: 603 walkexprlist(n.List.Slice(), init) 604 n = walkprint(n, init) 605 606 case OPANIC: 607 n = mkcall("gopanic", nil, init, n.Left) 608 609 case ORECOVER: 610 n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil)) 611 612 case OCLOSUREVAR, OCFUNC: 613 n.SetAddable(true) 614 615 case OCALLINTER: 616 usemethod(n) 617 t := n.Left.Type 618 if n.List.Len() != 0 && n.List.First().Op == OAS { 619 break 620 } 621 n.Left = walkexpr(n.Left, init) 622 walkexprlist(n.List.Slice(), init) 623 ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init) 624 n.List.Set(reorder1(ll)) 625 626 case OCALLFUNC: 627 if n.Left.Op == OCLOSURE { 628 // Transform direct call of a closure to call of a normal function. 629 // transformclosure already did all preparation work. 630 631 // Prepend captured variables to argument list. 632 n.List.Prepend(n.Left.Func.Enter.Slice()...) 633 634 n.Left.Func.Enter.Set(nil) 635 636 // Replace OCLOSURE with ONAME/PFUNC. 637 n.Left = n.Left.Func.Closure.Func.Nname 638 639 // Update type of OCALLFUNC node. 640 // Output arguments had not changed, but their offsets could. 
641 if n.Left.Type.Results().NumFields() == 1 { 642 n.Type = n.Left.Type.Results().Field(0).Type 643 } else { 644 n.Type = n.Left.Type.Results() 645 } 646 } 647 648 t := n.Left.Type 649 if n.List.Len() != 0 && n.List.First().Op == OAS { 650 break 651 } 652 653 n.Left = walkexpr(n.Left, init) 654 walkexprlist(n.List.Slice(), init) 655 656 ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init) 657 n.List.Set(reorder1(ll)) 658 659 case OCALLMETH: 660 t := n.Left.Type 661 if n.List.Len() != 0 && n.List.First().Op == OAS { 662 break 663 } 664 n.Left = walkexpr(n.Left, init) 665 walkexprlist(n.List.Slice(), init) 666 ll := ascompatte(n, false, t.Recvs(), []*Node{n.Left.Left}, 0, init) 667 lr := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init) 668 ll = append(ll, lr...) 669 n.Left.Left = nil 670 updateHasCall(n.Left) 671 n.List.Set(reorder1(ll)) 672 673 case OAS: 674 init.AppendNodes(&n.Ninit) 675 676 n.Left = walkexpr(n.Left, init) 677 n.Left = safeexpr(n.Left, init) 678 679 if oaslit(n, init) { 680 break 681 } 682 683 if n.Right == nil { 684 // TODO(austin): Check all "implicit zeroing" 685 break 686 } 687 688 if !instrumenting && iszero(n.Right) { 689 break 690 } 691 692 switch n.Right.Op { 693 default: 694 n.Right = walkexpr(n.Right, init) 695 696 case ORECV: 697 // x = <-c; n.Left is x, n.Right.Left is c. 698 // orderstmt made sure x is addressable. 699 n.Right.Left = walkexpr(n.Right.Left, init) 700 701 n1 := nod(OADDR, n.Left, nil) 702 r := n.Right.Left // the channel 703 n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1) 704 n = walkexpr(n, init) 705 break opswitch 706 707 case OAPPEND: 708 // x = append(...) 709 r := n.Right 710 if r.Type.Elem().NotInHeap() { 711 yyerror("%v is go:notinheap; heap allocation disallowed", r.Type.Elem()) 712 } 713 if r.Isddd() { 714 r = appendslice(r, init) // also works for append(slice, string). 
715 } else { 716 r = walkappend(r, init, n) 717 } 718 n.Right = r 719 if r.Op == OAPPEND { 720 // Left in place for back end. 721 // Do not add a new write barrier. 722 // Set up address of type for back end. 723 r.Left = typename(r.Type.Elem()) 724 break opswitch 725 } 726 // Otherwise, lowered for race detector. 727 // Treat as ordinary assignment. 728 } 729 730 if n.Left != nil && n.Right != nil { 731 n = convas(n, init) 732 } 733 734 case OAS2: 735 init.AppendNodes(&n.Ninit) 736 walkexprlistsafe(n.List.Slice(), init) 737 walkexprlistsafe(n.Rlist.Slice(), init) 738 ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init) 739 ll = reorder3(ll) 740 n = liststmt(ll) 741 742 // a,b,... = fn() 743 case OAS2FUNC: 744 init.AppendNodes(&n.Ninit) 745 746 r := n.Rlist.First() 747 walkexprlistsafe(n.List.Slice(), init) 748 r = walkexpr(r, init) 749 750 if isIntrinsicCall(r) { 751 n.Rlist.Set1(r) 752 break 753 } 754 init.Append(r) 755 756 ll := ascompatet(n.Op, n.List, r.Type) 757 n = liststmt(ll) 758 759 // x, y = <-c 760 // orderstmt made sure x is addressable. 
761 case OAS2RECV: 762 init.AppendNodes(&n.Ninit) 763 764 r := n.Rlist.First() 765 walkexprlistsafe(n.List.Slice(), init) 766 r.Left = walkexpr(r.Left, init) 767 var n1 *Node 768 if isblank(n.List.First()) { 769 n1 = nodnil() 770 } else { 771 n1 = nod(OADDR, n.List.First(), nil) 772 } 773 n1.Etype = 1 // addr does not escape 774 fn := chanfn("chanrecv2", 2, r.Left.Type) 775 ok := n.List.Second() 776 call := mkcall1(fn, ok.Type, init, r.Left, n1) 777 n = nod(OAS, ok, call) 778 n = typecheck(n, Etop) 779 780 // a,b = m[i] 781 case OAS2MAPR: 782 init.AppendNodes(&n.Ninit) 783 784 r := n.Rlist.First() 785 walkexprlistsafe(n.List.Slice(), init) 786 r.Left = walkexpr(r.Left, init) 787 r.Right = walkexpr(r.Right, init) 788 t := r.Left.Type 789 790 fast := mapfast(t) 791 var key *Node 792 if fast != mapslow { 793 // fast versions take key by value 794 key = r.Right 795 } else { 796 // standard version takes key by reference 797 // orderexpr made sure key is addressable. 798 key = nod(OADDR, r.Right, nil) 799 } 800 801 // from: 802 // a,b = m[i] 803 // to: 804 // var,b = mapaccess2*(t, m, i) 805 // a = *var 806 a := n.List.First() 807 808 if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero 809 fn := mapfn(mapaccess2[fast], t) 810 r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key) 811 } else { 812 fn := mapfn("mapaccess2_fat", t) 813 z := zeroaddr(w) 814 r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z) 815 } 816 817 // mapaccess2* returns a typed bool, but due to spec changes, 818 // the boolean result of i.(T) is now untyped so we make it the 819 // same type as the variable on the lhs. 
820 if ok := n.List.Second(); !isblank(ok) && ok.Type.IsBoolean() { 821 r.Type.Field(1).Type = ok.Type 822 } 823 n.Rlist.Set1(r) 824 n.Op = OAS2FUNC 825 826 // don't generate a = *var if a is _ 827 if !isblank(a) { 828 var_ := temp(types.NewPtr(t.Val())) 829 var_.SetTypecheck(1) 830 var_.SetNonNil(true) // mapaccess always returns a non-nil pointer 831 n.List.SetFirst(var_) 832 n = walkexpr(n, init) 833 init.Append(n) 834 n = nod(OAS, a, nod(OIND, var_, nil)) 835 } 836 837 n = typecheck(n, Etop) 838 n = walkexpr(n, init) 839 840 case ODELETE: 841 init.AppendNodes(&n.Ninit) 842 map_ := n.List.First() 843 key := n.List.Second() 844 map_ = walkexpr(map_, init) 845 key = walkexpr(key, init) 846 847 t := map_.Type 848 fast := mapfast(t) 849 if fast == mapslow { 850 // orderstmt made sure key is addressable. 851 key = nod(OADDR, key, nil) 852 } 853 n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key) 854 855 case OAS2DOTTYPE: 856 walkexprlistsafe(n.List.Slice(), init) 857 n.Rlist.SetFirst(walkexpr(n.Rlist.First(), init)) 858 859 case OCONVIFACE: 860 n.Left = walkexpr(n.Left, init) 861 862 // Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped. 863 if isdirectiface(n.Left.Type) { 864 var t *Node 865 if n.Type.IsEmptyInterface() { 866 t = typename(n.Left.Type) 867 } else { 868 t = itabname(n.Left.Type, n.Type) 869 } 870 l := nod(OEFACE, t, n.Left) 871 l.Type = n.Type 872 l.SetTypecheck(n.Typecheck()) 873 n = l 874 break 875 } 876 877 if staticbytes == nil { 878 staticbytes = newname(Runtimepkg.Lookup("staticbytes")) 879 staticbytes.SetClass(PEXTERN) 880 staticbytes.Type = types.NewArray(types.Types[TUINT8], 256) 881 zerobase = newname(Runtimepkg.Lookup("zerobase")) 882 zerobase.SetClass(PEXTERN) 883 zerobase.Type = types.Types[TUINTPTR] 884 } 885 886 // Optimize convT2{E,I} for many cases in which T is not pointer-shaped, 887 // by using an existing addressable value identical to n.Left 888 // or creating one on the stack. 
889 var value *Node 890 switch { 891 case n.Left.Type.Size() == 0: 892 // n.Left is zero-sized. Use zerobase. 893 cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246. 894 value = zerobase 895 case n.Left.Type.IsBoolean() || (n.Left.Type.Size() == 1 && n.Left.Type.IsInteger()): 896 // n.Left is a bool/byte. Use staticbytes[n.Left]. 897 n.Left = cheapexpr(n.Left, init) 898 value = nod(OINDEX, staticbytes, byteindex(n.Left)) 899 value.SetBounded(true) 900 case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly(): 901 // n.Left is a readonly global; use it directly. 902 value = n.Left 903 case !n.Left.Type.IsInterface() && n.Esc == EscNone && n.Left.Type.Width <= 1024: 904 // n.Left does not escape. Use a stack temporary initialized to n.Left. 905 value = temp(n.Left.Type) 906 init.Append(typecheck(nod(OAS, value, n.Left), Etop)) 907 } 908 909 if value != nil { 910 // Value is identical to n.Left. 911 // Construct the interface directly: {type/itab, &value}. 912 var t *Node 913 if n.Type.IsEmptyInterface() { 914 t = typename(n.Left.Type) 915 } else { 916 t = itabname(n.Left.Type, n.Type) 917 } 918 l := nod(OEFACE, t, typecheck(nod(OADDR, value, nil), Erv)) 919 l.Type = n.Type 920 l.SetTypecheck(n.Typecheck()) 921 n = l 922 break 923 } 924 925 // Implement interface to empty interface conversion. 926 // tmp = i.itab 927 // if tmp != nil { 928 // tmp = tmp.type 929 // } 930 // e = iface{tmp, i.data} 931 if n.Type.IsEmptyInterface() && n.Left.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() { 932 // Evaluate the input interface. 933 c := temp(n.Left.Type) 934 init.Append(nod(OAS, c, n.Left)) 935 936 // Get the itab out of the interface. 937 tmp := temp(types.NewPtr(types.Types[TUINT8])) 938 init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), Erv))) 939 940 // Get the type out of the itab. 
941 nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), Erv), nil) 942 nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp))) 943 init.Append(nif) 944 945 // Build the result. 946 e := nod(OEFACE, tmp, ifaceData(c, types.NewPtr(types.Types[TUINT8]))) 947 e.Type = n.Type // assign type manually, typecheck doesn't understand OEFACE. 948 e.SetTypecheck(1) 949 n = e 950 break 951 } 952 953 var ll []*Node 954 if n.Type.IsEmptyInterface() { 955 if !n.Left.Type.IsInterface() { 956 ll = append(ll, typename(n.Left.Type)) 957 } 958 } else { 959 if n.Left.Type.IsInterface() { 960 ll = append(ll, typename(n.Type)) 961 } else { 962 ll = append(ll, itabname(n.Left.Type, n.Type)) 963 } 964 } 965 966 if n.Left.Type.IsInterface() { 967 ll = append(ll, n.Left) 968 } else { 969 // regular types are passed by reference to avoid C vararg calls 970 // orderexpr arranged for n.Left to be a temporary for all 971 // the conversions it could see. comparison of an interface 972 // with a non-interface, especially in a switch on interface value 973 // with non-interface cases, is not visible to orderstmt, so we 974 // have to fall back on allocating a temp here. 
975 if islvalue(n.Left) { 976 ll = append(ll, nod(OADDR, n.Left, nil)) 977 } else { 978 ll = append(ll, nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil)) 979 } 980 dowidth(n.Left.Type) 981 } 982 983 fn := syslook(convFuncName(n.Left.Type, n.Type)) 984 fn = substArgTypes(fn, n.Left.Type, n.Type) 985 dowidth(fn.Type) 986 n = nod(OCALL, fn, nil) 987 n.List.Set(ll) 988 n = typecheck(n, Erv) 989 n = walkexpr(n, init) 990 991 case OCONV, OCONVNOP: 992 if thearch.LinkArch.Family == sys.ARM || thearch.LinkArch.Family == sys.MIPS { 993 if n.Left.Type.IsFloat() { 994 if n.Type.Etype == TINT64 { 995 n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 996 break 997 } 998 999 if n.Type.Etype == TUINT64 { 1000 n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1001 break 1002 } 1003 } 1004 1005 if n.Type.IsFloat() { 1006 if n.Left.Type.Etype == TINT64 { 1007 n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type) 1008 break 1009 } 1010 1011 if n.Left.Type.Etype == TUINT64 { 1012 n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type) 1013 break 1014 } 1015 } 1016 } 1017 1018 if thearch.LinkArch.Family == sys.I386 { 1019 if n.Left.Type.IsFloat() { 1020 if n.Type.Etype == TINT64 { 1021 n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1022 break 1023 } 1024 1025 if n.Type.Etype == TUINT64 { 1026 n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1027 break 1028 } 1029 if n.Type.Etype == TUINT32 || n.Type.Etype == TUINT || n.Type.Etype == TUINTPTR { 1030 n = mkcall("float64touint32", n.Type, init, conv(n.Left, types.Types[TFLOAT64])) 1031 break 1032 } 1033 } 1034 if n.Type.IsFloat() { 1035 if n.Left.Type.Etype == TINT64 { 1036 n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type) 1037 break 1038 } 1039 1040 
if n.Left.Type.Etype == TUINT64 { 1041 n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type) 1042 break 1043 } 1044 if n.Left.Type.Etype == TUINT32 || n.Left.Type.Etype == TUINT || n.Left.Type.Etype == TUINTPTR { 1045 n = conv(mkcall("uint32tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT32])), n.Type) 1046 break 1047 } 1048 } 1049 } 1050 1051 n.Left = walkexpr(n.Left, init) 1052 1053 case OANDNOT: 1054 n.Left = walkexpr(n.Left, init) 1055 n.Op = OAND 1056 n.Right = nod(OCOM, n.Right, nil) 1057 n.Right = typecheck(n.Right, Erv) 1058 n.Right = walkexpr(n.Right, init) 1059 1060 case ODIV, OMOD: 1061 n.Left = walkexpr(n.Left, init) 1062 n.Right = walkexpr(n.Right, init) 1063 1064 // rewrite complex div into function call. 1065 et := n.Left.Type.Etype 1066 1067 if isComplex[et] && n.Op == ODIV { 1068 t := n.Type 1069 n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128])) 1070 n = conv(n, t) 1071 break 1072 } 1073 1074 // Nothing to do for float divisions. 1075 if isFloat[et] { 1076 break 1077 } 1078 1079 // rewrite 64-bit div and mod on 32-bit architectures. 1080 // TODO: Remove this code once we can introduce 1081 // runtime calls late in SSA processing. 1082 if Widthreg < 8 && (et == TINT64 || et == TUINT64) { 1083 if n.Right.Op == OLITERAL { 1084 // Leave div/mod by constant powers of 2. 1085 // The SSA backend will handle those. 
1086 switch et { 1087 case TINT64: 1088 c := n.Right.Int64() 1089 if c < 0 { 1090 c = -c 1091 } 1092 if c != 0 && c&(c-1) == 0 { 1093 break opswitch 1094 } 1095 case TUINT64: 1096 c := uint64(n.Right.Int64()) 1097 if c != 0 && c&(c-1) == 0 { 1098 break opswitch 1099 } 1100 } 1101 } 1102 var fn string 1103 if et == TINT64 { 1104 fn = "int64" 1105 } else { 1106 fn = "uint64" 1107 } 1108 if n.Op == ODIV { 1109 fn += "div" 1110 } else { 1111 fn += "mod" 1112 } 1113 n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et])) 1114 } 1115 1116 case OINDEX: 1117 n.Left = walkexpr(n.Left, init) 1118 1119 // save the original node for bounds checking elision. 1120 // If it was a ODIV/OMOD walk might rewrite it. 1121 r := n.Right 1122 1123 n.Right = walkexpr(n.Right, init) 1124 1125 // if range of type cannot exceed static array bound, 1126 // disable bounds check. 1127 if n.Bounded() { 1128 break 1129 } 1130 t := n.Left.Type 1131 if t != nil && t.IsPtr() { 1132 t = t.Elem() 1133 } 1134 if t.IsArray() { 1135 n.SetBounded(bounded(r, t.NumElem())) 1136 if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) { 1137 Warn("index bounds check elided") 1138 } 1139 if smallintconst(n.Right) && !n.Bounded() { 1140 yyerror("index out of bounds") 1141 } 1142 } else if Isconst(n.Left, CTSTR) { 1143 n.SetBounded(bounded(r, int64(len(n.Left.Val().U.(string))))) 1144 if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) { 1145 Warn("index bounds check elided") 1146 } 1147 if smallintconst(n.Right) && !n.Bounded() { 1148 yyerror("index out of bounds") 1149 } 1150 } 1151 1152 if Isconst(n.Right, CTINT) { 1153 if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 { 1154 yyerror("index out of bounds") 1155 } 1156 } 1157 1158 case OINDEXMAP: 1159 // Replace m[k] with *map{access1,assign}(maptype, m, &k) 1160 n.Left = walkexpr(n.Left, init) 1161 n.Right = walkexpr(n.Right, init) 1162 map_ := n.Left 1163 key 
:= n.Right 1164 t := map_.Type 1165 if n.Etype == 1 { 1166 // This m[k] expression is on the left-hand side of an assignment. 1167 fast := mapfast(t) 1168 if fast == mapslow { 1169 // standard version takes key by reference. 1170 // orderexpr made sure key is addressable. 1171 key = nod(OADDR, key, nil) 1172 } 1173 n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key) 1174 } else { 1175 // m[k] is not the target of an assignment. 1176 fast := mapfast(t) 1177 if fast == mapslow { 1178 // standard version takes key by reference. 1179 // orderexpr made sure key is addressable. 1180 key = nod(OADDR, key, nil) 1181 } 1182 1183 if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero 1184 n = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Val()), init, typename(t), map_, key) 1185 } else { 1186 z := zeroaddr(w) 1187 n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Val()), init, typename(t), map_, key, z) 1188 } 1189 } 1190 n.Type = types.NewPtr(t.Val()) 1191 n.SetNonNil(true) // mapaccess1* and mapassign always return non-nil pointers. 1192 n = nod(OIND, n, nil) 1193 n.Type = t.Val() 1194 n.SetTypecheck(1) 1195 1196 case ORECV: 1197 Fatalf("walkexpr ORECV") // should see inside OAS only 1198 1199 case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR: 1200 n.Left = walkexpr(n.Left, init) 1201 low, high, max := n.SliceBounds() 1202 low = walkexpr(low, init) 1203 if low != nil && iszero(low) { 1204 // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k]. 1205 low = nil 1206 } 1207 high = walkexpr(high, init) 1208 max = walkexpr(max, init) 1209 n.SetSliceBounds(low, high, max) 1210 if n.Op.IsSlice3() { 1211 if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) { 1212 // Reduce x[i:j:cap(x)] to x[i:j]. 
1213 if n.Op == OSLICE3 { 1214 n.Op = OSLICE 1215 } else { 1216 n.Op = OSLICEARR 1217 } 1218 n = reduceSlice(n) 1219 } 1220 } else { 1221 n = reduceSlice(n) 1222 } 1223 1224 case ONEW: 1225 if n.Esc == EscNone { 1226 if n.Type.Elem().Width >= 1<<16 { 1227 Fatalf("large ONEW with EscNone: %v", n) 1228 } 1229 r := temp(n.Type.Elem()) 1230 r = nod(OAS, r, nil) // zero temp 1231 r = typecheck(r, Etop) 1232 init.Append(r) 1233 r = nod(OADDR, r.Left, nil) 1234 r = typecheck(r, Erv) 1235 n = r 1236 } else { 1237 n = callnew(n.Type.Elem()) 1238 } 1239 1240 case OCMPSTR: 1241 // s + "badgerbadgerbadger" == "badgerbadgerbadger" 1242 if (Op(n.Etype) == OEQ || Op(n.Etype) == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && n.Left.List.Len() == 2 && Isconst(n.Left.List.Second(), CTSTR) && strlit(n.Right) == strlit(n.Left.List.Second()) { 1243 // TODO(marvin): Fix Node.EType type union. 1244 r := nod(Op(n.Etype), nod(OLEN, n.Left.List.First(), nil), nodintconst(0)) 1245 r = typecheck(r, Erv) 1246 r = walkexpr(r, init) 1247 r.Type = n.Type 1248 n = r 1249 break 1250 } 1251 1252 // Rewrite comparisons to short constant strings as length+byte-wise comparisons. 1253 var cs, ncs *Node // const string, non-const string 1254 switch { 1255 case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR): 1256 // ignore; will be constant evaluated 1257 case Isconst(n.Left, CTSTR): 1258 cs = n.Left 1259 ncs = n.Right 1260 case Isconst(n.Right, CTSTR): 1261 cs = n.Right 1262 ncs = n.Left 1263 } 1264 if cs != nil { 1265 cmp := Op(n.Etype) 1266 // maxRewriteLen was chosen empirically. 1267 // It is the value that minimizes cmd/go file size 1268 // across most architectures. 1269 // See the commit description for CL 26758 for details. 1270 maxRewriteLen := 6 1271 // Some architectures can load unaligned byte sequence as 1 word. 1272 // So we can cover longer strings with the same amount of code. 
1273 canCombineLoads := false 1274 combine64bit := false 1275 // TODO: does this improve performance on any other architectures? 1276 switch thearch.LinkArch.Family { 1277 case sys.AMD64: 1278 // Larger compare require longer instructions, so keep this reasonably low. 1279 // Data from CL 26758 shows that longer strings are rare. 1280 // If we really want we can do 16 byte SSE comparisons in the future. 1281 maxRewriteLen = 16 1282 canCombineLoads = true 1283 combine64bit = true 1284 case sys.I386: 1285 maxRewriteLen = 8 1286 canCombineLoads = true 1287 } 1288 var and Op 1289 switch cmp { 1290 case OEQ: 1291 and = OANDAND 1292 case ONE: 1293 and = OOROR 1294 default: 1295 // Don't do byte-wise comparisons for <, <=, etc. 1296 // They're fairly complicated. 1297 // Length-only checks are ok, though. 1298 maxRewriteLen = 0 1299 } 1300 if s := cs.Val().U.(string); len(s) <= maxRewriteLen { 1301 if len(s) > 0 { 1302 ncs = safeexpr(ncs, init) 1303 } 1304 // TODO(marvin): Fix Node.EType type union. 1305 r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s)))) 1306 remains := len(s) 1307 for i := 0; remains > 0; { 1308 if remains == 1 || !canCombineLoads { 1309 cb := nodintconst(int64(s[i])) 1310 ncb := nod(OINDEX, ncs, nodintconst(int64(i))) 1311 r = nod(and, r, nod(cmp, ncb, cb)) 1312 remains-- 1313 i++ 1314 continue 1315 } 1316 var step int 1317 var convType *types.Type 1318 switch { 1319 case remains >= 8 && combine64bit: 1320 convType = types.Types[TINT64] 1321 step = 8 1322 case remains >= 4: 1323 convType = types.Types[TUINT32] 1324 step = 4 1325 case remains >= 2: 1326 convType = types.Types[TUINT16] 1327 step = 2 1328 } 1329 ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i))) 1330 ncsubstr = conv(ncsubstr, convType) 1331 csubstr := int64(s[i]) 1332 // Calculate large constant from bytes as sequence of shifts and ors. 1333 // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ... 1334 // ssa will combine this into a single large load. 
1335 for offset := 1; offset < step; offset++ { 1336 b := nod(OINDEX, ncs, nodintconst(int64(i+offset))) 1337 b = conv(b, convType) 1338 b = nod(OLSH, b, nodintconst(int64(8*offset))) 1339 ncsubstr = nod(OOR, ncsubstr, b) 1340 csubstr = csubstr | int64(s[i+offset])<<uint8(8*offset) 1341 } 1342 csubstrPart := nodintconst(csubstr) 1343 // Compare "step" bytes as once 1344 r = nod(and, r, nod(cmp, csubstrPart, ncsubstr)) 1345 remains -= step 1346 i += step 1347 } 1348 r = typecheck(r, Erv) 1349 r = walkexpr(r, init) 1350 r.Type = n.Type 1351 n = r 1352 break 1353 } 1354 } 1355 1356 var r *Node 1357 // TODO(marvin): Fix Node.EType type union. 1358 if Op(n.Etype) == OEQ || Op(n.Etype) == ONE { 1359 // prepare for rewrite below 1360 n.Left = cheapexpr(n.Left, init) 1361 n.Right = cheapexpr(n.Right, init) 1362 1363 r = mkcall("eqstring", types.Types[TBOOL], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING])) 1364 1365 // quick check of len before full compare for == or != 1366 // eqstring assumes that the lengths are equal 1367 // TODO(marvin): Fix Node.EType type union. 1368 if Op(n.Etype) == OEQ { 1369 // len(left) == len(right) && eqstring(left, right) 1370 r = nod(OANDAND, nod(OEQ, nod(OLEN, n.Left, nil), nod(OLEN, n.Right, nil)), r) 1371 } else { 1372 // len(left) != len(right) || !eqstring(left, right) 1373 r = nod(ONOT, r, nil) 1374 r = nod(OOROR, nod(ONE, nod(OLEN, n.Left, nil), nod(OLEN, n.Right, nil)), r) 1375 } 1376 1377 r = typecheck(r, Erv) 1378 r = walkexpr(r, nil) 1379 } else { 1380 // sys_cmpstring(s1, s2) :: 0 1381 r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING])) 1382 // TODO(marvin): Fix Node.EType type union. 
1383 r = nod(Op(n.Etype), r, nodintconst(0)) 1384 } 1385 1386 r = typecheck(r, Erv) 1387 if !n.Type.IsBoolean() { 1388 Fatalf("cmp %v", n.Type) 1389 } 1390 r.Type = n.Type 1391 n = r 1392 1393 case OADDSTR: 1394 n = addstr(n, init) 1395 1396 case OAPPEND: 1397 // order should make sure we only see OAS(node, OAPPEND), which we handle above. 1398 Fatalf("append outside assignment") 1399 1400 case OCOPY: 1401 n = copyany(n, init, instrumenting && !compiling_runtime) 1402 1403 // cannot use chanfn - closechan takes any, not chan any 1404 case OCLOSE: 1405 fn := syslook("closechan") 1406 1407 fn = substArgTypes(fn, n.Left.Type) 1408 n = mkcall1(fn, nil, init, n.Left) 1409 1410 case OMAKECHAN: 1411 n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, types.Types[TINT64])) 1412 1413 case OMAKEMAP: 1414 t := n.Type 1415 1416 a := nodnil() // hmap buffer 1417 r := nodnil() // bucket buffer 1418 if n.Esc == EscNone { 1419 // Allocate hmap buffer on stack. 1420 var_ := temp(hmap(t)) 1421 1422 a = nod(OAS, var_, nil) // zero temp 1423 a = typecheck(a, Etop) 1424 init.Append(a) 1425 a = nod(OADDR, var_, nil) 1426 1427 // Allocate one bucket on stack. 1428 // Maximum key/value size is 128 bytes, larger objects 1429 // are stored with an indirection. So max bucket size is 2048+eps. 
1430 var_ = temp(mapbucket(t)) 1431 1432 r = nod(OAS, var_, nil) // zero temp 1433 r = typecheck(r, Etop) 1434 init.Append(r) 1435 r = nod(OADDR, var_, nil) 1436 } 1437 1438 fn := syslook("makemap") 1439 fn = substArgTypes(fn, hmap(t), mapbucket(t), t.Key(), t.Val()) 1440 n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, types.Types[TINT64]), a, r) 1441 1442 case OMAKESLICE: 1443 l := n.Left 1444 r := n.Right 1445 if r == nil { 1446 r = safeexpr(l, init) 1447 l = r 1448 } 1449 t := n.Type 1450 if n.Esc == EscNone { 1451 if !isSmallMakeSlice(n) { 1452 Fatalf("non-small OMAKESLICE with EscNone: %v", n) 1453 } 1454 // var arr [r]T 1455 // n = arr[:l] 1456 t = types.NewArray(t.Elem(), nonnegintconst(r)) // [r]T 1457 var_ := temp(t) 1458 a := nod(OAS, var_, nil) // zero temp 1459 a = typecheck(a, Etop) 1460 init.Append(a) 1461 r := nod(OSLICE, var_, nil) // arr[:l] 1462 r.SetSliceBounds(nil, l, nil) 1463 r = conv(r, n.Type) // in case n.Type is named. 1464 r = typecheck(r, Erv) 1465 r = walkexpr(r, init) 1466 n = r 1467 } else { 1468 // n escapes; set up a call to makeslice. 1469 // When len and cap can fit into int, use makeslice instead of 1470 // makeslice64, which is faster and shorter on 32 bit platforms. 1471 1472 if t.Elem().NotInHeap() { 1473 yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem()) 1474 } 1475 1476 len, cap := l, r 1477 1478 fnname := "makeslice64" 1479 argtype := types.Types[TINT64] 1480 1481 // typechecking guarantees that TIDEAL len/cap are positive and fit in an int. 1482 // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT 1483 // will be handled by the negative range checks in makeslice during runtime. 
1484 if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) && 1485 (cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) { 1486 fnname = "makeslice" 1487 argtype = types.Types[TINT] 1488 } 1489 1490 fn := syslook(fnname) 1491 fn = substArgTypes(fn, t.Elem()) // any-1 1492 n = mkcall1(fn, t, init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype)) 1493 } 1494 1495 case ORUNESTR: 1496 a := nodnil() 1497 if n.Esc == EscNone { 1498 t := types.NewArray(types.Types[TUINT8], 4) 1499 var_ := temp(t) 1500 a = nod(OADDR, var_, nil) 1501 } 1502 1503 // intstring(*[4]byte, rune) 1504 n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64])) 1505 1506 case OARRAYBYTESTR: 1507 a := nodnil() 1508 if n.Esc == EscNone { 1509 // Create temporary buffer for string on stack. 1510 t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) 1511 1512 a = nod(OADDR, temp(t), nil) 1513 } 1514 1515 // slicebytetostring(*[32]byte, []byte) string; 1516 n = mkcall("slicebytetostring", n.Type, init, a, n.Left) 1517 1518 // slicebytetostringtmp([]byte) string; 1519 case OARRAYBYTESTRTMP: 1520 n.Left = walkexpr(n.Left, init) 1521 1522 if !instrumenting { 1523 // Let the backend handle OARRAYBYTESTRTMP directly 1524 // to avoid a function call to slicebytetostringtmp. 1525 break 1526 } 1527 1528 n = mkcall("slicebytetostringtmp", n.Type, init, n.Left) 1529 1530 // slicerunetostring(*[32]byte, []rune) string; 1531 case OARRAYRUNESTR: 1532 a := nodnil() 1533 1534 if n.Esc == EscNone { 1535 // Create temporary buffer for string on stack. 1536 t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) 1537 1538 a = nod(OADDR, temp(t), nil) 1539 } 1540 1541 n = mkcall("slicerunetostring", n.Type, init, a, n.Left) 1542 1543 // stringtoslicebyte(*32[byte], string) []byte; 1544 case OSTRARRAYBYTE: 1545 a := nodnil() 1546 1547 if n.Esc == EscNone { 1548 // Create temporary buffer for slice on stack. 
1549 t := types.NewArray(types.Types[TUINT8], tmpstringbufsize) 1550 1551 a = nod(OADDR, temp(t), nil) 1552 } 1553 1554 n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, types.Types[TSTRING])) 1555 1556 case OSTRARRAYBYTETMP: 1557 // []byte(string) conversion that creates a slice 1558 // referring to the actual string bytes. 1559 // This conversion is handled later by the backend and 1560 // is only for use by internal compiler optimizations 1561 // that know that the slice won't be mutated. 1562 // The only such case today is: 1563 // for i, c := range []byte(string) 1564 n.Left = walkexpr(n.Left, init) 1565 1566 // stringtoslicerune(*[32]rune, string) []rune 1567 case OSTRARRAYRUNE: 1568 a := nodnil() 1569 1570 if n.Esc == EscNone { 1571 // Create temporary buffer for slice on stack. 1572 t := types.NewArray(types.Types[TINT32], tmpstringbufsize) 1573 1574 a = nod(OADDR, temp(t), nil) 1575 } 1576 1577 n = mkcall("stringtoslicerune", n.Type, init, a, n.Left) 1578 1579 // ifaceeq(i1 any-1, i2 any-2) (ret bool); 1580 case OCMPIFACE: 1581 if !eqtype(n.Left.Type, n.Right.Type) { 1582 Fatalf("ifaceeq %v %v %v", n.Op, n.Left.Type, n.Right.Type) 1583 } 1584 var fn *Node 1585 if n.Left.Type.IsEmptyInterface() { 1586 fn = syslook("efaceeq") 1587 } else { 1588 fn = syslook("ifaceeq") 1589 } 1590 1591 n.Right = cheapexpr(n.Right, init) 1592 n.Left = cheapexpr(n.Left, init) 1593 lt := nod(OITAB, n.Left, nil) 1594 rt := nod(OITAB, n.Right, nil) 1595 ld := nod(OIDATA, n.Left, nil) 1596 rd := nod(OIDATA, n.Right, nil) 1597 ld.Type = types.Types[TUNSAFEPTR] 1598 rd.Type = types.Types[TUNSAFEPTR] 1599 ld.SetTypecheck(1) 1600 rd.SetTypecheck(1) 1601 call := mkcall1(fn, n.Type, init, lt, ld, rd) 1602 1603 // Check itable/type before full compare. 1604 // Note: short-circuited because order matters. 1605 // TODO(marvin): Fix Node.EType type union. 
1606 var cmp *Node 1607 if Op(n.Etype) == OEQ { 1608 cmp = nod(OANDAND, nod(OEQ, lt, rt), call) 1609 } else { 1610 cmp = nod(OOROR, nod(ONE, lt, rt), nod(ONOT, call, nil)) 1611 } 1612 cmp = typecheck(cmp, Erv) 1613 cmp = walkexpr(cmp, init) 1614 cmp.Type = n.Type 1615 n = cmp 1616 1617 case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT: 1618 if isStaticCompositeLiteral(n) { 1619 // n can be directly represented in the read-only data section. 1620 // Make direct reference to the static data. See issue 12841. 1621 vstat := staticname(n.Type) 1622 vstat.Name.SetReadonly(true) 1623 fixedlit(inInitFunction, initKindStatic, n, vstat, init) 1624 n = vstat 1625 n = typecheck(n, Erv) 1626 break 1627 } 1628 var_ := temp(n.Type) 1629 anylit(n, var_, init) 1630 n = var_ 1631 1632 case OSEND: 1633 n1 := n.Right 1634 n1 = assignconv(n1, n.Left.Type.Elem(), "chan send") 1635 n1 = walkexpr(n1, init) 1636 n1 = nod(OADDR, n1, nil) 1637 n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1) 1638 1639 case OCLOSURE: 1640 n = walkclosure(n, init) 1641 1642 case OCALLPART: 1643 n = walkpartialcall(n, init) 1644 } 1645 1646 // Expressions that are constant at run time but not 1647 // considered const by the language spec are not turned into 1648 // constants until walk. For example, if n is y%1 == 0, the 1649 // walk of y%1 may have replaced it by 0. 1650 // Check whether n with its updated args is itself now a constant. 1651 t := n.Type 1652 evconst(n) 1653 if n.Type != t { 1654 Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type) 1655 } 1656 if n.Op == OLITERAL { 1657 n = typecheck(n, Erv) 1658 // Emit string symbol now to avoid emitting 1659 // any concurrently during the backend. 
		if s, ok := n.Val().U.(string); ok {
			_ = stringsym(s)
		}
	}

	// n may have been rewritten above; recompute its HasCall flag.
	updateHasCall(n)

	if Debug['w'] != 0 && n != nil {
		Dump("walk", n)
	}

	lineno = lno
	return n
}

// reduceSlice elides slice bounds in n that are provably redundant
// (x[i:len(x)] -> x[i:], and x[:] -> x for OSLICE/OSLICESTR).
// The result MUST be assigned back to n, since it may return n.Left.
// TODO(josharian): combine this with its caller and simplify
func reduceSlice(n *Node) *Node {
	low, high, max := n.SliceBounds()
	if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) {
		// Reduce x[i:len(x)] to x[i:].
		high = nil
	}
	n.SetSliceBounds(low, high, max)
	if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil {
		// Reduce x[:] to x.
		if Debug_slice > 0 {
			Warn("slice: omit slice operation")
		}
		return n.Left
	}
	return n
}

// ascompatee1 builds the single assignment l = r for a simultaneous
// assignment, applying convas unless the target is a map index.
func ascompatee1(op Op, l *Node, r *Node, init *Nodes) *Node {
	// convas will turn map assigns into function calls,
	// making it impossible for reorder3 to work.
	n := nod(OAS, l, r)

	if l.Op == OINDEXMAP {
		return n
	}

	return convas(n, init)
}

// ascompatee builds the list of assignments for
//	expr-list = expr-list
// (check assign expression list to a expression list).
func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
	// ensure order of evaluation for function calls
	for i := range nl {
		nl[i] = safeexpr(nl[i], init)
	}
	for i1 := range nr {
		nr[i1] = safeexpr(nr[i1], init)
	}

	var nn []*Node
	i := 0
	for ; i < len(nl); i++ {
		if i >= len(nr) {
			break
		}
		// Do not generate 'x = x' during return. See issue 4014.
		if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
			continue
		}
		nn = append(nn, ascompatee1(op, nl[i], nr[i], init))
	}

	// cannot happen: caller checked that lists had same length
	if i < len(nl) || i < len(nr) {
		var nln, nrn Nodes
		nln.Set(nl)
		nrn.Set(nr)
		Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname())
	}
	return nn
}

// fncall reports whether assigning a value of type rt to the lvalue l
// implies a function call: either evaluating/storing l itself requires
// one (map index, write barrier, existing call in l), or the assignment
// needs a type conversion.
// l is an lv and rt is the type of an rv
// return 1 if this implies a function call
// evaluating the lv or a function call
// in the conversion of the types
func fncall(l *Node, rt *types.Type) bool {
	if l.HasCall() || l.Op == OINDEXMAP {
		return true
	}
	if needwritebarrier(l) {
		return true
	}
	if eqtype(l.Type, rt) {
		return false
	}
	return true
}

// check assign type list to
// a expression list. called in
//	expr-list = func()
func ascompatet(op Op, nl Nodes, nr *types.Type) []*Node {
	if nl.Len() != nr.NumFields() {
		Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
	}

	// nn holds the direct copies from the result slots;
	// mm holds assignments deferred until all results are read.
	var nn, mm Nodes
	for i, l := range nl.Slice() {
		if isblank(l) {
			continue
		}
		r := nr.Field(i)

		// any lv that causes a fn call must be
		// deferred until all the return arguments
		// have been pulled from the output arguments
		if fncall(l, r.Type) {
			tmp := temp(r.Type)
			tmp = typecheck(tmp, Erv)
			a := nod(OAS, l, tmp)
			a = convas(a, &mm)
			mm.Append(a)
			l = tmp
		}

		a := nod(OAS, l, nodarg(r, 0))
		a = convas(a, &nn)
		updateHasCall(a)
		if a.HasCall() {
			Dump("ascompatet ucount", a)
			Fatalf("ascompatet: too many function calls evaluating parameters")
		}

		nn.Append(a)
	}
	return append(nn.Slice(), mm.Slice()...)
}

// nodarg returns a Node for the function argument denoted by t,
// which is either the entire function argument or result struct (t is a struct *types.Type)
// or a specific argument (t is a *types.Field within a struct *types.Type).
//
// If fp is 0, the node is for use by a caller invoking the given
// function, preparing the arguments before the call
// or retrieving the results after the call.
// In this case, the node will correspond to an outgoing argument
// slot like 8(SP).
//
// If fp is 1, the node is for use by the function itself
// (the callee), to retrieve its arguments or write its results.
// In this case the node will be an ONAME with an appropriate
// type and offset.
func nodarg(t interface{}, fp int) *Node {
	var n *Node

	var funarg types.Funarg
	switch t := t.(type) {
	default:
		Fatalf("bad nodarg %T(%v)", t, t)

	case *types.Type:
		// Entire argument struct, not just one arg
		if !t.IsFuncArgStruct() {
			Fatalf("nodarg: bad type %v", t)
		}
		funarg = t.StructType().Funarg

		// Build fake variable name for whole arg struct.
		n = newname(lookup(".args"))
		n.Type = t
		first := t.Field(0)
		if first == nil {
			Fatalf("nodarg: bad struct")
		}
		if first.Offset == BADWIDTH {
			Fatalf("nodarg: offset not computed for %v", t)
		}
		n.Xoffset = first.Offset

	case *types.Field:
		funarg = t.Funarg
		if fp == 1 {
			// NOTE(rsc): This should be using t.Nname directly,
			// except in the case where t.Nname.Sym is the blank symbol and
			// so the assignment would be discarded during code generation.
			// In that case we need to make a new node, and there is no harm
			// in optimization passes to doing so. But otherwise we should
			// definitely be using the actual declaration and not a newly built node.
			// The extra Fatalf checks here are verifying that this is the case,
			// without changing the actual logic (at time of writing, it's getting
			// toward time for the Go 1.7 beta).
			// At some quieter time (assuming we've never seen these Fatalfs happen)
			// we could change this code to use "expect" directly.
			expect := asNode(t.Nname)
			if expect.isParamHeapCopy() {
				expect = expect.Name.Param.Stackcopy
			}

			for _, n := range Curfn.Func.Dcl {
				if (n.Class() == PPARAM || n.Class() == PPARAMOUT) && !t.Sym.IsBlank() && n.Sym == t.Sym {
					if n != expect {
						Fatalf("nodarg: unexpected node: %v (%p %v) vs %v (%p %v)", n, n, n.Op, asNode(t.Nname), asNode(t.Nname), asNode(t.Nname).Op)
					}
					return n
				}
			}

			if !expect.Sym.IsBlank() {
				Fatalf("nodarg: did not find node in dcl list: %v", expect)
			}
		}

		// Build fake name for individual variable.
		// This is safe because if there was a real declared name
		// we'd have used it above.
		n = newname(lookup("__"))
		n.Type = t.Type
		if t.Offset == BADWIDTH {
			Fatalf("nodarg: offset not computed for %v", t)
		}
		n.Xoffset = t.Offset
		n.Orig = asNode(t.Nname)
	}

	// Rewrite argument named _ to __,
	// or else the assignment to _ will be
	// discarded during code generation.
	if isblank(n) {
		n.Sym = lookup("__")
	}

	switch fp {
	default:
		Fatalf("bad fp")

	case 0: // preparing arguments for call
		n.Op = OINDREGSP
		n.Xoffset += Ctxt.FixedFrameSize()

	case 1: // reading arguments inside call
		n.SetClass(PPARAM)
		if funarg == types.FunargResults {
			n.SetClass(PPARAMOUT)
		}
	}

	n.SetTypecheck(1)
	n.SetAddrtaken(true) // keep optimizers at bay
	return n
}

// package all the arguments that match a ... T parameter into a []T.
func mkdotargslice(typ *types.Type, args []*Node, init *Nodes, ddd *Node) *Node {
	// Inherit the escape class from the original ... argument, if any.
	esc := uint16(EscUnknown)
	if ddd != nil {
		esc = ddd.Esc
	}

	// No variadic arguments: pass a nil slice of the parameter type.
	if len(args) == 0 {
		n := nodnil()
		n.Type = typ
		return n
	}

	n := nod(OCOMPLIT, nil, typenod(typ))
	if ddd != nil && prealloc[ddd] != nil {
		prealloc[n] = prealloc[ddd] // temporary to use
	}
	n.List.Set(args)
	n.Esc = esc
	n = typecheck(n, Erv)
	if n.Type == nil {
		Fatalf("mkdotargslice: typecheck failed")
	}
	n = walkexpr(n, init)
	return n
}

// check assign expression list to
// a type list. called in
//	return expr-list
//	func(expr-list)
func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, init *Nodes) []*Node {
	var nn []*Node

	// f(g()) where g has multiple return values
	if len(rhs) == 1 && rhs[0].Type.IsFuncArgStruct() {
		// optimization - can do block copy
		if eqtypenoname(rhs[0].Type, lhs) {
			nl := nodarg(lhs, fp)
			nr := nod(OCONVNOP, rhs[0], nil)
			nr.Type = nl.Type
			nn = []*Node{convas(nod(OAS, nl, nr), init)}
			goto ret
		}

		// conversions involved.
		// copy into temporaries.
		var tmps []*Node
		for _, nr := range rhs[0].Type.FieldSlice() {
			tmps = append(tmps, temp(nr.Type))
		}

		a := nod(OAS2, nil, nil)
		a.List.Set(tmps)
		a.Rlist.Set(rhs)
		a = typecheck(a, Etop)
		a = walkstmt(a)
		init.Append(a)

		rhs = tmps
	}

	// For each parameter (LHS), assign its corresponding argument (RHS).
	// If there's a ... parameter (which is only valid as the final
	// parameter) and this is not a ... call expression,
	// then assign the remaining arguments as a slice.
	for i, nl := range lhs.FieldSlice() {
		var nr *Node
		if nl.Isddd() && !isddd {
			nr = mkdotargslice(nl.Type, rhs[i:], init, call.Right)
		} else {
			nr = rhs[i]
		}

		a := nod(OAS, nodarg(nl, fp), nr)
		a = convas(a, init)
		nn = append(nn, a)
	}

ret:
	for _, n := range nn {
		n.SetTypecheck(1)
	}
	return nn
}

// generate code for print.
// nn is the OPRINT or OPRINTN node; the generated runtime calls
// (printlock, print*, printunlock) are attached as the Ninit of an
// OEMPTY statement that replaces nn.
func walkprint(nn *Node, init *Nodes) *Node {
	var r *Node
	var n *Node
	var on *Node
	var t *types.Type
	var et types.EType

	op := nn.Op
	all := nn.List
	var calls []*Node
	notfirst := false

	// Hoist all the argument evaluation up before the lock.
	walkexprlistcheap(all.Slice(), init)

	calls = append(calls, mkcall("printlock", nil, init))
	for i1, n1 := range all.Slice() {
		// println inserts a space between successive operands.
		if notfirst {
			calls = append(calls, mkcall("printsp", nil, init))
		}

		notfirst = op == OPRINTN

		n = n1
		// Give untyped constant operands a concrete default type.
		if n.Op == OLITERAL {
			switch n.Val().Ctype() {
			case CTRUNE:
				n = defaultlit(n, types.Runetype)

			case CTINT:
				n = defaultlit(n, types.Types[TINT64])

			case CTFLT:
				n = defaultlit(n, types.Types[TFLOAT64])
			}
		}

		if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
			n = defaultlit(n, types.Types[TINT64])
		}
		n = defaultlit(n, nil)
		all.SetIndex(i1, n)
		if n.Type == nil || n.Type.Etype == TFORW {
			continue
		}

		// Select the runtime print routine matching the operand type.
		t = n.Type
		et = n.Type.Etype
		if n.Type.IsInterface() {
			if n.Type.IsEmptyInterface() {
				on = syslook("printeface")
			} else {
				on = syslook("printiface")
			}
			on = substArgTypes(on, n.Type) // any-1
		} else if n.Type.IsPtr() || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR {
			on = syslook("printpointer")
			on = substArgTypes(on, n.Type) // any-1
		} else if n.Type.IsSlice() {
			on = syslook("printslice")
			on = substArgTypes(on, n.Type) // any-1
		} else if isInt[et] {
			if et == TUINT64 {
				// runtime.hex values print in hexadecimal.
				if isRuntimePkg(t.Sym.Pkg) && t.Sym.Name == "hex" {
					on = syslook("printhex")
				} else {
					on = syslook("printuint")
				}
			} else {
				on = syslook("printint")
			}
		} else if isFloat[et] {
			on = syslook("printfloat")
		} else if isComplex[et] {
			on = syslook("printcomplex")
		} else if et == TBOOL {
			on = syslook("printbool")
		} else if et == TSTRING {
			on = syslook("printstring")
		} else {
			badtype(OPRINT, n.Type, nil)
			continue
		}

		// Convert the operand to the print routine's parameter type
		// if they differ.
		t = on.Type.Params().Field(0).Type

		if !eqtype(t, n.Type) {
			n = nod(OCONV, n, nil)
			n.Type = t
		}

		r = nod(OCALL, on, nil)
		r.List.Append(n)
		calls = append(calls, r)
	}

	if op == OPRINTN {
		calls = append(calls, mkcall("printnl", nil, nil))
	}

	calls = append(calls, mkcall("printunlock", nil, init))

	typecheckslice(calls, Etop)
	walkexprlist(calls, init)

	r = nod(OEMPTY, nil, nil)
	r = typecheck(r, Etop)
	r = walkexpr(r, init)
	r.Ninit.Set(calls)
	return r
}

// callnew returns a call to runtime.newobject allocating a new
// zeroed value of type t on the heap; the result is a non-nil *t.
func callnew(t *types.Type) *Node {
	if t.NotInHeap() {
		yyerror("%v is go:notinheap; heap allocation disallowed", t)
	}
	dowidth(t)
	fn := syslook("newobject")
	fn = substArgTypes(fn, t)
	v := mkcall1(fn, types.NewPtr(t), nil, typename(t))
	v.SetNonNil(true)
	return v
}

// iscallret reports whether n refers to a call's result slot, i.e.
// whether its outer value is an OINDREGSP node.
func iscallret(n *Node) bool {
	n = outervalue(n)
	return n.Op == OINDREGSP
}

// isstack reports whether the outer value written by an assignment
// to n resides on the stack (argument slot, auto, or parameter).
func isstack(n *Node) bool {
	n = outervalue(n)

	// If n is *autotmp and autotmp = &foo, replace n with foo.
	// We introduce such temps when initializing struct literals.
	if n.Op == OIND && n.Left.Op == ONAME && n.Left.IsAutoTmp() {
		defn := n.Left.Name.Defn
		if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR {
			n = defn.Right.Left
		}
	}

	switch n.Op {
	case OINDREGSP:
		return true

	case ONAME:
		switch n.Class() {
		case PAUTO, PPARAM, PPARAMOUT:
			return true
		}
	}

	return false
}

// isReflectHeaderDataField reports whether l is an expression p.Data
// where p has type reflect.SliceHeader or reflect.StringHeader.
func isReflectHeaderDataField(l *Node) bool {
	if l.Type != types.Types[TUINTPTR] {
		return false
	}

	var tsym *types.Sym
	switch l.Op {
	case ODOT:
		tsym = l.Left.Type.Sym
	case ODOTPTR:
		tsym = l.Left.Type.Elem().Sym
	default:
		return false
	}

	if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
		return false
	}
	return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
}

// Do we need a write barrier for assigning to l?
func needwritebarrier(l *Node) bool {
	if !use_writebarrier {
		return false
	}

	if l == nil || isblank(l) {
		return false
	}

	// No write barrier for write to stack.
	if isstack(l) {
		return false
	}

	// Package unsafe's documentation says storing pointers into
	// reflect.SliceHeader and reflect.StringHeader's Data fields
	// is valid, even though they have type uintptr (#19168).
	if isReflectHeaderDataField(l) {
		return true
	}

	// No write barrier for write of non-pointers.
	dowidth(l.Type)
	if !types.Haspointers(l.Type) {
		return false
	}

	// No write barrier if this is a pointer to a go:notinheap
	// type, since the write barrier's inheap(ptr) check will fail.
	if l.Type.IsPtr() && l.Type.Elem().NotInHeap() {
		return false
	}

	// TODO: We can eliminate write barriers if we know *both* the
	// current and new content of the slot must already be shaded.
	// We know a pointer is shaded if it's nil, or points to
	// static data, a global (variable or function), or the stack.
	// The nil optimization could be particularly useful for
	// writes to just-allocated objects. Unfortunately, knowing
	// the "current" value of the slot requires flow analysis.

	// Otherwise, be conservative and use write barrier.
	return true
}

// convas converts the RHS of the OAS node n to the type of the LHS
// (via assignconv) when they differ, walking the converted RHS and
// appending any generated statements to init. Blank targets only get
// a default type for the RHS. The result MUST be assigned back to n.
func convas(n *Node, init *Nodes) *Node {
	if n.Op != OAS {
		Fatalf("convas: not OAS %v", n.Op)
	}

	n.SetTypecheck(1)

	var lt *types.Type
	var rt *types.Type
	if n.Left == nil || n.Right == nil {
		goto out
	}

	lt = n.Left.Type
	rt = n.Right.Type
	if lt == nil || rt == nil {
		goto out
	}

	if isblank(n.Left) {
		n.Right = defaultlit(n.Right, nil)
		goto out
	}

	if !eqtype(lt, rt) {
		n.Right = assignconv(n.Right, lt, "assignment")
		n.Right = walkexpr(n.Right, init)
	}
	dowidth(n.Right.Type)

out:
	updateHasCall(n)
	return n
}

// from ascompat[te]
// evaluating actual function arguments.
//	f(a,b)
// if there is exactly one function expr,
// then it is done first. otherwise must
// make temp variables
func reorder1(all []*Node) []*Node {
	c := 0 // function calls
	t := 0 // total parameters

	for _, n := range all {
		t++
		updateHasCall(n)
		if n.HasCall() {
			c++
		}
	}

	// Nothing to reorder: no calls, or a single parameter.
	if c == 0 || t == 1 {
		return all
	}

	var g []*Node // fncalls assigned to tempnames
	var f *Node   // last fncall assigned to stack
	var r []*Node // non fncalls and tempnames assigned to stack
	d := 0
	var a *Node
	for _, n := range all {
		if !n.HasCall() {
			r = append(r, n)
			continue
		}

		d++
		if d == c {
			// The last call can be stored directly.
			f = n
			continue
		}

		// make assignment of fncall to tempname
		a = temp(n.Right.Type)

		a = nod(OAS, a, n.Right)
		g = append(g, a)

		// put normal arg assignment on list
		// with fncall replaced by tempname
		n.Right = a.Left

		r = append(r, n)
	}

	if f != nil {
		g = append(g, f)
	}
	// Temp assignments (and the final call) run first, then the
	// stores to the argument slots.
	return append(g, r...)
}

// from ascompat[ee]
//	a,b = c,d
// simultaneous assignment. there cannot
// be later use of an earlier lvalue.
//
// function calls have been removed.
func reorder3(all []*Node) []*Node {
	var l *Node

	// If a needed expression may be affected by an
	// earlier assignment, make an early copy of that
	// expression and use the copy instead.
	var early []*Node

	var mapinit Nodes
	for i, n := range all {
		l = n.Left

		// Save subexpressions needed on left side.
		// Drill through non-dereferences.
		for {
			if l.Op == ODOT || l.Op == OPAREN {
				l = l.Left
				continue
			}

			if l.Op == OINDEX && l.Left.Type.IsArray() {
				l.Right = reorder3save(l.Right, all, i, &early)
				l = l.Left
				continue
			}

			break
		}

		switch l.Op {
		default:
			Fatalf("reorder3 unexpected lvalue %#v", l.Op)

		case ONAME:
			break

		case OINDEX, OINDEXMAP:
			l.Left = reorder3save(l.Left, all, i, &early)
			l.Right = reorder3save(l.Right, all, i, &early)
			if l.Op == OINDEXMAP {
				all[i] = convas(all[i], &mapinit)
			}

		case OIND, ODOTPTR:
			l.Left = reorder3save(l.Left, all, i, &early)
		}

		// Save expression on right side.
		all[i].Right = reorder3save(all[i].Right, all, i, &early)
	}

	early = append(mapinit.Slice(), early...)
	return append(early, all...)
}

// if the evaluation of *np would be affected by the
// assignments in all up to but not including the ith assignment,
// copy into a temporary during *early and
// replace *np with that temp.
// The result of reorder3save MUST be assigned back to n, e.g.
//	n.Left = reorder3save(n.Left, all, i, early)
func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
	if !aliased(n, all, i) {
		return n
	}

	q := temp(n.Type)
	q = nod(OAS, q, n)
	q = typecheck(q, Etop)
	*early = append(*early, q)
	return q.Left
}

// what's the outer value that a write to n affects?
// outer value means containing struct or array.
func outervalue(n *Node) *Node {
	// Drill through field selections, parens, no-op conversions, and
	// (non-dereferencing) array indexing to reach the enclosing value.
	for {
		if n.Op == OXDOT {
			Fatalf("OXDOT in walk")
		}
		if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP {
			n = n.Left
			continue
		}

		if n.Op == OINDEX && n.Left.Type != nil && n.Left.Type.IsArray() {
			n = n.Left
			continue
		}

		break
	}

	return n
}

// Is it possible that the computation of n might be
// affected by writes in as up to but not including the ith element?
func aliased(n *Node, all []*Node, i int) bool {
	if n == nil {
		return false
	}

	// Treat all fields of a struct as referring to the whole struct.
	// We could do better but we would have to keep track of the fields.
	for n.Op == ODOT {
		n = n.Left
	}

	// Look for obvious aliasing: a variable being assigned
	// during the all list and appearing in n.
	// Also record whether there are any writes to main memory.
	// Also record whether there are any writes to variables
	// whose addresses have been taken.
	memwrite := 0

	varwrite := 0
	var a *Node
	for _, an := range all[:i] {
		a = outervalue(an.Left)

		for a.Op == ODOT {
			a = a.Left
		}

		if a.Op != ONAME {
			// Write through a computed address (indexing,
			// dereference, ...): a write to main memory.
			memwrite = 1
			continue
		}

		switch n.Class() {
		default:
			varwrite = 1
			continue

		case PAUTO, PPARAM, PPARAMOUT:
			if n.Addrtaken() {
				varwrite = 1
				continue
			}

			if vmatch2(a, n) {
				// Direct hit.
				return true
			}
		}
	}

	// The variables being written do not appear in n.
	// However, n might refer to computed addresses
	// that are being written.

	// If no computed addresses are affected by the writes, no aliasing.
	if memwrite == 0 && varwrite == 0 {
		return false
	}

	// If n does not refer to computed addresses
	// (that is, if n only refers to variables whose addresses
	// have not been taken), no aliasing.
	if varexpr(n) {
		return false
	}

	// Otherwise, both the writes and n refer to computed memory addresses.
	// Assume that they might conflict.
	return true
}

// does the evaluation of n only refer to variables
// whose addresses have not been taken?
// (and no other memory)
func varexpr(n *Node) bool {
	if n == nil {
		return true
	}

	switch n.Op {
	case OLITERAL:
		return true

	case ONAME:
		// Only non-address-taken stack variables qualify.
		switch n.Class() {
		case PAUTO, PPARAM, PPARAMOUT:
			if !n.Addrtaken() {
				return true
			}
		}

		return false

	case OADD,
		OSUB,
		OOR,
		OXOR,
		OMUL,
		ODIV,
		OMOD,
		OLSH,
		ORSH,
		OAND,
		OANDNOT,
		OPLUS,
		OMINUS,
		OCOM,
		OPAREN,
		OANDAND,
		OOROR,
		OCONV,
		OCONVNOP,
		OCONVIFACE,
		ODOTTYPE:
		// Pure operators: safe iff both operands are.
		return varexpr(n.Left) && varexpr(n.Right)

	case ODOT: // but not ODOTPTR
		// Should have been handled in aliased.
		Fatalf("varexpr unexpected ODOT")
	}

	// Be conservative.
	return false
}

// is the name l mentioned in r?
func vmatch2(l *Node, r *Node) bool {
	if r == nil {
		return false
	}
	switch r.Op {
	// match each right given left
	case ONAME:
		return l == r

	case OLITERAL:
		return false
	}

	// Recurse into subexpressions and the node list.
	if vmatch2(l, r.Left) {
		return true
	}
	if vmatch2(l, r.Right) {
		return true
	}
	for _, n := range r.List.Slice() {
		if vmatch2(l, n) {
			return true
		}
	}
	return false
}

// is any name mentioned in l also mentioned in r?
// called by sinit.go
func vmatch1(l *Node, r *Node) bool {
	// isolate all left sides
	if l == nil || r == nil {
		return false
	}
	switch l.Op {
	case ONAME:
		switch l.Class() {
		case PPARAM, PAUTO:
			break

		default:
			// assignment to non-stack variable must be
			// delayed if right has function calls.
			if r.HasCall() {
				return true
			}
		}

		return vmatch2(l, r)

	case OLITERAL:
		return false
	}

	// Recurse into l's subexpressions and node list.
	if vmatch1(l.Left, r) {
		return true
	}
	if vmatch1(l.Right, r) {
		return true
	}
	for _, n := range l.List.Slice() {
		if vmatch1(n, r) {
			return true
		}
	}
	return false
}

// paramstoheap returns code to allocate memory for heap-escaped parameters
// and to copy non-result parameters' values from the stack.
func paramstoheap(params *types.Type) []*Node {
	var nn []*Node
	for _, t := range params.Fields().Slice() {
		v := asNode(t.Nname)
		if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
			v = nil
		}
		if v == nil {
			continue
		}

		// A non-nil Stackcopy means v lives on the heap and the
		// stack slot is only its home for the call ABI.
		if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
			nn = append(nn, walkstmt(nod(ODCL, v, nil)))
			if stackcopy.Class() == PPARAM {
				// Copy the incoming argument value into the
				// heap copy; results (PPARAMOUT) start zeroed.
				nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), Etop)))
			}
		}
	}

	return nn
}

// zeroResults zeros the return values at the start of the function.
// We need to do this very early in the function. Defer might stop a
// panic and show the return values as they exist at the time of
// panic. For precise stacks, the garbage collector assumes results
// are always live, so we need to zero them before any allocations,
// even allocations to move params/results to the heap.
// The generated code is added to Curfn's Enter list.
func zeroResults() {
	lno := lineno
	lineno = Curfn.Pos
	for _, f := range Curfn.Type.Results().Fields().Slice() {
		if v := asNode(f.Nname); v != nil && v.Name.Param.Heapaddr != nil {
			// The local which points to the return value is the
			// thing that needs zeroing. This is already handled
			// by a Needzero annotation in plive.go:livenessepilogue.
			continue
		}
		// Zero the stack location containing f.
		Curfn.Func.Enter.Append(nod(OAS, nodarg(f, 1), nil))
	}
	lineno = lno
}

// returnsfromheap returns code to copy values for heap-escaped parameters
// back to the stack.
func returnsfromheap(params *types.Type) []*Node {
	var nn []*Node
	for _, t := range params.Fields().Slice() {
		v := asNode(t.Nname)
		if v == nil {
			continue
		}
		// Only result parameters with a heap copy need to be
		// written back to their stack slots on return.
		if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == PPARAMOUT {
			nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), Etop)))
		}
	}

	return nn
}

// heapmoves generates code to handle migrating heap-escaped parameters
// between the stack and the heap. The generated code is added to Curfn's
// Enter and Exit lists.
func heapmoves() {
	lno := lineno
	lineno = Curfn.Pos
	nn := paramstoheap(Curfn.Type.Recvs())
	nn = append(nn, paramstoheap(Curfn.Type.Params())...)
	nn = append(nn, paramstoheap(Curfn.Type.Results())...)
	Curfn.Func.Enter.Append(nn...)
	// Exit code is attributed to the closing brace of the function.
	lineno = Curfn.Func.Endlineno
	Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...)
	lineno = lno
}

// vmkcall builds, typechecks, and walks a call to fn with the first
// NumFields(fn's params) entries of va as arguments, then forces the
// call's type to t (which may be nil for void calls).
func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node {
	if fn.Type == nil || fn.Type.Etype != TFUNC {
		Fatalf("mkcall %v %v", fn, fn.Type)
	}

	n := fn.Type.Params().NumFields()

	r := nod(OCALL, fn, nil)
	r.List.Set(va[:n])
	if fn.Type.Results().NumFields() > 0 {
		r = typecheck(r, Erv|Efnstruct)
	} else {
		r = typecheck(r, Etop)
	}
	r = walkexpr(r, init)
	r.Type = t
	return r
}

// mkcall builds a call to the named runtime function.
func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node {
	return vmkcall(syslook(name), t, init, args)
}

// mkcall1 builds a call to an already-looked-up function node.
func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node {
	return vmkcall(fn, t, init, args)
}

// conv converts n to type t by wrapping it in an OCONV, unless the
// types are already identical.
func conv(n *Node, t *types.Type) *Node {
	if eqtype(n.Type, t) {
		return n
	}
	n = nod(OCONV, n, nil)
	n.Type = t
	n = typecheck(n, Erv)
	return n
}

// byteindex converts n, which is byte-sized, to a uint8.
// We cannot use conv, because we allow converting bool to uint8 here,
// which is forbidden in user code.
func byteindex(n *Node) *Node {
	if eqtype(n.Type, types.Types[TUINT8]) {
		return n
	}
	// Build the conversion by hand and mark it typechecked, since
	// typecheck would reject bool -> uint8.
	n = nod(OCONV, n, nil)
	n.Type = types.Types[TUINT8]
	n.SetTypecheck(1)
	return n
}

// chanfn looks up the named runtime channel function and substitutes
// the channel's element type into its signature n times (n is 1 or 2).
func chanfn(name string, n int, t *types.Type) *Node {
	if !t.IsChan() {
		Fatalf("chanfn %v", t)
	}
	fn := syslook(name)
	switch n {
	default:
		Fatalf("chanfn %d", n)
	case 1:
		fn = substArgTypes(fn, t.Elem())
	case 2:
		fn = substArgTypes(fn, t.Elem(), t.Elem())
	}
	return fn
}

// mapfn looks up the named runtime map function and substitutes the
// map's key and value types into its signature.
func mapfn(name string, t *types.Type) *Node {
	if !t.IsMap() {
		Fatalf("mapfn %v", t)
	}
	fn := syslook(name)
	fn = substArgTypes(fn, t.Key(), t.Val(), t.Key(), t.Val())
	return fn
}

// mapfndel is like mapfn but for delete-shaped signatures, which take
// one fewer type argument.
func mapfndel(name string, t *types.Type) *Node {
	if !t.IsMap() {
		// NOTE(review): message says "mapfn" — appears to be a
		// copy/paste from mapfn above; harmless, as it only shows
		// in an internal compiler error.
		Fatalf("mapfn %v", t)
	}
	fn := syslook(name)
	fn = substArgTypes(fn, t.Key(), t.Val(), t.Key())
	return fn
}

// Indices into a mapnames table, selecting the runtime fast path (if
// any) for a given map type.
const (
	mapslow = iota
	mapfast32
	mapfast64
	mapfaststr
	nmapfast
)

type mapnames [nmapfast]string

// mkmapnames builds the table of runtime function names for base and
// its fast-path variants.
func mkmapnames(base string) mapnames {
	return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"}
}

var mapaccess1 mapnames = mkmapnames("mapaccess1")
var mapaccess2 mapnames = mkmapnames("mapaccess2")
var mapassign mapnames = mkmapnames("mapassign")
var mapdelete mapnames = mkmapnames("mapdelete")

// mapfast returns the fast-path index for map type t, or mapslow if
// no runtime fast path applies.
func mapfast(t *types.Type) int {
	// Check ../../runtime/hashmap.go:maxValueSize before changing.
	if t.Val().Width > 128 {
		return mapslow
	}
	switch algtype(t.Key()) {
	case AMEM32:
		return mapfast32
	case AMEM64:
		return mapfast64
	case ASTRING:
		return mapfaststr
	}
	return mapslow
}

// writebarrierfn looks up the named runtime write-barrier function,
// substituting types l and r into its signature.
func writebarrierfn(name string, l *types.Type, r *types.Type) *Node {
	fn := syslook(name)
	fn = substArgTypes(fn, l, r)
	return fn
}

// addstr lowers an OADDSTR string concatenation into a call to the
// appropriate runtime concatstring helper, optionally with a
// stack-allocated result buffer when the string does not escape.
func addstr(n *Node, init *Nodes) *Node {
	// orderexpr rewrote OADDSTR to have a list of strings.
	c := n.List.Len()

	if c < 2 {
		Fatalf("addstr count %d too small", c)
	}

	buf := nodnil()
	if n.Esc == EscNone {
		sz := int64(0)
		for _, n1 := range n.List.Slice() {
			if n1.Op == OLITERAL {
				sz += int64(len(n1.Val().U.(string)))
			}
		}

		// Don't allocate the buffer if the result won't fit.
		if sz < tmpstringbufsize {
			// Create temporary buffer for result string on stack.
			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)

			buf = nod(OADDR, temp(t), nil)
		}
	}

	// build list of string arguments
	args := []*Node{buf}
	for _, n2 := range n.List.Slice() {
		args = append(args, conv(n2, types.Types[TSTRING]))
	}

	var fn string
	if c <= 5 {
		// small numbers of strings use direct runtime helpers.
		// note: orderexpr knows this cutoff too.
		fn = fmt.Sprintf("concatstring%d", c)
	} else {
		// large numbers of strings are passed to the runtime as a slice.
		fn = "concatstrings"

		t := types.NewSlice(types.Types[TSTRING])
		slice := nod(OCOMPLIT, nil, typenod(t))
		if prealloc[n] != nil {
			prealloc[slice] = prealloc[n]
		}
		slice.List.Set(args[1:]) // skip buf arg
		args = []*Node{buf, slice}
		slice.Esc = EscNone
	}

	cat := syslook(fn)
	r := nod(OCALL, cat, nil)
	r.List.Set(args)
	r = typecheck(r, Erv)
	r = walkexpr(r, init)
	r.Type = n.Type

	return r
}

// expand append(l1, l2...) to
//	init {
//	  s := l1
//	  n := len(s) + len(l2)
//	  // Compare as uint so growslice can panic on overflow.
//	  if uint(n) > uint(cap(s)) {
//	    s = growslice(s, n)
//	  }
//	  s = s[:n]
//	  memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
//	}
//	s
//
// l2 is allowed to be a string.
func appendslice(n *Node, init *Nodes) *Node {
	walkexprlistsafe(n.List.Slice(), init)

	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
	// and n are name or literal, but those may index the slice we're
	// modifying here. Fix explicitly.
	ls := n.List.Slice()
	for i1, n1 := range ls {
		ls[i1] = cheapexpr(n1, init)
	}

	l1 := n.List.First()
	l2 := n.List.Second()

	var l []*Node

	// var s []T
	s := temp(l1.Type)
	l = append(l, nod(OAS, s, l1)) // s = l1

	// n := len(s) + len(l2)
	nn := temp(types.Types[TINT])
	l = append(l, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil))))

	// if uint(n) > uint(cap(s))
	nif := nod(OIF, nil, nil)
	nif.Left = nod(OGT, nod(OCONV, nn, nil), nod(OCONV, nod(OCAP, s, nil), nil))
	nif.Left.Left.Type = types.Types[TUINT]
	nif.Left.Right.Type = types.Types[TUINT]

	// instantiate growslice(Type*, []any, int) []any
	fn := syslook("growslice")
	fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem())

	// s = growslice(T, s, n)
	nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type.Elem()), s, nn)))
	l = append(l, nif)

	// s = s[:n]
	nt := nod(OSLICE, s, nil)
	nt.SetSliceBounds(nil, nn, nil)
	nt.Etype = 1
	l = append(l, nod(OAS, s, nt))

	// Three ways to copy the appended elements, chosen by element
	// type and instrumentation mode.
	if types.Haspointers(l1.Type.Elem()) {
		// copy(s[len(l1):], l2)
		nptr1 := nod(OSLICE, s, nil)
		nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
		nptr1.Etype = 1
		nptr2 := l2
		fn := syslook("typedslicecopy")
		fn = substArgTypes(fn, l1.Type, l2.Type)
		var ln Nodes
		ln.Set(l)
		nt := mkcall1(fn, types.Types[TINT], &ln, typename(l1.Type.Elem()), nptr1, nptr2)
		l = append(ln.Slice(), nt)
	} else if instrumenting && !compiling_runtime {
		// rely on runtime to instrument copy.
		// copy(s[len(l1):], l2)
		nptr1 := nod(OSLICE, s, nil)
		nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
		nptr1.Etype = 1
		nptr2 := l2
		var fn *Node
		if l2.Type.IsString() {
			fn = syslook("slicestringcopy")
		} else {
			fn = syslook("slicecopy")
		}
		fn = substArgTypes(fn, l1.Type, l2.Type)
		var ln Nodes
		ln.Set(l)
		nt := mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2, nodintconst(s.Type.Elem().Width))
		l = append(ln.Slice(), nt)
	} else {
		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
		nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil))
		nptr1.SetBounded(true)

		nptr1 = nod(OADDR, nptr1, nil)

		nptr2 := nod(OSPTR, l2, nil)

		fn := syslook("memmove")
		fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem())

		var ln Nodes
		ln.Set(l)
		nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &ln)

		nwid = nod(OMUL, nwid, nodintconst(s.Type.Elem().Width))
		nt := mkcall1(fn, nil, &ln, nptr1, nptr2, nwid)
		l = append(ln.Slice(), nt)
	}

	typecheckslice(l, Etop)
	walkstmtlist(l)
	init.Append(l...)
	return s
}

// Rewrite append(src, x, y, z) so that any side effects in
// x, y, z (including runtime panics) are evaluated in
// initialization statements before the append.
// For normal code generation, stop there and leave the
// rest to cgen_append.
//
// For race detector, expand append(src, a [, b]* ) to
//
//	init {
//	  s := src
//	  const argc = len(args) - 1
//	  if cap(s) - len(s) < argc {
//	    s = growslice(s, len(s)+argc)
//	  }
//	  n := len(s)
//	  s = s[:n+argc]
//	  s[n] = a
//	  s[n+1] = b
//	  ...
//	}
//	s
func walkappend(n *Node, init *Nodes, dst *Node) *Node {
	if !samesafeexpr(dst, n.List.First()) {
		n.List.SetFirst(safeexpr(n.List.First(), init))
		n.List.SetFirst(walkexpr(n.List.First(), init))
	}
	walkexprlistsafe(n.List.Slice()[1:], init)

	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
	// and n are name or literal, but those may index the slice we're
	// modifying here. Fix explicitly.
	// Using cheapexpr also makes sure that the evaluation
	// of all arguments (and especially any panics) happen
	// before we begin to modify the slice in a visible way.
	ls := n.List.Slice()[1:]
	for i, n := range ls {
		ls[i] = cheapexpr(n, init)
	}

	nsrc := n.List.First()

	argc := n.List.Len() - 1
	if argc < 1 {
		// append(src) with no new elements: the result is src.
		return nsrc
	}

	// General case, with no function calls left as arguments.
	// Leave for gen, except that instrumentation requires old form.
	if !instrumenting || compiling_runtime {
		return n
	}

	var l []*Node

	ns := temp(nsrc.Type)
	l = append(l, nod(OAS, ns, nsrc)) // s = src

	na := nodintconst(int64(argc)) // const argc
	nx := nod(OIF, nil, nil)       // if cap(s) - len(s) < argc
	nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na)

	fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
	fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())

	nx.Nbody.Set1(nod(OAS, ns,
		mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
			nod(OADD, nod(OLEN, ns, nil), na))))

	l = append(l, nx)

	nn := temp(types.Types[TINT])
	l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)

	nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
	nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
	nx.Etype = 1
	l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]

	// Store each appended element, incrementing n between stores.
	ls = n.List.Slice()[1:]
	for i, n := range ls {
		nx = nod(OINDEX, ns, nn) // s[n] ...
		nx.SetBounded(true)
		l = append(l, nod(OAS, nx, n)) // s[n] = arg
		if i+1 < len(ls) {
			l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1
		}
	}

	typecheckslice(l, Etop)
	walkstmtlist(l)
	init.Append(l...)
	return ns
}

// Lower copy(a, b) to a memmove call or a runtime call.
//
//	init {
//	  n := len(a)
//	  if n > len(b) { n = len(b) }
//	  memmove(a.ptr, b.ptr, n*sizeof(elem(a)))
//	}
//	n;
//
// Also works if b is a string.
//
func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
	// Pointer-containing elements must go through the runtime so the
	// write barrier machinery sees them.
	if types.Haspointers(n.Left.Type.Elem()) {
		fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
		return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), n.Left, n.Right)
	}

	if runtimecall {
		var fn *Node
		if n.Right.Type.IsString() {
			fn = syslook("slicestringcopy")
		} else {
			fn = syslook("slicecopy")
		}
		fn = substArgTypes(fn, n.Left.Type, n.Right.Type)
		return mkcall1(fn, n.Type, init, n.Left, n.Right, nodintconst(n.Left.Type.Elem().Width))
	}

	n.Left = walkexpr(n.Left, init)
	n.Right = walkexpr(n.Right, init)
	nl := temp(n.Left.Type)
	nr := temp(n.Right.Type)
	var l []*Node
	l = append(l, nod(OAS, nl, n.Left))
	l = append(l, nod(OAS, nr, n.Right))

	nfrm := nod(OSPTR, nr, nil)
	nto := nod(OSPTR, nl, nil)

	nlen := temp(types.Types[TINT])

	// n = len(to)
	l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil)))

	// if n > len(frm) { n = len(frm) }
	nif := nod(OIF, nil, nil)

	nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil))
	nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil)))
	l = append(l, nif)

	// Call memmove.
	fn := syslook("memmove")

	fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
	nwid := temp(types.Types[TUINTPTR])
	l = append(l, nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR])))
	nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width))
	l = append(l, mkcall1(fn, nil, init, nto, nfrm, nwid))

	typecheckslice(l, Etop)
	walkstmtlist(l)
	init.Append(l...)
	return nlen
}

// eqfor returns a node for the function that computes equality of
// values of type t, setting *needsize to whether the caller must also
// pass the size as a third argument (1 for memequal, 0 for the
// generated .eq method).
func eqfor(t *types.Type, needsize *int) *Node {
	// Should only arrive here with large memory or
	// a struct/array containing a non-memory field/element.
	// Small memory is handled inline, and single non-memory
	// is handled during type check (OCMPSTR etc).
	switch a, _ := algtype1(t); a {
	case AMEM:
		n := syslook("memequal")
		n = substArgTypes(n, t, t)
		*needsize = 1
		return n
	case ASPECIAL:
		// Reference the generated type-specific .eq function,
		// constructing its func(*T, *T) bool signature by hand.
		sym := typesymprefix(".eq", t)
		n := newname(sym)
		n.SetClass(PFUNC)
		ntype := nod(OTFUNC, nil, nil)
		ntype.List.Append(anonfield(types.NewPtr(t)))
		ntype.List.Append(anonfield(types.NewPtr(t)))
		ntype.Rlist.Append(anonfield(types.Types[TBOOL]))
		ntype = typecheck(ntype, Etype)
		n.Type = ntype.Type
		*needsize = 0
		return n
	}
	Fatalf("eqfor %v", t)
	return nil
}

// The result of walkcompare MUST be assigned back to n, e.g.
//	n.Left = walkcompare(n.Left, init)
func walkcompare(n *Node, init *Nodes) *Node {
	// Given interface value l and concrete value r, rewrite
	//	l == r
	// into types-equal && data-equal.
	// This is efficient, avoids allocations, and avoids runtime calls.
	var l, r *Node
	if n.Left.Type.IsInterface() && !n.Right.Type.IsInterface() {
		l = n.Left
		r = n.Right
	} else if !n.Left.Type.IsInterface() && n.Right.Type.IsInterface() {
		l = n.Right
		r = n.Left
	}

	if l != nil {
		// Handle both == and !=.
		eq := n.Op
		var andor Op
		if eq == OEQ {
			andor = OANDAND
		} else {
			andor = OOROR
		}
		// Check for types equal.
		// For empty interface, this is:
		//	l.tab == type(r)
		// For non-empty interface, this is:
		//	l.tab != nil && l.tab._type == type(r)
		var eqtype *Node
		tab := nod(OITAB, l, nil)
		rtyp := typename(r.Type)
		if l.Type.IsEmptyInterface() {
			tab.Type = types.NewPtr(types.Types[TUINT8])
			tab.SetTypecheck(1)
			eqtype = nod(eq, tab, rtyp)
		} else {
			nonnil := nod(brcom(eq), nodnil(), tab)
			match := nod(eq, itabType(tab), rtyp)
			eqtype = nod(andor, nonnil, match)
		}
		// Check for data equal.
		eqdata := nod(eq, ifaceData(l, r.Type), r)
		// Put it all together.
		expr := nod(andor, eqtype, eqdata)
		n = finishcompare(n, expr, init)
		return n
	}

	// Must be comparison of array or struct.
	// Otherwise back end handles it.
	// While we're here, decide whether to
	// inline or call an eq alg.
	t := n.Left.Type
	var inline bool
	switch t.Etype {
	default:
		return n
	case TARRAY:
		inline = t.NumElem() <= 1 || (t.NumElem() <= 4 && issimple[t.Elem().Etype])
	case TSTRUCT:
		inline = t.NumFields() <= 4
	}

	// Strip no-op conversions to reach the underlying operands.
	cmpl := n.Left
	for cmpl != nil && cmpl.Op == OCONVNOP {
		cmpl = cmpl.Left
	}
	cmpr := n.Right
	for cmpr != nil && cmpr.Op == OCONVNOP {
		cmpr = cmpr.Left
	}

	// Chose not to inline. Call equality function directly.
	if !inline {
		// Composite literals must be materialized in temporaries
		// so we can take their address below.
		if isvaluelit(cmpl) {
			var_ := temp(cmpl.Type)
			anylit(cmpl, var_, init)
			cmpl = var_
		}
		if isvaluelit(cmpr) {
			var_ := temp(cmpr.Type)
			anylit(cmpr, var_, init)
			cmpr = var_
		}
		if !islvalue(cmpl) || !islvalue(cmpr) {
			Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
		}

		// eq algs take pointers
		pl := temp(types.NewPtr(t))
		al := nod(OAS, pl, nod(OADDR, cmpl, nil))
		al.Right.Etype = 1 // addr does not escape
		al = typecheck(al, Etop)
		init.Append(al)

		pr := temp(types.NewPtr(t))
		ar := nod(OAS, pr, nod(OADDR, cmpr, nil))
		ar.Right.Etype = 1 // addr does not escape
		ar = typecheck(ar, Etop)
		init.Append(ar)

		var needsize int
		call := nod(OCALL, eqfor(t, &needsize), nil)
		call.List.Append(pl)
		call.List.Append(pr)
		if needsize != 0 {
			call.List.Append(nodintconst(t.Width))
		}
		res := call
		if n.Op != OEQ {
			res = nod(ONOT, res, nil)
		}
		n = finishcompare(n, res, init)
		return n
	}

	// inline: build boolean expression comparing element by element
	andor := OANDAND
	if n.Op == ONE {
		andor = OOROR
	}
	var expr *Node
	compare := func(el, er *Node) {
		a := nod(n.Op, el, er)
		if expr == nil {
			expr = a
		} else {
			expr = nod(andor, expr, a)
		}
	}
	cmpl = safeexpr(cmpl, init)
	cmpr = safeexpr(cmpr, init)
	if t.IsStruct() {
		for _, f := range t.Fields().Slice() {
			sym := f.Sym
			if sym.IsBlank() {
				continue
			}
			compare(
				nodSym(OXDOT, cmpl, sym),
				nodSym(OXDOT, cmpr, sym),
			)
		}
	} else {
		for i := 0; int64(i) < t.NumElem(); i++ {
			compare(
				nod(OINDEX, cmpl, nodintconst(int64(i))),
				nod(OINDEX, cmpr, nodintconst(int64(i))),
			)
		}
	}
	// Zero fields/elements compared: the result is a constant.
	if expr == nil {
		expr = nodbool(n.Op == OEQ)
	}
	n = finishcompare(n, expr, init)
	return n
}

// The result of finishcompare MUST be assigned back to n, e.g.
//	n.Left = finishcompare(n.Left, x, r, init)
func finishcompare(n, r *Node, init *Nodes) *Node {
	// Use nn here to avoid passing r to typecheck.
	nn := r
	nn = typecheck(nn, Erv)
	nn = walkexpr(nn, init)
	r = nn
	if r.Type != n.Type {
		r = nod(OCONVNOP, r, nil)
		r.Type = n.Type
		r.SetTypecheck(1)
		nn = r
	}
	return nn
}

// isIntOrdering reports whether n is a <, ≤, >, or ≥ ordering between integers.
func (n *Node) isIntOrdering() bool {
	switch n.Op {
	case OLE, OLT, OGE, OGT:
	default:
		return false
	}
	return n.Left.Type.IsInteger() && n.Right.Type.IsInteger()
}

// walkinrange optimizes integer-in-range checks, such as 4 <= x && x < 10.
// n must be an OANDAND or OOROR node.
// The result of walkinrange MUST be assigned back to n, e.g.
//	n.Left = walkinrange(n.Left)
func walkinrange(n *Node, init *Nodes) *Node {
	// We are looking for something equivalent to a opl b OP b opr c, where:
	// * a, b, and c have integer type
	// * b is side-effect-free
	// * opl and opr are each < or ≤
	// * OP is &&
	l := n.Left
	r := n.Right
	if !l.isIntOrdering() || !r.isIntOrdering() {
		return n
	}

	// Find b, if it exists, and rename appropriately.
	// Input is: l.Left l.Op l.Right ANDAND/OROR r.Left r.Op r.Right
	// Output is: a opl b(==x) ANDAND/OROR b(==x) opr c
	a, opl, b := l.Left, l.Op, l.Right
	x, opr, c := r.Left, r.Op, r.Right
	for i := 0; ; i++ {
		if samesafeexpr(b, x) {
			break
		}
		if i == 3 {
			// Tried all permutations and couldn't find an appropriate b == x.
			return n
		}
		if i&1 == 0 {
			a, opl, b = b, brrev(opl), a
		} else {
			x, opr, c = c, brrev(opr), x
		}
	}

	// If n.Op is ||, apply de Morgan.
	// Negate the internal ops now; we'll negate the top level op at the end.
	// Henceforth assume &&.
	negateResult := n.Op == OOROR
	if negateResult {
		opl = brcom(opl)
		opr = brcom(opr)
	}

	cmpdir := func(o Op) int {
		switch o {
		case OLE, OLT:
			return -1
		case OGE, OGT:
			return +1
		}
		Fatalf("walkinrange cmpdir %v", o)
		return 0
	}
	if cmpdir(opl) != cmpdir(opr) {
		// Not a range check; something like b < a && b < c.
		return n
	}

	switch opl {
	case OGE, OGT:
		// We have something like a > b && b ≥ c.
		// Switch and reverse ops and rename constants,
		// to make it look like a ≤ b && b < c.
		a, c = c, a
		opl, opr = brrev(opr), brrev(opl)
	}

	// We must ensure that c-a is non-negative.
	// For now, require a and c to be constants.
	// In the future, we could also support a == 0 and c == len/cap(...).
	// Unfortunately, by this point, most len/cap expressions have been
	// stored into temporary variables.
	if !Isconst(a, CTINT) || !Isconst(c, CTINT) {
		return n
	}

	if opl == OLT {
		// We have a < b && ...
		// We need a ≤ b && ... to safely use unsigned comparison tricks.
		// If a is not the maximum constant for b's type,
		// we can increment a and switch to ≤.
		if a.Int64() >= maxintval[b.Type.Etype].Int64() {
			return n
		}
		a = nodintconst(a.Int64() + 1)
		opl = OLE
	}

	bound := c.Int64() - a.Int64()
	if bound < 0 {
		// Bad news. Something like 5 <= x && x < 3.
		// Rare in practice, and we still need to generate side-effects,
		// so just leave it alone.
		return n
	}

	// We have a ≤ b && b < c (or a ≤ b && b ≤ c).
	// This is equivalent to (a-a) ≤ (b-a) && (b-a) < (c-a),
	// which is equivalent to 0 ≤ (b-a) && (b-a) < (c-a),
	// which is equivalent to uint(b-a) < uint(c-a).
	ut := b.Type.ToUnsigned()
	lhs := conv(nod(OSUB, b, a), ut)
	rhs := nodintconst(bound)
	if negateResult {
		// Negate top level.
		opr = brcom(opr)
	}
	cmp := nod(opr, lhs, rhs)
	cmp.Pos = n.Pos
	cmp = addinit(cmp, l.Ninit.Slice())
	cmp = addinit(cmp, r.Ninit.Slice())
	// Typecheck the AST rooted at cmp...
	cmp = typecheck(cmp, Erv)
	// ...but then reset cmp's type to match n's type.
	cmp.Type = n.Type
	cmp = walkexpr(cmp, init)
	return cmp
}

// bounded reports whether integer n must be in range [0, max).
func bounded(n *Node, max int64) bool {
	if n.Type == nil || !n.Type.IsInteger() {
		return false
	}

	sign := n.Type.IsSigned()
	bits := int32(8 * n.Type.Width)

	if smallintconst(n) {
		v := n.Int64()
		return 0 <= v && v < max
	}

	switch n.Op {
	case OAND:
		// x & mask is bounded by the constant mask, if any.
		v := int64(-1)
		if smallintconst(n.Left) {
			v = n.Left.Int64()
		} else if smallintconst(n.Right) {
			v = n.Right.Int64()
		}

		if 0 <= v && v < max {
			return true
		}

	case OMOD:
		// Unsigned x % c is in [0, c].
		if !sign && smallintconst(n.Right) {
			v := n.Right.Int64()
			if 0 <= v && v <= max {
				return true
			}
		}

	case ODIV:
		// Unsigned division by a constant shrinks the usable bits.
		if !sign && smallintconst(n.Right) {
			v := n.Right.Int64()
			for bits > 0 && v >= 2 {
				bits--
				v >>= 1
			}
		}

	case ORSH:
		// Unsigned right shift by a constant shrinks the usable bits.
		if !sign && smallintconst(n.Right) {
			v := n.Right.Int64()
			if v > int64(bits) {
				return true
			}
			bits -= int32(v)
		}
	}

	if !sign && bits <= 62 && 1<<uint(bits) <= max {
		return true
	}

	return false
}

// usemethod checks interface method calls for uses of reflect.Type.Method.
func usemethod(n *Node) {
	t := n.Left.Type

	// Looking for either of:
	//	Method(int) reflect.Method
	//	MethodByName(string) (reflect.Method, bool)
	//
	// TODO(crawshaw): improve precision of match by working out
	//                 how to check the method name.
	if n := t.Params().NumFields(); n != 1 {
		return
	}
	if n := t.Results().NumFields(); n != 1 && n != 2 {
		return
	}
	p0 := t.Params().Field(0)
	res0 := t.Results().Field(0)
	var res1 *types.Field
	if t.Results().NumFields() == 2 {
		res1 = t.Results().Field(1)
	}

	if res1 == nil {
		// One result: match the Method(int) shape.
		if p0.Type.Etype != TINT {
			return
		}
	} else {
		// Two results: match the MethodByName(string) shape.
		if !p0.Type.IsString() {
			return
		}
		if !res1.Type.IsBoolean() {
			return
		}
	}
	if res0.Type.String() != "reflect.Method" {
		return
	}

	// Mark the enclosing function as possibly calling
	// reflect.Type.Method (or MethodByName).
	Curfn.Func.SetReflectMethod(true)
}

// usefield records the use of a tracked struct field in the current
// function, for the field-tracking experiment
// (enabled via objabi.Fieldtrack_enabled).
func usefield(n *Node) {
	if objabi.Fieldtrack_enabled == 0 {
		return
	}

	switch n.Op {
	default:
		Fatalf("usefield %v", n.Op)

	case ODOT, ODOTPTR:
		break
	}
	if n.Sym == nil {
		// No field name. This DOTPTR was built by the compiler for access
		// to runtime data structures. Ignore.
		return
	}

	t := n.Left.Type
	if t.IsPtr() {
		t = t.Elem()
	}
	field := dotField[typeSymKey{t.Orig, n.Sym}]
	if field == nil {
		Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
	}
	// Only fields annotated with a go:"track" note are recorded.
	if !strings.Contains(field.Note, "go:\"track\"") {
		return
	}

	outer := n.Left.Type
	if outer.IsPtr() {
		outer = outer.Elem()
	}
	if outer.Sym == nil {
		yyerror("tracked field must be in named struct type")
	}
	if !exportname(field.Sym.Name) {
		yyerror("tracked field must be exported (upper case)")
	}

	sym := tracksym(outer, field)
	if Curfn.Func.FieldTrack == nil {
		Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
	}
	Curfn.Func.FieldTrack[sym] = struct{}{}
}

// candiscardlist reports whether every node in l can be discarded
// without observable effect.
func candiscardlist(l Nodes) bool {
	for _, n := range l.Slice() {
		if !candiscard(n) {
			return false
		}
	}
	return true
}

// candiscard reports whether the expression n can be discarded
// entirely: no side effects and no possible run-time failure.
func candiscard(n *Node) bool {
	if n == nil {
		return true
	}

	switch n.Op {
	default:
		return false

	// Discardable as long as the subpieces are.
	case ONAME,
		ONONAME,
		OTYPE,
		OPACK,
		OLITERAL,
		OADD,
		OSUB,
		OOR,
		OXOR,
		OADDSTR,
		OADDR,
		OANDAND,
		OARRAYBYTESTR,
		OARRAYRUNESTR,
		OSTRARRAYBYTE,
		OSTRARRAYRUNE,
		OCAP,
		OCMPIFACE,
		OCMPSTR,
		OCOMPLIT,
		OMAPLIT,
		OSTRUCTLIT,
		OARRAYLIT,
		OSLICELIT,
		OPTRLIT,
		OCONV,
		OCONVIFACE,
		OCONVNOP,
		ODOT,
		OEQ,
		ONE,
		OLT,
		OLE,
		OGT,
		OGE,
		OKEY,
		OSTRUCTKEY,
		OLEN,
		OMUL,
		OLSH,
		ORSH,
		OAND,
		OANDNOT,
		ONEW,
		ONOT,
		OCOM,
		OPLUS,
		OMINUS,
		OOROR,
		OPAREN,
		ORUNESTR,
		OREAL,
		OIMAG,
		OCOMPLEX:
		break

	// Discardable as long as we know it's not division by zero.
	case ODIV, OMOD:
		if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
			break
		}
		if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
			break
		}
		return false

	// Discardable as long as we know it won't fail because of a bad size.
	case OMAKECHAN, OMAKEMAP:
		if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
			break
		}
		return false

	// Difficult to tell what sizes are okay.
	case OMAKESLICE:
		return false
	}

	// The operator itself is fine; all operands and attached
	// statement lists must also be discardable.
	if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
		return false
	}

	return true
}

// rewrite
//	print(x, y, z)
// into
//	func(a1, a2, a3) {
//		print(a1, a2, a3)
//	}(x, y, z)
// and same for println.

// walkprintfunc_prgen numbers the generated print wrapper functions
// so each gets a unique symbol.
var walkprintfunc_prgen int

// The result of walkprintfunc MUST be assigned back to n, e.g.
// 	n.Left = walkprintfunc(n.Left, init)
func walkprintfunc(n *Node, init *Nodes) *Node {
	if n.Ninit.Len() != 0 {
		walkstmtlist(n.Ninit.Slice())
		init.AppendNodes(&n.Ninit)
	}

	// Build the wrapper's parameter list a0, a1, ..., one parameter
	// per print argument, each with the argument's type.
	t := nod(OTFUNC, nil, nil)
	num := 0
	var printargs []*Node
	var a *Node
	var buf string
	for _, n1 := range n.List.Slice() {
		buf = fmt.Sprintf("a%d", num)
		num++
		a = namedfield(buf, n1.Type)
		t.List.Append(a)
		printargs = append(printargs, a.Left)
	}

	// Declare the wrapper at top level: clear Curfn while building
	// it, and restore the caller's function afterwards.
	oldfn := Curfn
	Curfn = nil

	walkprintfunc_prgen++
	sym := lookupN("print·%d", walkprintfunc_prgen)
	fn := dclfunc(sym, t)

	// The wrapper body is the original print/println op applied to
	// the wrapper's own parameters.
	a = nod(n.Op, nil, nil)
	a.List.Set(printargs)
	a = typecheck(a, Etop)
	a = walkstmt(a)

	fn.Nbody.Set1(a)

	funcbody(fn)

	fn = typecheck(fn, Etop)
	typecheckslice(fn.Nbody.Slice(), Etop)
	// Queue the generated function for compilation.
	xtop = append(xtop, fn)
	Curfn = oldfn

	// Replace n with a call of the wrapper on the original arguments.
	a = nod(OCALL, nil, nil)
	a.Left = fn.Func.Nname
	a.List.Set(n.List.Slice())
	a = typecheck(a, Etop)
	a = walkexpr(a, init)
	return a
}

// substArgTypes substitutes the given list of types for
// successive occurrences of the "any" placeholder in the
// type syntax expression n.Type.
// The result of substArgTypes MUST be assigned back to old, e.g.
// 	n.Left = substArgTypes(n.Left, t1, t2)
func substArgTypes(old *Node, types_ ...*types.Type) *Node {
	n := *old // make shallow copy

	for _, t := range types_ {
		dowidth(t)
	}
	// SubstAny consumes entries from types_ as it substitutes, so a
	// non-empty remainder means more types were supplied than there
	// were "any" placeholders.
	n.Type = types.SubstAny(n.Type, &types_)
	if len(types_) > 0 {
		Fatalf("substArgTypes: too many argument types")
	}
	return &n
}