github.com/eun/go@v0.0.0-20170811110501-92cfd07a6cfd/src/cmd/compile/internal/gc/walk.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package gc
     6  
     7  import (
     8  	"cmd/compile/internal/types"
     9  	"cmd/internal/objabi"
    10  	"cmd/internal/sys"
    11  	"fmt"
    12  	"strings"
    13  )
    14  
     15  // The constant is known to the runtime.
    16  const (
    17  	tmpstringbufsize = 32
    18  )
    19  
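         // walk is the entry point of the walk phase for a single function:
         // it finishes type-checking the declared variables, reports any that
         // were declared and not used, walks the body statements, and then runs
         // zeroResults and heapmoves to prepare the function for the back end.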
    20  func walk(fn *Node) {
    21  	Curfn = fn
    22  
    23  	if Debug['W'] != 0 {
    24  		s := fmt.Sprintf("\nbefore %v", Curfn.Func.Nname.Sym)
    25  		dumplist(s, Curfn.Nbody)
    26  	}
    27  
    28  	lno := lineno
    29  
    30  	// Final typecheck for any unused variables.
    31  	for i, ln := range fn.Func.Dcl {
    32  		if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) {
    33  			ln = typecheck(ln, Erv|Easgn)
    34  			fn.Func.Dcl[i] = ln
    35  		}
    36  	}
    37  
     38  	// Propagate the used flag for typeswitch variables up to the NONAME in its definition.
    39  	for _, ln := range fn.Func.Dcl {
    40  		if ln.Op == ONAME && (ln.Class() == PAUTO || ln.Class() == PAUTOHEAP) && ln.Name.Defn != nil && ln.Name.Defn.Op == OTYPESW && ln.Name.Used() {
    41  			ln.Name.Defn.Left.Name.SetUsed(true)
    42  		}
    43  	}
    44  
    45  	for _, ln := range fn.Func.Dcl {
    46  		if ln.Op != ONAME || (ln.Class() != PAUTO && ln.Class() != PAUTOHEAP) || ln.Sym.Name[0] == '&' || ln.Name.Used() {
    47  			continue
    48  		}
    49  		if defn := ln.Name.Defn; defn != nil && defn.Op == OTYPESW {
    50  			if defn.Left.Name.Used() {
    51  				continue
    52  			}
    53  			yyerrorl(defn.Left.Pos, "%v declared and not used", ln.Sym)
    54  			defn.Left.Name.SetUsed(true) // suppress repeats
    55  		} else {
    56  			yyerrorl(ln.Pos, "%v declared and not used", ln.Sym)
    57  		}
    58  	}
    59  
    60  	lineno = lno
    61  	if nerrors != 0 {
    62  		return
    63  	}
    64  	walkstmtlist(Curfn.Nbody.Slice())
    65  	if Debug['W'] != 0 {
    66  		s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
    67  		dumplist(s, Curfn.Nbody)
    68  	}
    69  
    70  	zeroResults()
    71  	heapmoves()
    72  	if Debug['W'] != 0 && Curfn.Func.Enter.Len() > 0 {
    73  		s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
    74  		dumplist(s, Curfn.Func.Enter)
    75  	}
    76  }
    77  
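         // walkstmtlist walks each statement in s, replacing it in place with
         // its walked form.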
    78  func walkstmtlist(s []*Node) {
    79  	for i := range s {
    80  		s[i] = walkstmt(s[i])
    81  	}
    82  }
    83  
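         // samelist reports whether a and b have the same length and contain
         // the same nodes in the same order.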
    84  func samelist(a, b []*Node) bool {
    85  	if len(a) != len(b) {
    86  		return false
    87  	}
    88  	for i, n := range a {
    89  		if n != b[i] {
    90  			return false
    91  		}
    92  	}
    93  	return true
    94  }
    95  
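         // paramoutheap reports whether fn has an output parameter that is
         // address-taken or has been moved to the heap, in which case ORETURN
         // must assign the results explicitly (see walkstmt).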
    96  func paramoutheap(fn *Node) bool {
    97  	for _, ln := range fn.Func.Dcl {
    98  		switch ln.Class() {
    99  		case PPARAMOUT:
   100  			if ln.isParamStackCopy() || ln.Addrtaken() {
   101  				return true
   102  			}
   103  
   104  		case PAUTO:
   105  			// stop early - parameters are over
   106  			return false
   107  		}
   108  	}
   109  
   110  	return false
   111  }
   112  
    113  // adjustargs adds "adjust" to all the argument locations for the call n.
   114  // n must be a defer or go node that has already been walked.
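         // Deferred and go'd calls are later turned into runtime calls
         // (e.g. deferproc/newproc) that take extra leading arguments, so the
         // already-computed argument offsets must be shifted; see the
         // adjustargs(n, 2*Widthptr) calls in walkstmt below.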
   115  func adjustargs(n *Node, adjust int) {
   116  	var arg *Node
   117  	var lhs *Node
   118  
   119  	callfunc := n.Left
   120  	for _, arg = range callfunc.List.Slice() {
   121  		if arg.Op != OAS {
   122  			Fatalf("call arg not assignment")
   123  		}
   124  		lhs = arg.Left
   125  		if lhs.Op == ONAME {
   126  			// This is a temporary introduced by reorder1.
   127  			// The real store to the stack appears later in the arg list.
   128  			continue
   129  		}
   130  
   131  		if lhs.Op != OINDREGSP {
   132  			Fatalf("call argument store does not use OINDREGSP")
   133  		}
   134  
   135  		// can't really check this in machine-indep code.
   136  		//if(lhs->val.u.reg != D_SP)
   137  		//      Fatalf("call arg assign not indreg(SP)")
   138  		lhs.Xoffset += int64(adjust)
   139  	}
   140  }
   141  
   142  // The result of walkstmt MUST be assigned back to n, e.g.
   143  // 	n.Left = walkstmt(n.Left)
   144  func walkstmt(n *Node) *Node {
   145  	if n == nil {
   146  		return n
   147  	}
   148  
   149  	setlineno(n)
   150  
   151  	walkstmtlist(n.Ninit.Slice())
   152  
   153  	switch n.Op {
   154  	default:
   155  		if n.Op == ONAME {
   156  			yyerror("%v is not a top level statement", n.Sym)
   157  		} else {
   158  			yyerror("%v is not a top level statement", n.Op)
   159  		}
   160  		Dump("nottop", n)
   161  
   162  	case OAS,
   163  		OASOP,
   164  		OAS2,
   165  		OAS2DOTTYPE,
   166  		OAS2RECV,
   167  		OAS2FUNC,
   168  		OAS2MAPR,
   169  		OCLOSE,
   170  		OCOPY,
   171  		OCALLMETH,
   172  		OCALLINTER,
   173  		OCALL,
   174  		OCALLFUNC,
   175  		ODELETE,
   176  		OSEND,
   177  		OPRINT,
   178  		OPRINTN,
   179  		OPANIC,
   180  		OEMPTY,
   181  		ORECOVER,
   182  		OGETG:
   183  		if n.Typecheck() == 0 {
   184  			Fatalf("missing typecheck: %+v", n)
   185  		}
   186  		wascopy := n.Op == OCOPY
   187  		init := n.Ninit
   188  		n.Ninit.Set(nil)
   189  		n = walkexpr(n, &init)
   190  		n = addinit(n, init.Slice())
   191  		if wascopy && n.Op == OCONVNOP {
   192  			n.Op = OEMPTY // don't leave plain values as statements.
   193  		}
   194  
   195  	// special case for a receive where we throw away
   196  	// the value received.
   197  	case ORECV:
   198  		if n.Typecheck() == 0 {
   199  			Fatalf("missing typecheck: %+v", n)
   200  		}
   201  		init := n.Ninit
   202  		n.Ninit.Set(nil)
   203  
   204  		n.Left = walkexpr(n.Left, &init)
   205  		n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, n.Left, nodnil())
   206  		n = walkexpr(n, &init)
   207  
   208  		n = addinit(n, init.Slice())
   209  
   210  	case OBREAK,
   211  		OCONTINUE,
   212  		OFALL,
   213  		OGOTO,
   214  		OLABEL,
   215  		ODCLCONST,
   216  		ODCLTYPE,
   217  		OCHECKNIL,
   218  		OVARKILL,
   219  		OVARLIVE:
   220  		break
   221  
   222  	case ODCL:
   223  		v := n.Left
   224  		if v.Class() == PAUTOHEAP {
   225  			if compiling_runtime {
   226  				yyerror("%v escapes to heap, not allowed in runtime.", v)
   227  			}
   228  			if prealloc[v] == nil {
   229  				prealloc[v] = callnew(v.Type)
   230  			}
   231  			nn := nod(OAS, v.Name.Param.Heapaddr, prealloc[v])
   232  			nn.SetColas(true)
   233  			nn = typecheck(nn, Etop)
   234  			return walkstmt(nn)
   235  		}
   236  
   237  	case OBLOCK:
   238  		walkstmtlist(n.List.Slice())
   239  
   240  	case OXCASE:
   241  		yyerror("case statement out of place")
   242  		n.Op = OCASE
   243  		fallthrough
   244  
   245  	case OCASE:
   246  		n.Right = walkstmt(n.Right)
   247  
   248  	case ODEFER:
   249  		Curfn.Func.SetHasDefer(true)
   250  		switch n.Left.Op {
   251  		case OPRINT, OPRINTN:
   252  			n.Left = walkprintfunc(n.Left, &n.Ninit)
   253  
   254  		case OCOPY:
   255  			n.Left = copyany(n.Left, &n.Ninit, true)
   256  
   257  		default:
   258  			n.Left = walkexpr(n.Left, &n.Ninit)
   259  		}
   260  
   261  		// make room for size & fn arguments.
   262  		adjustargs(n, 2*Widthptr)
   263  
   264  	case OFOR, OFORUNTIL:
   265  		if n.Left != nil {
   266  			walkstmtlist(n.Left.Ninit.Slice())
   267  			init := n.Left.Ninit
   268  			n.Left.Ninit.Set(nil)
   269  			n.Left = walkexpr(n.Left, &init)
   270  			n.Left = addinit(n.Left, init.Slice())
   271  		}
   272  
   273  		n.Right = walkstmt(n.Right)
   274  		walkstmtlist(n.Nbody.Slice())
   275  
   276  	case OIF:
   277  		n.Left = walkexpr(n.Left, &n.Ninit)
   278  		walkstmtlist(n.Nbody.Slice())
   279  		walkstmtlist(n.Rlist.Slice())
   280  
   281  	case OPROC:
   282  		switch n.Left.Op {
   283  		case OPRINT, OPRINTN:
   284  			n.Left = walkprintfunc(n.Left, &n.Ninit)
   285  
   286  		case OCOPY:
   287  			n.Left = copyany(n.Left, &n.Ninit, true)
   288  
   289  		default:
   290  			n.Left = walkexpr(n.Left, &n.Ninit)
   291  		}
   292  
   293  		// make room for size & fn arguments.
   294  		adjustargs(n, 2*Widthptr)
   295  
   296  	case ORETURN:
   297  		walkexprlist(n.List.Slice(), &n.Ninit)
   298  		if n.List.Len() == 0 {
   299  			break
   300  		}
   301  		if (Curfn.Type.FuncType().Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) {
   302  			// assign to the function out parameters,
   303  			// so that reorder3 can fix up conflicts
   304  			var rl []*Node
   305  
   306  			var cl Class
   307  			for _, ln := range Curfn.Func.Dcl {
   308  				cl = ln.Class()
   309  				if cl == PAUTO || cl == PAUTOHEAP {
   310  					break
   311  				}
   312  				if cl == PPARAMOUT {
   313  					if ln.isParamStackCopy() {
   314  						ln = walkexpr(typecheck(nod(OIND, ln.Name.Param.Heapaddr, nil), Erv), nil)
   315  					}
   316  					rl = append(rl, ln)
   317  				}
   318  			}
   319  
   320  			if got, want := n.List.Len(), len(rl); got != want {
   321  				// order should have rewritten multi-value function calls
   322  				// with explicit OAS2FUNC nodes.
   323  				Fatalf("expected %v return arguments, have %v", want, got)
   324  			}
   325  
   326  			if samelist(rl, n.List.Slice()) {
   327  				// special return in disguise
   328  				n.List.Set(nil)
   329  
   330  				break
   331  			}
   332  
   333  			// move function calls out, to make reorder3's job easier.
   334  			walkexprlistsafe(n.List.Slice(), &n.Ninit)
   335  
   336  			ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
   337  			n.List.Set(reorder3(ll))
   338  			break
   339  		}
   340  
   341  		ll := ascompatte(nil, false, Curfn.Type.Results(), n.List.Slice(), 1, &n.Ninit)
   342  		n.List.Set(ll)
   343  
   344  	case ORETJMP:
   345  		break
   346  
   347  	case OSELECT:
   348  		walkselect(n)
   349  
   350  	case OSWITCH:
   351  		walkswitch(n)
   352  
   353  	case ORANGE:
   354  		n = walkrange(n)
   355  
   356  	case OXFALL:
   357  		yyerror("fallthrough statement out of place")
   358  		n.Op = OFALL
   359  	}
   360  
   361  	if n.Op == ONAME {
   362  		Fatalf("walkstmt ended up with name: %+v", n)
   363  	}
   364  	return n
   365  }
   366  
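         // isSmallMakeSlice reports whether n is a make([]T, len, cap) whose
         // length and capacity are small integer constants and whose backing
         // array occupies less than 1<<16 bytes, so that a non-escaping slice
         // can be backed by a stack-allocated array.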
   367  func isSmallMakeSlice(n *Node) bool {
   368  	if n.Op != OMAKESLICE {
   369  		return false
   370  	}
   371  	l := n.Left
   372  	r := n.Right
   373  	if r == nil {
   374  		r = l
   375  	}
   376  	t := n.Type
   377  
   378  	return smallintconst(l) && smallintconst(r) && (t.Elem().Width == 0 || r.Int64() < (1<<16)/t.Elem().Width)
   379  }
   380  
    381  // walkexprlist walks the whole tree of the body of an
    382  // expression or simple statement.
    383  // The types of expressions are calculated.
    384  // Compile-time constants are evaluated.
    385  // Complex side effects like statements are appended to init.
   386  func walkexprlist(s []*Node, init *Nodes) {
   387  	for i := range s {
   388  		s[i] = walkexpr(s[i], init)
   389  	}
   390  }
   391  
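         // walkexprlistsafe is like walkexprlist, but each expression is first
         // made safe via safeexpr, which moves side effects into init so the
         // expression can be reused without re-evaluating them.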
   392  func walkexprlistsafe(s []*Node, init *Nodes) {
   393  	for i, n := range s {
   394  		s[i] = safeexpr(n, init)
   395  		s[i] = walkexpr(s[i], init)
   396  	}
   397  }
   398  
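         // walkexprlistcheap is like walkexprlist, but each expression is first
         // made cheap via cheapexpr, copying it into a temporary if needed.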
   399  func walkexprlistcheap(s []*Node, init *Nodes) {
   400  	for i, n := range s {
   401  		s[i] = cheapexpr(n, init)
   402  		s[i] = walkexpr(s[i], init)
   403  	}
   404  }
   405  
    406  // convFuncName builds the name of the runtime function for an interface conversion.
   407  // Not all names are possible
   408  // (e.g., we'll never generate convE2E or convE2I or convI2E).
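         // For example (illustrative of the cases below): converting a uint32 to
         // interface{} selects "convT2E32", and converting a string to a
         // non-empty interface selects "convT2Istring".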
   409  func convFuncName(from, to *types.Type) string {
   410  	tkind := to.Tie()
   411  	switch from.Tie() {
   412  	case 'I':
   413  		switch tkind {
   414  		case 'I':
   415  			return "convI2I"
   416  		}
   417  	case 'T':
   418  		switch tkind {
   419  		case 'E':
   420  			switch {
   421  			case from.Size() == 2 && from.Align == 2:
   422  				return "convT2E16"
   423  			case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from):
   424  				return "convT2E32"
   425  			case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
   426  				return "convT2E64"
   427  			case from.IsString():
   428  				return "convT2Estring"
   429  			case from.IsSlice():
   430  				return "convT2Eslice"
   431  			case !types.Haspointers(from):
   432  				return "convT2Enoptr"
   433  			}
   434  			return "convT2E"
   435  		case 'I':
   436  			switch {
   437  			case from.Size() == 2 && from.Align == 2:
   438  				return "convT2I16"
   439  			case from.Size() == 4 && from.Align == 4 && !types.Haspointers(from):
   440  				return "convT2I32"
   441  			case from.Size() == 8 && from.Align == types.Types[TUINT64].Align && !types.Haspointers(from):
   442  				return "convT2I64"
   443  			case from.IsString():
   444  				return "convT2Istring"
   445  			case from.IsSlice():
   446  				return "convT2Islice"
   447  			case !types.Haspointers(from):
   448  				return "convT2Inoptr"
   449  			}
   450  			return "convT2I"
   451  		}
   452  	}
   453  	Fatalf("unknown conv func %c2%c", from.Tie(), to.Tie())
   454  	panic("unreachable")
   455  }
   456  
   457  // The result of walkexpr MUST be assigned back to n, e.g.
   458  // 	n.Left = walkexpr(n.Left, init)
   459  func walkexpr(n *Node, init *Nodes) *Node {
   460  	if n == nil {
   461  		return n
   462  	}
   463  
   464  	// Eagerly checkwidth all expressions for the back end.
   465  	if n.Type != nil && !n.Type.WidthCalculated() {
   466  		switch n.Type.Etype {
   467  		case TBLANK, TNIL, TIDEAL:
   468  		default:
   469  			checkwidth(n.Type)
   470  		}
   471  	}
   472  
   473  	if init == &n.Ninit {
   474  		// not okay to use n->ninit when walking n,
   475  		// because we might replace n with some other node
   476  		// and would lose the init list.
   477  		Fatalf("walkexpr init == &n->ninit")
   478  	}
   479  
   480  	if n.Ninit.Len() != 0 {
   481  		walkstmtlist(n.Ninit.Slice())
   482  		init.AppendNodes(&n.Ninit)
   483  	}
   484  
   485  	lno := setlineno(n)
   486  
   487  	if Debug['w'] > 1 {
   488  		Dump("walk-before", n)
   489  	}
   490  
   491  	if n.Typecheck() != 1 {
   492  		Fatalf("missed typecheck: %+v", n)
   493  	}
   494  
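         	// A reference to a heap-moved local (PAUTOHEAP) is rewritten as a
         	// load through its heap address: x becomes (*x.Name.Param.Heapaddr).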
   495  	if n.Op == ONAME && n.Class() == PAUTOHEAP {
   496  		nn := nod(OIND, n.Name.Param.Heapaddr, nil)
   497  		nn = typecheck(nn, Erv)
   498  		nn = walkexpr(nn, init)
   499  		nn.Left.SetNonNil(true)
   500  		return nn
   501  	}
   502  
   503  opswitch:
   504  	switch n.Op {
   505  	default:
   506  		Dump("walk", n)
   507  		Fatalf("walkexpr: switch 1 unknown op %+S", n)
   508  
   509  	case ONONAME, OINDREGSP, OEMPTY, OGETG:
   510  
   511  	case OTYPE, ONAME, OLITERAL:
   512  		// TODO(mdempsky): Just return n; see discussion on CL 38655.
   513  		// Perhaps refactor to use Node.mayBeShared for these instead.
   514  		// If these return early, make sure to still call
   515  		// stringsym for constant strings.
   516  
   517  	case ONOT, OMINUS, OPLUS, OCOM, OREAL, OIMAG, ODOTMETH, ODOTINTER,
   518  		OIND, OSPTR, OITAB, OIDATA, OADDR:
   519  		n.Left = walkexpr(n.Left, init)
   520  
   521  	case OEFACE, OAND, OSUB, OMUL, OLT, OLE, OGE, OGT, OADD, OOR, OXOR:
   522  		n.Left = walkexpr(n.Left, init)
   523  		n.Right = walkexpr(n.Right, init)
   524  
   525  	case ODOT:
   526  		usefield(n)
   527  		n.Left = walkexpr(n.Left, init)
   528  
   529  	case ODOTTYPE, ODOTTYPE2:
   530  		n.Left = walkexpr(n.Left, init)
   531  		// Set up interface type addresses for back end.
   532  		n.Right = typename(n.Type)
   533  		if n.Op == ODOTTYPE {
   534  			n.Right.Right = typename(n.Left.Type)
   535  		}
   536  		if !n.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
   537  			n.List.Set1(itabname(n.Type, n.Left.Type))
   538  		}
   539  
   540  	case ODOTPTR:
   541  		usefield(n)
   542  		if n.Op == ODOTPTR && n.Left.Type.Elem().Width == 0 {
   543  			// No actual copy will be generated, so emit an explicit nil check.
   544  			n.Left = cheapexpr(n.Left, init)
   545  
   546  			checknil(n.Left, init)
   547  		}
   548  
   549  		n.Left = walkexpr(n.Left, init)
   550  
   551  	case OLEN, OCAP:
   552  		n.Left = walkexpr(n.Left, init)
   553  
   554  		// replace len(*[10]int) with 10.
   555  		// delayed until now to preserve side effects.
   556  		t := n.Left.Type
   557  
   558  		if t.IsPtr() {
   559  			t = t.Elem()
   560  		}
   561  		if t.IsArray() {
   562  			safeexpr(n.Left, init)
   563  			nodconst(n, n.Type, t.NumElem())
   564  			n.SetTypecheck(1)
   565  		}
   566  
   567  	case OLSH, ORSH:
   568  		n.Left = walkexpr(n.Left, init)
   569  		n.Right = walkexpr(n.Right, init)
   570  		t := n.Left.Type
   571  		n.SetBounded(bounded(n.Right, 8*t.Width))
   572  		if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) {
   573  			Warn("shift bounds check elided")
   574  		}
   575  
   576  	case OCOMPLEX:
   577  		// Use results from call expression as arguments for complex.
   578  		if n.Left == nil && n.Right == nil {
   579  			n.Left = n.List.First()
   580  			n.Right = n.List.Second()
   581  		}
   582  		n.Left = walkexpr(n.Left, init)
   583  		n.Right = walkexpr(n.Right, init)
   584  
   585  	case OEQ, ONE:
   586  		n.Left = walkexpr(n.Left, init)
   587  		n.Right = walkexpr(n.Right, init)
   588  
   589  		// Disable safemode while compiling this code: the code we
   590  		// generate internally can refer to unsafe.Pointer.
   591  		// In this case it can happen if we need to generate an ==
   592  		// for a struct containing a reflect.Value, which itself has
   593  		// an unexported field of type unsafe.Pointer.
   594  		old_safemode := safemode
   595  		safemode = false
   596  		n = walkcompare(n, init)
   597  		safemode = old_safemode
   598  
   599  	case OANDAND, OOROR:
   600  		n.Left = walkexpr(n.Left, init)
   601  
   602  		// cannot put side effects from n.Right on init,
   603  		// because they cannot run before n.Left is checked.
   604  		// save elsewhere and store on the eventual n.Right.
   605  		var ll Nodes
   606  
   607  		n.Right = walkexpr(n.Right, &ll)
   608  		n.Right = addinit(n.Right, ll.Slice())
   609  		n = walkinrange(n, init)
   610  
   611  	case OPRINT, OPRINTN:
   612  		walkexprlist(n.List.Slice(), init)
   613  		n = walkprint(n, init)
   614  
   615  	case OPANIC:
   616  		n = mkcall("gopanic", nil, init, n.Left)
   617  
   618  	case ORECOVER:
   619  		n = mkcall("gorecover", n.Type, init, nod(OADDR, nodfp, nil))
   620  
   621  	case OCLOSUREVAR, OCFUNC:
   622  		n.SetAddable(true)
   623  
   624  	case OCALLINTER:
   625  		usemethod(n)
   626  		t := n.Left.Type
   627  		if n.List.Len() != 0 && n.List.First().Op == OAS {
   628  			break
   629  		}
   630  		n.Left = walkexpr(n.Left, init)
   631  		walkexprlist(n.List.Slice(), init)
   632  		ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init)
   633  		n.List.Set(reorder1(ll))
   634  
   635  	case OCALLFUNC:
   636  		if n.Left.Op == OCLOSURE {
   637  			// Transform direct call of a closure to call of a normal function.
   638  			// transformclosure already did all preparation work.
   639  
   640  			// Prepend captured variables to argument list.
   641  			n.List.Prepend(n.Left.Func.Enter.Slice()...)
   642  
   643  			n.Left.Func.Enter.Set(nil)
   644  
   645  			// Replace OCLOSURE with ONAME/PFUNC.
   646  			n.Left = n.Left.Func.Closure.Func.Nname
   647  
   648  			// Update type of OCALLFUNC node.
    649  			// Output arguments have not changed, but their offsets could have.
   650  			if n.Left.Type.Results().NumFields() == 1 {
   651  				n.Type = n.Left.Type.Results().Field(0).Type
   652  			} else {
   653  				n.Type = n.Left.Type.Results()
   654  			}
   655  		}
   656  
   657  		t := n.Left.Type
   658  		if n.List.Len() != 0 && n.List.First().Op == OAS {
   659  			break
   660  		}
   661  
   662  		n.Left = walkexpr(n.Left, init)
   663  		walkexprlist(n.List.Slice(), init)
   664  
   665  		ll := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init)
   666  		n.List.Set(reorder1(ll))
   667  
   668  	case OCALLMETH:
   669  		t := n.Left.Type
   670  		if n.List.Len() != 0 && n.List.First().Op == OAS {
   671  			break
   672  		}
   673  		n.Left = walkexpr(n.Left, init)
   674  		walkexprlist(n.List.Slice(), init)
   675  		ll := ascompatte(n, false, t.Recvs(), []*Node{n.Left.Left}, 0, init)
   676  		lr := ascompatte(n, n.Isddd(), t.Params(), n.List.Slice(), 0, init)
   677  		ll = append(ll, lr...)
   678  		n.Left.Left = nil
   679  		updateHasCall(n.Left)
   680  		n.List.Set(reorder1(ll))
   681  
   682  	case OAS:
   683  		init.AppendNodes(&n.Ninit)
   684  
   685  		n.Left = walkexpr(n.Left, init)
   686  		n.Left = safeexpr(n.Left, init)
   687  
   688  		if oaslit(n, init) {
   689  			break
   690  		}
   691  
   692  		if n.Right == nil {
   693  			// TODO(austin): Check all "implicit zeroing"
   694  			break
   695  		}
   696  
   697  		if !instrumenting && iszero(n.Right) {
   698  			break
   699  		}
   700  
   701  		switch n.Right.Op {
   702  		default:
   703  			n.Right = walkexpr(n.Right, init)
   704  
   705  		case ORECV:
   706  			// x = <-c; n.Left is x, n.Right.Left is c.
   707  			// orderstmt made sure x is addressable.
   708  			n.Right.Left = walkexpr(n.Right.Left, init)
   709  
   710  			n1 := nod(OADDR, n.Left, nil)
   711  			r := n.Right.Left // the channel
   712  			n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, r, n1)
   713  			n = walkexpr(n, init)
   714  			break opswitch
   715  
   716  		case OAPPEND:
   717  			// x = append(...)
   718  			r := n.Right
   719  			if r.Type.Elem().NotInHeap() {
   720  				yyerror("%v is go:notinheap; heap allocation disallowed", r.Type.Elem())
   721  			}
   722  			if r.Isddd() {
   723  				r = appendslice(r, init) // also works for append(slice, string).
   724  			} else {
   725  				r = walkappend(r, init, n)
   726  			}
   727  			n.Right = r
   728  			if r.Op == OAPPEND {
   729  				// Left in place for back end.
   730  				// Do not add a new write barrier.
   731  				// Set up address of type for back end.
   732  				r.Left = typename(r.Type.Elem())
   733  				break opswitch
   734  			}
   735  			// Otherwise, lowered for race detector.
   736  			// Treat as ordinary assignment.
   737  		}
   738  
   739  		if n.Left != nil && n.Right != nil {
   740  			n = convas(n, init)
   741  		}
   742  
   743  	case OAS2:
   744  		init.AppendNodes(&n.Ninit)
   745  		walkexprlistsafe(n.List.Slice(), init)
   746  		walkexprlistsafe(n.Rlist.Slice(), init)
   747  		ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init)
   748  		ll = reorder3(ll)
   749  		n = liststmt(ll)
   750  
   751  	// a,b,... = fn()
   752  	case OAS2FUNC:
   753  		init.AppendNodes(&n.Ninit)
   754  
   755  		r := n.Rlist.First()
   756  		walkexprlistsafe(n.List.Slice(), init)
   757  		r = walkexpr(r, init)
   758  
   759  		if isIntrinsicCall(r) {
   760  			n.Rlist.Set1(r)
   761  			break
   762  		}
   763  		init.Append(r)
   764  
   765  		ll := ascompatet(n.List, r.Type)
   766  		n = liststmt(ll)
   767  
   768  	// x, y = <-c
   769  	// orderstmt made sure x is addressable.
   770  	case OAS2RECV:
   771  		init.AppendNodes(&n.Ninit)
   772  
   773  		r := n.Rlist.First()
   774  		walkexprlistsafe(n.List.Slice(), init)
   775  		r.Left = walkexpr(r.Left, init)
   776  		var n1 *Node
   777  		if isblank(n.List.First()) {
   778  			n1 = nodnil()
   779  		} else {
   780  			n1 = nod(OADDR, n.List.First(), nil)
   781  		}
   782  		n1.Etype = 1 // addr does not escape
   783  		fn := chanfn("chanrecv2", 2, r.Left.Type)
   784  		ok := n.List.Second()
   785  		call := mkcall1(fn, ok.Type, init, r.Left, n1)
   786  		n = nod(OAS, ok, call)
   787  		n = typecheck(n, Etop)
   788  
   789  	// a,b = m[i]
   790  	case OAS2MAPR:
   791  		init.AppendNodes(&n.Ninit)
   792  
   793  		r := n.Rlist.First()
   794  		walkexprlistsafe(n.List.Slice(), init)
   795  		r.Left = walkexpr(r.Left, init)
   796  		r.Right = walkexpr(r.Right, init)
   797  		t := r.Left.Type
   798  
   799  		fast := mapfast(t)
   800  		var key *Node
   801  		if fast != mapslow {
   802  			// fast versions take key by value
   803  			key = r.Right
   804  		} else {
   805  			// standard version takes key by reference
   806  			// orderexpr made sure key is addressable.
   807  			key = nod(OADDR, r.Right, nil)
   808  		}
   809  
   810  		// from:
   811  		//   a,b = m[i]
   812  		// to:
   813  		//   var,b = mapaccess2*(t, m, i)
   814  		//   a = *var
   815  		a := n.List.First()
   816  
   817  		if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero
   818  			fn := mapfn(mapaccess2[fast], t)
   819  			r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key)
   820  		} else {
   821  			fn := mapfn("mapaccess2_fat", t)
   822  			z := zeroaddr(w)
   823  			r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key, z)
   824  		}
   825  
   826  		// mapaccess2* returns a typed bool, but due to spec changes,
   827  		// the boolean result of i.(T) is now untyped so we make it the
   828  		// same type as the variable on the lhs.
   829  		if ok := n.List.Second(); !isblank(ok) && ok.Type.IsBoolean() {
   830  			r.Type.Field(1).Type = ok.Type
   831  		}
   832  		n.Rlist.Set1(r)
   833  		n.Op = OAS2FUNC
   834  
   835  		// don't generate a = *var if a is _
   836  		if !isblank(a) {
   837  			var_ := temp(types.NewPtr(t.Val()))
   838  			var_.SetTypecheck(1)
   839  			var_.SetNonNil(true) // mapaccess always returns a non-nil pointer
   840  			n.List.SetFirst(var_)
   841  			n = walkexpr(n, init)
   842  			init.Append(n)
   843  			n = nod(OAS, a, nod(OIND, var_, nil))
   844  		}
   845  
   846  		n = typecheck(n, Etop)
   847  		n = walkexpr(n, init)
   848  
   849  	case ODELETE:
   850  		init.AppendNodes(&n.Ninit)
   851  		map_ := n.List.First()
   852  		key := n.List.Second()
   853  		map_ = walkexpr(map_, init)
   854  		key = walkexpr(key, init)
   855  
   856  		t := map_.Type
   857  		fast := mapfast(t)
   858  		if fast == mapslow {
   859  			// orderstmt made sure key is addressable.
   860  			key = nod(OADDR, key, nil)
   861  		}
   862  		n = mkcall1(mapfndel(mapdelete[fast], t), nil, init, typename(t), map_, key)
   863  
   864  	case OAS2DOTTYPE:
   865  		walkexprlistsafe(n.List.Slice(), init)
   866  		n.Rlist.SetFirst(walkexpr(n.Rlist.First(), init))
   867  
   868  	case OCONVIFACE:
   869  		n.Left = walkexpr(n.Left, init)
   870  
   871  		// Optimize convT2E or convT2I as a two-word copy when T is pointer-shaped.
   872  		if isdirectiface(n.Left.Type) {
   873  			var t *Node
   874  			if n.Type.IsEmptyInterface() {
   875  				t = typename(n.Left.Type)
   876  			} else {
   877  				t = itabname(n.Left.Type, n.Type)
   878  			}
   879  			l := nod(OEFACE, t, n.Left)
   880  			l.Type = n.Type
   881  			l.SetTypecheck(n.Typecheck())
   882  			n = l
   883  			break
   884  		}
   885  
   886  		if staticbytes == nil {
   887  			staticbytes = newname(Runtimepkg.Lookup("staticbytes"))
   888  			staticbytes.SetClass(PEXTERN)
   889  			staticbytes.Type = types.NewArray(types.Types[TUINT8], 256)
   890  			zerobase = newname(Runtimepkg.Lookup("zerobase"))
   891  			zerobase.SetClass(PEXTERN)
   892  			zerobase.Type = types.Types[TUINTPTR]
   893  		}
   894  
   895  		// Optimize convT2{E,I} for many cases in which T is not pointer-shaped,
   896  		// by using an existing addressable value identical to n.Left
   897  		// or creating one on the stack.
   898  		var value *Node
   899  		switch {
   900  		case n.Left.Type.Size() == 0:
   901  			// n.Left is zero-sized. Use zerobase.
   902  			cheapexpr(n.Left, init) // Evaluate n.Left for side-effects. See issue 19246.
   903  			value = zerobase
   904  		case n.Left.Type.IsBoolean() || (n.Left.Type.Size() == 1 && n.Left.Type.IsInteger()):
   905  			// n.Left is a bool/byte. Use staticbytes[n.Left].
   906  			n.Left = cheapexpr(n.Left, init)
   907  			value = nod(OINDEX, staticbytes, byteindex(n.Left))
   908  			value.SetBounded(true)
   909  		case n.Left.Class() == PEXTERN && n.Left.Name != nil && n.Left.Name.Readonly():
   910  			// n.Left is a readonly global; use it directly.
   911  			value = n.Left
   912  		case !n.Left.Type.IsInterface() && n.Esc == EscNone && n.Left.Type.Width <= 1024:
   913  			// n.Left does not escape. Use a stack temporary initialized to n.Left.
   914  			value = temp(n.Left.Type)
   915  			init.Append(typecheck(nod(OAS, value, n.Left), Etop))
   916  		}
   917  
   918  		if value != nil {
   919  			// Value is identical to n.Left.
   920  			// Construct the interface directly: {type/itab, &value}.
   921  			var t *Node
   922  			if n.Type.IsEmptyInterface() {
   923  				t = typename(n.Left.Type)
   924  			} else {
   925  				t = itabname(n.Left.Type, n.Type)
   926  			}
   927  			l := nod(OEFACE, t, typecheck(nod(OADDR, value, nil), Erv))
   928  			l.Type = n.Type
   929  			l.SetTypecheck(n.Typecheck())
   930  			n = l
   931  			break
   932  		}
   933  
   934  		// Implement interface to empty interface conversion.
   935  		// tmp = i.itab
   936  		// if tmp != nil {
   937  		//    tmp = tmp.type
   938  		// }
   939  		// e = iface{tmp, i.data}
   940  		if n.Type.IsEmptyInterface() && n.Left.Type.IsInterface() && !n.Left.Type.IsEmptyInterface() {
   941  			// Evaluate the input interface.
   942  			c := temp(n.Left.Type)
   943  			init.Append(nod(OAS, c, n.Left))
   944  
   945  			// Get the itab out of the interface.
   946  			tmp := temp(types.NewPtr(types.Types[TUINT8]))
   947  			init.Append(nod(OAS, tmp, typecheck(nod(OITAB, c, nil), Erv)))
   948  
   949  			// Get the type out of the itab.
   950  			nif := nod(OIF, typecheck(nod(ONE, tmp, nodnil()), Erv), nil)
   951  			nif.Nbody.Set1(nod(OAS, tmp, itabType(tmp)))
   952  			init.Append(nif)
   953  
   954  			// Build the result.
   955  			e := nod(OEFACE, tmp, ifaceData(c, types.NewPtr(types.Types[TUINT8])))
   956  			e.Type = n.Type // assign type manually, typecheck doesn't understand OEFACE.
   957  			e.SetTypecheck(1)
   958  			n = e
   959  			break
   960  		}
   961  
   962  		var ll []*Node
   963  		if n.Type.IsEmptyInterface() {
   964  			if !n.Left.Type.IsInterface() {
   965  				ll = append(ll, typename(n.Left.Type))
   966  			}
   967  		} else {
   968  			if n.Left.Type.IsInterface() {
   969  				ll = append(ll, typename(n.Type))
   970  			} else {
   971  				ll = append(ll, itabname(n.Left.Type, n.Type))
   972  			}
   973  		}
   974  
   975  		if n.Left.Type.IsInterface() {
   976  			ll = append(ll, n.Left)
   977  		} else {
   978  			// regular types are passed by reference to avoid C vararg calls
   979  			// orderexpr arranged for n.Left to be a temporary for all
   980  			// the conversions it could see. comparison of an interface
   981  			// with a non-interface, especially in a switch on interface value
   982  			// with non-interface cases, is not visible to orderstmt, so we
   983  			// have to fall back on allocating a temp here.
   984  			if islvalue(n.Left) {
   985  				ll = append(ll, nod(OADDR, n.Left, nil))
   986  			} else {
   987  				ll = append(ll, nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil))
   988  			}
   989  			dowidth(n.Left.Type)
   990  		}
   991  
   992  		fn := syslook(convFuncName(n.Left.Type, n.Type))
   993  		fn = substArgTypes(fn, n.Left.Type, n.Type)
   994  		dowidth(fn.Type)
   995  		n = nod(OCALL, fn, nil)
   996  		n.List.Set(ll)
   997  		n = typecheck(n, Erv)
   998  		n = walkexpr(n, init)
   999  
  1000  	case OCONV, OCONVNOP:
  1001  		if thearch.LinkArch.Family == sys.ARM || thearch.LinkArch.Family == sys.MIPS {
  1002  			if n.Left.Type.IsFloat() {
  1003  				if n.Type.Etype == TINT64 {
  1004  					n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
  1005  					break
  1006  				}
  1007  
  1008  				if n.Type.Etype == TUINT64 {
  1009  					n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
  1010  					break
  1011  				}
  1012  			}
  1013  
  1014  			if n.Type.IsFloat() {
  1015  				if n.Left.Type.Etype == TINT64 {
  1016  					n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type)
  1017  					break
  1018  				}
  1019  
  1020  				if n.Left.Type.Etype == TUINT64 {
  1021  					n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type)
  1022  					break
  1023  				}
  1024  			}
  1025  		}
  1026  
  1027  		if thearch.LinkArch.Family == sys.I386 {
  1028  			if n.Left.Type.IsFloat() {
  1029  				if n.Type.Etype == TINT64 {
  1030  					n = mkcall("float64toint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
  1031  					break
  1032  				}
  1033  
  1034  				if n.Type.Etype == TUINT64 {
  1035  					n = mkcall("float64touint64", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
  1036  					break
  1037  				}
  1038  				if n.Type.Etype == TUINT32 || n.Type.Etype == TUINT || n.Type.Etype == TUINTPTR {
  1039  					n = mkcall("float64touint32", n.Type, init, conv(n.Left, types.Types[TFLOAT64]))
  1040  					break
  1041  				}
  1042  			}
  1043  			if n.Type.IsFloat() {
  1044  				if n.Left.Type.Etype == TINT64 {
  1045  					n = conv(mkcall("int64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TINT64])), n.Type)
  1046  					break
  1047  				}
  1048  
  1049  				if n.Left.Type.Etype == TUINT64 {
  1050  					n = conv(mkcall("uint64tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT64])), n.Type)
  1051  					break
  1052  				}
  1053  				if n.Left.Type.Etype == TUINT32 || n.Left.Type.Etype == TUINT || n.Left.Type.Etype == TUINTPTR {
  1054  					n = conv(mkcall("uint32tofloat64", types.Types[TFLOAT64], init, conv(n.Left, types.Types[TUINT32])), n.Type)
  1055  					break
  1056  				}
  1057  			}
  1058  		}
  1059  
  1060  		n.Left = walkexpr(n.Left, init)
  1061  
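         	// x &^ y is lowered to x & ^y.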
  1062  	case OANDNOT:
  1063  		n.Left = walkexpr(n.Left, init)
  1064  		n.Op = OAND
  1065  		n.Right = nod(OCOM, n.Right, nil)
  1066  		n.Right = typecheck(n.Right, Erv)
  1067  		n.Right = walkexpr(n.Right, init)
  1068  
  1069  	case ODIV, OMOD:
  1070  		n.Left = walkexpr(n.Left, init)
  1071  		n.Right = walkexpr(n.Right, init)
  1072  
  1073  		// rewrite complex div into function call.
  1074  		et := n.Left.Type.Etype
  1075  
  1076  		if isComplex[et] && n.Op == ODIV {
  1077  			t := n.Type
  1078  			n = mkcall("complex128div", types.Types[TCOMPLEX128], init, conv(n.Left, types.Types[TCOMPLEX128]), conv(n.Right, types.Types[TCOMPLEX128]))
  1079  			n = conv(n, t)
  1080  			break
  1081  		}
  1082  
  1083  		// Nothing to do for float divisions.
  1084  		if isFloat[et] {
  1085  			break
  1086  		}
  1087  
  1088  		// rewrite 64-bit div and mod on 32-bit architectures.
  1089  		// TODO: Remove this code once we can introduce
  1090  		// runtime calls late in SSA processing.
  1091  		if Widthreg < 8 && (et == TINT64 || et == TUINT64) {
  1092  			if n.Right.Op == OLITERAL {
  1093  				// Leave div/mod by constant powers of 2.
  1094  				// The SSA backend will handle those.
  1095  				switch et {
  1096  				case TINT64:
  1097  					c := n.Right.Int64()
  1098  					if c < 0 {
  1099  						c = -c
  1100  					}
  1101  					if c != 0 && c&(c-1) == 0 {
  1102  						break opswitch
  1103  					}
  1104  				case TUINT64:
  1105  					c := uint64(n.Right.Int64())
  1106  					if c != 0 && c&(c-1) == 0 {
  1107  						break opswitch
  1108  					}
  1109  				}
  1110  			}
  1111  			var fn string
  1112  			if et == TINT64 {
  1113  				fn = "int64"
  1114  			} else {
  1115  				fn = "uint64"
  1116  			}
  1117  			if n.Op == ODIV {
  1118  				fn += "div"
  1119  			} else {
  1120  				fn += "mod"
  1121  			}
  1122  			n = mkcall(fn, n.Type, init, conv(n.Left, types.Types[et]), conv(n.Right, types.Types[et]))
  1123  		}
  1124  
  1125  	case OINDEX:
  1126  		n.Left = walkexpr(n.Left, init)
  1127  
   1128  		// save the original node for bounds checking elision.
   1129  		// If it was an ODIV/OMOD, walk might rewrite it.
  1130  		r := n.Right
  1131  
  1132  		n.Right = walkexpr(n.Right, init)
  1133  
  1134  		// if range of type cannot exceed static array bound,
  1135  		// disable bounds check.
  1136  		if n.Bounded() {
  1137  			break
  1138  		}
  1139  		t := n.Left.Type
  1140  		if t != nil && t.IsPtr() {
  1141  			t = t.Elem()
  1142  		}
  1143  		if t.IsArray() {
  1144  			n.SetBounded(bounded(r, t.NumElem()))
  1145  			if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
  1146  				Warn("index bounds check elided")
  1147  			}
  1148  			if smallintconst(n.Right) && !n.Bounded() {
  1149  				yyerror("index out of bounds")
  1150  			}
  1151  		} else if Isconst(n.Left, CTSTR) {
  1152  			n.SetBounded(bounded(r, int64(len(n.Left.Val().U.(string)))))
  1153  			if Debug['m'] != 0 && n.Bounded() && !Isconst(n.Right, CTINT) {
  1154  				Warn("index bounds check elided")
  1155  			}
  1156  			if smallintconst(n.Right) && !n.Bounded() {
  1157  				yyerror("index out of bounds")
  1158  			}
  1159  		}
  1160  
  1161  		if Isconst(n.Right, CTINT) {
  1162  			if n.Right.Val().U.(*Mpint).CmpInt64(0) < 0 || n.Right.Val().U.(*Mpint).Cmp(maxintval[TINT]) > 0 {
  1163  				yyerror("index out of bounds")
  1164  			}
  1165  		}
  1166  
  1167  	case OINDEXMAP:
  1168  		// Replace m[k] with *map{access1,assign}(maptype, m, &k)
  1169  		n.Left = walkexpr(n.Left, init)
  1170  		n.Right = walkexpr(n.Right, init)
  1171  		map_ := n.Left
  1172  		key := n.Right
  1173  		t := map_.Type
  1174  		if n.Etype == 1 {
  1175  			// This m[k] expression is on the left-hand side of an assignment.
  1176  			fast := mapfast(t)
  1177  			if fast == mapslow {
  1178  				// standard version takes key by reference.
  1179  				// orderexpr made sure key is addressable.
  1180  				key = nod(OADDR, key, nil)
  1181  			}
  1182  			n = mkcall1(mapfn(mapassign[fast], t), nil, init, typename(t), map_, key)
  1183  		} else {
  1184  			// m[k] is not the target of an assignment.
  1185  			fast := mapfast(t)
  1186  			if fast == mapslow {
  1187  				// standard version takes key by reference.
  1188  				// orderexpr made sure key is addressable.
  1189  				key = nod(OADDR, key, nil)
  1190  			}
  1191  
  1192  			if w := t.Val().Width; w <= 1024 { // 1024 must match ../../../../runtime/hashmap.go:maxZero
  1193  				n = mkcall1(mapfn(mapaccess1[fast], t), types.NewPtr(t.Val()), init, typename(t), map_, key)
  1194  			} else {
  1195  				z := zeroaddr(w)
  1196  				n = mkcall1(mapfn("mapaccess1_fat", t), types.NewPtr(t.Val()), init, typename(t), map_, key, z)
  1197  			}
  1198  		}
  1199  		n.Type = types.NewPtr(t.Val())
  1200  		n.SetNonNil(true) // mapaccess1* and mapassign always return non-nil pointers.
  1201  		n = nod(OIND, n, nil)
  1202  		n.Type = t.Val()
  1203  		n.SetTypecheck(1)
  1204  
  1205  	case ORECV:
  1206  		Fatalf("walkexpr ORECV") // should see inside OAS only
  1207  
  1208  	case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
  1209  		n.Left = walkexpr(n.Left, init)
  1210  		low, high, max := n.SliceBounds()
  1211  		low = walkexpr(low, init)
  1212  		if low != nil && iszero(low) {
  1213  			// Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
  1214  			low = nil
  1215  		}
  1216  		high = walkexpr(high, init)
  1217  		max = walkexpr(max, init)
  1218  		n.SetSliceBounds(low, high, max)
  1219  		if n.Op.IsSlice3() {
  1220  			if max != nil && max.Op == OCAP && samesafeexpr(n.Left, max.Left) {
  1221  				// Reduce x[i:j:cap(x)] to x[i:j].
  1222  				if n.Op == OSLICE3 {
  1223  					n.Op = OSLICE
  1224  				} else {
  1225  					n.Op = OSLICEARR
  1226  				}
  1227  				n = reduceSlice(n)
  1228  			}
  1229  		} else {
  1230  			n = reduceSlice(n)
  1231  		}
  1232  
  1233  	case ONEW:
  1234  		if n.Esc == EscNone {
  1235  			if n.Type.Elem().Width >= 1<<16 {
  1236  				Fatalf("large ONEW with EscNone: %v", n)
  1237  			}
  1238  			r := temp(n.Type.Elem())
  1239  			r = nod(OAS, r, nil) // zero temp
  1240  			r = typecheck(r, Etop)
  1241  			init.Append(r)
  1242  			r = nod(OADDR, r.Left, nil)
  1243  			r = typecheck(r, Erv)
  1244  			n = r
  1245  		} else {
  1246  			n = callnew(n.Type.Elem())
  1247  		}
  1248  
  1249  	case OCMPSTR:
  1250  		// s + "badgerbadgerbadger" == "badgerbadgerbadger"
  1251  		if (Op(n.Etype) == OEQ || Op(n.Etype) == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && n.Left.List.Len() == 2 && Isconst(n.Left.List.Second(), CTSTR) && strlit(n.Right) == strlit(n.Left.List.Second()) {
  1252  			// TODO(marvin): Fix Node.EType type union.
  1253  			r := nod(Op(n.Etype), nod(OLEN, n.Left.List.First(), nil), nodintconst(0))
  1254  			r = typecheck(r, Erv)
  1255  			r = walkexpr(r, init)
  1256  			r.Type = n.Type
  1257  			n = r
  1258  			break
  1259  		}
  1260  
  1261  		// Rewrite comparisons to short constant strings as length+byte-wise comparisons.
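         		// For example (illustrative): s == "go" can become
         		//   len(s) == 2 && s[0] == 'g' && s[1] == 'o'
         		// or, on architectures that combine loads, a single two-byte compare.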
  1262  		var cs, ncs *Node // const string, non-const string
  1263  		switch {
  1264  		case Isconst(n.Left, CTSTR) && Isconst(n.Right, CTSTR):
  1265  			// ignore; will be constant evaluated
  1266  		case Isconst(n.Left, CTSTR):
  1267  			cs = n.Left
  1268  			ncs = n.Right
  1269  		case Isconst(n.Right, CTSTR):
  1270  			cs = n.Right
  1271  			ncs = n.Left
  1272  		}
  1273  		if cs != nil {
  1274  			cmp := Op(n.Etype)
  1275  			// maxRewriteLen was chosen empirically.
  1276  			// It is the value that minimizes cmd/go file size
  1277  			// across most architectures.
  1278  			// See the commit description for CL 26758 for details.
  1279  			maxRewriteLen := 6
   1280  			// Some architectures can load an unaligned byte sequence as one word.
   1281  			// So we can cover longer strings with the same amount of code.
  1282  			canCombineLoads := false
  1283  			combine64bit := false
  1284  			// TODO: does this improve performance on any other architectures?
  1285  			switch thearch.LinkArch.Family {
  1286  			case sys.AMD64:
   1287  				// Larger compares require longer instructions, so keep this reasonably low.
  1288  				// Data from CL 26758 shows that longer strings are rare.
  1289  				// If we really want we can do 16 byte SSE comparisons in the future.
  1290  				maxRewriteLen = 16
  1291  				canCombineLoads = true
  1292  				combine64bit = true
  1293  			case sys.I386:
  1294  				maxRewriteLen = 8
  1295  				canCombineLoads = true
  1296  			}
  1297  			var and Op
  1298  			switch cmp {
  1299  			case OEQ:
  1300  				and = OANDAND
  1301  			case ONE:
  1302  				and = OOROR
  1303  			default:
  1304  				// Don't do byte-wise comparisons for <, <=, etc.
  1305  				// They're fairly complicated.
  1306  				// Length-only checks are ok, though.
  1307  				maxRewriteLen = 0
  1308  			}
  1309  			if s := cs.Val().U.(string); len(s) <= maxRewriteLen {
  1310  				if len(s) > 0 {
  1311  					ncs = safeexpr(ncs, init)
  1312  				}
  1313  				// TODO(marvin): Fix Node.EType type union.
  1314  				r := nod(cmp, nod(OLEN, ncs, nil), nodintconst(int64(len(s))))
  1315  				remains := len(s)
  1316  				for i := 0; remains > 0; {
  1317  					if remains == 1 || !canCombineLoads {
  1318  						cb := nodintconst(int64(s[i]))
  1319  						ncb := nod(OINDEX, ncs, nodintconst(int64(i)))
  1320  						r = nod(and, r, nod(cmp, ncb, cb))
  1321  						remains--
  1322  						i++
  1323  						continue
  1324  					}
  1325  					var step int
  1326  					var convType *types.Type
  1327  					switch {
  1328  					case remains >= 8 && combine64bit:
  1329  						convType = types.Types[TINT64]
  1330  						step = 8
  1331  					case remains >= 4:
  1332  						convType = types.Types[TUINT32]
  1333  						step = 4
  1334  					case remains >= 2:
  1335  						convType = types.Types[TUINT16]
  1336  						step = 2
  1337  					}
  1338  					ncsubstr := nod(OINDEX, ncs, nodintconst(int64(i)))
  1339  					ncsubstr = conv(ncsubstr, convType)
  1340  					csubstr := int64(s[i])
  1341  					// Calculate large constant from bytes as sequence of shifts and ors.
  1342  					// Like this:  uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
  1343  					// ssa will combine this into a single large load.
  1344  					for offset := 1; offset < step; offset++ {
  1345  						b := nod(OINDEX, ncs, nodintconst(int64(i+offset)))
  1346  						b = conv(b, convType)
  1347  						b = nod(OLSH, b, nodintconst(int64(8*offset)))
  1348  						ncsubstr = nod(OOR, ncsubstr, b)
  1349  						csubstr = csubstr | int64(s[i+offset])<<uint8(8*offset)
  1350  					}
  1351  					csubstrPart := nodintconst(csubstr)
   1352  					// Compare "step" bytes at once.
  1353  					r = nod(and, r, nod(cmp, csubstrPart, ncsubstr))
  1354  					remains -= step
  1355  					i += step
  1356  				}
  1357  				r = typecheck(r, Erv)
  1358  				r = walkexpr(r, init)
  1359  				r.Type = n.Type
  1360  				n = r
  1361  				break
  1362  			}
  1363  		}
  1364  
  1365  		var r *Node
  1366  		// TODO(marvin): Fix Node.EType type union.
  1367  		if Op(n.Etype) == OEQ || Op(n.Etype) == ONE {
  1368  			// prepare for rewrite below
  1369  			n.Left = cheapexpr(n.Left, init)
  1370  			n.Right = cheapexpr(n.Right, init)
  1371  
  1372  			r = mkcall("eqstring", types.Types[TBOOL], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
  1373  
  1374  			// quick check of len before full compare for == or !=
  1375  			// eqstring assumes that the lengths are equal
  1376  			// TODO(marvin): Fix Node.EType type union.
  1377  			if Op(n.Etype) == OEQ {
  1378  				// len(left) == len(right) && eqstring(left, right)
  1379  				r = nod(OANDAND, nod(OEQ, nod(OLEN, n.Left, nil), nod(OLEN, n.Right, nil)), r)
  1380  			} else {
  1381  				// len(left) != len(right) || !eqstring(left, right)
  1382  				r = nod(ONOT, r, nil)
  1383  				r = nod(OOROR, nod(ONE, nod(OLEN, n.Left, nil), nod(OLEN, n.Right, nil)), r)
  1384  			}
  1385  
  1386  			r = typecheck(r, Erv)
  1387  			r = walkexpr(r, nil)
  1388  		} else {
  1389  			// sys_cmpstring(s1, s2) :: 0
  1390  			r = mkcall("cmpstring", types.Types[TINT], init, conv(n.Left, types.Types[TSTRING]), conv(n.Right, types.Types[TSTRING]))
  1391  			// TODO(marvin): Fix Node.EType type union.
  1392  			r = nod(Op(n.Etype), r, nodintconst(0))
  1393  		}
  1394  
  1395  		r = typecheck(r, Erv)
  1396  		if !n.Type.IsBoolean() {
  1397  			Fatalf("cmp %v", n.Type)
  1398  		}
  1399  		r.Type = n.Type
  1400  		n = r
  1401  
  1402  	case OADDSTR:
  1403  		n = addstr(n, init)
  1404  
  1405  	case OAPPEND:
  1406  		// order should make sure we only see OAS(node, OAPPEND), which we handle above.
  1407  		Fatalf("append outside assignment")
  1408  
  1409  	case OCOPY:
  1410  		n = copyany(n, init, instrumenting && !compiling_runtime)
  1411  
  1412  		// cannot use chanfn - closechan takes any, not chan any
  1413  	case OCLOSE:
  1414  		fn := syslook("closechan")
  1415  
  1416  		fn = substArgTypes(fn, n.Left.Type)
  1417  		n = mkcall1(fn, nil, init, n.Left)
  1418  
  1419  	case OMAKECHAN:
  1420  		n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, types.Types[TINT64]))
  1421  
  1422  	case OMAKEMAP:
  1423  		t := n.Type
  1424  
  1425  		a := nodnil() // hmap buffer
  1426  		r := nodnil() // bucket buffer
  1427  		if n.Esc == EscNone {
  1428  			// Allocate hmap buffer on stack.
  1429  			var_ := temp(hmap(t))
  1430  
  1431  			a = nod(OAS, var_, nil) // zero temp
  1432  			a = typecheck(a, Etop)
  1433  			init.Append(a)
  1434  			a = nod(OADDR, var_, nil)
  1435  
  1436  			// Allocate one bucket on stack.
  1437  			// Maximum key/value size is 128 bytes, larger objects
  1438  			// are stored with an indirection. So max bucket size is 2048+eps.
  1439  			var_ = temp(mapbucket(t))
  1440  
  1441  			r = nod(OAS, var_, nil) // zero temp
  1442  			r = typecheck(r, Etop)
  1443  			init.Append(r)
  1444  			r = nod(OADDR, var_, nil)
  1445  		}
  1446  
  1447  		fn := syslook("makemap")
  1448  		fn = substArgTypes(fn, hmap(t), mapbucket(t), t.Key(), t.Val())
  1449  		n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, types.Types[TINT64]), a, r)
  1450  
  1451  	case OMAKESLICE:
  1452  		l := n.Left
  1453  		r := n.Right
  1454  		if r == nil {
  1455  			r = safeexpr(l, init)
  1456  			l = r
  1457  		}
  1458  		t := n.Type
  1459  		if n.Esc == EscNone {
  1460  			if !isSmallMakeSlice(n) {
  1461  				Fatalf("non-small OMAKESLICE with EscNone: %v", n)
  1462  			}
  1463  			// var arr [r]T
  1464  			// n = arr[:l]
  1465  			t = types.NewArray(t.Elem(), nonnegintconst(r)) // [r]T
  1466  			var_ := temp(t)
  1467  			a := nod(OAS, var_, nil) // zero temp
  1468  			a = typecheck(a, Etop)
  1469  			init.Append(a)
  1470  			r := nod(OSLICE, var_, nil) // arr[:l]
  1471  			r.SetSliceBounds(nil, l, nil)
  1472  			r = conv(r, n.Type) // in case n.Type is named.
  1473  			r = typecheck(r, Erv)
  1474  			r = walkexpr(r, init)
  1475  			n = r
  1476  		} else {
  1477  			// n escapes; set up a call to makeslice.
  1478  			// When len and cap can fit into int, use makeslice instead of
  1479  			// makeslice64, which is faster and shorter on 32 bit platforms.
  1480  
  1481  			if t.Elem().NotInHeap() {
  1482  				yyerror("%v is go:notinheap; heap allocation disallowed", t.Elem())
  1483  			}
  1484  
  1485  			len, cap := l, r
  1486  
  1487  			fnname := "makeslice64"
  1488  			argtype := types.Types[TINT64]
  1489  
  1490  			// typechecking guarantees that TIDEAL len/cap are positive and fit in an int.
  1491  			// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
  1492  			// will be handled by the negative range checks in makeslice during runtime.
  1493  			if (len.Type.IsKind(TIDEAL) || maxintval[len.Type.Etype].Cmp(maxintval[TUINT]) <= 0) &&
  1494  				(cap.Type.IsKind(TIDEAL) || maxintval[cap.Type.Etype].Cmp(maxintval[TUINT]) <= 0) {
  1495  				fnname = "makeslice"
  1496  				argtype = types.Types[TINT]
  1497  			}
  1498  
  1499  			fn := syslook(fnname)
  1500  			fn = substArgTypes(fn, t.Elem()) // any-1
  1501  			n = mkcall1(fn, t, init, typename(t.Elem()), conv(len, argtype), conv(cap, argtype))
  1502  		}
  1503  
  1504  	case ORUNESTR:
  1505  		a := nodnil()
  1506  		if n.Esc == EscNone {
  1507  			t := types.NewArray(types.Types[TUINT8], 4)
  1508  			var_ := temp(t)
  1509  			a = nod(OADDR, var_, nil)
  1510  		}
  1511  
  1512  		// intstring(*[4]byte, rune)
  1513  		n = mkcall("intstring", n.Type, init, a, conv(n.Left, types.Types[TINT64]))
  1514  
  1515  	case OARRAYBYTESTR:
  1516  		a := nodnil()
  1517  		if n.Esc == EscNone {
  1518  			// Create temporary buffer for string on stack.
  1519  			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
  1520  
  1521  			a = nod(OADDR, temp(t), nil)
  1522  		}
  1523  
  1524  		// slicebytetostring(*[32]byte, []byte) string;
  1525  		n = mkcall("slicebytetostring", n.Type, init, a, n.Left)
  1526  
  1527  		// slicebytetostringtmp([]byte) string;
  1528  	case OARRAYBYTESTRTMP:
  1529  		n.Left = walkexpr(n.Left, init)
  1530  
  1531  		if !instrumenting {
  1532  			// Let the backend handle OARRAYBYTESTRTMP directly
  1533  			// to avoid a function call to slicebytetostringtmp.
  1534  			break
  1535  		}
  1536  
  1537  		n = mkcall("slicebytetostringtmp", n.Type, init, n.Left)
  1538  
  1539  		// slicerunetostring(*[32]byte, []rune) string;
  1540  	case OARRAYRUNESTR:
  1541  		a := nodnil()
  1542  
  1543  		if n.Esc == EscNone {
  1544  			// Create temporary buffer for string on stack.
  1545  			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
  1546  
  1547  			a = nod(OADDR, temp(t), nil)
  1548  		}
  1549  
  1550  		n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
  1551  
   1552  		// stringtoslicebyte(*[32]byte, string) []byte;
  1553  	case OSTRARRAYBYTE:
  1554  		a := nodnil()
  1555  
  1556  		if n.Esc == EscNone {
  1557  			// Create temporary buffer for slice on stack.
  1558  			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
  1559  
  1560  			a = nod(OADDR, temp(t), nil)
  1561  		}
  1562  
  1563  		n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, types.Types[TSTRING]))
  1564  
  1565  	case OSTRARRAYBYTETMP:
  1566  		// []byte(string) conversion that creates a slice
  1567  		// referring to the actual string bytes.
  1568  		// This conversion is handled later by the backend and
  1569  		// is only for use by internal compiler optimizations
  1570  		// that know that the slice won't be mutated.
  1571  		// The only such case today is:
  1572  		// for i, c := range []byte(string)
  1573  		n.Left = walkexpr(n.Left, init)
  1574  
  1575  		// stringtoslicerune(*[32]rune, string) []rune
  1576  	case OSTRARRAYRUNE:
  1577  		a := nodnil()
  1578  
  1579  		if n.Esc == EscNone {
  1580  			// Create temporary buffer for slice on stack.
  1581  			t := types.NewArray(types.Types[TINT32], tmpstringbufsize)
  1582  
  1583  			a = nod(OADDR, temp(t), nil)
  1584  		}
  1585  
  1586  		n = mkcall("stringtoslicerune", n.Type, init, a, n.Left)
  1587  
  1588  		// ifaceeq(i1 any-1, i2 any-2) (ret bool);
  1589  	case OCMPIFACE:
  1590  		if !eqtype(n.Left.Type, n.Right.Type) {
  1591  			Fatalf("ifaceeq %v %v %v", n.Op, n.Left.Type, n.Right.Type)
  1592  		}
  1593  		var fn *Node
  1594  		if n.Left.Type.IsEmptyInterface() {
  1595  			fn = syslook("efaceeq")
  1596  		} else {
  1597  			fn = syslook("ifaceeq")
  1598  		}
  1599  
  1600  		n.Right = cheapexpr(n.Right, init)
  1601  		n.Left = cheapexpr(n.Left, init)
  1602  		lt := nod(OITAB, n.Left, nil)
  1603  		rt := nod(OITAB, n.Right, nil)
  1604  		ld := nod(OIDATA, n.Left, nil)
  1605  		rd := nod(OIDATA, n.Right, nil)
  1606  		ld.Type = types.Types[TUNSAFEPTR]
  1607  		rd.Type = types.Types[TUNSAFEPTR]
  1608  		ld.SetTypecheck(1)
  1609  		rd.SetTypecheck(1)
  1610  		call := mkcall1(fn, n.Type, init, lt, ld, rd)
  1611  
  1612  		// Check itable/type before full compare.
  1613  		// Note: short-circuited because order matters.
  1614  		// TODO(marvin): Fix Node.EType type union.
  1615  		var cmp *Node
  1616  		if Op(n.Etype) == OEQ {
  1617  			cmp = nod(OANDAND, nod(OEQ, lt, rt), call)
  1618  		} else {
  1619  			cmp = nod(OOROR, nod(ONE, lt, rt), nod(ONOT, call, nil))
  1620  		}
  1621  		cmp = typecheck(cmp, Erv)
  1622  		cmp = walkexpr(cmp, init)
  1623  		cmp.Type = n.Type
  1624  		n = cmp
  1625  
  1626  	case OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
  1627  		if isStaticCompositeLiteral(n) && !canSSAType(n.Type) {
  1628  			// n can be directly represented in the read-only data section.
  1629  			// Make direct reference to the static data. See issue 12841.
  1630  			vstat := staticname(n.Type)
  1631  			vstat.Name.SetReadonly(true)
  1632  			fixedlit(inInitFunction, initKindStatic, n, vstat, init)
  1633  			n = vstat
  1634  			n = typecheck(n, Erv)
  1635  			break
  1636  		}
  1637  		var_ := temp(n.Type)
  1638  		anylit(n, var_, init)
  1639  		n = var_
  1640  
  1641  	case OSEND:
  1642  		n1 := n.Right
  1643  		n1 = assignconv(n1, n.Left.Type.Elem(), "chan send")
  1644  		n1 = walkexpr(n1, init)
  1645  		n1 = nod(OADDR, n1, nil)
  1646  		n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, n.Left, n1)
  1647  
  1648  	case OCLOSURE:
  1649  		n = walkclosure(n, init)
  1650  
  1651  	case OCALLPART:
  1652  		n = walkpartialcall(n, init)
  1653  	}
  1654  
  1655  	// Expressions that are constant at run time but not
  1656  	// considered const by the language spec are not turned into
  1657  	// constants until walk. For example, if n is y%1 == 0, the
  1658  	// walk of y%1 may have replaced it by 0.
  1659  	// Check whether n with its updated args is itself now a constant.
  1660  	t := n.Type
  1661  	evconst(n)
  1662  	if n.Type != t {
  1663  		Fatalf("evconst changed Type: %v had type %v, now %v", n, t, n.Type)
  1664  	}
  1665  	if n.Op == OLITERAL {
  1666  		n = typecheck(n, Erv)
  1667  		// Emit string symbol now to avoid emitting
  1668  		// any string symbols concurrently during the backend.
  1669  		if s, ok := n.Val().U.(string); ok {
  1670  			_ = stringsym(s)
  1671  		}
  1672  	}
  1673  
  1674  	updateHasCall(n)
  1675  
  1676  	if Debug['w'] != 0 && n != nil {
  1677  		Dump("walk", n)
  1678  	}
  1679  
  1680  	lineno = lno
  1681  	return n
  1682  }
  1683  
  1684  // TODO(josharian): combine this with its caller and simplify
  1685  func reduceSlice(n *Node) *Node {
  1686  	low, high, max := n.SliceBounds()
  1687  	if high != nil && high.Op == OLEN && samesafeexpr(n.Left, high.Left) {
  1688  		// Reduce x[i:len(x)] to x[i:].
  1689  		high = nil
  1690  	}
  1691  	n.SetSliceBounds(low, high, max)
  1692  	if (n.Op == OSLICE || n.Op == OSLICESTR) && low == nil && high == nil {
  1693  		// Reduce x[:] to x.
  1694  		if Debug_slice > 0 {
  1695  			Warn("slice: omit slice operation")
  1696  		}
  1697  		return n.Left
  1698  	}
  1699  	return n
  1700  }
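
        // Added commentary (not part of the original source): a minimal sketch of
        // what reduceSlice does to a slice or string expression x that is free of
        // side effects:
        //
        //	x[i:len(x)]  ->  x[i:]   // a high bound of len(x) over the same x is dropped
        //	x[:]         ->  x       // the whole slice op is dropped (Warn fires when Debug_slice is set)
        //
        // Three-index slices (x[i:j:k]) are not reduced to x, since the op check
        // above matches only OSLICE and OSLICESTR.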
  1701  
  1702  func ascompatee1(l *Node, r *Node, init *Nodes) *Node {
  1703  	// convas will turn map assigns into function calls,
  1704  	// making it impossible for reorder3 to work.
  1705  	n := nod(OAS, l, r)
  1706  
  1707  	if l.Op == OINDEXMAP {
  1708  		return n
  1709  	}
  1710  
  1711  	return convas(n, init)
  1712  }
  1713  
  1714  func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
  1715  	// check assignment of an expression list to
  1716  	// an expression list. called in
  1717  	//	expr-list = expr-list
  1718  
  1719  	// ensure order of evaluation for function calls
  1720  	for i := range nl {
  1721  		nl[i] = safeexpr(nl[i], init)
  1722  	}
  1723  	for i1 := range nr {
  1724  		nr[i1] = safeexpr(nr[i1], init)
  1725  	}
  1726  
  1727  	var nn []*Node
  1728  	i := 0
  1729  	for ; i < len(nl); i++ {
  1730  		if i >= len(nr) {
  1731  			break
  1732  		}
  1733  		// Do not generate 'x = x' during return. See issue 4014.
  1734  		if op == ORETURN && samesafeexpr(nl[i], nr[i]) {
  1735  			continue
  1736  		}
  1737  		nn = append(nn, ascompatee1(nl[i], nr[i], init))
  1738  	}
  1739  
  1740  	// cannot happen: caller checked that lists had same length
  1741  	if i < len(nl) || i < len(nr) {
  1742  		var nln, nrn Nodes
  1743  		nln.Set(nl)
  1744  		nrn.Set(nr)
  1745  		Fatalf("error in shape across %+v %v %+v / %d %d [%s]", nln, op, nrn, len(nl), len(nr), Curfn.funcname())
  1746  	}
  1747  	return nn
  1748  }
  1749  
  1750  // l is an lvalue and rt is the type of an rvalue.
  1751  // fncall reports whether assigning such an rvalue to l may imply a function
  1752  // call: evaluating or storing to the lvalue needs a call (including a write
  1753  // barrier or map assignment), or a conversion between the types is needed.
  1754  func fncall(l *Node, rt *types.Type) bool {
  1755  	if l.HasCall() || l.Op == OINDEXMAP {
  1756  		return true
  1757  	}
  1758  	if needwritebarrier(l) {
  1759  		return true
  1760  	}
  1761  	if eqtype(l.Type, rt) {
  1762  		return false
  1763  	}
  1764  	return true
  1765  }
  1766  
  1767  // check assignment of a type list (function results) to
  1768  // an expression list. called in
  1769  //	expr-list = func()
  1770  func ascompatet(nl Nodes, nr *types.Type) []*Node {
  1771  	if nl.Len() != nr.NumFields() {
  1772  		Fatalf("ascompatet: assignment count mismatch: %d = %d", nl.Len(), nr.NumFields())
  1773  	}
  1774  
  1775  	var nn, mm Nodes
  1776  	for i, l := range nl.Slice() {
  1777  		if isblank(l) {
  1778  			continue
  1779  		}
  1780  		r := nr.Field(i)
  1781  
  1782  		// any lv that causes a fn call must be
  1783  		// deferred until all the return arguments
  1784  		// have been pulled from the output arguments
  1785  		if fncall(l, r.Type) {
  1786  			tmp := temp(r.Type)
  1787  			tmp = typecheck(tmp, Erv)
  1788  			a := nod(OAS, l, tmp)
  1789  			a = convas(a, &mm)
  1790  			mm.Append(a)
  1791  			l = tmp
  1792  		}
  1793  
  1794  		a := nod(OAS, l, nodarg(r, 0))
  1795  		a = convas(a, &nn)
  1796  		updateHasCall(a)
  1797  		if a.HasCall() {
  1798  			Dump("ascompatet ucount", a)
  1799  			Fatalf("ascompatet: too many function calls evaluating parameters")
  1800  		}
  1801  
  1802  		nn.Append(a)
  1803  	}
  1804  	return append(nn.Slice(), mm.Slice()...)
  1805  }
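
        // Added commentary (not part of the original source): a rough sketch of
        // the deferral above for a hypothetical assignment
        //
        //	m[k], x = f()
        //
        // The map store is itself a runtime call, so it must wait until both
        // result slots have been read:
        //
        //	tmp := <f's result slot 0>
        //	x = <f's result slot 1>
        //	m[k] = tmp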
  1806  
  1807  // nodarg returns a Node for the function argument denoted by t,
  1808  // which is either the entire function argument or result struct (t is a struct *types.Type)
  1809  // or a specific argument (t is a *types.Field within a struct *types.Type).
  1810  //
  1811  // If fp is 0, the node is for use by a caller invoking the given
  1812  // function, preparing the arguments before the call
  1813  // or retrieving the results after the call.
  1814  // In this case, the node will correspond to an outgoing argument
  1815  // slot like 8(SP).
  1816  //
  1817  // If fp is 1, the node is for use by the function itself
  1818  // (the callee), to retrieve its arguments or write its results.
  1819  // In this case the node will be an ONAME with an appropriate
  1820  // type and offset.
  1821  func nodarg(t interface{}, fp int) *Node {
  1822  	var n *Node
  1823  
  1824  	var funarg types.Funarg
  1825  	switch t := t.(type) {
  1826  	default:
  1827  		Fatalf("bad nodarg %T(%v)", t, t)
  1828  
  1829  	case *types.Type:
  1830  		// Entire argument struct, not just one arg
  1831  		if !t.IsFuncArgStruct() {
  1832  			Fatalf("nodarg: bad type %v", t)
  1833  		}
  1834  		funarg = t.StructType().Funarg
  1835  
  1836  		// Build fake variable name for whole arg struct.
  1837  		n = newname(lookup(".args"))
  1838  		n.Type = t
  1839  		first := t.Field(0)
  1840  		if first == nil {
  1841  			Fatalf("nodarg: bad struct")
  1842  		}
  1843  		if first.Offset == BADWIDTH {
  1844  			Fatalf("nodarg: offset not computed for %v", t)
  1845  		}
  1846  		n.Xoffset = first.Offset
  1847  
  1848  	case *types.Field:
  1849  		funarg = t.Funarg
  1850  		if fp == 1 {
  1851  			// NOTE(rsc): This should be using t.Nname directly,
  1852  			// except in the case where t.Nname.Sym is the blank symbol and
  1853  			// so the assignment would be discarded during code generation.
  1854  			// In that case we need to make a new node, and doing so does
  1855  			// no harm to optimization passes. But otherwise we should
  1856  			// definitely be using the actual declaration and not a newly built node.
  1857  			// The extra Fatalf checks here are verifying that this is the case,
  1858  			// without changing the actual logic (at time of writing, it's getting
  1859  			// toward time for the Go 1.7 beta).
  1860  			// At some quieter time (assuming we've never seen these Fatalfs happen)
  1861  			// we could change this code to use "expect" directly.
  1862  			expect := asNode(t.Nname)
  1863  			if expect.isParamHeapCopy() {
  1864  				expect = expect.Name.Param.Stackcopy
  1865  			}
  1866  
  1867  			for _, n := range Curfn.Func.Dcl {
  1868  				if (n.Class() == PPARAM || n.Class() == PPARAMOUT) && !t.Sym.IsBlank() && n.Sym == t.Sym {
  1869  					if n != expect {
  1870  						Fatalf("nodarg: unexpected node: %v (%p %v) vs %v (%p %v)", n, n, n.Op, asNode(t.Nname), asNode(t.Nname), asNode(t.Nname).Op)
  1871  					}
  1872  					return n
  1873  				}
  1874  			}
  1875  
  1876  			if !expect.Sym.IsBlank() {
  1877  				Fatalf("nodarg: did not find node in dcl list: %v", expect)
  1878  			}
  1879  		}
  1880  
  1881  		// Build fake name for individual variable.
  1882  		// This is safe because if there was a real declared name
  1883  		// we'd have used it above.
  1884  		n = newname(lookup("__"))
  1885  		n.Type = t.Type
  1886  		if t.Offset == BADWIDTH {
  1887  			Fatalf("nodarg: offset not computed for %v", t)
  1888  		}
  1889  		n.Xoffset = t.Offset
  1890  		n.Orig = asNode(t.Nname)
  1891  	}
  1892  
  1893  	// Rewrite argument named _ to __,
  1894  	// or else the assignment to _ will be
  1895  	// discarded during code generation.
  1896  	if isblank(n) {
  1897  		n.Sym = lookup("__")
  1898  	}
  1899  
  1900  	switch fp {
  1901  	default:
  1902  		Fatalf("bad fp")
  1903  
  1904  	case 0: // preparing arguments for call
  1905  		n.Op = OINDREGSP
  1906  		n.Xoffset += Ctxt.FixedFrameSize()
  1907  
  1908  	case 1: // reading arguments inside call
  1909  		n.SetClass(PPARAM)
  1910  		if funarg == types.FunargResults {
  1911  			n.SetClass(PPARAMOUT)
  1912  		}
  1913  	}
  1914  
  1915  	n.SetTypecheck(1)
  1916  	n.SetAddrtaken(true) // keep optimizers at bay
  1917  	return n
  1918  }
  1919  
  1920  // package all the arguments that match a ... T parameter into a []T.
  1921  func mkdotargslice(typ *types.Type, args []*Node, init *Nodes, ddd *Node) *Node {
  1922  	esc := uint16(EscUnknown)
  1923  	if ddd != nil {
  1924  		esc = ddd.Esc
  1925  	}
  1926  
  1927  	if len(args) == 0 {
  1928  		n := nodnil()
  1929  		n.Type = typ
  1930  		return n
  1931  	}
  1932  
  1933  	n := nod(OCOMPLIT, nil, typenod(typ))
  1934  	if ddd != nil && prealloc[ddd] != nil {
  1935  		prealloc[n] = prealloc[ddd] // temporary to use
  1936  	}
  1937  	n.List.Set(args)
  1938  	n.Esc = esc
  1939  	n = typecheck(n, Erv)
  1940  	if n.Type == nil {
  1941  		Fatalf("mkdotargslice: typecheck failed")
  1942  	}
  1943  	n = walkexpr(n, init)
  1944  	return n
  1945  }
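
        // Added commentary (not part of the original source): assuming a
        // hypothetical variadic function func f(xs ...int), the calls below are
        // packaged as follows before reaching the backend:
        //
        //	f(1, 2, 3)   // xs = []int{1, 2, 3} (a slice composite literal)
        //	f()          // xs = a nil node typed []int
        //	f(s...)      // a ... call: s is passed through unchanged (see ascompatte)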
  1946  
  1947  // check assignment of an expression list to
  1948  // a type list. called in
  1949  //	return expr-list
  1950  //	func(expr-list)
  1951  func ascompatte(call *Node, isddd bool, lhs *types.Type, rhs []*Node, fp int, init *Nodes) []*Node {
  1952  	var nn []*Node
  1953  
  1954  	// f(g()) where g has multiple return values
  1955  	if len(rhs) == 1 && rhs[0].Type.IsFuncArgStruct() {
  1956  		// optimization - can do block copy
  1957  		if eqtypenoname(rhs[0].Type, lhs) {
  1958  			nl := nodarg(lhs, fp)
  1959  			nr := nod(OCONVNOP, rhs[0], nil)
  1960  			nr.Type = nl.Type
  1961  			nn = []*Node{convas(nod(OAS, nl, nr), init)}
  1962  			goto ret
  1963  		}
  1964  
  1965  		// conversions involved.
  1966  		// copy into temporaries.
  1967  		var tmps []*Node
  1968  		for _, nr := range rhs[0].Type.FieldSlice() {
  1969  			tmps = append(tmps, temp(nr.Type))
  1970  		}
  1971  
  1972  		a := nod(OAS2, nil, nil)
  1973  		a.List.Set(tmps)
  1974  		a.Rlist.Set(rhs)
  1975  		a = typecheck(a, Etop)
  1976  		a = walkstmt(a)
  1977  		init.Append(a)
  1978  
  1979  		rhs = tmps
  1980  	}
  1981  
  1982  	// For each parameter (LHS), assign its corresponding argument (RHS).
  1983  	// If there's a ... parameter (which is only valid as the final
  1984  	// parameter) and this is not a ... call expression,
  1985  	// then assign the remaining arguments as a slice.
  1986  	for i, nl := range lhs.FieldSlice() {
  1987  		var nr *Node
  1988  		if nl.Isddd() && !isddd {
  1989  			nr = mkdotargslice(nl.Type, rhs[i:], init, call.Right)
  1990  		} else {
  1991  			nr = rhs[i]
  1992  		}
  1993  
  1994  		a := nod(OAS, nodarg(nl, fp), nr)
  1995  		a = convas(a, init)
  1996  		nn = append(nn, a)
  1997  	}
  1998  
  1999  ret:
  2000  	for _, n := range nn {
  2001  		n.SetTypecheck(1)
  2002  	}
  2003  	return nn
  2004  }
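
        // Added commentary (not part of the original source): for a hypothetical
        // call f(g()), if g's results match f's parameters exactly the whole
        // result struct is block-copied into the argument area; otherwise the
        // results are first spilled into temporaries:
        //
        //	t1, t2 := g()
        //	<arg 0> = t1   // per-parameter assignments, with conversions applied by convas
        //	<arg 1> = t2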
  2005  
  2006  // generate code for print
  2007  func walkprint(nn *Node, init *Nodes) *Node {
  2008  	var r *Node
  2009  	var n *Node
  2010  	var on *Node
  2011  	var t *types.Type
  2012  	var et types.EType
  2013  
  2014  	op := nn.Op
  2015  	all := nn.List
  2016  	var calls []*Node
  2017  	notfirst := false
  2018  
  2019  	// Hoist all the argument evaluation up before the lock.
  2020  	walkexprlistcheap(all.Slice(), init)
  2021  
  2022  	calls = append(calls, mkcall("printlock", nil, init))
  2023  	for i1, n1 := range all.Slice() {
  2024  		if notfirst {
  2025  			calls = append(calls, mkcall("printsp", nil, init))
  2026  		}
  2027  
  2028  		notfirst = op == OPRINTN
  2029  
  2030  		n = n1
  2031  		if n.Op == OLITERAL {
  2032  			switch n.Val().Ctype() {
  2033  			case CTRUNE:
  2034  				n = defaultlit(n, types.Runetype)
  2035  
  2036  			case CTINT:
  2037  				n = defaultlit(n, types.Types[TINT64])
  2038  
  2039  			case CTFLT:
  2040  				n = defaultlit(n, types.Types[TFLOAT64])
  2041  			}
  2042  		}
  2043  
  2044  		if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
  2045  			n = defaultlit(n, types.Types[TINT64])
  2046  		}
  2047  		n = defaultlit(n, nil)
  2048  		all.SetIndex(i1, n)
  2049  		if n.Type == nil || n.Type.Etype == TFORW {
  2050  			continue
  2051  		}
  2052  
  2053  		t = n.Type
  2054  		et = n.Type.Etype
  2055  		if n.Type.IsInterface() {
  2056  			if n.Type.IsEmptyInterface() {
  2057  				on = syslook("printeface")
  2058  			} else {
  2059  				on = syslook("printiface")
  2060  			}
  2061  			on = substArgTypes(on, n.Type) // any-1
  2062  		} else if n.Type.IsPtr() || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR {
  2063  			on = syslook("printpointer")
  2064  			on = substArgTypes(on, n.Type) // any-1
  2065  		} else if n.Type.IsSlice() {
  2066  			on = syslook("printslice")
  2067  			on = substArgTypes(on, n.Type) // any-1
  2068  		} else if isInt[et] {
  2069  			if et == TUINT64 {
  2070  				if isRuntimePkg(t.Sym.Pkg) && t.Sym.Name == "hex" {
  2071  					on = syslook("printhex")
  2072  				} else {
  2073  					on = syslook("printuint")
  2074  				}
  2075  			} else {
  2076  				on = syslook("printint")
  2077  			}
  2078  		} else if isFloat[et] {
  2079  			on = syslook("printfloat")
  2080  		} else if isComplex[et] {
  2081  			on = syslook("printcomplex")
  2082  		} else if et == TBOOL {
  2083  			on = syslook("printbool")
  2084  		} else if et == TSTRING {
  2085  			on = syslook("printstring")
  2086  		} else {
  2087  			badtype(OPRINT, n.Type, nil)
  2088  			continue
  2089  		}
  2090  
  2091  		t = on.Type.Params().Field(0).Type
  2092  
  2093  		if !eqtype(t, n.Type) {
  2094  			n = nod(OCONV, n, nil)
  2095  			n.Type = t
  2096  		}
  2097  
  2098  		r = nod(OCALL, on, nil)
  2099  		r.List.Append(n)
  2100  		calls = append(calls, r)
  2101  	}
  2102  
  2103  	if op == OPRINTN {
  2104  		calls = append(calls, mkcall("printnl", nil, nil))
  2105  	}
  2106  
  2107  	calls = append(calls, mkcall("printunlock", nil, init))
  2108  
  2109  	typecheckslice(calls, Etop)
  2110  	walkexprlist(calls, init)
  2111  
  2112  	r = nod(OEMPTY, nil, nil)
  2113  	r = typecheck(r, Etop)
  2114  	r = walkexpr(r, init)
  2115  	r.Ninit.Set(calls)
  2116  	return r
  2117  }
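
        // Added commentary (not part of the original source): an approximate
        // lowering of a hypothetical statement, with x an int and s a string:
        //
        //	println(x, s)
        //
        // becomes, wrapped in the print lock,
        //
        //	printlock()
        //	printint(int64(x))
        //	printsp()
        //	printstring(s)
        //	printnl()
        //	printunlock()
        //
        // The helper chosen for each argument depends on its type (printuint,
        // printfloat, printpointer, printiface, ...).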
  2118  
  2119  func callnew(t *types.Type) *Node {
  2120  	if t.NotInHeap() {
  2121  		yyerror("%v is go:notinheap; heap allocation disallowed", t)
  2122  	}
  2123  	dowidth(t)
  2124  	fn := syslook("newobject")
  2125  	fn = substArgTypes(fn, t)
  2126  	v := mkcall1(fn, types.NewPtr(t), nil, typename(t))
  2127  	v.SetNonNil(true)
  2128  	return v
  2129  }
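
        // Added commentary (not part of the original source): a sketch of the
        // lowering above. new(T) becomes a runtime allocation taking T's type
        // descriptor and yielding a pointer the compiler may assume is non-nil:
        //
        //	p := new(T)   ->   p := newobject(<type descriptor for T>)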
  2130  
  2131  func iscallret(n *Node) bool {
  2132  	n = outervalue(n)
  2133  	return n.Op == OINDREGSP
  2134  }
  2135  
  2136  func isstack(n *Node) bool {
  2137  	n = outervalue(n)
  2138  
  2139  	// If n is *autotmp and autotmp = &foo, replace n with foo.
  2140  	// We introduce such temps when initializing struct literals.
  2141  	if n.Op == OIND && n.Left.Op == ONAME && n.Left.IsAutoTmp() {
  2142  		defn := n.Left.Name.Defn
  2143  		if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR {
  2144  			n = defn.Right.Left
  2145  		}
  2146  	}
  2147  
  2148  	switch n.Op {
  2149  	case OINDREGSP:
  2150  		return true
  2151  
  2152  	case ONAME:
  2153  		switch n.Class() {
  2154  		case PAUTO, PPARAM, PPARAMOUT:
  2155  			return true
  2156  		}
  2157  	}
  2158  
  2159  	return false
  2160  }
  2161  
  2162  // isReflectHeaderDataField reports whether l is an expression p.Data
  2163  // where p has type reflect.SliceHeader or reflect.StringHeader.
  2164  func isReflectHeaderDataField(l *Node) bool {
  2165  	if l.Type != types.Types[TUINTPTR] {
  2166  		return false
  2167  	}
  2168  
  2169  	var tsym *types.Sym
  2170  	switch l.Op {
  2171  	case ODOT:
  2172  		tsym = l.Left.Type.Sym
  2173  	case ODOTPTR:
  2174  		tsym = l.Left.Type.Elem().Sym
  2175  	default:
  2176  		return false
  2177  	}
  2178  
  2179  	if tsym == nil || l.Sym.Name != "Data" || tsym.Pkg.Path != "reflect" {
  2180  		return false
  2181  	}
  2182  	return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
  2183  }
  2184  
  2185  // Do we need a write barrier for assigning to l?
  2186  func needwritebarrier(l *Node) bool {
  2187  	if !use_writebarrier {
  2188  		return false
  2189  	}
  2190  
  2191  	if l == nil || isblank(l) {
  2192  		return false
  2193  	}
  2194  
  2195  	// No write barrier for write to stack.
  2196  	if isstack(l) {
  2197  		return false
  2198  	}
  2199  
  2200  	// Package unsafe's documentation says storing pointers into
  2201  	// reflect.SliceHeader and reflect.StringHeader's Data fields
  2202  	// is valid, even though they have type uintptr (#19168).
  2203  	if isReflectHeaderDataField(l) {
  2204  		return true
  2205  	}
  2206  
  2207  	// No write barrier for write of non-pointers.
  2208  	dowidth(l.Type)
  2209  	if !types.Haspointers(l.Type) {
  2210  		return false
  2211  	}
  2212  
  2213  	// No write barrier if this is a pointer to a go:notinheap
  2214  	// type, since the write barrier's inheap(ptr) check will fail.
  2215  	if l.Type.IsPtr() && l.Type.Elem().NotInHeap() {
  2216  		return false
  2217  	}
  2218  
  2219  	// TODO: We can eliminate write barriers if we know *both* the
  2220  	// current and new content of the slot must already be shaded.
  2221  	// We know a pointer is shaded if it's nil, or points to
  2222  	// static data, a global (variable or function), or the stack.
  2223  	// The nil optimization could be particularly useful for
  2224  	// writes to just-allocated objects. Unfortunately, knowing
  2225  	// the "current" value of the slot requires flow analysis.
  2226  
  2227  	// Otherwise, be conservative and use write barrier.
  2228  	return true
  2229  }
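
        // Added commentary (not part of the original source): some sketched
        // outcomes of the checks above, assuming write barriers are enabled:
        //
        //	localPtr = p            // local stack slot: no barrier
        //	globalPtr = p           // global pointer slot: barrier
        //	x.count = 42            // no pointers in the stored value: no barrier
        //	hdr.Data = uintptr(p)   // hdr *reflect.SliceHeader: barrier (#19168)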
  2230  
  2231  func convas(n *Node, init *Nodes) *Node {
  2232  	if n.Op != OAS {
  2233  		Fatalf("convas: not OAS %v", n.Op)
  2234  	}
  2235  
  2236  	n.SetTypecheck(1)
  2237  
  2238  	var lt *types.Type
  2239  	var rt *types.Type
  2240  	if n.Left == nil || n.Right == nil {
  2241  		goto out
  2242  	}
  2243  
  2244  	lt = n.Left.Type
  2245  	rt = n.Right.Type
  2246  	if lt == nil || rt == nil {
  2247  		goto out
  2248  	}
  2249  
  2250  	if isblank(n.Left) {
  2251  		n.Right = defaultlit(n.Right, nil)
  2252  		goto out
  2253  	}
  2254  
  2255  	if !eqtype(lt, rt) {
  2256  		n.Right = assignconv(n.Right, lt, "assignment")
  2257  		n.Right = walkexpr(n.Right, init)
  2258  	}
  2259  	dowidth(n.Right.Type)
  2260  
  2261  out:
  2262  	updateHasCall(n)
  2263  	return n
  2264  }
  2265  
  2266  // from ascompat[te]
  2267  // evaluating actual function arguments.
  2268  //	f(a,b)
  2269  // if there is exactly one function call among the arguments,
  2270  // it is evaluated first. otherwise all but the last call are
  2271  // saved into temporaries before the argument assignments run.
  2272  func reorder1(all []*Node) []*Node {
  2273  	c := 0 // function calls
  2274  	t := 0 // total parameters
  2275  
  2276  	for _, n := range all {
  2277  		t++
  2278  		updateHasCall(n)
  2279  		if n.HasCall() {
  2280  			c++
  2281  		}
  2282  	}
  2283  
  2284  	if c == 0 || t == 1 {
  2285  		return all
  2286  	}
  2287  
  2288  	var g []*Node // fncalls assigned to tempnames
  2289  	var f *Node   // last fncall assigned to stack
  2290  	var r []*Node // non fncalls and tempnames assigned to stack
  2291  	d := 0
  2292  	var a *Node
  2293  	for _, n := range all {
  2294  		if !n.HasCall() {
  2295  			r = append(r, n)
  2296  			continue
  2297  		}
  2298  
  2299  		d++
  2300  		if d == c {
  2301  			f = n
  2302  			continue
  2303  		}
  2304  
  2305  		// make assignment of fncall to tempname
  2306  		a = temp(n.Right.Type)
  2307  
  2308  		a = nod(OAS, a, n.Right)
  2309  		g = append(g, a)
  2310  
  2311  		// put normal arg assignment on list
  2312  		// with fncall replaced by tempname
  2313  		n.Right = a.Left
  2314  
  2315  		r = append(r, n)
  2316  	}
  2317  
  2318  	if f != nil {
  2319  		g = append(g, f)
  2320  	}
  2321  	return append(g, r...)
  2322  }
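
        // Added commentary (not part of the original source): a rough sketch for
        // a hypothetical call f(g(), h(), x), whose argument assignments contain
        // two calls. All but the last call are saved first:
        //
        //	t := g()
        //	<arg 1> = h()   // the final call, assigned directly
        //	<arg 0> = t
        //	<arg 2> = x     // call-free assignments keep their relative order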
  2323  
  2324  // from ascompat[ee]
  2325  //	a,b = c,d
  2326  // simultaneous assignment. there cannot
  2327  // be later use of an earlier lvalue.
  2328  //
  2329  // function calls have been removed.
  2330  func reorder3(all []*Node) []*Node {
  2331  	var l *Node
  2332  
  2333  	// If a needed expression may be affected by an
  2334  	// earlier assignment, make an early copy of that
  2335  	// expression and use the copy instead.
  2336  	var early []*Node
  2337  
  2338  	var mapinit Nodes
  2339  	for i, n := range all {
  2340  		l = n.Left
  2341  
  2342  		// Save subexpressions needed on left side.
  2343  		// Drill through non-dereferences.
  2344  		for {
  2345  			if l.Op == ODOT || l.Op == OPAREN {
  2346  				l = l.Left
  2347  				continue
  2348  			}
  2349  
  2350  			if l.Op == OINDEX && l.Left.Type.IsArray() {
  2351  				l.Right = reorder3save(l.Right, all, i, &early)
  2352  				l = l.Left
  2353  				continue
  2354  			}
  2355  
  2356  			break
  2357  		}
  2358  
  2359  		switch l.Op {
  2360  		default:
  2361  			Fatalf("reorder3 unexpected lvalue %#v", l.Op)
  2362  
  2363  		case ONAME:
  2364  			break
  2365  
  2366  		case OINDEX, OINDEXMAP:
  2367  			l.Left = reorder3save(l.Left, all, i, &early)
  2368  			l.Right = reorder3save(l.Right, all, i, &early)
  2369  			if l.Op == OINDEXMAP {
  2370  				all[i] = convas(all[i], &mapinit)
  2371  			}
  2372  
  2373  		case OIND, ODOTPTR:
  2374  			l.Left = reorder3save(l.Left, all, i, &early)
  2375  		}
  2376  
  2377  		// Save expression on right side.
  2378  		all[i].Right = reorder3save(all[i].Right, all, i, &early)
  2379  	}
  2380  
  2381  	early = append(mapinit.Slice(), early...)
  2382  	return append(early, all...)
  2383  }
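
        // Added commentary (not part of the original source): for the classic swap
        //
        //	a, b = b, a
        //
        // the first assignment to a would clobber the a still needed on the right
        // of the second, so reorder3save emits an early copy:
        //
        //	t := a
        //	a = b
        //	b = t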
  2384  
  2385  // if the evaluation of n would be affected by the
  2386  // assignments in all up to but not including the ith assignment,
  2387  // copy n into a temporary, append that assignment to *early,
  2388  // and return the temporary in place of n.
  2389  // The result of reorder3save MUST be assigned back to n, e.g.
  2390  // 	n.Left = reorder3save(n.Left, all, i, early)
  2391  func reorder3save(n *Node, all []*Node, i int, early *[]*Node) *Node {
  2392  	if !aliased(n, all, i) {
  2393  		return n
  2394  	}
  2395  
  2396  	q := temp(n.Type)
  2397  	q = nod(OAS, q, n)
  2398  	q = typecheck(q, Etop)
  2399  	*early = append(*early, q)
  2400  	return q.Left
  2401  }
  2402  
  2403  // what's the outer value that a write to n affects?
  2404  // outer value means containing struct or array.
  2405  func outervalue(n *Node) *Node {
  2406  	for {
  2407  		if n.Op == OXDOT {
  2408  			Fatalf("OXDOT in walk")
  2409  		}
  2410  		if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP {
  2411  			n = n.Left
  2412  			continue
  2413  		}
  2414  
  2415  		if n.Op == OINDEX && n.Left.Type != nil && n.Left.Type.IsArray() {
  2416  			n = n.Left
  2417  			continue
  2418  		}
  2419  
  2420  		break
  2421  	}
  2422  
  2423  	return n
  2424  }
  2425  
  2426  // Is it possible that the computation of n might be
  2427  // affected by writes in all up to but not including the ith element?
  2428  func aliased(n *Node, all []*Node, i int) bool {
  2429  	if n == nil {
  2430  		return false
  2431  	}
  2432  
  2433  	// Treat all fields of a struct as referring to the whole struct.
  2434  	// We could do better but we would have to keep track of the fields.
  2435  	for n.Op == ODOT {
  2436  		n = n.Left
  2437  	}
  2438  
  2439  	// Look for obvious aliasing: a variable being assigned
  2440  	// during the all list and appearing in n.
  2441  	// Also record whether there are any writes to main memory.
  2442  	// Also record whether there are any writes to variables
  2443  	// whose addresses have been taken.
  2444  	memwrite := 0
  2445  
  2446  	varwrite := 0
  2447  	var a *Node
  2448  	for _, an := range all[:i] {
  2449  		a = outervalue(an.Left)
  2450  
  2451  		for a.Op == ODOT {
  2452  			a = a.Left
  2453  		}
  2454  
  2455  		if a.Op != ONAME {
  2456  			memwrite = 1
  2457  			continue
  2458  		}
  2459  
  2460  		switch n.Class() {
  2461  		default:
  2462  			varwrite = 1
  2463  			continue
  2464  
  2465  		case PAUTO, PPARAM, PPARAMOUT:
  2466  			if n.Addrtaken() {
  2467  				varwrite = 1
  2468  				continue
  2469  			}
  2470  
  2471  			if vmatch2(a, n) {
  2472  				// Direct hit.
  2473  				return true
  2474  			}
  2475  		}
  2476  	}
  2477  
  2478  	// The variables being written do not appear in n.
  2479  	// However, n might refer to computed addresses
  2480  	// that are being written.
  2481  
  2482  	// If no computed addresses are affected by the writes, no aliasing.
  2483  	if memwrite == 0 && varwrite == 0 {
  2484  		return false
  2485  	}
  2486  
  2487  	// If n does not refer to computed addresses
  2488  	// (that is, if n only refers to variables whose addresses
  2489  	// have not been taken), no aliasing.
  2490  	if varexpr(n) {
  2491  		return false
  2492  	}
  2493  
  2494  	// Otherwise, both the writes and n refer to computed memory addresses.
  2495  	// Assume that they might conflict.
  2496  	return true
  2497  }
  2498  
  2499  // does the evaluation of n only refer to variables
  2500  // whose addresses have not been taken?
  2501  // (and no other memory)
  2502  func varexpr(n *Node) bool {
  2503  	if n == nil {
  2504  		return true
  2505  	}
  2506  
  2507  	switch n.Op {
  2508  	case OLITERAL:
  2509  		return true
  2510  
  2511  	case ONAME:
  2512  		switch n.Class() {
  2513  		case PAUTO, PPARAM, PPARAMOUT:
  2514  			if !n.Addrtaken() {
  2515  				return true
  2516  			}
  2517  		}
  2518  
  2519  		return false
  2520  
  2521  	case OADD,
  2522  		OSUB,
  2523  		OOR,
  2524  		OXOR,
  2525  		OMUL,
  2526  		ODIV,
  2527  		OMOD,
  2528  		OLSH,
  2529  		ORSH,
  2530  		OAND,
  2531  		OANDNOT,
  2532  		OPLUS,
  2533  		OMINUS,
  2534  		OCOM,
  2535  		OPAREN,
  2536  		OANDAND,
  2537  		OOROR,
  2538  		OCONV,
  2539  		OCONVNOP,
  2540  		OCONVIFACE,
  2541  		ODOTTYPE:
  2542  		return varexpr(n.Left) && varexpr(n.Right)
  2543  
  2544  	case ODOT: // but not ODOTPTR
  2545  		// Should have been handled in aliased.
  2546  		Fatalf("varexpr unexpected ODOT")
  2547  	}
  2548  
  2549  	// Be conservative.
  2550  	return false
  2551  }
  2552  
  2553  // is the name l mentioned in r?
  2554  func vmatch2(l *Node, r *Node) bool {
  2555  	if r == nil {
  2556  		return false
  2557  	}
  2558  	switch r.Op {
  2559  	// match each right given left
  2560  	case ONAME:
  2561  		return l == r
  2562  
  2563  	case OLITERAL:
  2564  		return false
  2565  	}
  2566  
  2567  	if vmatch2(l, r.Left) {
  2568  		return true
  2569  	}
  2570  	if vmatch2(l, r.Right) {
  2571  		return true
  2572  	}
  2573  	for _, n := range r.List.Slice() {
  2574  		if vmatch2(l, n) {
  2575  			return true
  2576  		}
  2577  	}
  2578  	return false
  2579  }
  2580  
  2581  // is any name mentioned in l also mentioned in r?
  2582  // called by sinit.go
  2583  func vmatch1(l *Node, r *Node) bool {
  2584  	// isolate all left sides
  2585  	if l == nil || r == nil {
  2586  		return false
  2587  	}
  2588  	switch l.Op {
  2589  	case ONAME:
  2590  		switch l.Class() {
  2591  		case PPARAM, PAUTO:
  2592  			break
  2593  
  2594  		default:
  2595  			// assignment to non-stack variable must be
  2596  			// delayed if right has function calls.
  2597  			if r.HasCall() {
  2598  				return true
  2599  			}
  2600  		}
  2601  
  2602  		return vmatch2(l, r)
  2603  
  2604  	case OLITERAL:
  2605  		return false
  2606  	}
  2607  
  2608  	if vmatch1(l.Left, r) {
  2609  		return true
  2610  	}
  2611  	if vmatch1(l.Right, r) {
  2612  		return true
  2613  	}
  2614  	for _, n := range l.List.Slice() {
  2615  		if vmatch1(n, r) {
  2616  			return true
  2617  		}
  2618  	}
  2619  	return false
  2620  }
  2621  
  2622  // paramstoheap returns code to allocate memory for heap-escaped parameters
  2623  // and to copy non-result parameters' values from the stack.
  2624  func paramstoheap(params *types.Type) []*Node {
  2625  	var nn []*Node
  2626  	for _, t := range params.Fields().Slice() {
  2627  		v := asNode(t.Nname)
  2628  		if v != nil && v.Sym != nil && strings.HasPrefix(v.Sym.Name, "~r") { // unnamed result
  2629  			v = nil
  2630  		}
  2631  		if v == nil {
  2632  			continue
  2633  		}
  2634  
  2635  		if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil {
  2636  			nn = append(nn, walkstmt(nod(ODCL, v, nil)))
  2637  			if stackcopy.Class() == PPARAM {
  2638  				nn = append(nn, walkstmt(typecheck(nod(OAS, v, stackcopy), Etop)))
  2639  			}
  2640  		}
  2641  	}
  2642  
  2643  	return nn
  2644  }
  2645  
  2646  // zeroResults zeros the return values at the start of the function.
  2647  // We need to do this very early in the function.  Defer might stop a
  2648  // panic and show the return values as they exist at the time of
  2649  // panic.  For precise stacks, the garbage collector assumes results
  2650  // are always live, so we need to zero them before any allocations,
  2651  // even allocations to move params/results to the heap.
  2652  // The generated code is added to Curfn's Enter list.
  2653  func zeroResults() {
  2654  	lno := lineno
  2655  	lineno = Curfn.Pos
  2656  	for _, f := range Curfn.Type.Results().Fields().Slice() {
  2657  		if v := asNode(f.Nname); v != nil && v.Name.Param.Heapaddr != nil {
  2658  			// The local which points to the return value is the
  2659  			// thing that needs zeroing. This is already handled
  2660  			// by a Needzero annotation in plive.go:livenessepilogue.
  2661  			continue
  2662  		}
  2663  		// Zero the stack location containing f.
  2664  		Curfn.Func.Enter.Append(nod(OAS, nodarg(f, 1), nil))
  2665  	}
  2666  	lineno = lno
  2667  }
  2668  
  2669  // returnsfromheap returns code to copy values for heap-escaped parameters
  2670  // back to the stack.
  2671  func returnsfromheap(params *types.Type) []*Node {
  2672  	var nn []*Node
  2673  	for _, t := range params.Fields().Slice() {
  2674  		v := asNode(t.Nname)
  2675  		if v == nil {
  2676  			continue
  2677  		}
  2678  		if stackcopy := v.Name.Param.Stackcopy; stackcopy != nil && stackcopy.Class() == PPARAMOUT {
  2679  			nn = append(nn, walkstmt(typecheck(nod(OAS, stackcopy, v), Etop)))
  2680  		}
  2681  	}
  2682  
  2683  	return nn
  2684  }
  2685  
  2686  // heapmoves generates code to handle migrating heap-escaped parameters
  2687  // between the stack and the heap. The generated code is added to Curfn's
  2688  // Enter and Exit lists.
  2689  func heapmoves() {
  2690  	lno := lineno
  2691  	lineno = Curfn.Pos
  2692  	nn := paramstoheap(Curfn.Type.Recvs())
  2693  	nn = append(nn, paramstoheap(Curfn.Type.Params())...)
  2694  	nn = append(nn, paramstoheap(Curfn.Type.Results())...)
  2695  	Curfn.Func.Enter.Append(nn...)
  2696  	lineno = Curfn.Func.Endlineno
  2697  	Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.Results())...)
  2698  	lineno = lno
  2699  }
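
        // Added commentary (not part of the original source): for a hypothetical
        // function whose parameter escapes,
        //
        //	func f(p int) *int { return &p }
        //
        // the Enter list gets code (from paramstoheap) declaring p's heap-resident
        // copy and initializing it from the incoming stack slot; an escaping named
        // result would likewise get Exit code (from returnsfromheap) copying its
        // value back to the stack result slot.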
  2700  
  2701  func vmkcall(fn *Node, t *types.Type, init *Nodes, va []*Node) *Node {
  2702  	if fn.Type == nil || fn.Type.Etype != TFUNC {
  2703  		Fatalf("mkcall %v %v", fn, fn.Type)
  2704  	}
  2705  
  2706  	n := fn.Type.Params().NumFields()
  2707  
  2708  	r := nod(OCALL, fn, nil)
  2709  	r.List.Set(va[:n])
  2710  	if fn.Type.Results().NumFields() > 0 {
  2711  		r = typecheck(r, Erv|Efnstruct)
  2712  	} else {
  2713  		r = typecheck(r, Etop)
  2714  	}
  2715  	r = walkexpr(r, init)
  2716  	r.Type = t
  2717  	return r
  2718  }
  2719  
  2720  func mkcall(name string, t *types.Type, init *Nodes, args ...*Node) *Node {
  2721  	return vmkcall(syslook(name), t, init, args)
  2722  }
  2723  
  2724  func mkcall1(fn *Node, t *types.Type, init *Nodes, args ...*Node) *Node {
  2725  	return vmkcall(fn, t, init, args)
  2726  }
  2727  
  2728  func conv(n *Node, t *types.Type) *Node {
  2729  	if eqtype(n.Type, t) {
  2730  		return n
  2731  	}
  2732  	n = nod(OCONV, n, nil)
  2733  	n.Type = t
  2734  	n = typecheck(n, Erv)
  2735  	return n
  2736  }
  2737  
  2738  // byteindex converts n, which is byte-sized, to a uint8.
  2739  // We cannot use conv, because we allow converting bool to uint8 here,
  2740  // which is forbidden in user code.
  2741  func byteindex(n *Node) *Node {
  2742  	if eqtype(n.Type, types.Types[TUINT8]) {
  2743  		return n
  2744  	}
  2745  	n = nod(OCONV, n, nil)
  2746  	n.Type = types.Types[TUINT8]
  2747  	n.SetTypecheck(1)
  2748  	return n
  2749  }
  2750  
  2751  func chanfn(name string, n int, t *types.Type) *Node {
  2752  	if !t.IsChan() {
  2753  		Fatalf("chanfn %v", t)
  2754  	}
  2755  	fn := syslook(name)
  2756  	switch n {
  2757  	default:
  2758  		Fatalf("chanfn %d", n)
  2759  	case 1:
  2760  		fn = substArgTypes(fn, t.Elem())
  2761  	case 2:
  2762  		fn = substArgTypes(fn, t.Elem(), t.Elem())
  2763  	}
  2764  	return fn
  2765  }
  2766  
  2767  func mapfn(name string, t *types.Type) *Node {
  2768  	if !t.IsMap() {
  2769  		Fatalf("mapfn %v", t)
  2770  	}
  2771  	fn := syslook(name)
  2772  	fn = substArgTypes(fn, t.Key(), t.Val(), t.Key(), t.Val())
  2773  	return fn
  2774  }
  2775  
  2776  func mapfndel(name string, t *types.Type) *Node {
  2777  	if !t.IsMap() {
  2778  		Fatalf("mapfndel %v", t)
  2779  	}
  2780  	fn := syslook(name)
  2781  	fn = substArgTypes(fn, t.Key(), t.Val(), t.Key())
  2782  	return fn
  2783  }
  2784  
  2785  const (
  2786  	mapslow = iota
  2787  	mapfast32
  2788  	mapfast64
  2789  	mapfaststr
  2790  	nmapfast
  2791  )
  2792  
  2793  type mapnames [nmapfast]string
  2794  
  2795  func mkmapnames(base string) mapnames {
  2796  	return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"}
  2797  }
  2798  
  2799  var mapaccess1 mapnames = mkmapnames("mapaccess1")
  2800  var mapaccess2 mapnames = mkmapnames("mapaccess2")
  2801  var mapassign mapnames = mkmapnames("mapassign")
  2802  var mapdelete mapnames = mkmapnames("mapdelete")
  2803  
  2804  func mapfast(t *types.Type) int {
  2805  	// Check ../../runtime/hashmap.go:maxValueSize before changing.
  2806  	if t.Val().Width > 128 {
  2807  		return mapslow
  2808  	}
  2809  	switch algtype(t.Key()) {
  2810  	case AMEM32:
  2811  		return mapfast32
  2812  	case AMEM64:
  2813  		return mapfast64
  2814  	case ASTRING:
  2815  		return mapfaststr
  2816  	}
  2817  	return mapslow
  2818  }
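
        // Added commentary (not part of the original source): sketched selections,
        // assuming the value type fits in 128 bytes:
        //
        //	m[k] with m map[string]int   ->  mapaccess1_faststr
        //	m[k] with m map[int32]bool   ->  mapaccess1_fast32
        //	m[k] with m map[[3]int]bool  ->  mapaccess1 (generic, mapslow)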
  2819  
  2820  func writebarrierfn(name string, l *types.Type, r *types.Type) *Node {
  2821  	fn := syslook(name)
  2822  	fn = substArgTypes(fn, l, r)
  2823  	return fn
  2824  }
  2825  
  2826  func addstr(n *Node, init *Nodes) *Node {
  2827  	// orderexpr rewrote OADDSTR to have a list of strings.
  2828  	c := n.List.Len()
  2829  
  2830  	if c < 2 {
  2831  		Fatalf("addstr count %d too small", c)
  2832  	}
  2833  
  2834  	buf := nodnil()
  2835  	if n.Esc == EscNone {
  2836  		sz := int64(0)
  2837  		for _, n1 := range n.List.Slice() {
  2838  			if n1.Op == OLITERAL {
  2839  				sz += int64(len(n1.Val().U.(string)))
  2840  			}
  2841  		}
  2842  
  2843  		// Don't allocate the buffer if the result won't fit.
  2844  		if sz < tmpstringbufsize {
  2845  			// Create temporary buffer for result string on stack.
  2846  			t := types.NewArray(types.Types[TUINT8], tmpstringbufsize)
  2847  
  2848  			buf = nod(OADDR, temp(t), nil)
  2849  		}
  2850  	}
  2851  
  2852  	// build list of string arguments
  2853  	args := []*Node{buf}
  2854  	for _, n2 := range n.List.Slice() {
  2855  		args = append(args, conv(n2, types.Types[TSTRING]))
  2856  	}
  2857  
  2858  	var fn string
  2859  	if c <= 5 {
  2860  		// small numbers of strings use direct runtime helpers.
  2861  		// note: orderexpr knows this cutoff too.
  2862  		fn = fmt.Sprintf("concatstring%d", c)
  2863  	} else {
  2864  		// large numbers of strings are passed to the runtime as a slice.
  2865  		fn = "concatstrings"
  2866  
  2867  		t := types.NewSlice(types.Types[TSTRING])
  2868  		slice := nod(OCOMPLIT, nil, typenod(t))
  2869  		if prealloc[n] != nil {
  2870  			prealloc[slice] = prealloc[n]
  2871  		}
  2872  		slice.List.Set(args[1:]) // skip buf arg
  2873  		args = []*Node{buf, slice}
  2874  		slice.Esc = EscNone
  2875  	}
  2876  
  2877  	cat := syslook(fn)
  2878  	r := nod(OCALL, cat, nil)
  2879  	r.List.Set(args)
  2880  	r = typecheck(r, Erv)
  2881  	r = walkexpr(r, init)
  2882  	r.Type = n.Type
  2883  
  2884  	return r
  2885  }
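
        // Added commentary (not part of the original source): a rough sketch of
        // the lowering above for
        //
        //	s := a + b + c
        //
        // which becomes
        //
        //	s = concatstring3(buf, a, b, c)
        //
        // where buf is nil, or the address of a 32-byte stack buffer when the
        // result does not escape and the literal operands are known to fit. With
        // more than five operands the strings are passed as one []string to
        // concatstrings instead.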
  2886  
  2887  // expand append(l1, l2...) to
  2888  //   init {
  2889  //     s := l1
  2890  //     n := len(s) + len(l2)
  2891  //     // Compare as uint so growslice can panic on overflow.
  2892  //     if uint(n) > uint(cap(s)) {
  2893  //       s = growslice(s, n)
  2894  //     }
  2895  //     s = s[:n]
  2896  //     memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
  2897  //   }
  2898  //   s
  2899  //
  2900  // l2 is allowed to be a string.
  2901  func appendslice(n *Node, init *Nodes) *Node {
  2902  	walkexprlistsafe(n.List.Slice(), init)
  2903  
  2904  	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
  2905  	// and n are name or literal, but those may index the slice we're
  2906  	// modifying here. Fix explicitly.
  2907  	ls := n.List.Slice()
  2908  	for i1, n1 := range ls {
  2909  		ls[i1] = cheapexpr(n1, init)
  2910  	}
  2911  
  2912  	l1 := n.List.First()
  2913  	l2 := n.List.Second()
  2914  
  2915  	var l []*Node
  2916  
  2917  	// var s []T
  2918  	s := temp(l1.Type)
  2919  	l = append(l, nod(OAS, s, l1)) // s = l1
  2920  
  2921  	// n := len(s) + len(l2)
  2922  	nn := temp(types.Types[TINT])
  2923  	l = append(l, nod(OAS, nn, nod(OADD, nod(OLEN, s, nil), nod(OLEN, l2, nil))))
  2924  
  2925  	// if uint(n) > uint(cap(s))
  2926  	nif := nod(OIF, nil, nil)
  2927  	nif.Left = nod(OGT, nod(OCONV, nn, nil), nod(OCONV, nod(OCAP, s, nil), nil))
  2928  	nif.Left.Left.Type = types.Types[TUINT]
  2929  	nif.Left.Right.Type = types.Types[TUINT]
  2930  
  2931  	// instantiate growslice(Type*, []any, int) []any
  2932  	fn := syslook("growslice")
  2933  	fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem())
  2934  
  2935  	// s = growslice(T, s, n)
  2936  	nif.Nbody.Set1(nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type.Elem()), s, nn)))
  2937  	l = append(l, nif)
  2938  
  2939  	// s = s[:n]
  2940  	nt := nod(OSLICE, s, nil)
  2941  	nt.SetSliceBounds(nil, nn, nil)
  2942  	nt.Etype = 1
  2943  	l = append(l, nod(OAS, s, nt))
  2944  
  2945  	if types.Haspointers(l1.Type.Elem()) {
  2946  		// copy(s[len(l1):], l2)
  2947  		nptr1 := nod(OSLICE, s, nil)
  2948  		nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
  2949  		nptr1.Etype = 1
  2950  		nptr2 := l2
  2951  		fn := syslook("typedslicecopy")
  2952  		fn = substArgTypes(fn, l1.Type, l2.Type)
  2953  		var ln Nodes
  2954  		ln.Set(l)
  2955  		nt := mkcall1(fn, types.Types[TINT], &ln, typename(l1.Type.Elem()), nptr1, nptr2)
  2956  		l = append(ln.Slice(), nt)
  2957  	} else if instrumenting && !compiling_runtime {
  2958  		// rely on runtime to instrument copy.
  2959  		// copy(s[len(l1):], l2)
  2960  		nptr1 := nod(OSLICE, s, nil)
  2961  		nptr1.SetSliceBounds(nod(OLEN, l1, nil), nil, nil)
  2962  		nptr1.Etype = 1
  2963  		nptr2 := l2
  2964  		var fn *Node
  2965  		if l2.Type.IsString() {
  2966  			fn = syslook("slicestringcopy")
  2967  		} else {
  2968  			fn = syslook("slicecopy")
  2969  		}
  2970  		fn = substArgTypes(fn, l1.Type, l2.Type)
  2971  		var ln Nodes
  2972  		ln.Set(l)
  2973  		nt := mkcall1(fn, types.Types[TINT], &ln, nptr1, nptr2, nodintconst(s.Type.Elem().Width))
  2974  		l = append(ln.Slice(), nt)
  2975  	} else {
  2976  		// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
  2977  		nptr1 := nod(OINDEX, s, nod(OLEN, l1, nil))
  2978  		nptr1.SetBounded(true)
  2979  
  2980  		nptr1 = nod(OADDR, nptr1, nil)
  2981  
  2982  		nptr2 := nod(OSPTR, l2, nil)
  2983  
  2984  		fn := syslook("memmove")
  2985  		fn = substArgTypes(fn, s.Type.Elem(), s.Type.Elem())
  2986  
  2987  		var ln Nodes
  2988  		ln.Set(l)
  2989  		nwid := cheapexpr(conv(nod(OLEN, l2, nil), types.Types[TUINTPTR]), &ln)
  2990  
  2991  		nwid = nod(OMUL, nwid, nodintconst(s.Type.Elem().Width))
  2992  		nt := mkcall1(fn, nil, &ln, nptr1, nptr2, nwid)
  2993  		l = append(ln.Slice(), nt)
  2994  	}
  2995  
  2996  	typecheckslice(l, Etop)
  2997  	walkstmtlist(l)
  2998  	init.Append(l...)
  2999  	return s
  3000  }
  3001  
  3002  // Rewrite append(src, x, y, z) so that any side effects in
  3003  // x, y, z (including runtime panics) are evaluated in
  3004  // initialization statements before the append.
  3005  // For normal code generation, stop there and leave the
  3006  // rest to cgen_append.
  3007  //
  3008  // For race detector, expand append(src, a [, b]* ) to
  3009  //
  3010  //   init {
  3011  //     s := src
  3012  //     const argc = len(args) - 1
  3013  //     if cap(s) - len(s) < argc {
  3014  //	    s = growslice(s, len(s)+argc)
  3015  //     }
  3016  //     n := len(s)
  3017  //     s = s[:n+argc]
  3018  //     s[n] = a
  3019  //     s[n+1] = b
  3020  //     ...
  3021  //   }
  3022  //   s
  3023  func walkappend(n *Node, init *Nodes, dst *Node) *Node {
  3024  	if !samesafeexpr(dst, n.List.First()) {
  3025  		n.List.SetFirst(safeexpr(n.List.First(), init))
  3026  		n.List.SetFirst(walkexpr(n.List.First(), init))
  3027  	}
  3028  	walkexprlistsafe(n.List.Slice()[1:], init)
  3029  
  3030  	// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
  3031  	// and n are name or literal, but those may index the slice we're
  3032  	// modifying here. Fix explicitly.
  3033  	// Using cheapexpr also makes sure that the evaluation
  3034  	// of all arguments (and especially any panics) happen
  3035  	// before we begin to modify the slice in a visible way.
  3036  	ls := n.List.Slice()[1:]
  3037  	for i, n := range ls {
  3038  		ls[i] = cheapexpr(n, init)
  3039  	}
  3040  
  3041  	nsrc := n.List.First()
  3042  
  3043  	argc := n.List.Len() - 1
  3044  	if argc < 1 {
  3045  		return nsrc
  3046  	}
  3047  
  3048  	// General case, with no function calls left as arguments.
  3049  	// Leave for gen, except that instrumentation requires old form.
  3050  	if !instrumenting || compiling_runtime {
  3051  		return n
  3052  	}
  3053  
  3054  	var l []*Node
  3055  
  3056  	ns := temp(nsrc.Type)
  3057  	l = append(l, nod(OAS, ns, nsrc)) // s = src
  3058  
  3059  	na := nodintconst(int64(argc)) // const argc
  3060  	nx := nod(OIF, nil, nil)       // if cap(s) - len(s) < argc
  3061  	nx.Left = nod(OLT, nod(OSUB, nod(OCAP, ns, nil), nod(OLEN, ns, nil)), na)
  3062  
  3063  	fn := syslook("growslice") //   growslice(<type>, old []T, mincap int) (ret []T)
  3064  	fn = substArgTypes(fn, ns.Type.Elem(), ns.Type.Elem())
  3065  
  3066  	nx.Nbody.Set1(nod(OAS, ns,
  3067  		mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type.Elem()), ns,
  3068  			nod(OADD, nod(OLEN, ns, nil), na))))
  3069  
  3070  	l = append(l, nx)
  3071  
  3072  	nn := temp(types.Types[TINT])
  3073  	l = append(l, nod(OAS, nn, nod(OLEN, ns, nil))) // n = len(s)
  3074  
  3075  	nx = nod(OSLICE, ns, nil) // ...s[:n+argc]
  3076  	nx.SetSliceBounds(nil, nod(OADD, nn, na), nil)
  3077  	nx.Etype = 1
  3078  	l = append(l, nod(OAS, ns, nx)) // s = s[:n+argc]
  3079  
  3080  	ls = n.List.Slice()[1:]
  3081  	for i, n := range ls {
  3082  		nx = nod(OINDEX, ns, nn) // s[n] ...
  3083  		nx.SetBounded(true)
  3084  		l = append(l, nod(OAS, nx, n)) // s[n] = arg
  3085  		if i+1 < len(ls) {
  3086  			l = append(l, nod(OAS, nn, nod(OADD, nn, nodintconst(1)))) // n = n + 1
  3087  		}
  3088  	}
  3089  
  3090  	typecheckslice(l, Etop)
  3091  	walkstmtlist(l)
  3092  	init.Append(l...)
  3093  	return ns
  3094  }
  3095  
  3096  // Lower copy(a, b) to a memmove call or a runtime call.
  3097  //
  3098  // init {
  3099  //   n := len(a)
  3100  //   if n > len(b) { n = len(b) }
  3101  //   memmove(a.ptr, b.ptr, n*sizeof(elem(a)))
  3102  // }
  3103  // n;
  3104  //
  3105  // Also works if b is a string.
  3106  //
  3107  func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
  3108  	if types.Haspointers(n.Left.Type.Elem()) {
  3109  		fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
  3110  		return mkcall1(fn, n.Type, init, typename(n.Left.Type.Elem()), n.Left, n.Right)
  3111  	}
  3112  
  3113  	if runtimecall {
  3114  		var fn *Node
  3115  		if n.Right.Type.IsString() {
  3116  			fn = syslook("slicestringcopy")
  3117  		} else {
  3118  			fn = syslook("slicecopy")
  3119  		}
  3120  		fn = substArgTypes(fn, n.Left.Type, n.Right.Type)
  3121  		return mkcall1(fn, n.Type, init, n.Left, n.Right, nodintconst(n.Left.Type.Elem().Width))
  3122  	}
  3123  
  3124  	n.Left = walkexpr(n.Left, init)
  3125  	n.Right = walkexpr(n.Right, init)
  3126  	nl := temp(n.Left.Type)
  3127  	nr := temp(n.Right.Type)
  3128  	var l []*Node
  3129  	l = append(l, nod(OAS, nl, n.Left))
  3130  	l = append(l, nod(OAS, nr, n.Right))
  3131  
  3132  	nfrm := nod(OSPTR, nr, nil)
  3133  	nto := nod(OSPTR, nl, nil)
  3134  
  3135  	nlen := temp(types.Types[TINT])
  3136  
  3137  	// n = len(to)
  3138  	l = append(l, nod(OAS, nlen, nod(OLEN, nl, nil)))
  3139  
  3140  	// if n > len(frm) { n = len(frm) }
  3141  	nif := nod(OIF, nil, nil)
  3142  
  3143  	nif.Left = nod(OGT, nlen, nod(OLEN, nr, nil))
  3144  	nif.Nbody.Append(nod(OAS, nlen, nod(OLEN, nr, nil)))
  3145  	l = append(l, nif)
  3146  
  3147  	// Call memmove.
  3148  	fn := syslook("memmove")
  3149  
  3150  	fn = substArgTypes(fn, nl.Type.Elem(), nl.Type.Elem())
  3151  	nwid := temp(types.Types[TUINTPTR])
  3152  	l = append(l, nod(OAS, nwid, conv(nlen, types.Types[TUINTPTR])))
  3153  	nwid = nod(OMUL, nwid, nodintconst(nl.Type.Elem().Width))
  3154  	l = append(l, mkcall1(fn, nil, init, nto, nfrm, nwid))
  3155  
  3156  	typecheckslice(l, Etop)
  3157  	walkstmtlist(l)
  3158  	init.Append(l...)
  3159  	return nlen
  3160  }
  3161  
  3162  func eqfor(t *types.Type, needsize *int) *Node {
  3163  	// Should only arrive here with large memory or
  3164  	// a struct/array containing a non-memory field/element.
  3165  	// Small memory is handled inline, and single non-memory
  3166  	// is handled during type check (OCMPSTR etc).
  3167  	switch a, _ := algtype1(t); a {
  3168  	case AMEM:
  3169  		n := syslook("memequal")
  3170  		n = substArgTypes(n, t, t)
  3171  		*needsize = 1
  3172  		return n
  3173  	case ASPECIAL:
  3174  		sym := typesymprefix(".eq", t)
  3175  		n := newname(sym)
  3176  		n.SetClass(PFUNC)
  3177  		ntype := nod(OTFUNC, nil, nil)
  3178  		ntype.List.Append(anonfield(types.NewPtr(t)))
  3179  		ntype.List.Append(anonfield(types.NewPtr(t)))
  3180  		ntype.Rlist.Append(anonfield(types.Types[TBOOL]))
  3181  		ntype = typecheck(ntype, Etype)
  3182  		n.Type = ntype.Type
  3183  		*needsize = 0
  3184  		return n
  3185  	}
  3186  	Fatalf("eqfor %v", t)
  3187  	return nil
  3188  }
  3189  
  3190  // The result of walkcompare MUST be assigned back to n, e.g.
  3191  // 	n.Left = walkcompare(n.Left, init)
  3192  func walkcompare(n *Node, init *Nodes) *Node {
  3193  	// Given interface value l and concrete value r, rewrite
  3194  	//   l == r
  3195  	// into types-equal && data-equal.
  3196  	// This is efficient, avoids allocations, and avoids runtime calls.
  3197  	var l, r *Node
  3198  	if n.Left.Type.IsInterface() && !n.Right.Type.IsInterface() {
  3199  		l = n.Left
  3200  		r = n.Right
  3201  	} else if !n.Left.Type.IsInterface() && n.Right.Type.IsInterface() {
  3202  		l = n.Right
  3203  		r = n.Left
  3204  	}
  3205  
  3206  	if l != nil {
  3207  		// Handle both == and !=.
  3208  		eq := n.Op
  3209  		var andor Op
  3210  		if eq == OEQ {
  3211  			andor = OANDAND
  3212  		} else {
  3213  			andor = OOROR
  3214  		}
  3215  		// Check for types equal.
  3216  		// For empty interface, this is:
  3217  		//   l.tab == type(r)
  3218  		// For non-empty interface, this is:
  3219  		//   l.tab != nil && l.tab._type == type(r)
  3220  		var eqtype *Node
  3221  		tab := nod(OITAB, l, nil)
  3222  		rtyp := typename(r.Type)
  3223  		if l.Type.IsEmptyInterface() {
  3224  			tab.Type = types.NewPtr(types.Types[TUINT8])
  3225  			tab.SetTypecheck(1)
  3226  			eqtype = nod(eq, tab, rtyp)
  3227  		} else {
  3228  			nonnil := nod(brcom(eq), nodnil(), tab)
  3229  			match := nod(eq, itabType(tab), rtyp)
  3230  			eqtype = nod(andor, nonnil, match)
  3231  		}
  3232  		// Check for data equal.
  3233  		eqdata := nod(eq, ifaceData(l, r.Type), r)
  3234  		// Put it all together.
  3235  		expr := nod(andor, eqtype, eqdata)
  3236  		n = finishcompare(n, expr, init)
  3237  		return n
  3238  	}
  3239  
  3240  	// Must be comparison of array or struct.
  3241  	// Otherwise back end handles it.
  3242  	// While we're here, decide whether to
  3243  	// inline or call an eq alg.
  3244  	t := n.Left.Type
  3245  	var inline bool
  3246  
  3247  	maxcmpsize := int64(4)
  3248  	unalignedLoad := false
  3249  	switch thearch.LinkArch.Family {
  3250  	case sys.AMD64, sys.ARM64, sys.S390X:
  3251  		// Keep this low enough to generate less code than a function call.
  3252  		maxcmpsize = 16
  3253  		unalignedLoad = true
  3254  	case sys.I386:
  3255  		maxcmpsize = 8
  3256  		unalignedLoad = true
  3257  	}
  3258  
  3259  	switch t.Etype {
  3260  	default:
  3261  		return n
  3262  	case TARRAY:
  3263  		// We can compare several elements at once with 2/4/8 byte integer compares
  3264  		inline = t.NumElem() <= 1 || (issimple[t.Elem().Etype] && (t.NumElem() <= 4 || t.Elem().Width*t.NumElem() <= maxcmpsize))
  3265  	case TSTRUCT:
  3266  		inline = t.NumFields() <= 4
  3267  	}
  3268  
  3269  	cmpl := n.Left
  3270  	for cmpl != nil && cmpl.Op == OCONVNOP {
  3271  		cmpl = cmpl.Left
  3272  	}
  3273  	cmpr := n.Right
  3274  	for cmpr != nil && cmpr.Op == OCONVNOP {
  3275  		cmpr = cmpr.Left
  3276  	}
  3277  
  3278  	// We chose not to inline. Call the equality function directly.
  3279  	if !inline {
  3280  		if isvaluelit(cmpl) {
  3281  			var_ := temp(cmpl.Type)
  3282  			anylit(cmpl, var_, init)
  3283  			cmpl = var_
  3284  		}
  3285  		if isvaluelit(cmpr) {
  3286  			var_ := temp(cmpr.Type)
  3287  			anylit(cmpr, var_, init)
  3288  			cmpr = var_
  3289  		}
  3290  		if !islvalue(cmpl) || !islvalue(cmpr) {
  3291  			Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
  3292  		}
  3293  
  3294  		// eq algs take pointers
  3295  		pl := temp(types.NewPtr(t))
  3296  		al := nod(OAS, pl, nod(OADDR, cmpl, nil))
  3297  		al.Right.Etype = 1 // addr does not escape
  3298  		al = typecheck(al, Etop)
  3299  		init.Append(al)
  3300  
  3301  		pr := temp(types.NewPtr(t))
  3302  		ar := nod(OAS, pr, nod(OADDR, cmpr, nil))
  3303  		ar.Right.Etype = 1 // addr does not escape
  3304  		ar = typecheck(ar, Etop)
  3305  		init.Append(ar)
  3306  
  3307  		var needsize int
  3308  		call := nod(OCALL, eqfor(t, &needsize), nil)
  3309  		call.List.Append(pl)
  3310  		call.List.Append(pr)
  3311  		if needsize != 0 {
  3312  			call.List.Append(nodintconst(t.Width))
  3313  		}
  3314  		res := call
  3315  		if n.Op != OEQ {
  3316  			res = nod(ONOT, res, nil)
  3317  		}
  3318  		n = finishcompare(n, res, init)
  3319  		return n
  3320  	}
  3321  
  3322  	// inline: build boolean expression comparing element by element
  3323  	andor := OANDAND
  3324  	if n.Op == ONE {
  3325  		andor = OOROR
  3326  	}
  3327  	var expr *Node
  3328  	compare := func(el, er *Node) {
  3329  		a := nod(n.Op, el, er)
  3330  		if expr == nil {
  3331  			expr = a
  3332  		} else {
  3333  			expr = nod(andor, expr, a)
  3334  		}
  3335  	}
  3336  	cmpl = safeexpr(cmpl, init)
  3337  	cmpr = safeexpr(cmpr, init)
  3338  	if t.IsStruct() {
  3339  		for _, f := range t.Fields().Slice() {
  3340  			sym := f.Sym
  3341  			if sym.IsBlank() {
  3342  				continue
  3343  			}
  3344  			compare(
  3345  				nodSym(OXDOT, cmpl, sym),
  3346  				nodSym(OXDOT, cmpr, sym),
  3347  			)
  3348  		}
  3349  	} else {
  3350  		step := int64(1)
  3351  		remains := t.NumElem() * t.Elem().Width
  3352  		combine64bit := unalignedLoad && Widthreg == 8 && t.Elem().Width <= 4 && t.Elem().IsInteger()
  3353  		combine32bit := unalignedLoad && t.Elem().Width <= 2 && t.Elem().IsInteger()
  3354  		combine16bit := unalignedLoad && t.Elem().Width == 1 && t.Elem().IsInteger()
  3355  		for i := int64(0); remains > 0; {
  3356  			var convType *types.Type
  3357  			switch {
  3358  			case remains >= 8 && combine64bit:
  3359  				convType = types.Types[TINT64]
  3360  				step = 8 / t.Elem().Width
  3361  			case remains >= 4 && combine32bit:
  3362  				convType = types.Types[TUINT32]
  3363  				step = 4 / t.Elem().Width
  3364  			case remains >= 2 && combine16bit:
  3365  				convType = types.Types[TUINT16]
  3366  				step = 2 / t.Elem().Width
  3367  			default:
  3368  				step = 1
  3369  			}
  3370  			if step == 1 {
  3371  				compare(
  3372  					nod(OINDEX, cmpl, nodintconst(int64(i))),
  3373  					nod(OINDEX, cmpr, nodintconst(int64(i))),
  3374  				)
  3375  				i++
  3376  				remains -= t.Elem().Width
  3377  			} else {
  3378  				cmplw := nod(OINDEX, cmpl, nodintconst(int64(i)))
  3379  				cmplw = conv(cmplw, convType)
  3380  				cmprw := nod(OINDEX, cmpr, nodintconst(int64(i)))
  3381  				cmprw = conv(cmprw, convType)
  3382  				// For code like this:  uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
  3383  				// ssa will generate a single large load.
  3384  				for offset := int64(1); offset < step; offset++ {
  3385  					lb := nod(OINDEX, cmpl, nodintconst(int64(i+offset)))
  3386  					lb = conv(lb, convType)
  3387  					lb = nod(OLSH, lb, nodintconst(int64(8*t.Elem().Width*offset)))
  3388  					cmplw = nod(OOR, cmplw, lb)
  3389  					rb := nod(OINDEX, cmpr, nodintconst(int64(i+offset)))
  3390  					rb = conv(rb, convType)
  3391  					rb = nod(OLSH, rb, nodintconst(int64(8*t.Elem().Width*offset)))
  3392  					cmprw = nod(OOR, cmprw, rb)
  3393  				}
  3394  				compare(cmplw, cmprw)
  3395  				i += step
  3396  				remains -= step * t.Elem().Width
  3397  			}
  3398  		}
  3399  	}
  3400  	if expr == nil {
  3401  		expr = nodbool(n.Op == OEQ)
  3402  	}
  3403  	n = finishcompare(n, expr, init)
  3404  	return n
  3405  }
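
        // Added commentary (not part of the original source): sketched outcomes of
        // the decisions above:
        //
        //	var a, b [4]byte
        //	a == b          // inlined; on amd64 both sides are combined into a
        //	                // single 32-bit compare via the OOR/OLSH chain above
        //
        //	var x, y [64]int
        //	x == y          // too large to inline; lowered to a call taking &x
        //	                // and &y (memequal or a generated .eq function)
        //
        //	var i interface{}
        //	i == 7          // interface vs. concrete: itab/type test && data
        //	                // test, with no runtime call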
  3406  
  3407  // The result of finishcompare MUST be assigned back to n, e.g.
  3408  // 	n.Left = finishcompare(n.Left, r, init)
  3409  func finishcompare(n, r *Node, init *Nodes) *Node {
  3410  	// Use nn here to avoid passing r to typecheck.
  3411  	nn := r
  3412  	nn = typecheck(nn, Erv)
  3413  	nn = walkexpr(nn, init)
  3414  	r = nn
  3415  	if r.Type != n.Type {
  3416  		r = nod(OCONVNOP, r, nil)
  3417  		r.Type = n.Type
  3418  		r.SetTypecheck(1)
  3419  		nn = r
  3420  	}
  3421  	return nn
  3422  }
  3423  
  3424  // isIntOrdering reports whether n is a <, ≤, >, or ≥ ordering between integers.
  3425  func (n *Node) isIntOrdering() bool {
  3426  	switch n.Op {
  3427  	case OLE, OLT, OGE, OGT:
  3428  	default:
  3429  		return false
  3430  	}
  3431  	return n.Left.Type.IsInteger() && n.Right.Type.IsInteger()
  3432  }
  3433  
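        // Added commentary (not part of the original source): walkinrange below
        // rewrites a constant-bounded range test over integers such as
        //
        //	4 <= x && x < 10
        //
        // into a single unsigned comparison
        //
        //	uint(x-4) < 6
        //
        // which is safe because 10-4 is non-negative and x-4 wraps any value
        // below 4 around to a large unsigned number.
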
  3434  // walkinrange optimizes integer-in-range checks, such as 4 <= x && x < 10.
  3435  // n must be an OANDAND or OOROR node.
  3436  // The result of walkinrange MUST be assigned back to n, e.g.
  3437  // 	n.Left = walkinrange(n.Left)
  3438  func walkinrange(n *Node, init *Nodes) *Node {
  3439  	// We are looking for something equivalent to a opl b OP b opr c, where:
  3440  	// * a, b, and c have integer type
  3441  	// * b is side-effect-free
  3442  	// * opl and opr are each < or ≤
  3443  	// * OP is &&
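        	// For example, in 4 <= x && x < 10: a is 4, b is x, c is 10, opl is <= and opr is <.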
  3444  	l := n.Left
  3445  	r := n.Right
  3446  	if !l.isIntOrdering() || !r.isIntOrdering() {
  3447  		return n
  3448  	}
  3449  
  3450  	// Find b, if it exists, and rename appropriately.
  3451  	// Input is: l.Left l.Op l.Right ANDAND/OROR r.Left r.Op r.Right
  3452  	// Output is: a opl b(==x) ANDAND/OROR b(==x) opr c
  3453  	a, opl, b := l.Left, l.Op, l.Right
  3454  	x, opr, c := r.Left, r.Op, r.Right
  3455  	for i := 0; ; i++ {
  3456  		if samesafeexpr(b, x) {
  3457  			break
  3458  		}
  3459  		if i == 3 {
  3460  			// Tried all permutations and couldn't find an appropriate b == x.
  3461  			return n
  3462  		}
  3463  		if i&1 == 0 {
  3464  			a, opl, b = b, brrev(opl), a
  3465  		} else {
  3466  			x, opr, c = c, brrev(opr), x
  3467  		}
  3468  	}
  3469  
  3470  	// If n.Op is ||, apply de Morgan.
  3471  	// Negate the internal ops now; we'll negate the top level op at the end.
  3472  	// Henceforth assume &&.
  3473  	negateResult := n.Op == OOROR
  3474  	if negateResult {
  3475  		opl = brcom(opl)
  3476  		opr = brcom(opr)
  3477  	}
  3478  
  3479  	cmpdir := func(o Op) int {
  3480  		switch o {
  3481  		case OLE, OLT:
  3482  			return -1
  3483  		case OGE, OGT:
  3484  			return +1
  3485  		}
  3486  		Fatalf("walkinrange cmpdir %v", o)
  3487  		return 0
  3488  	}
  3489  	if cmpdir(opl) != cmpdir(opr) {
  3490  		// Not a range check; something like b < a && b < c.
  3491  		return n
  3492  	}
  3493  
  3494  	switch opl {
  3495  	case OGE, OGT:
  3496  		// We have something like a > b && b ≥ c.
  3497  		// Switch and reverse ops and rename constants,
  3498  		// to make it look like a ≤ b && b < c.
  3499  		a, c = c, a
  3500  		opl, opr = brrev(opr), brrev(opl)
  3501  	}
  3502  
  3503  	// We must ensure that c-a is non-negative.
  3504  	// For now, require a and c to be constants.
  3505  	// In the future, we could also support a == 0 and c == len/cap(...).
  3506  	// Unfortunately, by this point, most len/cap expressions have been
  3507  	// stored into temporary variables.
  3508  	if !Isconst(a, CTINT) || !Isconst(c, CTINT) {
  3509  		return n
  3510  	}
  3511  
  3512  	if opl == OLT {
  3513  		// We have a < b && ...
  3514  		// We need a ≤ b && ... to safely use unsigned comparison tricks.
  3515  		// If a is not the maximum constant for b's type,
  3516  		// we can increment a and switch to ≤.
  3517  		if a.Int64() >= maxintval[b.Type.Etype].Int64() {
  3518  			return n
  3519  		}
  3520  		a = nodintconst(a.Int64() + 1)
  3521  		opl = OLE
  3522  	}
  3523  
  3524  	bound := c.Int64() - a.Int64()
  3525  	if bound < 0 {
  3526  		// Bad news. Something like 5 <= x && x < 3.
  3527  		// Rare in practice, and we still need to generate side-effects,
  3528  		// so just leave it alone.
  3529  		return n
  3530  	}
  3531  
  3532  	// We have a ≤ b && b < c (or a ≤ b && b ≤ c).
  3533  	// This is equivalent to (a-a) ≤ (b-a) && (b-a) < (c-a),
  3534  	// which is equivalent to 0 ≤ (b-a) && (b-a) < (c-a),
  3535  	// which is equivalent to uint(b-a) < uint(c-a).
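        	// Continuing the example above, 4 <= x && x < 10 becomes uint(x-4) < 6.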
  3536  	ut := b.Type.ToUnsigned()
  3537  	lhs := conv(nod(OSUB, b, a), ut)
  3538  	rhs := nodintconst(bound)
  3539  	if negateResult {
  3540  		// Negate top level.
  3541  		opr = brcom(opr)
  3542  	}
  3543  	cmp := nod(opr, lhs, rhs)
  3544  	cmp.Pos = n.Pos
  3545  	cmp = addinit(cmp, l.Ninit.Slice())
  3546  	cmp = addinit(cmp, r.Ninit.Slice())
  3547  	// Typecheck the AST rooted at cmp...
  3548  	cmp = typecheck(cmp, Erv)
  3549  	// ...but then reset cmp's type to match n's type.
  3550  	cmp.Type = n.Type
  3551  	cmp = walkexpr(cmp, init)
  3552  	return cmp
  3553  }
  3554  
  3555  // bounded reports whether the integer expression n must be in the range [0, max).
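        // For example, x&7 must be in [0, 8) for any integer x,
        // and an unsigned x%10 must be in [0, 10).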
  3556  func bounded(n *Node, max int64) bool {
  3557  	if n.Type == nil || !n.Type.IsInteger() {
  3558  		return false
  3559  	}
  3560  
  3561  	sign := n.Type.IsSigned()
  3562  	bits := int32(8 * n.Type.Width)
  3563  
  3564  	if smallintconst(n) {
  3565  		v := n.Int64()
  3566  		return 0 <= v && v < max
  3567  	}
  3568  
  3569  	switch n.Op {
  3570  	case OAND:
  3571  		v := int64(-1)
  3572  		if smallintconst(n.Left) {
  3573  			v = n.Left.Int64()
  3574  		} else if smallintconst(n.Right) {
  3575  			v = n.Right.Int64()
  3576  		}
  3577  
  3578  		if 0 <= v && v < max {
  3579  			return true
  3580  		}
  3581  
  3582  	case OMOD:
  3583  		if !sign && smallintconst(n.Right) {
  3584  			v := n.Right.Int64()
  3585  			if 0 <= v && v <= max {
  3586  				return true
  3587  			}
  3588  		}
  3589  
  3590  	case ODIV:
  3591  		if !sign && smallintconst(n.Right) {
  3592  			v := n.Right.Int64()
  3593  			for bits > 0 && v >= 2 {
  3594  				bits--
  3595  				v >>= 1
  3596  			}
  3597  		}
  3598  
  3599  	case ORSH:
  3600  		if !sign && smallintconst(n.Right) {
  3601  			v := n.Right.Int64()
  3602  			if v > int64(bits) {
  3603  				return true
  3604  			}
  3605  			bits -= int32(v)
  3606  		}
  3607  	}
  3608  
  3609  	if !sign && bits <= 62 && 1<<uint(bits) <= max {
  3610  		return true
  3611  	}
  3612  
  3613  	return false
  3614  }
  3615  
  3616  // usemethod checks interface method calls for uses of reflect.Type.Method or reflect.Type.MethodByName.
  3617  func usemethod(n *Node) {
  3618  	t := n.Left.Type
  3619  
  3620  	// Looking for either of:
  3621  	//	Method(int) reflect.Method
  3622  	//	MethodByName(string) (reflect.Method, bool)
  3623  	//
  3624  	// TODO(crawshaw): improve precision of match by working out
  3625  	//                 how to check the method name.
  3626  	if n := t.Params().NumFields(); n != 1 {
  3627  		return
  3628  	}
  3629  	if n := t.Results().NumFields(); n != 1 && n != 2 {
  3630  		return
  3631  	}
  3632  	p0 := t.Params().Field(0)
  3633  	res0 := t.Results().Field(0)
  3634  	var res1 *types.Field
  3635  	if t.Results().NumFields() == 2 {
  3636  		res1 = t.Results().Field(1)
  3637  	}
  3638  
  3639  	if res1 == nil {
  3640  		if p0.Type.Etype != TINT {
  3641  			return
  3642  		}
  3643  	} else {
  3644  		if !p0.Type.IsString() {
  3645  			return
  3646  		}
  3647  		if !res1.Type.IsBoolean() {
  3648  			return
  3649  		}
  3650  	}
  3651  
  3652  	// Note: Don't rely on res0.Type.String() since its formatting depends on multiple factors
  3653  	//       (including global variables such as numImports - was issue #19028).
  3654  	if s := res0.Type.Sym; s != nil && s.Name == "Method" && s.Pkg != nil && s.Pkg.Path == "reflect" {
  3655  		Curfn.Func.SetReflectMethod(true)
  3656  	}
  3657  }
  3658  
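        // usefield records the use of a struct field whose tag contains go:"track"
        // by adding it to the current function's FieldTrack set. It does nothing
        // unless field tracking is enabled.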
  3659  func usefield(n *Node) {
  3660  	if objabi.Fieldtrack_enabled == 0 {
  3661  		return
  3662  	}
  3663  
  3664  	switch n.Op {
  3665  	default:
  3666  		Fatalf("usefield %v", n.Op)
  3667  
  3668  	case ODOT, ODOTPTR:
  3669  		break
  3670  	}
  3671  	if n.Sym == nil {
  3672  		// No field name.  This DOTPTR was built by the compiler for access
  3673  		// to runtime data structures.  Ignore.
  3674  		return
  3675  	}
  3676  
  3677  	t := n.Left.Type
  3678  	if t.IsPtr() {
  3679  		t = t.Elem()
  3680  	}
  3681  	field := dotField[typeSymKey{t.Orig, n.Sym}]
  3682  	if field == nil {
  3683  		Fatalf("usefield %v %v without paramfld", n.Left.Type, n.Sym)
  3684  	}
  3685  	if !strings.Contains(field.Note, "go:\"track\"") {
  3686  		return
  3687  	}
  3688  
  3689  	outer := n.Left.Type
  3690  	if outer.IsPtr() {
  3691  		outer = outer.Elem()
  3692  	}
  3693  	if outer.Sym == nil {
  3694  		yyerror("tracked field must be in named struct type")
  3695  	}
  3696  	if !exportname(field.Sym.Name) {
  3697  		yyerror("tracked field must be exported (upper case)")
  3698  	}
  3699  
  3700  	sym := tracksym(outer, field)
  3701  	if Curfn.Func.FieldTrack == nil {
  3702  		Curfn.Func.FieldTrack = make(map[*types.Sym]struct{})
  3703  	}
  3704  	Curfn.Func.FieldTrack[sym] = struct{}{}
  3705  }
  3706  
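        // candiscardlist reports whether every node in l can be discarded.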
  3707  func candiscardlist(l Nodes) bool {
  3708  	for _, n := range l.Slice() {
  3709  		if !candiscard(n) {
  3710  			return false
  3711  		}
  3712  	}
  3713  	return true
  3714  }
  3715  
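        // candiscard reports whether the expression n can safely be discarded
        // without being evaluated: it has no observable side effects and cannot panic.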
  3716  func candiscard(n *Node) bool {
  3717  	if n == nil {
  3718  		return true
  3719  	}
  3720  
  3721  	switch n.Op {
  3722  	default:
  3723  		return false
  3724  
  3725  		// Discardable as long as the subpieces are.
  3726  	case ONAME,
  3727  		ONONAME,
  3728  		OTYPE,
  3729  		OPACK,
  3730  		OLITERAL,
  3731  		OADD,
  3732  		OSUB,
  3733  		OOR,
  3734  		OXOR,
  3735  		OADDSTR,
  3736  		OADDR,
  3737  		OANDAND,
  3738  		OARRAYBYTESTR,
  3739  		OARRAYRUNESTR,
  3740  		OSTRARRAYBYTE,
  3741  		OSTRARRAYRUNE,
  3742  		OCAP,
  3743  		OCMPIFACE,
  3744  		OCMPSTR,
  3745  		OCOMPLIT,
  3746  		OMAPLIT,
  3747  		OSTRUCTLIT,
  3748  		OARRAYLIT,
  3749  		OSLICELIT,
  3750  		OPTRLIT,
  3751  		OCONV,
  3752  		OCONVIFACE,
  3753  		OCONVNOP,
  3754  		ODOT,
  3755  		OEQ,
  3756  		ONE,
  3757  		OLT,
  3758  		OLE,
  3759  		OGT,
  3760  		OGE,
  3761  		OKEY,
  3762  		OSTRUCTKEY,
  3763  		OLEN,
  3764  		OMUL,
  3765  		OLSH,
  3766  		ORSH,
  3767  		OAND,
  3768  		OANDNOT,
  3769  		ONEW,
  3770  		ONOT,
  3771  		OCOM,
  3772  		OPLUS,
  3773  		OMINUS,
  3774  		OOROR,
  3775  		OPAREN,
  3776  		ORUNESTR,
  3777  		OREAL,
  3778  		OIMAG,
  3779  		OCOMPLEX:
  3780  		break
  3781  
  3782  		// Discardable as long as we know it's not division by zero.
  3783  	case ODIV, OMOD:
  3784  		if Isconst(n.Right, CTINT) && n.Right.Val().U.(*Mpint).CmpInt64(0) != 0 {
  3785  			break
  3786  		}
  3787  		if Isconst(n.Right, CTFLT) && n.Right.Val().U.(*Mpflt).CmpFloat64(0) != 0 {
  3788  			break
  3789  		}
  3790  		return false
  3791  
  3792  		// Discardable as long as we know it won't fail because of a bad size.
  3793  	case OMAKECHAN, OMAKEMAP:
  3794  		if Isconst(n.Left, CTINT) && n.Left.Val().U.(*Mpint).CmpInt64(0) == 0 {
  3795  			break
  3796  		}
  3797  		return false
  3798  
  3799  		// Difficult to tell what sizes are okay.
  3800  	case OMAKESLICE:
  3801  		return false
  3802  	}
  3803  
  3804  	if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
  3805  		return false
  3806  	}
  3807  
  3808  	return true
  3809  }
  3810  
  3811  // rewrite
  3812  //	print(x, y, z)
  3813  // into
  3814  //	func(a1, a2, a3) {
  3815  //		print(a1, a2, a3)
  3816  //	}(x, y, z)
  3817  // and same for println.
  3818  
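        // walkprintfunc_prgen is a counter used to give each wrapper function
        // created by walkprintfunc a unique name.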
  3819  var walkprintfunc_prgen int
  3820  
  3821  // The result of walkprintfunc MUST be assigned back to n, e.g.
  3822  // 	n.Left = walkprintfunc(n.Left, init)
  3823  func walkprintfunc(n *Node, init *Nodes) *Node {
  3824  	if n.Ninit.Len() != 0 {
  3825  		walkstmtlist(n.Ninit.Slice())
  3826  		init.AppendNodes(&n.Ninit)
  3827  	}
  3828  
  3829  	t := nod(OTFUNC, nil, nil)
  3830  	num := 0
  3831  	var printargs []*Node
  3832  	var a *Node
  3833  	var buf string
  3834  	for _, n1 := range n.List.Slice() {
  3835  		buf = fmt.Sprintf("a%d", num)
  3836  		num++
  3837  		a = namedfield(buf, n1.Type)
  3838  		t.List.Append(a)
  3839  		printargs = append(printargs, a.Left)
  3840  	}
  3841  
  3842  	oldfn := Curfn
  3843  	Curfn = nil
  3844  
  3845  	walkprintfunc_prgen++
  3846  	sym := lookupN("print·%d", walkprintfunc_prgen)
  3847  	fn := dclfunc(sym, t)
  3848  
  3849  	a = nod(n.Op, nil, nil)
  3850  	a.List.Set(printargs)
  3851  	a = typecheck(a, Etop)
  3852  	a = walkstmt(a)
  3853  
  3854  	fn.Nbody.Set1(a)
  3855  
  3856  	funcbody()
  3857  
  3858  	fn = typecheck(fn, Etop)
  3859  	typecheckslice(fn.Nbody.Slice(), Etop)
  3860  	xtop = append(xtop, fn)
  3861  	Curfn = oldfn
  3862  
  3863  	a = nod(OCALL, nil, nil)
  3864  	a.Left = fn.Func.Nname
  3865  	a.List.Set(n.List.Slice())
  3866  	a = typecheck(a, Etop)
  3867  	a = walkexpr(a, init)
  3868  	return a
  3869  }
  3870  
  3871  // substArgTypes substitutes the given list of types for
  3872  // successive occurrences of the "any" placeholder in the
  3873  // type syntax expression n.Type.
  3874  // The result of substArgTypes MUST be assigned back to old, e.g.
  3875  // 	n.Left = substArgTypes(n.Left, t1, t2)
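        // It is used, for example, to fill in concrete argument types for runtime
        // helpers whose declared signatures use the "any" placeholder.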
  3876  func substArgTypes(old *Node, types_ ...*types.Type) *Node {
  3877  	n := *old // make shallow copy
  3878  
  3879  	for _, t := range types_ {
  3880  		dowidth(t)
  3881  	}
  3882  	n.Type = types.SubstAny(n.Type, &types_)
  3883  	if len(types_) > 0 {
  3884  		Fatalf("substArgTypes: too many argument types")
  3885  	}
  3886  	return &n
  3887  }