github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/cmd/compile/internal/mips64/gsubr.go

     1  // Derived from Inferno utils/6c/txt.c
     2  // http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
     3  //
     4  //	Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
     5  //	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
     6  //	Portions Copyright © 1997-1999 Vita Nuova Limited
     7  //	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
     8  //	Portions Copyright © 2004,2006 Bruce Ellis
     9  //	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
    10  //	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
    11  //	Portions Copyright © 2009 The Go Authors.  All rights reserved.
    12  //
    13  // Permission is hereby granted, free of charge, to any person obtaining a copy
    14  // of this software and associated documentation files (the "Software"), to deal
    15  // in the Software without restriction, including without limitation the rights
    16  // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
    17  // copies of the Software, and to permit persons to whom the Software is
    18  // furnished to do so, subject to the following conditions:
    19  //
    20  // The above copyright notice and this permission notice shall be included in
    21  // all copies or substantial portions of the Software.
    22  //
    23  // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    24  // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    25  // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
    26  // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    27  // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    28  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
    29  // THE SOFTWARE.
    30  
    31  package ppc64
    32  
    33  import (
    34  	"cmd/compile/internal/big"
    35  	"cmd/compile/internal/gc"
    36  	"cmd/internal/obj"
    37  	"cmd/internal/obj/ppc64"
    38  	"fmt"
    39  )
    40  
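        // resvd lists registers that are reserved and must never be handed out
        // by the register allocator.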
    41  var resvd = []int{
    42  	ppc64.REGZERO,
    43  	ppc64.REGSP, // reserved for SP
    44  	// We need to preserve the C ABI TLS pointer because sigtramp
    45  	// may run during C code and needs to access the g.  C
    46  	// clobbers REGG, so if Go were to clobber REGTLS, sigtramp
    47  	// wouldn't know which convention to use.  By preserving REGTLS,
    48  	// we can just retrieve g from TLS when we aren't sure.
    49  	ppc64.REGTLS,
    50  
    51  	// TODO(austin): Consolidate REGTLS and REGG?
    52  	ppc64.REGG,
    53  	ppc64.REGTMP, // reserved as a temporary/scratch register
    54  	ppc64.FREGCVI,
    55  	ppc64.FREGZERO,
    56  	ppc64.FREGHALF,
    57  	ppc64.FREGONE,
    58  	ppc64.FREGTWO,
    59  }
    60  
    61  /*
    62   * generate
    63   *	as $c, n
    64   */
    65  func ginscon(as int, c int64, n2 *gc.Node) {
    66  	var n1 gc.Node
    67  
    68  	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
    69  
    70  	if as != ppc64.AMOVD && (c < -ppc64.BIG || c > ppc64.BIG) || n2.Op != gc.OREGISTER || as == ppc64.AMULLD {
    71  		// cannot have more than a 16-bit immediate in ADD, etc.
    72  		// instead, MOV the constant into a register first.
    73  		var ntmp gc.Node
    74  		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
    75  
    76  		rawgins(ppc64.AMOVD, &n1, &ntmp)
    77  		rawgins(as, &ntmp, n2)
    78  		gc.Regfree(&ntmp)
    79  		return
    80  	}
    81  
    82  	rawgins(as, &n1, n2)
    83  }
    84  
    85  /*
    86   * generate
    87   *	as n, $c (CMP/CMPU)
    88   */
    89  func ginscon2(as int, n2 *gc.Node, c int64) {
    90  	var n1 gc.Node
    91  
    92  	gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
    93  
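        	// ACMP and ACMPU can encode the constant directly only when it fits in
        	// the instruction's immediate field; otherwise it is loaded into a
        	// temporary register below.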
    94  	switch as {
    95  	default:
    96  		gc.Fatalf("ginscon2")
    97  
    98  	case ppc64.ACMP:
    99  		if -ppc64.BIG <= c && c <= ppc64.BIG {
   100  			rawgins(as, n2, &n1)
   101  			return
   102  		}
   103  
   104  	case ppc64.ACMPU:
   105  		if 0 <= c && c <= 2*ppc64.BIG {
   106  			rawgins(as, n2, &n1)
   107  			return
   108  		}
   109  	}
   110  
   111  	// MOV n1 into a register first
   112  	var ntmp gc.Node
   113  	gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
   114  
   115  	rawgins(ppc64.AMOVD, &n1, &ntmp)
   116  	rawgins(as, n2, &ntmp)
   117  	gc.Regfree(&ntmp)
   118  }
   119  
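        // ginscmp generates a comparison of n1 and n2 for type t and returns the
        // conditional branch (to be patched by the caller) selected by op.
        // Constants are placed on the right-hand side, using ginscon2 when the
        // right operand is an integer constant.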
   120  func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
   121  	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
   122  		// Reverse comparison to place constant last.
   123  		op = gc.Brrev(op)
   124  		n1, n2 = n2, n1
   125  	}
   126  
   127  	var r1, r2, g1, g2 gc.Node
   128  	gc.Regalloc(&r1, t, n1)
   129  	gc.Regalloc(&g1, n1.Type, &r1)
   130  	gc.Cgen(n1, &g1)
   131  	gmove(&g1, &r1)
   132  	if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
   133  		ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
   134  	} else {
   135  		gc.Regalloc(&r2, t, n2)
   136  		gc.Regalloc(&g2, n1.Type, &r2)
   137  		gc.Cgen(n2, &g2)
   138  		gmove(&g2, &r2)
   139  		rawgins(optoas(gc.OCMP, t), &r1, &r2)
   140  		gc.Regfree(&g2)
   141  		gc.Regfree(&r2)
   142  	}
   143  	gc.Regfree(&g1)
   144  	gc.Regfree(&r1)
   145  	return gc.Gbranch(optoas(op, t), nil, likely)
   146  }
   147  
   148  // set up nodes representing 2^63
   149  var (
   150  	bigi         gc.Node
   151  	bigf         gc.Node
   152  	bignodes_did bool
   153  )
   154  
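        // bignodes initializes bigi and bigf with the constant 2^63, as a uint64
        // and a float64 respectively, for the unsigned 64-bit conversions in gmove.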
   155  func bignodes() {
   156  	if bignodes_did {
   157  		return
   158  	}
   159  	bignodes_did = true
   160  
   161  	var i big.Int
   162  	i.SetInt64(1)
   163  	i.Lsh(&i, 63)
   164  
   165  	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
   166  	bigi.SetBigInt(&i)
   167  
   168  	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
   169  }
   170  
   171  /*
   172   * generate move:
   173   *	t = f
   174   * hard part is conversions.
   175   */
   176  func gmove(f *gc.Node, t *gc.Node) {
   177  	if gc.Debug['M'] != 0 {
   178  		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
   179  	}
   180  
   181  	ft := int(gc.Simsimtype(f.Type))
   182  	tt := int(gc.Simsimtype(t.Type))
   183  	cvt := (*gc.Type)(t.Type)
   184  
   185  	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
   186  		gc.Complexmove(f, t)
   187  		return
   188  	}
   189  
   190  	// cannot have two memory operands
   191  	var r2 gc.Node
   192  	var r1 gc.Node
   193  	var a int
   194  	if gc.Ismem(f) && gc.Ismem(t) {
   195  		goto hard
   196  	}
   197  
   198  	// convert constant to desired type
   199  	if f.Op == gc.OLITERAL {
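        		// Narrow integer constants are widened to 64 bits, loaded into a
        		// register, and then moved (with truncation) to the destination.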
   200  		var con gc.Node
   201  		switch tt {
   202  		default:
   203  			f.Convconst(&con, t.Type)
   204  
   205  		case gc.TINT32,
   206  			gc.TINT16,
   207  			gc.TINT8:
   208  			var con gc.Node
   209  			f.Convconst(&con, gc.Types[gc.TINT64])
   210  			var r1 gc.Node
   211  			gc.Regalloc(&r1, con.Type, t)
   212  			gins(ppc64.AMOVD, &con, &r1)
   213  			gmove(&r1, t)
   214  			gc.Regfree(&r1)
   215  			return
   216  
   217  		case gc.TUINT32,
   218  			gc.TUINT16,
   219  			gc.TUINT8:
   220  			var con gc.Node
   221  			f.Convconst(&con, gc.Types[gc.TUINT64])
   222  			var r1 gc.Node
   223  			gc.Regalloc(&r1, con.Type, t)
   224  			gins(ppc64.AMOVD, &con, &r1)
   225  			gmove(&r1, t)
   226  			gc.Regfree(&r1)
   227  			return
   228  		}
   229  
   230  		f = &con
   231  		ft = tt // so big switch will choose a simple mov
   232  
   233  		// constants can't move directly to memory.
   234  		if gc.Ismem(t) {
   235  			goto hard
   236  		}
   237  	}
   238  
   239  	// float constants come from memory.
   240  	//if(isfloat[tt])
   241  	//	goto hard;
   242  
   243  	// 64-bit immediates are also from memory.
   244  	//if(isint[tt])
   245  	//	goto hard;
   246  	//// 64-bit immediates are really 32-bit sign-extended
   247  	//// unless moving into a register.
   248  	//if(isint[tt]) {
   249  	//	if(mpcmpfixfix(con.val.u.xval, minintval[TINT32]) < 0)
   250  	//		goto hard;
   251  	//	if(mpcmpfixfix(con.val.u.xval, maxintval[TINT32]) > 0)
   252  	//		goto hard;
   253  	//}
   254  
   255  	// value -> value copy, only one memory operand.
   256  	// figure out the instruction to use.
   257  	// break out of switch for one-instruction gins.
   258  	// goto rdst for "destination must be register".
   259  	// goto hard for "convert to cvt type first".
   260  	// otherwise handle and return.
   261  
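        	// The switch key packs the source type into the high 16 bits and the
        	// destination type into the low 16 bits.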
   262  	switch uint32(ft)<<16 | uint32(tt) {
   263  	default:
   264  		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
   265  
   266  		/*
   267  		 * integer copy and truncate
   268  		 */
   269  	case gc.TINT8<<16 | gc.TINT8, // same size
   270  		gc.TUINT8<<16 | gc.TINT8,
   271  		gc.TINT16<<16 | gc.TINT8,
   272  		// truncate
   273  		gc.TUINT16<<16 | gc.TINT8,
   274  		gc.TINT32<<16 | gc.TINT8,
   275  		gc.TUINT32<<16 | gc.TINT8,
   276  		gc.TINT64<<16 | gc.TINT8,
   277  		gc.TUINT64<<16 | gc.TINT8:
   278  		a = ppc64.AMOVB
   279  
   280  	case gc.TINT8<<16 | gc.TUINT8, // same size
   281  		gc.TUINT8<<16 | gc.TUINT8,
   282  		gc.TINT16<<16 | gc.TUINT8,
   283  		// truncate
   284  		gc.TUINT16<<16 | gc.TUINT8,
   285  		gc.TINT32<<16 | gc.TUINT8,
   286  		gc.TUINT32<<16 | gc.TUINT8,
   287  		gc.TINT64<<16 | gc.TUINT8,
   288  		gc.TUINT64<<16 | gc.TUINT8:
   289  		a = ppc64.AMOVBZ
   290  
   291  	case gc.TINT16<<16 | gc.TINT16, // same size
   292  		gc.TUINT16<<16 | gc.TINT16,
   293  		gc.TINT32<<16 | gc.TINT16,
   294  		// truncate
   295  		gc.TUINT32<<16 | gc.TINT16,
   296  		gc.TINT64<<16 | gc.TINT16,
   297  		gc.TUINT64<<16 | gc.TINT16:
   298  		a = ppc64.AMOVH
   299  
   300  	case gc.TINT16<<16 | gc.TUINT16, // same size
   301  		gc.TUINT16<<16 | gc.TUINT16,
   302  		gc.TINT32<<16 | gc.TUINT16,
   303  		// truncate
   304  		gc.TUINT32<<16 | gc.TUINT16,
   305  		gc.TINT64<<16 | gc.TUINT16,
   306  		gc.TUINT64<<16 | gc.TUINT16:
   307  		a = ppc64.AMOVHZ
   308  
   309  	case gc.TINT32<<16 | gc.TINT32, // same size
   310  		gc.TUINT32<<16 | gc.TINT32,
   311  		gc.TINT64<<16 | gc.TINT32,
   312  		// truncate
   313  		gc.TUINT64<<16 | gc.TINT32:
   314  		a = ppc64.AMOVW
   315  
   316  	case gc.TINT32<<16 | gc.TUINT32, // same size
   317  		gc.TUINT32<<16 | gc.TUINT32,
   318  		gc.TINT64<<16 | gc.TUINT32,
   319  		gc.TUINT64<<16 | gc.TUINT32:
   320  		a = ppc64.AMOVWZ
   321  
   322  	case gc.TINT64<<16 | gc.TINT64, // same size
   323  		gc.TINT64<<16 | gc.TUINT64,
   324  		gc.TUINT64<<16 | gc.TINT64,
   325  		gc.TUINT64<<16 | gc.TUINT64:
   326  		a = ppc64.AMOVD
   327  
   328  		/*
   329  		 * integer up-conversions
   330  		 */
   331  	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
   332  		gc.TINT8<<16 | gc.TUINT16,
   333  		gc.TINT8<<16 | gc.TINT32,
   334  		gc.TINT8<<16 | gc.TUINT32,
   335  		gc.TINT8<<16 | gc.TINT64,
   336  		gc.TINT8<<16 | gc.TUINT64:
   337  		a = ppc64.AMOVB
   338  
   339  		goto rdst
   340  
   341  	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
   342  		gc.TUINT8<<16 | gc.TUINT16,
   343  		gc.TUINT8<<16 | gc.TINT32,
   344  		gc.TUINT8<<16 | gc.TUINT32,
   345  		gc.TUINT8<<16 | gc.TINT64,
   346  		gc.TUINT8<<16 | gc.TUINT64:
   347  		a = ppc64.AMOVBZ
   348  
   349  		goto rdst
   350  
   351  	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
   352  		gc.TINT16<<16 | gc.TUINT32,
   353  		gc.TINT16<<16 | gc.TINT64,
   354  		gc.TINT16<<16 | gc.TUINT64:
   355  		a = ppc64.AMOVH
   356  
   357  		goto rdst
   358  
   359  	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
   360  		gc.TUINT16<<16 | gc.TUINT32,
   361  		gc.TUINT16<<16 | gc.TINT64,
   362  		gc.TUINT16<<16 | gc.TUINT64:
   363  		a = ppc64.AMOVHZ
   364  
   365  		goto rdst
   366  
   367  	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
   368  		gc.TINT32<<16 | gc.TUINT64:
   369  		a = ppc64.AMOVW
   370  
   371  		goto rdst
   372  
   373  	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
   374  		gc.TUINT32<<16 | gc.TUINT64:
   375  		a = ppc64.AMOVWZ
   376  
   377  		goto rdst
   378  
   379  		//warn("gmove: convert float to int not implemented: %N -> %N\n", f, t);
   380  	//return;
   381  	// algorithm is:
   382  	//	if small enough, use native float64 -> int64 conversion.
   383  	//	otherwise, subtract 2^63, convert, and add it back.
   384  	/*
   385  	 * float to integer
   386  	 */
   387  	case gc.TFLOAT32<<16 | gc.TINT32,
   388  		gc.TFLOAT64<<16 | gc.TINT32,
   389  		gc.TFLOAT32<<16 | gc.TINT64,
   390  		gc.TFLOAT64<<16 | gc.TINT64,
   391  		gc.TFLOAT32<<16 | gc.TINT16,
   392  		gc.TFLOAT32<<16 | gc.TINT8,
   393  		gc.TFLOAT32<<16 | gc.TUINT16,
   394  		gc.TFLOAT32<<16 | gc.TUINT8,
   395  		gc.TFLOAT64<<16 | gc.TINT16,
   396  		gc.TFLOAT64<<16 | gc.TINT8,
   397  		gc.TFLOAT64<<16 | gc.TUINT16,
   398  		gc.TFLOAT64<<16 | gc.TUINT8,
   399  		gc.TFLOAT32<<16 | gc.TUINT32,
   400  		gc.TFLOAT64<<16 | gc.TUINT32,
   401  		gc.TFLOAT32<<16 | gc.TUINT64,
   402  		gc.TFLOAT64<<16 | gc.TUINT64:
   403  		bignodes()
   404  
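        		// Move the float into a register. For unsigned 64-bit destinations,
        		// values >= 2^63 have 2^63 subtracted before the conversion (and added
        		// back afterwards). FCTIDZ converts to an integer in an FPR, which is
        		// transferred to a GPR through the scratch slot at -8(SP).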
   405  		var r1 gc.Node
   406  		gc.Regalloc(&r1, gc.Types[ft], f)
   407  		gmove(f, &r1)
   408  		if tt == gc.TUINT64 {
   409  			gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
   410  			gmove(&bigf, &r2)
   411  			gins(ppc64.AFCMPU, &r1, &r2)
   412  			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
   413  			gins(ppc64.AFSUB, &r2, &r1)
   414  			gc.Patch(p1, gc.Pc)
   415  			gc.Regfree(&r2)
   416  		}
   417  
   418  		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
   419  		var r3 gc.Node
   420  		gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
   421  		gins(ppc64.AFCTIDZ, &r1, &r2)
   422  		p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
   423  		p1.To.Type = obj.TYPE_MEM
   424  		p1.To.Reg = ppc64.REGSP
   425  		p1.To.Offset = -8
   426  		p1 = gins(ppc64.AMOVD, nil, &r3)
   427  		p1.From.Type = obj.TYPE_MEM
   428  		p1.From.Reg = ppc64.REGSP
   429  		p1.From.Offset = -8
   430  		gc.Regfree(&r2)
   431  		gc.Regfree(&r1)
   432  		if tt == gc.TUINT64 {
   433  			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
   434  			gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
   435  			gins(ppc64.AMOVD, &bigi, &r1)
   436  			gins(ppc64.AADD, &r1, &r3)
   437  			gc.Patch(p1, gc.Pc)
   438  		}
   439  
   440  		gmove(&r3, t)
   441  		gc.Regfree(&r3)
   442  		return
   443  
   444  		//warn("gmove: convert int to float not implemented: %N -> %N\n", f, t);
   445  	//return;
   446  	// algorithm is:
   447  	//	if small enough, use native int64 -> float64 conversion.
   448  	//	otherwise, halve (rounding to odd?), convert, and double.
   449  	/*
   450  	 * integer to float
   451  	 */
   452  	case gc.TINT32<<16 | gc.TFLOAT32,
   453  		gc.TINT32<<16 | gc.TFLOAT64,
   454  		gc.TINT64<<16 | gc.TFLOAT32,
   455  		gc.TINT64<<16 | gc.TFLOAT64,
   456  		gc.TINT16<<16 | gc.TFLOAT32,
   457  		gc.TINT16<<16 | gc.TFLOAT64,
   458  		gc.TINT8<<16 | gc.TFLOAT32,
   459  		gc.TINT8<<16 | gc.TFLOAT64,
   460  		gc.TUINT16<<16 | gc.TFLOAT32,
   461  		gc.TUINT16<<16 | gc.TFLOAT64,
   462  		gc.TUINT8<<16 | gc.TFLOAT32,
   463  		gc.TUINT8<<16 | gc.TFLOAT64,
   464  		gc.TUINT32<<16 | gc.TFLOAT32,
   465  		gc.TUINT32<<16 | gc.TFLOAT64,
   466  		gc.TUINT64<<16 | gc.TFLOAT32,
   467  		gc.TUINT64<<16 | gc.TFLOAT64:
   468  		bignodes()
   469  
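        		// Move the integer into a register and transfer it to an FPR through
        		// the scratch slot at -8(SP), then convert with FCFID. Unsigned 64-bit
        		// sources >= 2^63 are halved (shifted right by one) before the
        		// conversion and the result is doubled afterwards.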
   470  		var r1 gc.Node
   471  		gc.Regalloc(&r1, gc.Types[gc.TINT64], nil)
   472  		gmove(f, &r1)
   473  		if ft == gc.TUINT64 {
   474  			gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
   475  			gmove(&bigi, &r2)
   476  			gins(ppc64.ACMPU, &r1, &r2)
   477  			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1))
   478  			p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1))
   479  			p2.From.Type = obj.TYPE_CONST
   480  			p2.From.Offset = 1
   481  			gc.Patch(p1, gc.Pc)
   482  		}
   483  
   484  		gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
   485  		p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
   486  		p1.To.Type = obj.TYPE_MEM
   487  		p1.To.Reg = ppc64.REGSP
   488  		p1.To.Offset = -8
   489  		p1 = gins(ppc64.AFMOVD, nil, &r2)
   490  		p1.From.Type = obj.TYPE_MEM
   491  		p1.From.Reg = ppc64.REGSP
   492  		p1.From.Offset = -8
   493  		gins(ppc64.AFCFID, &r2, &r2)
   494  		gc.Regfree(&r1)
   495  		if ft == gc.TUINT64 {
   496  			p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
   497  			gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
   498  			gins(ppc64.AFMUL, &r1, &r2)
   499  			gc.Patch(p1, gc.Pc)
   500  		}
   501  
   502  		gmove(&r2, t)
   503  		gc.Regfree(&r2)
   504  		return
   505  
   506  		/*
   507  		 * float to float
   508  		 */
   509  	case gc.TFLOAT32<<16 | gc.TFLOAT32:
   510  		a = ppc64.AFMOVS
   511  
   512  	case gc.TFLOAT64<<16 | gc.TFLOAT64:
   513  		a = ppc64.AFMOVD
   514  
   515  	case gc.TFLOAT32<<16 | gc.TFLOAT64:
   516  		a = ppc64.AFMOVS
   517  		goto rdst
   518  
   519  	case gc.TFLOAT64<<16 | gc.TFLOAT32:
   520  		a = ppc64.AFRSP
   521  		goto rdst
   522  	}
   523  
   524  	gins(a, f, t)
   525  	return
   526  
   527  	// requires register destination
   528  rdst:
   529  	{
   530  		gc.Regalloc(&r1, t.Type, t)
   531  
   532  		gins(a, f, &r1)
   533  		gmove(&r1, t)
   534  		gc.Regfree(&r1)
   535  		return
   536  	}
   537  
   538  	// requires register intermediate
   539  hard:
   540  	gc.Regalloc(&r1, cvt, t)
   541  
   542  	gmove(f, &r1)
   543  	gmove(&r1, t)
   544  	gc.Regfree(&r1)
   545  	return
   546  }
   547  
   548  // gins is called by the front end.
   549  // It synthesizes some multiple-instruction sequences
   550  // so the front end can stay simpler.
   551  func gins(as int, f, t *gc.Node) *obj.Prog {
   552  	if as >= obj.A_ARCHSPECIFIC {
   553  		if x, ok := f.IntLiteral(); ok {
   554  			ginscon(as, x, t)
   555  			return nil // caller must not use
   556  		}
   557  	}
   558  	if as == ppc64.ACMP || as == ppc64.ACMPU {
   559  		if x, ok := t.IntLiteral(); ok {
   560  			ginscon2(as, f, x)
   561  			return nil // caller must not use
   562  		}
   563  	}
   564  	return rawgins(as, f, t)
   565  }
   566  
   567  /*
   568   * generate one instruction:
   569   *	as f, t
   570   */
   571  func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
   572  	// TODO(austin): Add self-move test like in 6g (but be careful
   573  	// of truncation moves)
   574  
   575  	p := gc.Prog(as)
   576  	gc.Naddr(&p.From, f)
   577  	gc.Naddr(&p.To, t)
   578  
   579  	switch as {
   580  	case obj.ACALL:
   581  		if p.To.Type == obj.TYPE_REG && p.To.Reg != ppc64.REG_CTR {
   582  			// Allow front end to emit CALL REG, and rewrite into MOV REG, CTR; CALL CTR.
   583  			pp := gc.Prog(as)
   584  			pp.From = p.From
   585  			pp.To.Type = obj.TYPE_REG
   586  			pp.To.Reg = ppc64.REG_CTR
   587  
   588  			p.As = ppc64.AMOVD
   589  			p.From = p.To
   590  			p.To.Type = obj.TYPE_REG
   591  			p.To.Reg = ppc64.REG_CTR
   592  
   593  			if gc.Debug['g'] != 0 {
   594  				fmt.Printf("%v\n", p)
   595  				fmt.Printf("%v\n", pp)
   596  			}
   597  
   598  			return pp
   599  		}
   600  
   601  	// Bad things the front end has done to us. Crash to find call stack.
   602  	case ppc64.AAND, ppc64.AMULLD:
   603  		if p.From.Type == obj.TYPE_CONST {
   604  			gc.Debug['h'] = 1
   605  			gc.Fatalf("bad inst: %v", p)
   606  		}
   607  	case ppc64.ACMP, ppc64.ACMPU:
   608  		if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
   609  			gc.Debug['h'] = 1
   610  			gc.Fatalf("bad inst: %v", p)
   611  		}
   612  	}
   613  
   614  	if gc.Debug['g'] != 0 {
   615  		fmt.Printf("%v\n", p)
   616  	}
   617  
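        	// Sanity-check operand widths: w is the width in bytes implied by the
        	// move opcode, and a mismatch is an internal error.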
   618  	w := int32(0)
   619  	switch as {
   620  	case ppc64.AMOVB,
   621  		ppc64.AMOVBU,
   622  		ppc64.AMOVBZ,
   623  		ppc64.AMOVBZU:
   624  		w = 1
   625  
   626  	case ppc64.AMOVH,
   627  		ppc64.AMOVHU,
   628  		ppc64.AMOVHZ,
   629  		ppc64.AMOVHZU:
   630  		w = 2
   631  
   632  	case ppc64.AMOVW,
   633  		ppc64.AMOVWU,
   634  		ppc64.AMOVWZ,
   635  		ppc64.AMOVWZU:
   636  		w = 4
   637  
   638  	case ppc64.AMOVD,
   639  		ppc64.AMOVDU:
   640  		if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
   641  			break
   642  		}
   643  		w = 8
   644  	}
   645  
   646  	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
   647  		gc.Dump("f", f)
   648  		gc.Dump("t", t)
   649  		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
   650  	}
   651  
   652  	return p
   653  }
   654  
   655  /*
   656   * return Axxx for Oxxx on type t.
   657   */
   658  func optoas(op gc.Op, t *gc.Type) int {
   659  	if t == nil {
   660  		gc.Fatalf("optoas: t is nil")
   661  	}
   662  
   663  	// avoid constant conversions in switches below
   664  	const (
   665  		OMINUS_ = uint32(gc.OMINUS) << 16
   666  		OLSH_   = uint32(gc.OLSH) << 16
   667  		ORSH_   = uint32(gc.ORSH) << 16
   668  		OADD_   = uint32(gc.OADD) << 16
   669  		OSUB_   = uint32(gc.OSUB) << 16
   670  		OMUL_   = uint32(gc.OMUL) << 16
   671  		ODIV_   = uint32(gc.ODIV) << 16
   672  		OOR_    = uint32(gc.OOR) << 16
   673  		OAND_   = uint32(gc.OAND) << 16
   674  		OXOR_   = uint32(gc.OXOR) << 16
   675  		OEQ_    = uint32(gc.OEQ) << 16
   676  		ONE_    = uint32(gc.ONE) << 16
   677  		OLT_    = uint32(gc.OLT) << 16
   678  		OLE_    = uint32(gc.OLE) << 16
   679  		OGE_    = uint32(gc.OGE) << 16
   680  		OGT_    = uint32(gc.OGT) << 16
   681  		OCMP_   = uint32(gc.OCMP) << 16
   682  		OAS_    = uint32(gc.OAS) << 16
   683  		OHMUL_  = uint32(gc.OHMUL) << 16
   684  	)
   685  
   686  	a := int(obj.AXXX)
   687  	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
   688  	default:
   689  		gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
   690  
   691  	case OEQ_ | gc.TBOOL,
   692  		OEQ_ | gc.TINT8,
   693  		OEQ_ | gc.TUINT8,
   694  		OEQ_ | gc.TINT16,
   695  		OEQ_ | gc.TUINT16,
   696  		OEQ_ | gc.TINT32,
   697  		OEQ_ | gc.TUINT32,
   698  		OEQ_ | gc.TINT64,
   699  		OEQ_ | gc.TUINT64,
   700  		OEQ_ | gc.TPTR32,
   701  		OEQ_ | gc.TPTR64,
   702  		OEQ_ | gc.TFLOAT32,
   703  		OEQ_ | gc.TFLOAT64:
   704  		a = ppc64.ABEQ
   705  
   706  	case ONE_ | gc.TBOOL,
   707  		ONE_ | gc.TINT8,
   708  		ONE_ | gc.TUINT8,
   709  		ONE_ | gc.TINT16,
   710  		ONE_ | gc.TUINT16,
   711  		ONE_ | gc.TINT32,
   712  		ONE_ | gc.TUINT32,
   713  		ONE_ | gc.TINT64,
   714  		ONE_ | gc.TUINT64,
   715  		ONE_ | gc.TPTR32,
   716  		ONE_ | gc.TPTR64,
   717  		ONE_ | gc.TFLOAT32,
   718  		ONE_ | gc.TFLOAT64:
   719  		a = ppc64.ABNE
   720  
   721  	case OLT_ | gc.TINT8, // ACMP
   722  		OLT_ | gc.TINT16,
   723  		OLT_ | gc.TINT32,
   724  		OLT_ | gc.TINT64,
   725  		OLT_ | gc.TUINT8,
   726  		// ACMPU
   727  		OLT_ | gc.TUINT16,
   728  		OLT_ | gc.TUINT32,
   729  		OLT_ | gc.TUINT64,
   730  		OLT_ | gc.TFLOAT32,
   731  		// AFCMPU
   732  		OLT_ | gc.TFLOAT64:
   733  		a = ppc64.ABLT
   734  
   735  	case OLE_ | gc.TINT8, // ACMP
   736  		OLE_ | gc.TINT16,
   737  		OLE_ | gc.TINT32,
   738  		OLE_ | gc.TINT64,
   739  		OLE_ | gc.TUINT8,
   740  		// ACMPU
   741  		OLE_ | gc.TUINT16,
   742  		OLE_ | gc.TUINT32,
   743  		OLE_ | gc.TUINT64:
   744  		// No OLE for floats, because it mishandles NaN.
   745  		// Front end must reverse comparison or use OLT and OEQ together.
   746  		a = ppc64.ABLE
   747  
   748  	case OGT_ | gc.TINT8,
   749  		OGT_ | gc.TINT16,
   750  		OGT_ | gc.TINT32,
   751  		OGT_ | gc.TINT64,
   752  		OGT_ | gc.TUINT8,
   753  		OGT_ | gc.TUINT16,
   754  		OGT_ | gc.TUINT32,
   755  		OGT_ | gc.TUINT64,
   756  		OGT_ | gc.TFLOAT32,
   757  		OGT_ | gc.TFLOAT64:
   758  		a = ppc64.ABGT
   759  
   760  	case OGE_ | gc.TINT8,
   761  		OGE_ | gc.TINT16,
   762  		OGE_ | gc.TINT32,
   763  		OGE_ | gc.TINT64,
   764  		OGE_ | gc.TUINT8,
   765  		OGE_ | gc.TUINT16,
   766  		OGE_ | gc.TUINT32,
   767  		OGE_ | gc.TUINT64:
   768  		// No OGE for floats, because it mishandles NaN.
   769  		// Front end must reverse comparison or use OLT and OEQ together.
   770  		a = ppc64.ABGE
   771  
   772  	case OCMP_ | gc.TBOOL,
   773  		OCMP_ | gc.TINT8,
   774  		OCMP_ | gc.TINT16,
   775  		OCMP_ | gc.TINT32,
   776  		OCMP_ | gc.TPTR32,
   777  		OCMP_ | gc.TINT64:
   778  		a = ppc64.ACMP
   779  
   780  	case OCMP_ | gc.TUINT8,
   781  		OCMP_ | gc.TUINT16,
   782  		OCMP_ | gc.TUINT32,
   783  		OCMP_ | gc.TUINT64,
   784  		OCMP_ | gc.TPTR64:
   785  		a = ppc64.ACMPU
   786  
   787  	case OCMP_ | gc.TFLOAT32,
   788  		OCMP_ | gc.TFLOAT64:
   789  		a = ppc64.AFCMPU
   790  
   791  	case OAS_ | gc.TBOOL,
   792  		OAS_ | gc.TINT8:
   793  		a = ppc64.AMOVB
   794  
   795  	case OAS_ | gc.TUINT8:
   796  		a = ppc64.AMOVBZ
   797  
   798  	case OAS_ | gc.TINT16:
   799  		a = ppc64.AMOVH
   800  
   801  	case OAS_ | gc.TUINT16:
   802  		a = ppc64.AMOVHZ
   803  
   804  	case OAS_ | gc.TINT32:
   805  		a = ppc64.AMOVW
   806  
   807  	case OAS_ | gc.TUINT32,
   808  		OAS_ | gc.TPTR32:
   809  		a = ppc64.AMOVWZ
   810  
   811  	case OAS_ | gc.TINT64,
   812  		OAS_ | gc.TUINT64,
   813  		OAS_ | gc.TPTR64:
   814  		a = ppc64.AMOVD
   815  
   816  	case OAS_ | gc.TFLOAT32:
   817  		a = ppc64.AFMOVS
   818  
   819  	case OAS_ | gc.TFLOAT64:
   820  		a = ppc64.AFMOVD
   821  
   822  	case OADD_ | gc.TINT8,
   823  		OADD_ | gc.TUINT8,
   824  		OADD_ | gc.TINT16,
   825  		OADD_ | gc.TUINT16,
   826  		OADD_ | gc.TINT32,
   827  		OADD_ | gc.TUINT32,
   828  		OADD_ | gc.TPTR32,
   829  		OADD_ | gc.TINT64,
   830  		OADD_ | gc.TUINT64,
   831  		OADD_ | gc.TPTR64:
   832  		a = ppc64.AADD
   833  
   834  	case OADD_ | gc.TFLOAT32:
   835  		a = ppc64.AFADDS
   836  
   837  	case OADD_ | gc.TFLOAT64:
   838  		a = ppc64.AFADD
   839  
   840  	case OSUB_ | gc.TINT8,
   841  		OSUB_ | gc.TUINT8,
   842  		OSUB_ | gc.TINT16,
   843  		OSUB_ | gc.TUINT16,
   844  		OSUB_ | gc.TINT32,
   845  		OSUB_ | gc.TUINT32,
   846  		OSUB_ | gc.TPTR32,
   847  		OSUB_ | gc.TINT64,
   848  		OSUB_ | gc.TUINT64,
   849  		OSUB_ | gc.TPTR64:
   850  		a = ppc64.ASUB
   851  
   852  	case OSUB_ | gc.TFLOAT32:
   853  		a = ppc64.AFSUBS
   854  
   855  	case OSUB_ | gc.TFLOAT64:
   856  		a = ppc64.AFSUB
   857  
   858  	case OMINUS_ | gc.TINT8,
   859  		OMINUS_ | gc.TUINT8,
   860  		OMINUS_ | gc.TINT16,
   861  		OMINUS_ | gc.TUINT16,
   862  		OMINUS_ | gc.TINT32,
   863  		OMINUS_ | gc.TUINT32,
   864  		OMINUS_ | gc.TPTR32,
   865  		OMINUS_ | gc.TINT64,
   866  		OMINUS_ | gc.TUINT64,
   867  		OMINUS_ | gc.TPTR64:
   868  		a = ppc64.ANEG
   869  
   870  	case OAND_ | gc.TINT8,
   871  		OAND_ | gc.TUINT8,
   872  		OAND_ | gc.TINT16,
   873  		OAND_ | gc.TUINT16,
   874  		OAND_ | gc.TINT32,
   875  		OAND_ | gc.TUINT32,
   876  		OAND_ | gc.TPTR32,
   877  		OAND_ | gc.TINT64,
   878  		OAND_ | gc.TUINT64,
   879  		OAND_ | gc.TPTR64:
   880  		a = ppc64.AAND
   881  
   882  	case OOR_ | gc.TINT8,
   883  		OOR_ | gc.TUINT8,
   884  		OOR_ | gc.TINT16,
   885  		OOR_ | gc.TUINT16,
   886  		OOR_ | gc.TINT32,
   887  		OOR_ | gc.TUINT32,
   888  		OOR_ | gc.TPTR32,
   889  		OOR_ | gc.TINT64,
   890  		OOR_ | gc.TUINT64,
   891  		OOR_ | gc.TPTR64:
   892  		a = ppc64.AOR
   893  
   894  	case OXOR_ | gc.TINT8,
   895  		OXOR_ | gc.TUINT8,
   896  		OXOR_ | gc.TINT16,
   897  		OXOR_ | gc.TUINT16,
   898  		OXOR_ | gc.TINT32,
   899  		OXOR_ | gc.TUINT32,
   900  		OXOR_ | gc.TPTR32,
   901  		OXOR_ | gc.TINT64,
   902  		OXOR_ | gc.TUINT64,
   903  		OXOR_ | gc.TPTR64:
   904  		a = ppc64.AXOR
   905  
   906  		// TODO(minux): handle rotates
   907  	//case CASE(OLROT, TINT8):
   908  	//case CASE(OLROT, TUINT8):
   909  	//case CASE(OLROT, TINT16):
   910  	//case CASE(OLROT, TUINT16):
   911  	//case CASE(OLROT, TINT32):
   912  	//case CASE(OLROT, TUINT32):
   913  	//case CASE(OLROT, TPTR32):
   914  	//case CASE(OLROT, TINT64):
   915  	//case CASE(OLROT, TUINT64):
   916  	//case CASE(OLROT, TPTR64):
   917  	//	a = 0//???; RLDC?
   918  	//	break;
   919  
   920  	case OLSH_ | gc.TINT8,
   921  		OLSH_ | gc.TUINT8,
   922  		OLSH_ | gc.TINT16,
   923  		OLSH_ | gc.TUINT16,
   924  		OLSH_ | gc.TINT32,
   925  		OLSH_ | gc.TUINT32,
   926  		OLSH_ | gc.TPTR32,
   927  		OLSH_ | gc.TINT64,
   928  		OLSH_ | gc.TUINT64,
   929  		OLSH_ | gc.TPTR64:
   930  		a = ppc64.ASLD
   931  
   932  	case ORSH_ | gc.TUINT8,
   933  		ORSH_ | gc.TUINT16,
   934  		ORSH_ | gc.TUINT32,
   935  		ORSH_ | gc.TPTR32,
   936  		ORSH_ | gc.TUINT64,
   937  		ORSH_ | gc.TPTR64:
   938  		a = ppc64.ASRD
   939  
   940  	case ORSH_ | gc.TINT8,
   941  		ORSH_ | gc.TINT16,
   942  		ORSH_ | gc.TINT32,
   943  		ORSH_ | gc.TINT64:
   944  		a = ppc64.ASRAD
   945  
   946  		// TODO(minux): handle rotates
   947  	//case CASE(ORROTC, TINT8):
   948  	//case CASE(ORROTC, TUINT8):
   949  	//case CASE(ORROTC, TINT16):
   950  	//case CASE(ORROTC, TUINT16):
   951  	//case CASE(ORROTC, TINT32):
   952  	//case CASE(ORROTC, TUINT32):
   953  	//case CASE(ORROTC, TINT64):
   954  	//case CASE(ORROTC, TUINT64):
   955  	//	a = 0//??? RLDC??
   956  	//	break;
   957  
   958  	case OHMUL_ | gc.TINT64:
   959  		a = ppc64.AMULHD
   960  
   961  	case OHMUL_ | gc.TUINT64,
   962  		OHMUL_ | gc.TPTR64:
   963  		a = ppc64.AMULHDU
   964  
   965  	case OMUL_ | gc.TINT8,
   966  		OMUL_ | gc.TINT16,
   967  		OMUL_ | gc.TINT32,
   968  		OMUL_ | gc.TINT64:
   969  		a = ppc64.AMULLD
   970  
   971  	case OMUL_ | gc.TUINT8,
   972  		OMUL_ | gc.TUINT16,
   973  		OMUL_ | gc.TUINT32,
   974  		OMUL_ | gc.TPTR32,
   975  		// don't use word multiply, the high 32 bits are undefined.
   976  		OMUL_ | gc.TUINT64,
   977  		OMUL_ | gc.TPTR64:
   978  		// for 64-bit multiplies, signedness doesn't matter.
   979  		a = ppc64.AMULLD
   980  
   981  	case OMUL_ | gc.TFLOAT32:
   982  		a = ppc64.AFMULS
   983  
   984  	case OMUL_ | gc.TFLOAT64:
   985  		a = ppc64.AFMUL
   986  
   987  	case ODIV_ | gc.TINT8,
   988  		ODIV_ | gc.TINT16,
   989  		ODIV_ | gc.TINT32,
   990  		ODIV_ | gc.TINT64:
   991  		a = ppc64.ADIVD
   992  
   993  	case ODIV_ | gc.TUINT8,
   994  		ODIV_ | gc.TUINT16,
   995  		ODIV_ | gc.TUINT32,
   996  		ODIV_ | gc.TPTR32,
   997  		ODIV_ | gc.TUINT64,
   998  		ODIV_ | gc.TPTR64:
   999  		a = ppc64.ADIVDU
  1000  
  1001  	case ODIV_ | gc.TFLOAT32:
  1002  		a = ppc64.AFDIVS
  1003  
  1004  	case ODIV_ | gc.TFLOAT64:
  1005  		a = ppc64.AFDIV
  1006  	}
  1007  
  1008  	return a
  1009  }
  1010  
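        // Addressing flags; currently unreferenced because xgen and sudoaddable
        // below are stubs.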
  1011  const (
  1012  	ODynam   = 1 << 0
  1013  	OAddable = 1 << 1
  1014  )
  1015  
  1016  func xgen(n *gc.Node, a *gc.Node, o int) bool {
  1017  	// TODO(minux)
  1018  
  1019  	return true // always true (stub; see TODO above)
  1020  }
  1021  
  1022  func sudoclean() {
  1023  	return
  1024  }
  1025  
  1026  /*
  1027   * generate code to compute address of n,
  1028   * a reference to a (perhaps nested) field inside
  1029   * an array or struct.
  1030   * return false on failure, true on success.
  1031   * on success, leaves usable address in a.
  1032   *
  1033   * caller is responsible for calling sudoclean
  1034   * after successful sudoaddable,
  1035   * to release the register used for a.
  1036   */
  1037  func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
  1038  	// TODO(minux)
  1039  
  1040  	*a = obj.Addr{}
  1041  	return false
  1042  }