github.com/euank/go@v0.0.0-20160829210321-495514729181/src/cmd/compile/internal/ppc64/ssa.go (about)

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package ppc64
     6  
     7  import (
     8  	"cmd/compile/internal/gc"
     9  	"cmd/compile/internal/ssa"
    10  	"cmd/internal/obj"
    11  	"cmd/internal/obj/ppc64"
    12  	"math"
    13  )
    14  
// ssaRegToReg maps an SSA-allocated register number (the slice index) to
// the corresponding hardware register constant from cmd/internal/obj/ppc64.
// The order of entries therefore must match the register numbering used by
// the SSA register allocator for ppc64 — do not reorder. Commented-out
// entries are registers the allocator never hands out (reserved or
// special-purpose registers), kept here to document the gaps.
var ssaRegToReg = []int16{
	// ppc64.REGZERO, // not an SSA reg
	ppc64.REGSP,
	ppc64.REG_R2,
	ppc64.REG_R3,
	ppc64.REG_R4,
	ppc64.REG_R5,
	ppc64.REG_R6,
	ppc64.REG_R7,
	ppc64.REG_R8,
	ppc64.REG_R9,
	ppc64.REG_R10,
	ppc64.REGCTXT, // R11, the closure context register
	ppc64.REG_R12,
	ppc64.REG_R13,
	ppc64.REG_R14,
	ppc64.REG_R15,
	ppc64.REG_R16,
	ppc64.REG_R17,
	ppc64.REG_R18,
	ppc64.REG_R19,
	ppc64.REG_R20,
	ppc64.REG_R21,
	ppc64.REG_R22,
	ppc64.REG_R23,
	ppc64.REG_R24,
	ppc64.REG_R25,
	ppc64.REG_R26,
	ppc64.REG_R27,
	ppc64.REG_R28,
	ppc64.REG_R29,
	ppc64.REGG,
	ppc64.REGTMP,

	ppc64.REG_F0,
	ppc64.REG_F1,
	ppc64.REG_F2,
	ppc64.REG_F3,
	ppc64.REG_F4,
	ppc64.REG_F5,
	ppc64.REG_F6,
	ppc64.REG_F7,
	ppc64.REG_F8,
	ppc64.REG_F9,
	ppc64.REG_F10,
	ppc64.REG_F11,
	ppc64.REG_F12,
	ppc64.REG_F13,
	ppc64.REG_F14,
	ppc64.REG_F15,
	ppc64.REG_F16,
	ppc64.REG_F17,
	ppc64.REG_F18,
	ppc64.REG_F19,
	ppc64.REG_F20,
	ppc64.REG_F21,
	ppc64.REG_F22,
	ppc64.REG_F23,
	ppc64.REG_F24,
	ppc64.REG_F25,
	ppc64.REG_F26,
	// ppc64.REG_F27, // reserved for "floating conversion constant"
	// ppc64.REG_F28, // 0.0
	// ppc64.REG_F29, // 0.5
	// ppc64.REG_F30, // 1.0
	// ppc64.REG_F31, // 2.0

	// ppc64.REG_CR0,
	// ppc64.REG_CR1,
	// ppc64.REG_CR2,
	// ppc64.REG_CR3,
	// ppc64.REG_CR4,
	// ppc64.REG_CR5,
	// ppc64.REG_CR6,
	// ppc64.REG_CR7,

	// ppc64.REG_CR,
	// ppc64.REG_XER,
	// ppc64.REG_LR,
	// ppc64.REG_CTR,
}
    96  
// condOps maps a comparison-result SSA op to the conditional-branch
// instruction used when materializing its boolean value (see the
// Equal/NotEqual/... cases in ssaGenValue). The float comparisons with
// "2 branches" entries list only the first branch here; ssaGenValue emits
// the additional BEQ itself for FLessEqual/FGreaterEqual, because with
// NaN in play FCMP's <= / >= cannot be expressed by negating a single
// condition.
var condOps = map[ssa.Op]obj.As{
	ssa.OpPPC64Equal:        ppc64.ABEQ,
	ssa.OpPPC64NotEqual:     ppc64.ABNE,
	ssa.OpPPC64LessThan:     ppc64.ABLT,
	ssa.OpPPC64GreaterEqual: ppc64.ABGE,
	ssa.OpPPC64GreaterThan:  ppc64.ABGT,
	ssa.OpPPC64LessEqual:    ppc64.ABLE,

	ssa.OpPPC64FLessThan:     ppc64.ABLT, // 1 branch for FCMP
	ssa.OpPPC64FGreaterThan:  ppc64.ABGT, // 1 branch for FCMP
	ssa.OpPPC64FLessEqual:    ppc64.ABLT, // 2 branches for FCMP <=, second is BEQ
	ssa.OpPPC64FGreaterEqual: ppc64.ABGT, // 2 branches for FCMP >=, second is BEQ
}
   110  
// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
// NOTE(review): the implementation is intentionally disabled — on ppc64
// this function is currently a no-op. The commented-out body below is the
// flag-liveness backward scan used on other architectures (mark a
// MOVWconst/MOVDconst by setting a non-nil Aux while flags are live);
// it is kept for reference until/unless it is needed here.
func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {
	//	flive := b.FlagsLiveAtEnd
	//	if b.Control != nil && b.Control.Type.IsFlags() {
	//		flive = true
	//	}
	//	for i := len(b.Values) - 1; i >= 0; i-- {
	//		v := b.Values[i]
	//		if flive && (v.Op == ssa.OpPPC64MOVWconst || v.Op == ssa.OpPPC64MOVDconst) {
	//			// The "mark" is any non-nil Aux value.
	//			v.Aux = v
	//		}
	//		if v.Type.IsFlags() {
	//			flive = false
	//		}
	//		for _, a := range v.Args {
	//			if a.Type.IsFlags() {
	//				flive = true
	//			}
	//		}
	//	}
}
   133  
   134  // loadByType returns the load instruction of the given type.
   135  func loadByType(t ssa.Type) obj.As {
   136  	if t.IsFloat() {
   137  		switch t.Size() {
   138  		case 4:
   139  			return ppc64.AFMOVS
   140  		case 8:
   141  			return ppc64.AFMOVD
   142  		}
   143  	} else {
   144  		switch t.Size() {
   145  		case 1:
   146  			if t.IsSigned() {
   147  				return ppc64.AMOVB
   148  			} else {
   149  				return ppc64.AMOVBZ
   150  			}
   151  		case 2:
   152  			if t.IsSigned() {
   153  				return ppc64.AMOVH
   154  			} else {
   155  				return ppc64.AMOVHZ
   156  			}
   157  		case 4:
   158  			if t.IsSigned() {
   159  				return ppc64.AMOVW
   160  			} else {
   161  				return ppc64.AMOVWZ
   162  			}
   163  		case 8:
   164  			return ppc64.AMOVD
   165  		}
   166  	}
   167  	panic("bad load type")
   168  }
   169  
   170  // storeByType returns the store instruction of the given type.
   171  func storeByType(t ssa.Type) obj.As {
   172  	if t.IsFloat() {
   173  		switch t.Size() {
   174  		case 4:
   175  			return ppc64.AFMOVS
   176  		case 8:
   177  			return ppc64.AFMOVD
   178  		}
   179  	} else {
   180  		switch t.Size() {
   181  		case 1:
   182  			return ppc64.AMOVB
   183  		case 2:
   184  			return ppc64.AMOVH
   185  		case 4:
   186  			return ppc64.AMOVW
   187  		case 8:
   188  			return ppc64.AMOVD
   189  		}
   190  	}
   191  	panic("bad store type")
   192  }
   193  
   194  // scratchFpMem initializes an Addr (field of a Prog)
   195  // to reference the scratchpad memory for movement between
   196  // F and G registers for FP conversions.
   197  func scratchFpMem(s *gc.SSAGenState, a *obj.Addr) {
   198  	a.Type = obj.TYPE_MEM
   199  	a.Name = obj.NAME_AUTO
   200  	a.Node = s.ScratchFpMem
   201  	a.Sym = gc.Linksym(s.ScratchFpMem.Sym)
   202  	a.Reg = ppc64.REGSP
   203  }
   204  
// ssaGenValue emits the machine instructions (obj.Progs) for a single SSA
// value v, dispatching on v.Op. Register assignments come from the SSA
// register allocator via gc.SSARegNum; ops that this backend does not yet
// handle fall through to Unimplementedf.
func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
	s.SetLineno(v.Line)
	switch v.Op {
	case ssa.OpInitMem:
		// memory arg needs no code
	case ssa.OpArg:
		// input args need no code
	case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
		// nothing to do

	case ssa.OpCopy, ssa.OpPPC64MOVDconvert:
		// Register-to-register move; elided when source and destination
		// were allocated to the same register. Memory "copies" need no code.
		t := v.Type
		if t.IsMemory() {
			return
		}
		x := gc.SSARegNum(v.Args[0])
		y := gc.SSARegNum(v)
		if x != y {
			rt := obj.TYPE_REG
			op := ppc64.AMOVD

			if t.IsFloat() {
				op = ppc64.AFMOVD
			}
			p := gc.Prog(op)
			p.From.Type = rt
			p.From.Reg = x
			p.To.Type = rt
			p.To.Reg = y
		}

	case ssa.OpPPC64Xf2i64:
		// Move the raw bits of a float register into an integer register
		// by storing to the FP scratch stack slot and reloading from it.
		{
			x := gc.SSARegNum(v.Args[0])
			y := gc.SSARegNum(v)
			p := gc.Prog(ppc64.AFMOVD)
			p.From.Type = obj.TYPE_REG
			p.From.Reg = x
			scratchFpMem(s, &p.To)
			p = gc.Prog(ppc64.AMOVD)
			p.To.Type = obj.TYPE_REG
			p.To.Reg = y
			scratchFpMem(s, &p.From)
		}
	case ssa.OpPPC64Xi2f64:
		// Inverse of Xf2i64: integer register -> scratch slot -> float register.
		{
			x := gc.SSARegNum(v.Args[0])
			y := gc.SSARegNum(v)
			p := gc.Prog(ppc64.AMOVD)
			p.From.Type = obj.TYPE_REG
			p.From.Reg = x
			scratchFpMem(s, &p.To)
			p = gc.Prog(ppc64.AFMOVD)
			p.To.Type = obj.TYPE_REG
			p.To.Reg = y
			scratchFpMem(s, &p.From)
		}

	case ssa.OpPPC64LoweredGetClosurePtr:
		// Closure pointer is R11 (already)
		gc.CheckLoweredGetClosurePtr(v)

	case ssa.OpLoadReg:
		// Load a spilled value from its stack slot into a register.
		loadOp := loadByType(v.Type)
		n, off := gc.AutoVar(v.Args[0])
		p := gc.Prog(loadOp)
		p.From.Type = obj.TYPE_MEM
		p.From.Node = n
		p.From.Sym = gc.Linksym(n.Sym)
		p.From.Offset = off
		if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
			// Parameters live in the caller's frame; address them via
			// NAME_PARAM plus the node's frame offset.
			p.From.Name = obj.NAME_PARAM
			p.From.Offset += n.Xoffset
		} else {
			p.From.Name = obj.NAME_AUTO
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

	case ssa.OpStoreReg:
		// Spill a register value to its stack slot (mirror of OpLoadReg).
		storeOp := storeByType(v.Type)
		n, off := gc.AutoVar(v)
		p := gc.Prog(storeOp)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		p.To.Type = obj.TYPE_MEM
		p.To.Node = n
		p.To.Sym = gc.Linksym(n.Sym)
		p.To.Offset = off
		if n.Class == gc.PPARAM || n.Class == gc.PPARAMOUT {
			p.To.Name = obj.NAME_PARAM
			p.To.Offset += n.Xoffset
		} else {
			p.To.Name = obj.NAME_AUTO
		}

	case ssa.OpPPC64DIVD:
		// Signed 64-bit divide; guards the divisor == -1 case by emitting
		// a NEG of the dividend instead of the divide.
		// For now,
		//
		// cmp arg1, -1
		// be  ahead
		// v = arg0 / arg1
		// b over
		// ahead: v = - arg0
		// over: nop
		r := gc.SSARegNum(v)
		r0 := gc.SSARegNum(v.Args[0])
		r1 := gc.SSARegNum(v.Args[1])

		p := gc.Prog(ppc64.ACMP)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r1
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = -1

		pbahead := gc.Prog(ppc64.ABEQ)
		pbahead.To.Type = obj.TYPE_BRANCH

		p = gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r1
		p.Reg = r0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r

		pbover := gc.Prog(obj.AJMP)
		pbover.To.Type = obj.TYPE_BRANCH

		p = gc.Prog(ppc64.ANEG)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r0
		gc.Patch(pbahead, p)

		p = gc.Prog(obj.ANOP)
		gc.Patch(pbover, p)

	case ssa.OpPPC64DIVW:
		// word-width version of above
		r := gc.SSARegNum(v)
		r0 := gc.SSARegNum(v.Args[0])
		r1 := gc.SSARegNum(v.Args[1])

		p := gc.Prog(ppc64.ACMPW)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r1
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = -1

		pbahead := gc.Prog(ppc64.ABEQ)
		pbahead.To.Type = obj.TYPE_BRANCH

		p = gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r1
		p.Reg = r0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r

		pbover := gc.Prog(obj.AJMP)
		pbover.To.Type = obj.TYPE_BRANCH

		p = gc.Prog(ppc64.ANEG)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r0
		gc.Patch(pbahead, p)

		p = gc.Prog(obj.ANOP)
		gc.Patch(pbover, p)

	case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
		ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU,
		ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW,
		ssa.OpPPC64MULHD, ssa.OpPPC64MULHW, ssa.OpPPC64MULHDU, ssa.OpPPC64MULHWU,
		ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS,
		ssa.OpPPC64AND, ssa.OpPPC64OR, ssa.OpPPC64ANDN, ssa.OpPPC64ORN, ssa.OpPPC64XOR, ssa.OpPPC64EQV:
		// Generic two-operand ops: arg1 goes in From, arg0 in the
		// second-source field (p.Reg), result in To.
		r := gc.SSARegNum(v)
		r1 := gc.SSARegNum(v.Args[0])
		r2 := gc.SSARegNum(v.Args[1])
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = r2
		p.Reg = r1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r

	case ssa.OpPPC64MaskIfNotCarry:
		// Source is the always-zero register; per the op name the result
		// presumably depends on the carry bit, which the opcode itself
		// consumes — TODO(review) confirm against the op's definition.
		r := gc.SSARegNum(v)
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = ppc64.REGZERO
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r

	case ssa.OpPPC64ADDconstForCarry:
		r1 := gc.SSARegNum(v.Args[0])
		p := gc.Prog(v.Op.Asm())
		p.Reg = r1
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = ppc64.REGTMP // Ignored; this is for the carry effect.

	case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FRSP:
		// Unary register-to-register ops.
		r := gc.SSARegNum(v)
		p := gc.Prog(v.Op.Asm())
		p.To.Type = obj.TYPE_REG
		p.To.Reg = r
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])

	case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
		ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst:
		// Register-op-constant. The constant comes from AuxOffset when a
		// symbolic Aux is present, otherwise directly from AuxInt.
		p := gc.Prog(v.Op.Asm())
		p.Reg = gc.SSARegNum(v.Args[0])

		if v.Aux != nil {
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = gc.AuxOffset(v)
		} else {
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = v.AuxInt
		}

		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

	case ssa.OpPPC64MOVDaddr:
		// Materialize the address $sym+off(base) into a register.
		p := gc.Prog(ppc64.AMOVD)
		p.From.Type = obj.TYPE_ADDR
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

		var wantreg string
		// Suspect comment, copied from ARM code
		// MOVD $sym+off(base), R
		// the assembler expands it as the following:
		// - base is SP: add constant offset to SP
		//               when constant is large, tmp register (R11) may be used
		// - base is SB: load external address from constant pool (use relocation)
		switch v.Aux.(type) {
		default:
			v.Fatalf("aux is of unknown type %T", v.Aux)
		case *ssa.ExternSymbol:
			wantreg = "SB"
			gc.AddAux(&p.From, v)
		case *ssa.ArgSymbol, *ssa.AutoSymbol:
			wantreg = "SP"
			gc.AddAux(&p.From, v)
		case nil:
			// No sym, just MOVD $off(SP), R
			wantreg = "SP"
			p.From.Reg = ppc64.REGSP
			p.From.Offset = v.AuxInt
		}
		// Sanity check: the base register arg must match the symbol kind.
		if reg := gc.SSAReg(v.Args[0]); reg.Name() != wantreg {
			v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
		}

	case ssa.OpPPC64MOVDconst, ssa.OpPPC64MOVWconst:
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = v.AuxInt
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

	case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
		// AuxInt holds the IEEE-754 bit pattern; convert back to float64
		// for the FCONST operand.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_FCONST
		p.From.Val = math.Float64frombits(uint64(v.AuxInt))
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

	case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU:
		// Compare arg0 (From) against arg1 (To); result goes to the
		// condition register implicitly.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v.Args[1])

	case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst:
		// Compare arg0 against the constant in AuxInt.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = v.AuxInt

	case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg:
		// Shift in register to required size
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		p.To.Reg = gc.SSARegNum(v)
		p.To.Type = obj.TYPE_REG

	case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload, ssa.OpPPC64MOVBload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload:
		// Integer load: arg0 is the base pointer, Aux/AuxInt the offset.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

	case ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
		// Float load, same addressing as the integer loads above.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

	case ssa.OpPPC64MOVDstorezero, ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
		// Store zero by sourcing from the always-zero register.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = ppc64.REGZERO
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.To, v)

	case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore:
		// Integer store: arg0 is the base pointer, arg1 the value.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[1])
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.To, v)
	case ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
		// Float store, same shape as the integer stores above.
		p := gc.Prog(v.Op.Asm())
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[1])
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.To, v)

	case ssa.OpPPC64Equal,
		ssa.OpPPC64NotEqual,
		ssa.OpPPC64LessThan,
		ssa.OpPPC64FLessThan,
		ssa.OpPPC64LessEqual,
		ssa.OpPPC64GreaterThan,
		ssa.OpPPC64FGreaterThan,
		ssa.OpPPC64GreaterEqual:
		// On Power7 or later, can use isel instruction:
		// for a < b, a > b, a = b:
		//   rt := 1
		//   isel rt,rt,r0,cond

		// for  a >= b, a <= b, a != b:
		//   rt := 1
		//   isel rt,0,rt,!cond

		// However, PPCbe support is for older machines than that,
		// and isel (which looks a lot like fsel) isn't recognized
		// yet by the Go assembler.  So for now, use the old instruction
		// sequence, which we'll need anyway.
		// TODO: add support for isel on PPCle and use it.

		// generate boolean values
		// use conditional move

		// MOVW $1, dst; Bcond past; MOVW $0, dst; past: NOP
		p := gc.Prog(ppc64.AMOVW)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

		pb := gc.Prog(condOps[v.Op])
		pb.To.Type = obj.TYPE_BRANCH

		p = gc.Prog(ppc64.AMOVW)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

		p = gc.Prog(obj.ANOP)
		gc.Patch(pb, p)

	case ssa.OpPPC64FLessEqual, // These include a second branch for EQ -- dealing with NaN prevents REL= to !REL conversion
		ssa.OpPPC64FGreaterEqual:

		// Same skeleton as above, but two branches (cond OR equal) skip
		// the zeroing move.
		p := gc.Prog(ppc64.AMOVW)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 1
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

		pb0 := gc.Prog(condOps[v.Op])
		pb0.To.Type = obj.TYPE_BRANCH
		pb1 := gc.Prog(ppc64.ABEQ)
		pb1.To.Type = obj.TYPE_BRANCH

		p = gc.Prog(ppc64.AMOVW)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = 0
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v)

		p = gc.Prog(obj.ANOP)
		gc.Patch(pb0, p)
		gc.Patch(pb1, p)

	case ssa.OpPPC64LoweredZero:
		// Similar to how this is done on ARM,
		// except that PPC MOVDU x,off(y) is *(y+off) = x; y=y+off
		// not store-and-increment.
		// Therefore R3 should be dest-align
		// and arg1 should be dest+size-align
		// HOWEVER, the input dest address cannot be dest-align because
		// that does not necessarily address valid memory and it's not
		// known how that might be optimized.  Therefore, correct it in
		// in the expansion:
		//
		// ADD    -8,R3,R3
		// MOVDU  R0, 8(R3)
		// CMP	  R3, Rarg1
		// BL	  -2(PC)
		// arg1 is the address of the last element to zero
		// auxint is alignment
		var sz int64
		var movu obj.As
		switch {
		case v.AuxInt%8 == 0:
			sz = 8
			movu = ppc64.AMOVDU
		case v.AuxInt%4 == 0:
			sz = 4
			movu = ppc64.AMOVWZU // MOVWU instruction not implemented
		case v.AuxInt%2 == 0:
			sz = 2
			movu = ppc64.AMOVHU
		default:
			sz = 1
			movu = ppc64.AMOVBU
		}

		// Pre-decrement the destination so the first MOVxU write lands
		// on the original address.
		p := gc.Prog(ppc64.AADD)
		p.Reg = gc.SSARegNum(v.Args[0])
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = -sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v.Args[0])

		p = gc.Prog(movu)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = ppc64.REG_R0
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = gc.SSARegNum(v.Args[0])
		p.To.Offset = sz

		p2 := gc.Prog(ppc64.ACMPU)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = gc.SSARegNum(v.Args[0])
		p2.To.Reg = gc.SSARegNum(v.Args[1])
		p2.To.Type = obj.TYPE_REG

		// Loop back to the store while the pointer is below the end address.
		p3 := gc.Prog(ppc64.ABLT)
		p3.To.Type = obj.TYPE_BRANCH
		gc.Patch(p3, p)

	case ssa.OpPPC64LoweredMove:
		// Similar to how this is done on ARM,
		// except that PPC MOVDU x,off(y) is *(y+off) = x; y=y+off,
		// not store-and-increment.
		// Inputs must be valid pointers to memory,
		// so adjust arg0 and arg1 as part of the expansion.
		// arg2 should be src+size-align,
		//
		// ADD    -8,R3,R3
		// ADD    -8,R4,R4
		// MOVDU	8(R4), Rtmp
		// MOVDU 	Rtmp, 8(R3)
		// CMP	R4, Rarg2
		// BL	-3(PC)
		// arg2 is the address of the last element of src
		// auxint is alignment
		var sz int64
		var movu obj.As
		switch {
		case v.AuxInt%8 == 0:
			sz = 8
			movu = ppc64.AMOVDU
		case v.AuxInt%4 == 0:
			sz = 4
			movu = ppc64.AMOVWZU // MOVWU instruction not implemented
		case v.AuxInt%2 == 0:
			sz = 2
			movu = ppc64.AMOVHU
		default:
			sz = 1
			movu = ppc64.AMOVBU
		}

		// Pre-decrement destination (arg0) and source (arg1).
		p := gc.Prog(ppc64.AADD)
		p.Reg = gc.SSARegNum(v.Args[0])
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = -sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v.Args[0])

		p = gc.Prog(ppc64.AADD)
		p.Reg = gc.SSARegNum(v.Args[1])
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = -sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = gc.SSARegNum(v.Args[1])

		// Loop body: load from src into REGTMP, store to dst.
		p = gc.Prog(movu)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = gc.SSARegNum(v.Args[1])
		p.From.Offset = sz
		p.To.Type = obj.TYPE_REG
		p.To.Reg = ppc64.REGTMP

		p2 := gc.Prog(movu)
		p2.From.Type = obj.TYPE_REG
		p2.From.Reg = ppc64.REGTMP
		p2.To.Type = obj.TYPE_MEM
		p2.To.Reg = gc.SSARegNum(v.Args[0])
		p2.To.Offset = sz

		p3 := gc.Prog(ppc64.ACMPU)
		p3.From.Reg = gc.SSARegNum(v.Args[1])
		p3.From.Type = obj.TYPE_REG
		p3.To.Reg = gc.SSARegNum(v.Args[2])
		p3.To.Type = obj.TYPE_REG

		p4 := gc.Prog(ppc64.ABLT)
		p4.To.Type = obj.TYPE_BRANCH
		gc.Patch(p4, p)

	case ssa.OpPPC64CALLstatic:
		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
			// Deferred calls will appear to be returning to
			// the CALL deferreturn(SB) that we are about to emit.
			// However, the stack trace code will show the line
			// of the instruction byte before the return PC.
			// To avoid that being an unrelated instruction,
			// insert two actual hardware NOPs that will have the right line number.
			// This is different from obj.ANOP, which is a virtual no-op
			// that doesn't make it into the instruction stream.
			// PPC64 is unusual because TWO nops are required
			// (see gc/cgen.go, gc/plive.go -- copy of comment below)
			//
			// On ppc64, when compiling Go into position
			// independent code on ppc64le we insert an
			// instruction to reload the TOC pointer from the
			// stack as well. See the long comment near
			// jmpdefer in runtime/asm_ppc64.s for why.
			// If the MOVD is not needed, insert a hardware NOP
			// so that the same number of instructions are used
			// on ppc64 in both shared and non-shared modes.
			ginsnop()
			if gc.Ctxt.Flag_shared {
				p := gc.Prog(ppc64.AMOVD)
				p.From.Type = obj.TYPE_MEM
				p.From.Offset = 24
				p.From.Reg = ppc64.REGSP
				p.To.Type = obj.TYPE_REG
				p.To.Reg = ppc64.REG_R2
			} else {
				ginsnop()
			}
		}
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}

	case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter:
		// Indirect call: move the function pointer into CTR and call
		// through it.
		p := gc.Prog(ppc64.AMOVD)
		p.From.Type = obj.TYPE_REG
		p.From.Reg = gc.SSARegNum(v.Args[0])
		p.To.Type = obj.TYPE_REG
		p.To.Reg = ppc64.REG_CTR

		if gc.Ctxt.Flag_shared && p.From.Reg != ppc64.REG_R12 {
			// Make sure function pointer is in R12 as well when
			// compiling Go into PIC.
			// TODO(mwhudson): it would obviously be better to
			// change the register allocation to put the value in
			// R12 already, but I don't know how to do that.
			// TODO: We have the technology now to implement TODO above.
			q := gc.Prog(ppc64.AMOVD)
			q.From = p.From
			q.To.Type = obj.TYPE_REG
			q.To.Reg = ppc64.REG_R12
		}

		pp := gc.Prog(obj.ACALL)
		pp.To.Type = obj.TYPE_REG
		pp.To.Reg = ppc64.REG_CTR

		if gc.Ctxt.Flag_shared {
			// When compiling Go into PIC, the function we just
			// called via pointer might have been implemented in
			// a separate module and so overwritten the TOC
			// pointer in R2; reload it.
			q := gc.Prog(ppc64.AMOVD)
			q.From.Type = obj.TYPE_MEM
			q.From.Offset = 24
			q.From.Reg = ppc64.REGSP
			q.To.Type = obj.TYPE_REG
			q.To.Reg = ppc64.REG_R2
		}

		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}

	case ssa.OpPPC64CALLdefer:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpPPC64CALLgo:
		p := gc.Prog(obj.ACALL)
		p.To.Type = obj.TYPE_MEM
		p.To.Name = obj.NAME_EXTERN
		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
		if gc.Maxarg < v.AuxInt {
			gc.Maxarg = v.AuxInt
		}
	case ssa.OpVarDef:
		gc.Gvardef(v.Aux.(*gc.Node))
	case ssa.OpVarKill:
		gc.Gvarkill(v.Aux.(*gc.Node))
	case ssa.OpVarLive:
		gc.Gvarlive(v.Aux.(*gc.Node))
	case ssa.OpKeepAlive:
		// Sanity checks, then record liveness of the spilled pointer.
		if !v.Args[0].Type.IsPtrShaped() {
			v.Fatalf("keeping non-pointer alive %v", v.Args[0])
		}
		n, off := gc.AutoVar(v.Args[0])
		if n == nil {
			v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
		}
		if off != 0 {
			v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
		}
		gc.Gvarlive(n)

	case ssa.OpPhi:
		// just check to make sure regalloc and stackalloc did it right
		if v.Type.IsMemory() {
			return
		}
		f := v.Block.Func
		loc := f.RegAlloc[v.ID]
		for _, a := range v.Args {
			if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
				v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
			}
		}

	case ssa.OpPPC64LoweredNilCheck:
		// Optimization - if the subsequent block has a load or store
		// at the same address, we don't need to issue this instruction.
		// NOTE(review): the optimization below is disabled (commented out);
		// currently a probing load is always emitted.
		// mem := v.Args[1]
		// for _, w := range v.Block.Succs[0].Block().Values {
		// 	if w.Op == ssa.OpPhi {
		// 		if w.Type.IsMemory() {
		// 			mem = w
		// 		}
		// 		continue
		// 	}
		// 	if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
		// 		// w doesn't use a store - can't be a memory op.
		// 		continue
		// 	}
		// 	if w.Args[len(w.Args)-1] != mem {
		// 		v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
		// 	}
		// 	switch w.Op {
		// 	case ssa.OpPPC64MOVBload, ssa.OpPPC64MOVBUload, ssa.OpPPC64MOVHload, ssa.OpPPC64MOVHUload,
		// 		ssa.OpPPC64MOVWload, ssa.OpPPC64MOVFload, ssa.OpPPC64MOVDload,
		// 		ssa.OpPPC64MOVBstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVWstore,
		// 		ssa.OpPPC64MOVFstore, ssa.OpPPC64MOVDstore:
		// 		// arg0 is ptr, auxint is offset
		// 		if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
		// 			if gc.Debug_checknil != 0 && int(v.Line) > 1 {
		// 				gc.Warnl(v.Line, "removed nil check")
		// 			}
		// 			return
		// 		}
		// 	case ssa.OpPPC64DUFFZERO, ssa.OpPPC64LoweredZero, ssa.OpPPC64LoweredZeroU:
		// 		// arg0 is ptr
		// 		if w.Args[0] == v.Args[0] {
		// 			if gc.Debug_checknil != 0 && int(v.Line) > 1 {
		// 				gc.Warnl(v.Line, "removed nil check")
		// 			}
		// 			return
		// 		}
		// 	case ssa.OpPPC64DUFFCOPY, ssa.OpPPC64LoweredMove, ssa.OpPPC64LoweredMoveU:
		// 		// arg0 is dst ptr, arg1 is src ptr
		// 		if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
		// 			if gc.Debug_checknil != 0 && int(v.Line) > 1 {
		// 				gc.Warnl(v.Line, "removed nil check")
		// 			}
		// 			return
		// 		}
		// 	default:
		// 	}
		// 	if w.Type.IsMemory() {
		// 		if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
		// 			// these ops are OK
		// 			mem = w
		// 			continue
		// 		}
		// 		// We can't delay the nil check past the next store.
		// 		break
		// 	}
		// }
		// Issue a load which will fault if arg is nil.
		p := gc.Prog(ppc64.AMOVB)
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = gc.SSARegNum(v.Args[0])
		gc.AddAux(&p.From, v)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = ppc64.REGTMP
		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
			gc.Warnl(v.Line, "generated nil check")
		}

	case ssa.OpPPC64InvertFlags:
		v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
	case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
		v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())

	default:
		v.Unimplementedf("genValue not implemented: %s", v.LongString())
	}
}
   947  
// blockJump maps each conditional SSA block kind to the branch
// instructions ssaGenBlock uses to lower it:
//
//	asm      - branch taken when the condition holds (targets Succs[0])
//	invasm   - branch on the inverted condition (targets Succs[1])
//	asmeq    - emit an extra BEQ to Succs[0] after asm, for the FP
//	           "or equal" conditions (GE, LE) which need two CR-bit tests
//	invasmun - emit an extra BVS to Succs[1] after invasm, because the
//	           inverted FP condition must also be taken when the compare
//	           result is unordered (NaN)
var blockJump = [...]struct {
	asm, invasm     obj.As
	asmeq, invasmun bool
}{
	ssa.BlockPPC64EQ: {ppc64.ABEQ, ppc64.ABNE, false, false},
	ssa.BlockPPC64NE: {ppc64.ABNE, ppc64.ABEQ, false, false},

	ssa.BlockPPC64LT: {ppc64.ABLT, ppc64.ABGE, false, false},
	ssa.BlockPPC64GE: {ppc64.ABGE, ppc64.ABLT, false, false},
	ssa.BlockPPC64LE: {ppc64.ABLE, ppc64.ABGT, false, false},
	ssa.BlockPPC64GT: {ppc64.ABGT, ppc64.ABLE, false, false},

	// TODO: need to work FP comparisons into block jumps
	ssa.BlockPPC64FLT: {ppc64.ABLT, ppc64.ABGE, false, false},
	ssa.BlockPPC64FGE: {ppc64.ABGT, ppc64.ABLT, true, true}, // GE = GT or EQ; !GE = LT or UN
	ssa.BlockPPC64FLE: {ppc64.ABLT, ppc64.ABGT, true, true}, // LE = LT or EQ; !LE = GT or UN
	ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false},
}
   966  
   967  func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
   968  	s.SetLineno(b.Line)
   969  
   970  	switch b.Kind {
   971  
   972  	case ssa.BlockDefer:
   973  		// defer returns in R3:
   974  		// 0 if we should continue executing
   975  		// 1 if we should jump to deferreturn call
   976  		p := gc.Prog(ppc64.ACMP)
   977  		p.From.Type = obj.TYPE_REG
   978  		p.From.Reg = ppc64.REG_R3
   979  		p.To.Type = obj.TYPE_REG
   980  		p.To.Reg = ppc64.REG_R0
   981  
   982  		p = gc.Prog(ppc64.ABNE)
   983  		p.To.Type = obj.TYPE_BRANCH
   984  		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
   985  		if b.Succs[0].Block() != next {
   986  			p := gc.Prog(obj.AJMP)
   987  			p.To.Type = obj.TYPE_BRANCH
   988  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
   989  		}
   990  
   991  	case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
   992  		if b.Succs[0].Block() != next {
   993  			p := gc.Prog(obj.AJMP)
   994  			p.To.Type = obj.TYPE_BRANCH
   995  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
   996  		}
   997  	case ssa.BlockExit:
   998  		gc.Prog(obj.AUNDEF) // tell plive.go that we never reach here
   999  	case ssa.BlockRet:
  1000  		gc.Prog(obj.ARET)
  1001  	case ssa.BlockRetJmp:
  1002  		p := gc.Prog(obj.AJMP)
  1003  		p.To.Type = obj.TYPE_MEM
  1004  		p.To.Name = obj.NAME_EXTERN
  1005  		p.To.Sym = gc.Linksym(b.Aux.(*gc.Sym))
  1006  
  1007  	case ssa.BlockPPC64EQ, ssa.BlockPPC64NE,
  1008  		ssa.BlockPPC64LT, ssa.BlockPPC64GE,
  1009  		ssa.BlockPPC64LE, ssa.BlockPPC64GT,
  1010  		ssa.BlockPPC64FLT, ssa.BlockPPC64FGE,
  1011  		ssa.BlockPPC64FLE, ssa.BlockPPC64FGT:
  1012  		jmp := blockJump[b.Kind]
  1013  		likely := b.Likely
  1014  		var p *obj.Prog
  1015  		switch next {
  1016  		case b.Succs[0].Block():
  1017  			p = gc.Prog(jmp.invasm)
  1018  			likely *= -1
  1019  			p.To.Type = obj.TYPE_BRANCH
  1020  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
  1021  			if jmp.invasmun {
  1022  				// TODO: The second branch is probably predict-not-taken since it is for FP unordered
  1023  				q := gc.Prog(ppc64.ABVS)
  1024  				q.To.Type = obj.TYPE_BRANCH
  1025  				s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
  1026  			}
  1027  		case b.Succs[1].Block():
  1028  			p = gc.Prog(jmp.asm)
  1029  			p.To.Type = obj.TYPE_BRANCH
  1030  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
  1031  			if jmp.asmeq {
  1032  				q := gc.Prog(ppc64.ABEQ)
  1033  				q.To.Type = obj.TYPE_BRANCH
  1034  				s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
  1035  			}
  1036  		default:
  1037  			p = gc.Prog(jmp.asm)
  1038  			p.To.Type = obj.TYPE_BRANCH
  1039  			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
  1040  			if jmp.asmeq {
  1041  				q := gc.Prog(ppc64.ABEQ)
  1042  				q.To.Type = obj.TYPE_BRANCH
  1043  				s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[0].Block()})
  1044  			}
  1045  			q := gc.Prog(obj.AJMP)
  1046  			q.To.Type = obj.TYPE_BRANCH
  1047  			s.Branches = append(s.Branches, gc.Branch{P: q, B: b.Succs[1].Block()})
  1048  		}
  1049  
  1050  		// liblink reorders the instruction stream as it sees fit.
  1051  		// Pass along what we know so liblink can make use of it.
  1052  		// TODO: Once we've fully switched to SSA,
  1053  		// make liblink leave our output alone.
  1054  		//switch likely {
  1055  		//case ssa.BranchUnlikely:
  1056  		//	p.From.Type = obj.TYPE_CONST
  1057  		//	p.From.Offset = 0
  1058  		//case ssa.BranchLikely:
  1059  		//	p.From.Type = obj.TYPE_CONST
  1060  		//	p.From.Offset = 1
  1061  		//}
  1062  
  1063  	default:
  1064  		b.Unimplementedf("branch not implemented: %s. Control: %s", b.LongString(), b.Control.LongString())
  1065  	}
  1066  }