github.com/bir3/gocompiler@v0.3.205/src/cmd/internal/obj/ppc64/asm9.go

// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package ppc64

import (
	"github.com/bir3/gocompiler/src/cmd/internal/obj"
	"github.com/bir3/gocompiler/src/cmd/internal/objabi"
	"encoding/binary"
	"fmt"
	"log"
	"math"
	"math/bits"
	"sort"
)

// ctxt9 holds state while assembling a single function.
// Each function gets a fresh ctxt9.
// This allows multiple functions to be assembled safely and concurrently.
type ctxt9 struct {
	ctxt       *obj.Link
	newprog    obj.ProgAlloc
	cursym     *obj.LSym
	autosize   int32
	instoffset int64
	pc         int64
}

// Instruction layout.

const (
	r0iszero = 1
)

type Optab struct {
	as    obj.As // Opcode
	a1    uint8  // p.From argument (obj.Addr). p is of type obj.Prog.
	a2    uint8  // p.Reg argument (int16 Register)
	a3    uint8  // p.RestArgs[0] (obj.AddrPos)
	a4    uint8  // p.RestArgs[1]
	a5    uint8  // p.RestArgs[2]
	a6    uint8  // p.To (obj.Addr)
	type_ int8   // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
	size  int8   // Text space in bytes to lay out the operation

	// A prefixed instruction is generated by this opcode. This cannot be placed
	// across a 64B PC address. Opcodes should not translate to more than one
	// prefixed instruction. The prefixed instruction should be written first
	// (e.g. when Optab.size > 8).
	ispfx bool

	asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
}

// optab contains an array, to be sliced, of accepted operand combinations for an
// instruction. Unused arguments and fields are not explicitly enumerated, and
// should not be listed for clarity. Unused arguments and values should always
// assume the default value for the given type.
85 // 86 // optab does not list every valid ppc64 opcode, it enumerates representative 87 // operand combinations for a class of instruction. The variable oprange indexes 88 // all valid ppc64 opcodes. 89 // 90 // oprange is initialized to point a slice within optab which contains the valid 91 // operand combinations for a given instruction. This is initialized from buildop. 92 // 93 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface 94 // to arrange entries to minimize text size of each opcode. 95 var optab = []Optab{ 96 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0}, 97 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0}, 98 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0}, 99 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0}, 100 /* move register */ 101 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, 102 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 103 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 104 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4}, 105 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 106 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, 107 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4}, 108 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4}, 109 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8}, 110 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8}, 111 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, 112 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, 113 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4}, 114 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4}, 115 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, 116 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 117 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 118 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, 119 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, 120 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, 121 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */ 122 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 123 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 124 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 125 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, 126 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 127 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4}, 128 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4}, 129 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, 130 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, 131 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, 132 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, 133 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4}, 134 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4}, 135 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, 136 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 137 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 138 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, 139 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 140 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4}, 141 {as: 
AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, 142 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, 143 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, 144 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4}, 145 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4}, 146 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12}, 147 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */ 148 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 149 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, 150 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 151 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4}, 152 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4}, 153 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, 154 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, 155 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, 156 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, 157 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4}, 158 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4}, 159 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */ 160 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 161 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */ 162 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4}, 163 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 164 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 165 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 166 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 167 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, 168 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, 169 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, 170 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, 171 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4}, 172 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4}, 173 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 174 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 175 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, 176 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, 177 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 178 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 179 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, 180 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, 181 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4}, 182 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4}, 183 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4}, 184 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4}, 185 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4}, 186 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4}, 187 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, 188 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, 189 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 190 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 191 {as: ARLDICL, 
a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 192 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 193 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4}, 194 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4}, 195 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, 196 {as: AFABS, a6: C_FREG, type_: 33, size: 4}, 197 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4}, 198 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4}, 199 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4}, 200 201 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 202 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 203 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, 204 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8}, 205 206 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 207 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 208 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 209 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 210 211 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 212 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, 213 214 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, 215 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, 216 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, 217 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8}, 218 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, 219 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 220 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, 221 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 222 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 223 224 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, 225 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, 226 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 227 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 228 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, 229 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 230 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, 231 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 232 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 233 234 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, 235 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, 236 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4}, 237 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, 238 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, 239 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, 240 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, 241 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 242 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 243 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8}, 244 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, 245 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, 246 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, 247 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, 248 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, 249 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 250 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 251 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8}, 252 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, 253 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 
66, size: 4}, 254 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 255 256 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, 257 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, 258 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4}, 259 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, 260 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, 261 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, 262 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, 263 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4}, 264 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 265 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, 266 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 267 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, 268 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, 269 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, 270 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 271 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, 272 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 273 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, 274 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 275 276 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8}, 277 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4}, 278 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4}, 279 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, 280 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4}, 281 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, 282 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, 283 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4}, 284 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4}, 285 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, 286 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, 287 288 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4}, 289 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4}, 290 291 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4}, 292 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4}, 293 294 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4}, 295 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4}, 296 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4}, 297 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4}, 298 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4}, 299 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4}, 300 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, 301 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4}, 302 303 {as: ASYSCALL, type_: 5, size: 4}, 304 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12}, 305 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12}, 306 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4}, 307 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4}, 308 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label 309 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop 310 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr 311 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr 312 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label 313 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label 314 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi 315 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh 316 
{as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi 317 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4}, 318 {as: ASYNC, type_: 46, size: 4}, 319 {as: AWORD, a1: C_LCON, type_: 40, size: 4}, 320 {as: ADWORD, a1: C_64CON, type_: 31, size: 8}, 321 {as: ADWORD, a1: C_LACON, type_: 31, size: 8}, 322 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4}, 323 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4}, 324 {as: AEXTSB, a6: C_REG, type_: 48, size: 4}, 325 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, 326 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, 327 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4}, 328 {as: ANEG, a6: C_REG, type_: 47, size: 4}, 329 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12}, 330 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12}, 331 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16}, 332 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16}, 333 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12}, 334 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12}, 335 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4}, 336 /* Other ISA 2.05+ instructions */ 337 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */ 338 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */ 339 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */ 340 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 341 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */ 342 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */ 343 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */ 344 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */ 345 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */ 346 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */ 347 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */ 348 349 /* Vector instructions */ 350 351 /* Vector load */ 352 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */ 353 354 /* Vector store */ 355 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */ 356 357 /* Vector logical */ 358 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */ 359 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */ 360 361 /* Vector add */ 362 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */ 363 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */ 364 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */ 365 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */ 366 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */ 367 368 /* Vector 
subtract */ 369 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */ 370 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */ 371 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */ 372 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */ 373 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */ 374 375 /* Vector multiply */ 376 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */ 377 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */ 378 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */ 379 380 /* Vector rotate */ 381 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */ 382 383 /* Vector shift */ 384 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */ 385 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */ 386 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */ 387 388 /* Vector count */ 389 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */ 390 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */ 391 392 /* Vector compare */ 393 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */ 394 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */ 395 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */ 396 397 /* Vector merge */ 398 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */ 399 400 /* Vector permute */ 401 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */ 402 403 /* Vector bit permute */ 404 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */ 405 406 /* Vector select */ 407 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */ 408 409 /* Vector splat */ 410 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */ 411 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, 412 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */ 413 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4}, 414 415 /* Vector AES */ 416 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */ 417 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */ 418 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */ 419 420 /* Vector SHA */ 421 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA 
sigma, vx-form */ 422 423 /* VSX vector load */ 424 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */ 425 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */ 426 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */ 427 428 /* VSX vector store */ 429 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */ 430 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */ 431 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */ 432 433 /* VSX scalar load */ 434 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */ 435 436 /* VSX scalar store */ 437 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */ 438 439 /* VSX scalar as integer load */ 440 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */ 441 442 /* VSX scalar store as integer */ 443 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */ 444 445 /* VSX move from VSR */ 446 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, 447 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4}, 448 449 /* VSX move to VSR */ 450 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4}, 451 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4}, 452 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4}, 453 454 /* VSX logical */ 455 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */ 456 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */ 457 458 /* VSX select */ 459 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */ 460 461 /* VSX merge */ 462 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */ 463 464 /* VSX splat */ 465 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */ 466 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */ 467 468 /* VSX permute */ 469 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */ 470 471 /* VSX shift */ 472 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */ 473 474 /* VSX reverse bytes */ 475 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */ 476 477 /* VSX scalar FP-FP conversion */ 478 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */ 479 480 /* VSX vector FP-FP conversion */ 481 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */ 482 483 /* VSX scalar FP-integer conversion */ 484 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */ 485 486 /* VSX scalar integer-FP conversion */ 487 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */ 488 489 /* VSX vector FP-integer conversion */ 490 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */ 491 
492 /* VSX vector integer-FP conversion */ 493 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */ 494 495 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 496 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, 497 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4}, 498 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4}, 499 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 500 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, 501 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4}, 502 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4}, 503 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4}, 504 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4}, 505 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4}, 506 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4}, 507 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4}, 508 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4}, 509 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4}, 510 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4}, 511 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4}, 512 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4}, 513 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 514 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, 515 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4}, 516 {as: AEIEIO, type_: 46, size: 4}, 517 {as: ATLBIE, a1: C_REG, type_: 49, size: 4}, 518 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4}, 519 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, 520 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, 521 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 522 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4}, 523 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, 524 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4}, 525 526 {as: obj.AUNDEF, type_: 78, size: 4}, 527 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0}, 528 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0}, 529 {as: obj.ANOP, type_: 0, size: 0}, 530 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689 531 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior 532 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0}, 533 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL 534 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL 535 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code 536 } 537 538 var oprange [ALAST & obj.AMask][]Optab 539 540 var xcmp [C_NCLASS][C_NCLASS]bool 541 542 // padding bytes to add to align code as requested. 543 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int { 544 // For 16 and 32 byte alignment, there is a tradeoff 545 // between aligning the code and adding too many NOPs. 546 switch a { 547 case 8: 548 if pc&7 != 0 { 549 return 4 550 } 551 case 16: 552 // Align to 16 bytes if possible but add at 553 // most 2 NOPs. 554 switch pc & 15 { 555 case 4, 12: 556 return 4 557 case 8: 558 return 8 559 } 560 case 32: 561 // Align to 32 bytes if possible but add at 562 // most 3 NOPs. 
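		// Editorial note, not part of the original source: the cases below
		// pad with at most 3 NOPs (12 bytes) toward the next 16 byte
		// boundary. For pc&31 of 20, 24 or 28 that also reaches the 32 byte
		// boundary (e.g. 20+12 == 32); for 4, 8 or 12 it only reaches the
		// halfway point at 16; for 0 and 16 nothing is added, since 4 or
		// more NOPs are considered too expensive.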
		switch pc & 31 {
		case 4, 20:
			return 12
		case 8, 24:
			return 8
		case 12, 28:
			return 4
		}
		// When 32 byte alignment is requested on Linux,
		// promote the function's alignment to 32. On AIX
		// the function alignment is not changed, which might
		// result in 16 byte alignment, but that is still fine.
		// TODO: alignment on AIX
		if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
			cursym.Func().Align = 32
		}
	default:
		ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
	}
	return 0
}

// Get the implied register of an operand which doesn't specify one. These show up
// in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
// or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is
// when generating constants in a register like "MOVD $constant, Rx".
func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
	class := oclass(a)
	if class >= C_ZCON && class <= C_64CON {
		return REGZERO
	}
	switch class {
	case C_SACON, C_LACON:
		return REGSP
	case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
		switch a.Name {
		case obj.NAME_EXTERN, obj.NAME_STATIC:
			return REGSB
		case obj.NAME_AUTO, obj.NAME_PARAM:
			return REGSP
		case obj.NAME_NONE:
			return REGZERO
		}
	}
	c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
	return 0
}

func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
	p := cursym.Func().Text
	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
		return
	}

	if oprange[AANDN&obj.AMask] == nil {
		ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
	}

	c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}

	pc := int64(0)
	p.Pc = pc

	var m int
	var o *Optab
	for p = p.Link; p != nil; p = p.Link {
		p.Pc = pc
		o = c.oplook(p)
		m = int(o.size)
		if m == 0 {
			if p.As == obj.APCALIGN {
				a := c.vregoff(&p.From)
				m = addpad(pc, a, ctxt, cursym)
			} else {
				if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
					ctxt.Diag("zero-width instruction\n%v", p)
				}
				continue
			}
		}
		pc += int64(m)
	}

	c.cursym.Size = pc

	/*
	 * if any procedure is large enough to
	 * generate a large SBRA branch, then
	 * generate extra passes putting branches
	 * around jmps to fix. this is rare.
	 */
	bflag := 1

	var otxt int64
	var q *obj.Prog
	var out [5]uint32
	var falign int32 // Track increased alignment requirements for prefix.
	for bflag != 0 {
		bflag = 0
		pc = 0
		falign = 0 // Note, linker bumps function symbols to funcAlign.
		for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
			p.Pc = pc
			o = c.oplook(p)

			// very large conditional branches
			if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
				otxt = p.To.Target().Pc - pc
				if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
					// Assemble the instruction with a target not too far away to figure out the BI and BO fields.
					// If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
					// and only one extra branch is needed to reach the target.
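					// Editorial note, not part of the original source: only the
					// BO "sense" bits need to flip to invert such a branch.
					// For example, BO=12 (branch if the CR bit is set) becomes
					// BO=4 (branch if it is clear) by flipping 0x8, and BO=16
					// (decrement CTR, branch if CTR != 0) becomes BO=18
					// (branch if CTR == 0) by flipping 0x2, which is what the
					// code below does.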
675 tgt := p.To.Target() 676 p.To.SetTarget(p.Link) 677 o.asmout(&c, p, o, &out) 678 p.To.SetTarget(tgt) 679 680 bo := int64(out[0]>>21) & 31 681 bi := int16((out[0] >> 16) & 31) 682 invertible := false 683 684 if bo&0x14 == 0x14 { 685 // A conditional branch that is unconditionally taken. This cannot be inverted. 686 } else if bo&0x10 == 0x10 { 687 // A branch based on the value of CTR. Invert the CTR comparison against zero bit. 688 bo ^= 0x2 689 invertible = true 690 } else if bo&0x04 == 0x04 { 691 // A branch based on CR bit. Invert the BI comparison bit. 692 bo ^= 0x8 693 invertible = true 694 } 695 696 if invertible { 697 // Rewrite 698 // BC bo,...,far_away_target 699 // NEXT_INSN 700 // to: 701 // BC invert(bo),next_insn 702 // JMP far_away_target 703 // next_insn: 704 // NEXT_INSN 705 p.As = ABC 706 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo} 707 q = c.newprog() 708 q.As = ABR 709 q.To.Type = obj.TYPE_BRANCH 710 q.To.SetTarget(p.To.Target()) 711 q.Link = p.Link 712 p.To.SetTarget(p.Link) 713 p.Link = q 714 p.Reg = REG_CRBIT0 + bi 715 } else { 716 // Rewrite 717 // BC ...,far_away_target 718 // NEXT_INSN 719 // to 720 // BC ...,tmp 721 // JMP next_insn 722 // tmp: 723 // JMP far_away_target 724 // next_insn: 725 // NEXT_INSN 726 q = c.newprog() 727 q.Link = p.Link 728 p.Link = q 729 q.As = ABR 730 q.To.Type = obj.TYPE_BRANCH 731 q.To.SetTarget(p.To.Target()) 732 p.To.SetTarget(q) 733 q = c.newprog() 734 q.Link = p.Link 735 p.Link = q 736 q.As = ABR 737 q.To.Type = obj.TYPE_BRANCH 738 q.To.SetTarget(q.Link.Link) 739 } 740 bflag = 1 741 } 742 } 743 744 m = int(o.size) 745 if m == 0 { 746 if p.As == obj.APCALIGN { 747 a := c.vregoff(&p.From) 748 m = addpad(pc, a, ctxt, cursym) 749 } else { 750 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { 751 ctxt.Diag("zero-width instruction\n%v", p) 752 } 753 continue 754 } 755 } 756 757 // Prefixed instructions cannot be placed across a 64B boundary. 758 // Mark and adjust the PC of those which do. A nop will be 759 // inserted during final assembly. 760 if o.ispfx { 761 mark := p.Mark &^ PFX_X64B 762 if pc&63 == 60 { 763 p.Pc += 4 764 m += 4 765 mark |= PFX_X64B 766 } 767 768 // Marks may be adjusted if a too-far conditional branch is 769 // fixed up above. Likewise, inserting a NOP may cause a 770 // branch target to become too far away. We need to run 771 // another iteration and verify no additional changes 772 // are needed. 773 if mark != p.Mark { 774 bflag = 1 775 p.Mark = mark 776 } 777 778 // Check for 16 or 32B crossing of this prefixed insn. 779 // These do no require padding, but do require increasing 780 // the function alignment to prevent them from potentially 781 // crossing a 64B boundary when the linker assigns the final 782 // PC. 783 switch p.Pc & 31 { 784 case 28: // 32B crossing 785 falign = 64 786 case 12: // 16B crossing 787 if falign < 64 { 788 falign = 32 789 } 790 } 791 } 792 793 pc += int64(m) 794 } 795 796 c.cursym.Size = pc 797 } 798 799 c.cursym.Size = pc 800 c.cursym.Func().Align = falign 801 c.cursym.Grow(c.cursym.Size) 802 803 // lay out the code, emitting code and data relocations. 
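	// Editorial note, not part of the original source: the padding word built
	// just below, LOP_IRR(OP_ORI, REGZERO, REGZERO, 0), encodes "ori 0,0,0"
	// (0x60000000), the standard PowerPC no-op. It is emitted both as PCALIGN
	// padding and immediately before instructions marked PFX_X64B, so that a
	// prefixed instruction never straddles a 64 byte boundary.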
804 805 bp := c.cursym.P 806 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0) 807 var i int32 808 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link { 809 c.pc = p.Pc 810 o = c.oplook(p) 811 if int(o.size) > 4*len(out) { 812 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p) 813 } 814 // asmout is not set up to add large amounts of padding 815 if o.type_ == 0 && p.As == obj.APCALIGN { 816 aln := c.vregoff(&p.From) 817 v := addpad(p.Pc, aln, c.ctxt, c.cursym) 818 if v > 0 { 819 // Same padding instruction for all 820 for i = 0; i < int32(v/4); i++ { 821 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop) 822 bp = bp[4:] 823 } 824 } 825 } else { 826 if p.Mark&PFX_X64B != 0 { 827 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop) 828 bp = bp[4:] 829 } 830 o.asmout(&c, p, o, &out) 831 for i = 0; i < int32(o.size/4); i++ { 832 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) 833 bp = bp[4:] 834 } 835 } 836 } 837 } 838 839 func isint32(v int64) bool { 840 return int64(int32(v)) == v 841 } 842 843 func isuint32(v uint64) bool { 844 return uint64(uint32(v)) == v 845 } 846 847 func (c *ctxt9) aclassreg(reg int16) int { 848 if REG_R0 <= reg && reg <= REG_R31 { 849 return C_REGP + int(reg&1) 850 } 851 if REG_F0 <= reg && reg <= REG_F31 { 852 return C_FREGP + int(reg&1) 853 } 854 if REG_V0 <= reg && reg <= REG_V31 { 855 return C_VREG 856 } 857 if REG_VS0 <= reg && reg <= REG_VS63 { 858 return C_VSREGP + int(reg&1) 859 } 860 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR { 861 return C_CREG 862 } 863 if REG_CR0LT <= reg && reg <= REG_CR7SO { 864 return C_CRBIT 865 } 866 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 { 867 switch reg { 868 case REG_LR: 869 return C_LR 870 871 case REG_XER: 872 return C_XER 873 874 case REG_CTR: 875 return C_CTR 876 } 877 878 return C_SPR 879 } 880 if REG_A0 <= reg && reg <= REG_A7 { 881 return C_AREG 882 } 883 if reg == REG_FPSCR { 884 return C_FPSCR 885 } 886 return C_GOK 887 } 888 889 func (c *ctxt9) aclass(a *obj.Addr) int { 890 switch a.Type { 891 case obj.TYPE_NONE: 892 return C_NONE 893 894 case obj.TYPE_REG: 895 return c.aclassreg(a.Reg) 896 897 case obj.TYPE_MEM: 898 if a.Index != 0 { 899 if a.Name != obj.NAME_NONE || a.Offset != 0 { 900 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class) 901 902 } 903 return C_XOREG 904 } 905 switch a.Name { 906 case obj.NAME_GOTREF, obj.NAME_TOCREF: 907 return C_ADDR 908 909 case obj.NAME_EXTERN, 910 obj.NAME_STATIC: 911 c.instoffset = a.Offset 912 if a.Sym == nil { 913 break 914 } else if a.Sym.Type == objabi.STLSBSS { 915 // For PIC builds, use 12 byte got initial-exec TLS accesses. 916 if c.ctxt.Flag_shared { 917 return C_TLS_IE 918 } 919 // Otherwise, use 8 byte local-exec TLS accesses. 
920 return C_TLS_LE 921 } else { 922 return C_ADDR 923 } 924 925 case obj.NAME_AUTO: 926 c.instoffset = int64(c.autosize) + a.Offset 927 928 if c.instoffset >= -BIG && c.instoffset < BIG { 929 return C_SOREG 930 } 931 return C_LOREG 932 933 case obj.NAME_PARAM: 934 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize 935 if c.instoffset >= -BIG && c.instoffset < BIG { 936 return C_SOREG 937 } 938 return C_LOREG 939 940 case obj.NAME_NONE: 941 c.instoffset = a.Offset 942 if a.Offset == 0 && a.Index == 0 { 943 return C_ZOREG 944 } else if c.instoffset >= -BIG && c.instoffset < BIG { 945 return C_SOREG 946 } else { 947 return C_LOREG 948 } 949 } 950 951 return C_GOK 952 953 case obj.TYPE_TEXTSIZE: 954 return C_TEXTSIZE 955 956 case obj.TYPE_FCONST: 957 // The only cases where FCONST will occur are with float64 +/- 0. 958 // All other float constants are generated in memory. 959 f64 := a.Val.(float64) 960 if f64 == 0 { 961 if math.Signbit(f64) { 962 return C_ADDCON 963 } 964 return C_ZCON 965 } 966 log.Fatalf("Unexpected nonzero FCONST operand %v", a) 967 968 case obj.TYPE_CONST, 969 obj.TYPE_ADDR: 970 switch a.Name { 971 case obj.NAME_NONE: 972 c.instoffset = a.Offset 973 if a.Reg != 0 { 974 if -BIG <= c.instoffset && c.instoffset < BIG { 975 return C_SACON 976 } 977 if isint32(c.instoffset) { 978 return C_LACON 979 } 980 return C_DACON 981 } 982 983 case obj.NAME_EXTERN, 984 obj.NAME_STATIC: 985 s := a.Sym 986 if s == nil { 987 return C_GOK 988 } 989 c.instoffset = a.Offset 990 return C_LACON 991 992 case obj.NAME_AUTO: 993 c.instoffset = int64(c.autosize) + a.Offset 994 if c.instoffset >= -BIG && c.instoffset < BIG { 995 return C_SACON 996 } 997 return C_LACON 998 999 case obj.NAME_PARAM: 1000 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize 1001 if c.instoffset >= -BIG && c.instoffset < BIG { 1002 return C_SACON 1003 } 1004 return C_LACON 1005 1006 default: 1007 return C_GOK 1008 } 1009 1010 if c.instoffset >= 0 { 1011 sbits := bits.Len64(uint64(c.instoffset)) 1012 switch { 1013 case sbits <= 5: 1014 return C_ZCON + sbits 1015 case sbits <= 8: 1016 return C_U8CON 1017 case sbits <= 15: 1018 return C_U15CON 1019 case sbits <= 16: 1020 return C_U16CON 1021 case sbits <= 31: 1022 // Special case, a positive int32 value which is a multiple of 2^16 1023 if c.instoffset&0xFFFF == 0 { 1024 return C_U3216CON 1025 } 1026 return C_U32CON 1027 case sbits <= 32: 1028 return C_U32CON 1029 case sbits <= 33: 1030 return C_S34CON 1031 default: 1032 return C_64CON 1033 } 1034 } else { 1035 sbits := bits.Len64(uint64(^c.instoffset)) 1036 switch { 1037 case sbits <= 15: 1038 return C_S16CON 1039 case sbits <= 31: 1040 // Special case, a negative int32 value which is a multiple of 2^16 1041 if c.instoffset&0xFFFF == 0 { 1042 return C_S3216CON 1043 } 1044 return C_S32CON 1045 case sbits <= 33: 1046 return C_S34CON 1047 default: 1048 return C_64CON 1049 } 1050 } 1051 1052 case obj.TYPE_BRANCH: 1053 if a.Sym != nil && c.ctxt.Flag_dynlink { 1054 return C_LBRAPIC 1055 } 1056 return C_SBRA 1057 } 1058 1059 return C_GOK 1060 } 1061 1062 func prasm(p *obj.Prog) { 1063 fmt.Printf("%v\n", p) 1064 } 1065 1066 func (c *ctxt9) oplook(p *obj.Prog) *Optab { 1067 a1 := int(p.Optab) 1068 if a1 != 0 { 1069 return &optab[a1-1] 1070 } 1071 a1 = int(p.From.Class) 1072 if a1 == 0 { 1073 a1 = c.aclass(&p.From) + 1 1074 p.From.Class = int8(a1) 1075 } 1076 a1-- 1077 1078 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1} 1079 for i, ap := range p.RestArgs { 1080 argsv[i] = 
int(ap.Addr.Class) 1081 if argsv[i] == 0 { 1082 argsv[i] = c.aclass(&ap.Addr) + 1 1083 ap.Addr.Class = int8(argsv[i]) 1084 } 1085 1086 } 1087 a3 := argsv[0] - 1 1088 a4 := argsv[1] - 1 1089 a5 := argsv[2] - 1 1090 1091 a6 := int(p.To.Class) 1092 if a6 == 0 { 1093 a6 = c.aclass(&p.To) + 1 1094 p.To.Class = int8(a6) 1095 } 1096 a6-- 1097 1098 a2 := C_NONE 1099 if p.Reg != 0 { 1100 a2 = c.aclassreg(p.Reg) 1101 } 1102 1103 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6) 1104 ops := oprange[p.As&obj.AMask] 1105 c1 := &xcmp[a1] 1106 c2 := &xcmp[a2] 1107 c3 := &xcmp[a3] 1108 c4 := &xcmp[a4] 1109 c5 := &xcmp[a5] 1110 c6 := &xcmp[a6] 1111 for i := range ops { 1112 op := &ops[i] 1113 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] { 1114 p.Optab = uint16(cap(optab) - cap(ops) + i + 1) 1115 return op 1116 } 1117 } 1118 1119 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6)) 1120 prasm(p) 1121 if ops == nil { 1122 ops = optab 1123 } 1124 return &ops[0] 1125 } 1126 1127 // Compare two operand types (ex C_REG, or C_SCON) 1128 // and return true if b is compatible with a. 1129 // 1130 // Argument comparison isn't reflexitive, so care must be taken. 1131 // a is the argument type as found in optab, b is the argument as 1132 // fitted by aclass. 1133 func cmp(a int, b int) bool { 1134 if a == b { 1135 return true 1136 } 1137 switch a { 1138 1139 case C_SPR: 1140 if b == C_LR || b == C_XER || b == C_CTR { 1141 return true 1142 } 1143 1144 case C_U1CON: 1145 return cmp(C_ZCON, b) 1146 case C_U2CON: 1147 return cmp(C_U1CON, b) 1148 case C_U3CON: 1149 return cmp(C_U2CON, b) 1150 case C_U4CON: 1151 return cmp(C_U3CON, b) 1152 case C_U5CON: 1153 return cmp(C_U4CON, b) 1154 case C_U8CON: 1155 return cmp(C_U5CON, b) 1156 case C_U15CON: 1157 return cmp(C_U8CON, b) 1158 case C_U16CON: 1159 return cmp(C_U15CON, b) 1160 1161 case C_S16CON: 1162 return cmp(C_U15CON, b) 1163 case C_32CON: 1164 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b) 1165 case C_S34CON: 1166 return cmp(C_32CON, b) 1167 case C_64CON: 1168 return cmp(C_S34CON, b) 1169 1170 case C_32S16CON: 1171 return cmp(C_ZCON, b) 1172 1173 case C_LACON: 1174 return cmp(C_SACON, b) 1175 1176 case C_LBRA: 1177 return cmp(C_SBRA, b) 1178 1179 case C_SOREG: 1180 return cmp(C_ZOREG, b) 1181 1182 case C_LOREG: 1183 return cmp(C_SOREG, b) 1184 1185 case C_XOREG: 1186 return cmp(C_REG, b) || cmp(C_ZOREG, b) 1187 1188 // An even/odd register input always matches the regular register types. 1189 case C_REG: 1190 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0) 1191 case C_FREG: 1192 return cmp(C_FREGP, b) 1193 case C_VSREG: 1194 /* Allow any VR argument as a VSR operand. */ 1195 return cmp(C_VSREGP, b) || cmp(C_VREG, b) 1196 1197 case C_ANY: 1198 return true 1199 } 1200 1201 return false 1202 } 1203 1204 // Used when sorting the optab. Sorting is 1205 // done in a way so that the best choice of 1206 // opcode/operand combination is considered first. 1207 func optabLess(i, j int) bool { 1208 p1 := &optab[i] 1209 p2 := &optab[j] 1210 n := int(p1.as) - int(p2.as) 1211 // same opcode 1212 if n != 0 { 1213 return n < 0 1214 } 1215 // Consider those that generate fewer 1216 // instructions first. 
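	// Editorial note, not part of the original source: because cmp() lets a
	// more general operand class accept a more specific one, sorting the
	// size and a1..a6 fields in ascending order tends to place the most
	// specific, shortest matching entry first, so oplook can simply take the
	// first entry whose operand classes all match.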
1217 n = int(p1.size) - int(p2.size) 1218 if n != 0 { 1219 return n < 0 1220 } 1221 // operand order should match 1222 // better choices first 1223 n = int(p1.a1) - int(p2.a1) 1224 if n != 0 { 1225 return n < 0 1226 } 1227 n = int(p1.a2) - int(p2.a2) 1228 if n != 0 { 1229 return n < 0 1230 } 1231 n = int(p1.a3) - int(p2.a3) 1232 if n != 0 { 1233 return n < 0 1234 } 1235 n = int(p1.a4) - int(p2.a4) 1236 if n != 0 { 1237 return n < 0 1238 } 1239 n = int(p1.a5) - int(p2.a5) 1240 if n != 0 { 1241 return n < 0 1242 } 1243 n = int(p1.a6) - int(p2.a6) 1244 if n != 0 { 1245 return n < 0 1246 } 1247 return false 1248 } 1249 1250 // Add an entry to the opcode table for 1251 // a new opcode b0 with the same operand combinations 1252 // as opcode a. 1253 func opset(a, b0 obj.As) { 1254 oprange[a&obj.AMask] = oprange[b0] 1255 } 1256 1257 // Build the opcode table 1258 func buildop(ctxt *obj.Link) { 1259 if oprange[AANDN&obj.AMask] != nil { 1260 // Already initialized; stop now. 1261 // This happens in the cmd/asm tests, 1262 // each of which re-initializes the arch. 1263 return 1264 } 1265 1266 for i := 0; i < C_NCLASS; i++ { 1267 for n := 0; n < C_NCLASS; n++ { 1268 if cmp(n, i) { 1269 xcmp[i][n] = true 1270 } 1271 } 1272 } 1273 for i := range optab { 1274 // Use the legacy assembler function if none provided. 1275 if optab[i].asmout == nil { 1276 optab[i].asmout = asmout 1277 } 1278 } 1279 // Append the generated entries, sort, and fill out oprange. 1280 optab = append(optab, optabGen...) 1281 sort.Slice(optab, optabLess) 1282 for i := 0; i < len(optab); { 1283 r := optab[i].as 1284 r0 := r & obj.AMask 1285 start := i 1286 for i < len(optab) && optab[i].as == r { 1287 i++ 1288 } 1289 oprange[r0] = optab[start:i] 1290 1291 switch r { 1292 default: 1293 if !opsetGen(r) { 1294 ctxt.Diag("unknown op in build: %v", r) 1295 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r) 1296 } 1297 1298 case ADCBF: /* unary indexed: op (b+a); op (b) */ 1299 opset(ADCBI, r0) 1300 1301 opset(ADCBST, r0) 1302 opset(ADCBT, r0) 1303 opset(ADCBTST, r0) 1304 opset(ADCBZ, r0) 1305 opset(AICBI, r0) 1306 1307 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */ 1308 opset(ASTWCCC, r0) 1309 opset(ASTHCCC, r0) 1310 opset(ASTBCCC, r0) 1311 1312 case AREM: /* macro */ 1313 opset(AREM, r0) 1314 1315 case AREMU: 1316 opset(AREMU, r0) 1317 1318 case AREMD: 1319 opset(AREMDU, r0) 1320 1321 case AMULLW: 1322 opset(AMULLD, r0) 1323 1324 case ADIVW: /* op Rb[,Ra],Rd */ 1325 opset(AMULHW, r0) 1326 1327 opset(AMULHWCC, r0) 1328 opset(AMULHWU, r0) 1329 opset(AMULHWUCC, r0) 1330 opset(AMULLWCC, r0) 1331 opset(AMULLWVCC, r0) 1332 opset(AMULLWV, r0) 1333 opset(ADIVWCC, r0) 1334 opset(ADIVWV, r0) 1335 opset(ADIVWVCC, r0) 1336 opset(ADIVWU, r0) 1337 opset(ADIVWUCC, r0) 1338 opset(ADIVWUV, r0) 1339 opset(ADIVWUVCC, r0) 1340 opset(AMODUD, r0) 1341 opset(AMODUW, r0) 1342 opset(AMODSD, r0) 1343 opset(AMODSW, r0) 1344 opset(AADDCC, r0) 1345 opset(AADDCV, r0) 1346 opset(AADDCVCC, r0) 1347 opset(AADDV, r0) 1348 opset(AADDVCC, r0) 1349 opset(AADDE, r0) 1350 opset(AADDECC, r0) 1351 opset(AADDEV, r0) 1352 opset(AADDEVCC, r0) 1353 opset(AMULHD, r0) 1354 opset(AMULHDCC, r0) 1355 opset(AMULHDU, r0) 1356 opset(AMULHDUCC, r0) 1357 opset(AMULLDCC, r0) 1358 opset(AMULLDVCC, r0) 1359 opset(AMULLDV, r0) 1360 opset(ADIVD, r0) 1361 opset(ADIVDCC, r0) 1362 opset(ADIVDE, r0) 1363 opset(ADIVDEU, r0) 1364 opset(ADIVDECC, r0) 1365 opset(ADIVDEUCC, r0) 1366 opset(ADIVDVCC, r0) 1367 opset(ADIVDV, r0) 1368 opset(ADIVDU, r0) 1369 opset(ADIVDUV, r0) 1370 
opset(ADIVDUVCC, r0) 1371 opset(ADIVDUCC, r0) 1372 1373 case ACRAND: 1374 opset(ACRANDN, r0) 1375 opset(ACREQV, r0) 1376 opset(ACRNAND, r0) 1377 opset(ACRNOR, r0) 1378 opset(ACROR, r0) 1379 opset(ACRORN, r0) 1380 opset(ACRXOR, r0) 1381 1382 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */ 1383 opset(APOPCNTW, r0) 1384 opset(APOPCNTB, r0) 1385 opset(ACNTTZW, r0) 1386 opset(ACNTTZWCC, r0) 1387 opset(ACNTTZD, r0) 1388 opset(ACNTTZDCC, r0) 1389 1390 case ACOPY: /* copy, paste. */ 1391 opset(APASTECC, r0) 1392 1393 case AMADDHD: /* maddhd, maddhdu, maddld */ 1394 opset(AMADDHDU, r0) 1395 opset(AMADDLD, r0) 1396 1397 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */ 1398 opset(AMOVH, r0) 1399 opset(AMOVHZ, r0) 1400 1401 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */ 1402 opset(AMOVHU, r0) 1403 1404 opset(AMOVHZU, r0) 1405 opset(AMOVWU, r0) 1406 opset(AMOVWZU, r0) 1407 opset(AMOVDU, r0) 1408 opset(AMOVMW, r0) 1409 1410 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */ 1411 opset(ALVEHX, r0) 1412 opset(ALVEWX, r0) 1413 opset(ALVX, r0) 1414 opset(ALVXL, r0) 1415 opset(ALVSL, r0) 1416 opset(ALVSR, r0) 1417 1418 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */ 1419 opset(ASTVEHX, r0) 1420 opset(ASTVEWX, r0) 1421 opset(ASTVX, r0) 1422 opset(ASTVXL, r0) 1423 1424 case AVAND: /* vand, vandc, vnand */ 1425 opset(AVAND, r0) 1426 opset(AVANDC, r0) 1427 opset(AVNAND, r0) 1428 1429 case AVMRGOW: /* vmrgew, vmrgow */ 1430 opset(AVMRGEW, r0) 1431 1432 case AVOR: /* vor, vorc, vxor, vnor, veqv */ 1433 opset(AVOR, r0) 1434 opset(AVORC, r0) 1435 opset(AVXOR, r0) 1436 opset(AVNOR, r0) 1437 opset(AVEQV, r0) 1438 1439 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */ 1440 opset(AVADDUBM, r0) 1441 opset(AVADDUHM, r0) 1442 opset(AVADDUWM, r0) 1443 opset(AVADDUDM, r0) 1444 opset(AVADDUQM, r0) 1445 1446 case AVADDCU: /* vaddcuq, vaddcuw */ 1447 opset(AVADDCUQ, r0) 1448 opset(AVADDCUW, r0) 1449 1450 case AVADDUS: /* vaddubs, vadduhs, vadduws */ 1451 opset(AVADDUBS, r0) 1452 opset(AVADDUHS, r0) 1453 opset(AVADDUWS, r0) 1454 1455 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */ 1456 opset(AVADDSBS, r0) 1457 opset(AVADDSHS, r0) 1458 opset(AVADDSWS, r0) 1459 1460 case AVADDE: /* vaddeuqm, vaddecuq */ 1461 opset(AVADDEUQM, r0) 1462 opset(AVADDECUQ, r0) 1463 1464 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */ 1465 opset(AVSUBUBM, r0) 1466 opset(AVSUBUHM, r0) 1467 opset(AVSUBUWM, r0) 1468 opset(AVSUBUDM, r0) 1469 opset(AVSUBUQM, r0) 1470 1471 case AVSUBCU: /* vsubcuq, vsubcuw */ 1472 opset(AVSUBCUQ, r0) 1473 opset(AVSUBCUW, r0) 1474 1475 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */ 1476 opset(AVSUBUBS, r0) 1477 opset(AVSUBUHS, r0) 1478 opset(AVSUBUWS, r0) 1479 1480 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */ 1481 opset(AVSUBSBS, r0) 1482 opset(AVSUBSHS, r0) 1483 opset(AVSUBSWS, r0) 1484 1485 case AVSUBE: /* vsubeuqm, vsubecuq */ 1486 opset(AVSUBEUQM, r0) 1487 opset(AVSUBECUQ, r0) 1488 1489 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */ 1490 opset(AVMULOSB, r0) 1491 opset(AVMULEUB, r0) 1492 opset(AVMULOUB, r0) 1493 opset(AVMULESH, r0) 1494 opset(AVMULOSH, r0) 1495 opset(AVMULEUH, r0) 1496 opset(AVMULOUH, r0) 1497 opset(AVMULESW, r0) 1498 opset(AVMULOSW, r0) 1499 opset(AVMULEUW, r0) 1500 opset(AVMULOUW, r0) 1501 opset(AVMULUWM, r0) 1502 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */ 1503 opset(AVPMSUMB, r0) 1504 opset(AVPMSUMH, r0) 
1505 opset(AVPMSUMW, r0) 1506 opset(AVPMSUMD, r0) 1507 1508 case AVR: /* vrlb, vrlh, vrlw, vrld */ 1509 opset(AVRLB, r0) 1510 opset(AVRLH, r0) 1511 opset(AVRLW, r0) 1512 opset(AVRLD, r0) 1513 1514 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */ 1515 opset(AVSLB, r0) 1516 opset(AVSLH, r0) 1517 opset(AVSLW, r0) 1518 opset(AVSL, r0) 1519 opset(AVSLO, r0) 1520 opset(AVSRB, r0) 1521 opset(AVSRH, r0) 1522 opset(AVSRW, r0) 1523 opset(AVSR, r0) 1524 opset(AVSRO, r0) 1525 opset(AVSLD, r0) 1526 opset(AVSRD, r0) 1527 1528 case AVSA: /* vsrab, vsrah, vsraw, vsrad */ 1529 opset(AVSRAB, r0) 1530 opset(AVSRAH, r0) 1531 opset(AVSRAW, r0) 1532 opset(AVSRAD, r0) 1533 1534 case AVSOI: /* vsldoi */ 1535 opset(AVSLDOI, r0) 1536 1537 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */ 1538 opset(AVCLZB, r0) 1539 opset(AVCLZH, r0) 1540 opset(AVCLZW, r0) 1541 opset(AVCLZD, r0) 1542 1543 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */ 1544 opset(AVPOPCNTB, r0) 1545 opset(AVPOPCNTH, r0) 1546 opset(AVPOPCNTW, r0) 1547 opset(AVPOPCNTD, r0) 1548 1549 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */ 1550 opset(AVCMPEQUB, r0) 1551 opset(AVCMPEQUBCC, r0) 1552 opset(AVCMPEQUH, r0) 1553 opset(AVCMPEQUHCC, r0) 1554 opset(AVCMPEQUW, r0) 1555 opset(AVCMPEQUWCC, r0) 1556 opset(AVCMPEQUD, r0) 1557 opset(AVCMPEQUDCC, r0) 1558 1559 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */ 1560 opset(AVCMPGTUB, r0) 1561 opset(AVCMPGTUBCC, r0) 1562 opset(AVCMPGTUH, r0) 1563 opset(AVCMPGTUHCC, r0) 1564 opset(AVCMPGTUW, r0) 1565 opset(AVCMPGTUWCC, r0) 1566 opset(AVCMPGTUD, r0) 1567 opset(AVCMPGTUDCC, r0) 1568 opset(AVCMPGTSB, r0) 1569 opset(AVCMPGTSBCC, r0) 1570 opset(AVCMPGTSH, r0) 1571 opset(AVCMPGTSHCC, r0) 1572 opset(AVCMPGTSW, r0) 1573 opset(AVCMPGTSWCC, r0) 1574 opset(AVCMPGTSD, r0) 1575 opset(AVCMPGTSDCC, r0) 1576 1577 case AVCMPNEZB: /* vcmpnezb[.] 
*/ 1578 opset(AVCMPNEZBCC, r0) 1579 opset(AVCMPNEB, r0) 1580 opset(AVCMPNEBCC, r0) 1581 opset(AVCMPNEH, r0) 1582 opset(AVCMPNEHCC, r0) 1583 opset(AVCMPNEW, r0) 1584 opset(AVCMPNEWCC, r0) 1585 1586 case AVPERM: /* vperm */ 1587 opset(AVPERMXOR, r0) 1588 opset(AVPERMR, r0) 1589 1590 case AVBPERMQ: /* vbpermq, vbpermd */ 1591 opset(AVBPERMD, r0) 1592 1593 case AVSEL: /* vsel */ 1594 opset(AVSEL, r0) 1595 1596 case AVSPLTB: /* vspltb, vsplth, vspltw */ 1597 opset(AVSPLTH, r0) 1598 opset(AVSPLTW, r0) 1599 1600 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */ 1601 opset(AVSPLTISH, r0) 1602 opset(AVSPLTISW, r0) 1603 1604 case AVCIPH: /* vcipher, vcipherlast */ 1605 opset(AVCIPHER, r0) 1606 opset(AVCIPHERLAST, r0) 1607 1608 case AVNCIPH: /* vncipher, vncipherlast */ 1609 opset(AVNCIPHER, r0) 1610 opset(AVNCIPHERLAST, r0) 1611 1612 case AVSBOX: /* vsbox */ 1613 opset(AVSBOX, r0) 1614 1615 case AVSHASIGMA: /* vshasigmaw, vshasigmad */ 1616 opset(AVSHASIGMAW, r0) 1617 opset(AVSHASIGMAD, r0) 1618 1619 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */ 1620 opset(ALXVDSX, r0) 1621 opset(ALXVW4X, r0) 1622 opset(ALXVH8X, r0) 1623 opset(ALXVB16X, r0) 1624 1625 case ALXV: /* lxv */ 1626 opset(ALXV, r0) 1627 1628 case ALXVL: /* lxvl, lxvll, lxvx */ 1629 opset(ALXVLL, r0) 1630 opset(ALXVX, r0) 1631 1632 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */ 1633 opset(ASTXVW4X, r0) 1634 opset(ASTXVH8X, r0) 1635 opset(ASTXVB16X, r0) 1636 1637 case ASTXV: /* stxv */ 1638 opset(ASTXV, r0) 1639 1640 case ASTXVL: /* stxvl, stxvll, stvx */ 1641 opset(ASTXVLL, r0) 1642 opset(ASTXVX, r0) 1643 1644 case ALXSDX: /* lxsdx */ 1645 opset(ALXSDX, r0) 1646 1647 case ASTXSDX: /* stxsdx */ 1648 opset(ASTXSDX, r0) 1649 1650 case ALXSIWAX: /* lxsiwax, lxsiwzx */ 1651 opset(ALXSIWZX, r0) 1652 1653 case ASTXSIWX: /* stxsiwx */ 1654 opset(ASTXSIWX, r0) 1655 1656 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */ 1657 opset(AMFFPRD, r0) 1658 opset(AMFVRD, r0) 1659 opset(AMFVSRWZ, r0) 1660 opset(AMFVSRLD, r0) 1661 1662 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */ 1663 opset(AMTFPRD, r0) 1664 opset(AMTVRD, r0) 1665 opset(AMTVSRWA, r0) 1666 opset(AMTVSRWZ, r0) 1667 opset(AMTVSRWS, r0) 1668 1669 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */ 1670 opset(AXXLANDC, r0) 1671 opset(AXXLEQV, r0) 1672 opset(AXXLNAND, r0) 1673 1674 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */ 1675 opset(AXXLORC, r0) 1676 opset(AXXLNOR, r0) 1677 opset(AXXLORQ, r0) 1678 opset(AXXLXOR, r0) 1679 1680 case AXXSEL: /* xxsel */ 1681 opset(AXXSEL, r0) 1682 1683 case AXXMRGHW: /* xxmrghw, xxmrglw */ 1684 opset(AXXMRGLW, r0) 1685 1686 case AXXSPLTW: /* xxspltw */ 1687 opset(AXXSPLTW, r0) 1688 1689 case AXXSPLTIB: /* xxspltib */ 1690 opset(AXXSPLTIB, r0) 1691 1692 case AXXPERM: /* xxpermdi */ 1693 opset(AXXPERM, r0) 1694 1695 case AXXSLDWI: /* xxsldwi */ 1696 opset(AXXPERMDI, r0) 1697 opset(AXXSLDWI, r0) 1698 1699 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */ 1700 opset(AXXBRD, r0) 1701 opset(AXXBRW, r0) 1702 opset(AXXBRH, r0) 1703 1704 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */ 1705 opset(AXSCVSPDP, r0) 1706 opset(AXSCVDPSPN, r0) 1707 opset(AXSCVSPDPN, r0) 1708 1709 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */ 1710 opset(AXVCVSPDP, r0) 1711 1712 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */ 1713 opset(AXSCVDPSXWS, r0) 1714 opset(AXSCVDPUXDS, r0) 1715 opset(AXSCVDPUXWS, r0) 1716 1717 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, 
xscvuxdsp */ 1718 opset(AXSCVUXDDP, r0) 1719 opset(AXSCVSXDSP, r0) 1720 opset(AXSCVUXDSP, r0) 1721 1722 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */ 1723 opset(AXVCVDPSXDS, r0) 1724 opset(AXVCVDPSXWS, r0) 1725 opset(AXVCVDPUXDS, r0) 1726 opset(AXVCVDPUXWS, r0) 1727 opset(AXVCVSPSXDS, r0) 1728 opset(AXVCVSPSXWS, r0) 1729 opset(AXVCVSPUXDS, r0) 1730 opset(AXVCVSPUXWS, r0) 1731 1732 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */ 1733 opset(AXVCVSXWDP, r0) 1734 opset(AXVCVUXDDP, r0) 1735 opset(AXVCVUXWDP, r0) 1736 opset(AXVCVSXDSP, r0) 1737 opset(AXVCVSXWSP, r0) 1738 opset(AXVCVUXDSP, r0) 1739 opset(AXVCVUXWSP, r0) 1740 1741 case AAND: /* logical op Rb,Rs,Ra; no literal */ 1742 opset(AANDN, r0) 1743 opset(AANDNCC, r0) 1744 opset(AEQV, r0) 1745 opset(AEQVCC, r0) 1746 opset(ANAND, r0) 1747 opset(ANANDCC, r0) 1748 opset(ANOR, r0) 1749 opset(ANORCC, r0) 1750 opset(AORCC, r0) 1751 opset(AORN, r0) 1752 opset(AORNCC, r0) 1753 opset(AXORCC, r0) 1754 1755 case AADDME: /* op Ra, Rd */ 1756 opset(AADDMECC, r0) 1757 1758 opset(AADDMEV, r0) 1759 opset(AADDMEVCC, r0) 1760 opset(AADDZE, r0) 1761 opset(AADDZECC, r0) 1762 opset(AADDZEV, r0) 1763 opset(AADDZEVCC, r0) 1764 opset(ASUBME, r0) 1765 opset(ASUBMECC, r0) 1766 opset(ASUBMEV, r0) 1767 opset(ASUBMEVCC, r0) 1768 opset(ASUBZE, r0) 1769 opset(ASUBZECC, r0) 1770 opset(ASUBZEV, r0) 1771 opset(ASUBZEVCC, r0) 1772 1773 case AADDC: 1774 opset(AADDCCC, r0) 1775 1776 case ABEQ: 1777 opset(ABGE, r0) 1778 opset(ABGT, r0) 1779 opset(ABLE, r0) 1780 opset(ABLT, r0) 1781 opset(ABNE, r0) 1782 opset(ABVC, r0) 1783 opset(ABVS, r0) 1784 1785 case ABR: 1786 opset(ABL, r0) 1787 1788 case ABC: 1789 opset(ABCL, r0) 1790 1791 case ABDNZ: 1792 opset(ABDZ, r0) 1793 1794 case AEXTSB: /* op Rs, Ra */ 1795 opset(AEXTSBCC, r0) 1796 1797 opset(AEXTSH, r0) 1798 opset(AEXTSHCC, r0) 1799 opset(ACNTLZW, r0) 1800 opset(ACNTLZWCC, r0) 1801 opset(ACNTLZD, r0) 1802 opset(AEXTSW, r0) 1803 opset(AEXTSWCC, r0) 1804 opset(ACNTLZDCC, r0) 1805 1806 case AFABS: /* fop [s,]d */ 1807 opset(AFABSCC, r0) 1808 1809 opset(AFNABS, r0) 1810 opset(AFNABSCC, r0) 1811 opset(AFNEG, r0) 1812 opset(AFNEGCC, r0) 1813 opset(AFRSP, r0) 1814 opset(AFRSPCC, r0) 1815 opset(AFCTIW, r0) 1816 opset(AFCTIWCC, r0) 1817 opset(AFCTIWZ, r0) 1818 opset(AFCTIWZCC, r0) 1819 opset(AFCTID, r0) 1820 opset(AFCTIDCC, r0) 1821 opset(AFCTIDZ, r0) 1822 opset(AFCTIDZCC, r0) 1823 opset(AFCFID, r0) 1824 opset(AFCFIDCC, r0) 1825 opset(AFCFIDU, r0) 1826 opset(AFCFIDUCC, r0) 1827 opset(AFCFIDS, r0) 1828 opset(AFCFIDSCC, r0) 1829 opset(AFRES, r0) 1830 opset(AFRESCC, r0) 1831 opset(AFRIM, r0) 1832 opset(AFRIMCC, r0) 1833 opset(AFRIP, r0) 1834 opset(AFRIPCC, r0) 1835 opset(AFRIZ, r0) 1836 opset(AFRIZCC, r0) 1837 opset(AFRIN, r0) 1838 opset(AFRINCC, r0) 1839 opset(AFRSQRTE, r0) 1840 opset(AFRSQRTECC, r0) 1841 opset(AFSQRT, r0) 1842 opset(AFSQRTCC, r0) 1843 opset(AFSQRTS, r0) 1844 opset(AFSQRTSCC, r0) 1845 1846 case AFADD: 1847 opset(AFADDS, r0) 1848 opset(AFADDCC, r0) 1849 opset(AFADDSCC, r0) 1850 opset(AFCPSGN, r0) 1851 opset(AFCPSGNCC, r0) 1852 opset(AFDIV, r0) 1853 opset(AFDIVS, r0) 1854 opset(AFDIVCC, r0) 1855 opset(AFDIVSCC, r0) 1856 opset(AFSUB, r0) 1857 opset(AFSUBS, r0) 1858 opset(AFSUBCC, r0) 1859 opset(AFSUBSCC, r0) 1860 1861 case AFMADD: 1862 opset(AFMADDCC, r0) 1863 opset(AFMADDS, r0) 1864 opset(AFMADDSCC, r0) 1865 opset(AFMSUB, r0) 1866 opset(AFMSUBCC, r0) 1867 opset(AFMSUBS, r0) 1868 opset(AFMSUBSCC, 
r0) 1869 opset(AFNMADD, r0) 1870 opset(AFNMADDCC, r0) 1871 opset(AFNMADDS, r0) 1872 opset(AFNMADDSCC, r0) 1873 opset(AFNMSUB, r0) 1874 opset(AFNMSUBCC, r0) 1875 opset(AFNMSUBS, r0) 1876 opset(AFNMSUBSCC, r0) 1877 opset(AFSEL, r0) 1878 opset(AFSELCC, r0) 1879 1880 case AFMUL: 1881 opset(AFMULS, r0) 1882 opset(AFMULCC, r0) 1883 opset(AFMULSCC, r0) 1884 1885 case AFCMPO: 1886 opset(AFCMPU, r0) 1887 1888 case AMTFSB0: 1889 opset(AMTFSB0CC, r0) 1890 opset(AMTFSB1, r0) 1891 opset(AMTFSB1CC, r0) 1892 1893 case ANEG: /* op [Ra,] Rd */ 1894 opset(ANEGCC, r0) 1895 1896 opset(ANEGV, r0) 1897 opset(ANEGVCC, r0) 1898 1899 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */ 1900 opset(AXOR, r0) 1901 1902 case AORIS: /* oris/xoris $uimm,Rs,Ra */ 1903 opset(AXORIS, r0) 1904 1905 case ASLW: 1906 opset(ASLWCC, r0) 1907 opset(ASRW, r0) 1908 opset(ASRWCC, r0) 1909 opset(AROTLW, r0) 1910 1911 case ASLD: 1912 opset(ASLDCC, r0) 1913 opset(ASRD, r0) 1914 opset(ASRDCC, r0) 1915 opset(AROTL, r0) 1916 1917 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ 1918 opset(ASRAWCC, r0) 1919 1920 case AEXTSWSLI: 1921 opset(AEXTSWSLICC, r0) 1922 1923 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ 1924 opset(ASRADCC, r0) 1925 1926 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */ 1927 opset(ASUB, r0) 1928 1929 opset(ASUBCC, r0) 1930 opset(ASUBV, r0) 1931 opset(ASUBVCC, r0) 1932 opset(ASUBCCC, r0) 1933 opset(ASUBCV, r0) 1934 opset(ASUBCVCC, r0) 1935 opset(ASUBE, r0) 1936 opset(ASUBECC, r0) 1937 opset(ASUBEV, r0) 1938 opset(ASUBEVCC, r0) 1939 1940 case ASYNC: 1941 opset(AISYNC, r0) 1942 opset(ALWSYNC, r0) 1943 opset(APTESYNC, r0) 1944 opset(ATLBSYNC, r0) 1945 1946 case ARLWMI: 1947 opset(ARLWMICC, r0) 1948 opset(ARLWNM, r0) 1949 opset(ARLWNMCC, r0) 1950 1951 case ARLDMI: 1952 opset(ARLDMICC, r0) 1953 opset(ARLDIMI, r0) 1954 opset(ARLDIMICC, r0) 1955 1956 case ARLDC: 1957 opset(ARLDCCC, r0) 1958 1959 case ARLDCL: 1960 opset(ARLDCR, r0) 1961 opset(ARLDCLCC, r0) 1962 opset(ARLDCRCC, r0) 1963 1964 case ARLDICL: 1965 opset(ARLDICLCC, r0) 1966 opset(ARLDICR, r0) 1967 opset(ARLDICRCC, r0) 1968 opset(ARLDIC, r0) 1969 opset(ARLDICCC, r0) 1970 opset(ACLRLSLDI, r0) 1971 1972 case AFMOVD: 1973 opset(AFMOVDCC, r0) 1974 opset(AFMOVDU, r0) 1975 opset(AFMOVS, r0) 1976 opset(AFMOVSU, r0) 1977 1978 case ALDAR: 1979 opset(ALBAR, r0) 1980 opset(ALHAR, r0) 1981 opset(ALWAR, r0) 1982 1983 case ASYSCALL: /* just the op; flow of control */ 1984 opset(ARFI, r0) 1985 1986 opset(ARFCI, r0) 1987 opset(ARFID, r0) 1988 opset(AHRFID, r0) 1989 1990 case AMOVHBR: 1991 opset(AMOVWBR, r0) 1992 opset(AMOVDBR, r0) 1993 1994 case ASLBMFEE: 1995 opset(ASLBMFEV, r0) 1996 1997 case ATW: 1998 opset(ATD, r0) 1999 2000 case ATLBIE: 2001 opset(ASLBIE, r0) 2002 opset(ATLBIEL, r0) 2003 2004 case AEIEIO: 2005 opset(ASLBIA, r0) 2006 2007 case ACMP: 2008 opset(ACMPW, r0) 2009 2010 case ACMPU: 2011 opset(ACMPWU, r0) 2012 2013 case ACMPB: 2014 opset(ACMPB, r0) 2015 2016 case AFTDIV: 2017 opset(AFTDIV, r0) 2018 2019 case AFTSQRT: 2020 opset(AFTSQRT, r0) 2021 2022 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */ 2023 opset(AMOVWZ, r0) /* Same as above, but zero extended */ 2024 2025 case AADD, 2026 AADDIS, 2027 AANDCC, /* and. Rb,Rs,Ra; andi. 
$uimm,Rs,Ra */ 2028 AANDISCC, 2029 AFMOVSX, 2030 AFMOVSZ, 2031 ALSW, 2032 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */ 2033 AMOVB, /* macro: move byte with sign extension */ 2034 AMOVBU, /* macro: move byte with sign extension & update */ 2035 AMOVFL, 2036 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */ 2037 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */ 2038 ASTSW, 2039 ASLBMTE, 2040 AWORD, 2041 ADWORD, 2042 ADARN, 2043 AVMSUMUDM, 2044 AADDEX, 2045 ACMPEQB, 2046 ACLRLSLWI, 2047 AMTVSRDD, 2048 APNOP, 2049 AISEL, 2050 obj.ANOP, 2051 obj.ATEXT, 2052 obj.AUNDEF, 2053 obj.AFUNCDATA, 2054 obj.APCALIGN, 2055 obj.APCDATA, 2056 obj.ADUFFZERO, 2057 obj.ADUFFCOPY: 2058 break 2059 } 2060 } 2061 } 2062 2063 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 { 2064 return o<<26 | xo<<1 | oe<<11 2065 } 2066 2067 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 { 2068 return o<<26 | xo<<2 | oe<<11 2069 } 2070 2071 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 { 2072 return o<<26 | xo<<2 | oe<<16 2073 } 2074 2075 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 { 2076 return o<<26 | xo<<3 | oe<<11 2077 } 2078 2079 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 { 2080 return o<<26 | xo<<4 | oe<<11 2081 } 2082 2083 func OPDQ(o uint32, xo uint32, oe uint32) uint32 { 2084 return o<<26 | xo | oe<<4 2085 } 2086 2087 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2088 return o<<26 | xo | oe<<11 | rc&1 2089 } 2090 2091 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2092 return o<<26 | xo | oe<<11 | (rc&1)<<10 2093 } 2094 2095 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2096 return o<<26 | xo<<1 | oe<<10 | rc&1 2097 } 2098 2099 func OPCC(o uint32, xo uint32, rc uint32) uint32 { 2100 return OPVCC(o, xo, 0, rc) 2101 } 2102 2103 /* Generate MD-form opcode */ 2104 func OPMD(o, xo, rc uint32) uint32 { 2105 return o<<26 | xo<<2 | rc&1 2106 } 2107 2108 /* the order is dest, a/s, b/imm for both arithmetic and logical operations. 
*/
2109 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2110 	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2111 }
2112 
2113 /* VX-form 2-register operands, r/none/r */
2114 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2115 	return op | (d&31)<<21 | (a&31)<<11
2116 }
2117 
2118 /* VA-form 4-register operands */
2119 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2120 	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
2121 }
2122 
2123 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2124 	return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2125 }
2126 
2127 /* VX-form 2-register + UIM operands */
2128 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2129 	return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2130 }
2131 
2132 /* VX-form 2-register + ST + SIX operands */
2133 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2134 	return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2135 }
2136 
2137 /* VA-form 3-register + SHB operands */
2138 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2139 	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2140 }
2141 
2142 /* VX-form 1-register + SIM operands */
2143 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2144 	return op | (d&31)<<21 | (simm&31)<<16
2145 }
2146 
2147 /* XX1-form 3-register operands, 1 VSR operand */
2148 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2149 	return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2150 }
2151 
2152 /* XX2-form 3-register operands, 2 VSR operands */
2153 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2154 	return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2155 }
2156 
2157 /* XX3-form 3 VSR operands */
2158 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2159 	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2160 }
2161 
2162 /* XX3-form 3 VSR operands + immediate */
2163 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2164 	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2165 }
2166 
2167 /* XX4-form, 4 VSR operands */
2168 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2169 	return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2170 }
2171 
2172 /* DQ-form, VSR register, register + offset operands */
2173 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2174 	/* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2175 	/* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2176 	/* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2177 	/* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2178 	/* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2179 	/* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507.
*/ 2180 dq := b >> 4 2181 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2 2182 } 2183 2184 /* Z23-form, 3-register operands + CY field */ 2185 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 2186 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9 2187 } 2188 2189 /* X-form, 3-register operands + EH field */ 2190 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 2191 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1) 2192 } 2193 2194 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 { 2195 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11 2196 } 2197 2198 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 { 2199 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF 2200 } 2201 2202 func OP_BR(op uint32, li uint32, aa uint32) uint32 { 2203 return op | li&0x03FFFFFC | aa<<1 2204 } 2205 2206 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 { 2207 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1 2208 } 2209 2210 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 { 2211 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 2212 } 2213 2214 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 { 2215 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1 2216 } 2217 2218 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 { 2219 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5 2220 } 2221 2222 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 { 2223 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 2224 } 2225 2226 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 { 2227 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6 2228 } 2229 2230 const ( 2231 /* each rhs is OPVCC(_, _, _, _) */ 2232 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0 2233 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0 2234 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0 2235 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0 2236 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0 2237 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0 2238 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0 2239 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0 2240 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0 2241 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0 2242 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0 2243 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0 2244 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0 2245 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0 2246 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0 2247 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0 2248 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0 2249 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0 2250 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0 2251 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0 2252 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0 2253 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0 2254 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0 2255 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0 2256 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0 2257 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0 2258 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0 2259 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0 2260 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0 2261 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0 2262 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0 2263 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0 2264 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0 2265 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0 2266 OP_EXTSWSLI = 31<<26 | 445<<2 2267 ) 2268 2269 func oclass(a *obj.Addr) int { 2270 return int(a.Class) - 1 2271 } 2272 2273 const ( 2274 D_FORM = iota 2275 DS_FORM 
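	// D_FORM loads and stores (lwz, stb, lfd, stw, ...) carry a full signed 16-bit
	// byte displacement. DS_FORM instructions (ld, ldu, lwa, std, stdu) keep only the
	// upper 14 bits of that field and reuse the low two bits as an opcode extension,
	// so their byte offset must be a multiple of 4; the v&0x3 checks in the asmout
	// cases below (7, 8, 35, 74, 75) enforce this. For example, ld can encode a
	// displacement of 32 but not 34, while lwz can encode either.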
2276 ) 2277 2278 // This function determines when a non-indexed load or store is D or 2279 // DS form for use in finding the size of the offset field in the instruction. 2280 // The size is needed when setting the offset value in the instruction 2281 // and when generating relocation for that field. 2282 // DS form instructions include: ld, ldu, lwa, std, stdu. All other 2283 // loads and stores with an offset field are D form. This function should 2284 // only be called with the same opcodes as are handled by opstore and opload. 2285 func (c *ctxt9) opform(insn uint32) int { 2286 switch insn { 2287 default: 2288 c.ctxt.Diag("bad insn in loadform: %x", insn) 2289 case OPVCC(58, 0, 0, 0), // ld 2290 OPVCC(58, 0, 0, 1), // ldu 2291 OPVCC(58, 0, 0, 0) | 1<<1, // lwa 2292 OPVCC(62, 0, 0, 0), // std 2293 OPVCC(62, 0, 0, 1): //stdu 2294 return DS_FORM 2295 case OP_ADDI, // add 2296 OPVCC(32, 0, 0, 0), // lwz 2297 OPVCC(33, 0, 0, 0), // lwzu 2298 OPVCC(34, 0, 0, 0), // lbz 2299 OPVCC(35, 0, 0, 0), // lbzu 2300 OPVCC(40, 0, 0, 0), // lhz 2301 OPVCC(41, 0, 0, 0), // lhzu 2302 OPVCC(42, 0, 0, 0), // lha 2303 OPVCC(43, 0, 0, 0), // lhau 2304 OPVCC(46, 0, 0, 0), // lmw 2305 OPVCC(48, 0, 0, 0), // lfs 2306 OPVCC(49, 0, 0, 0), // lfsu 2307 OPVCC(50, 0, 0, 0), // lfd 2308 OPVCC(51, 0, 0, 0), // lfdu 2309 OPVCC(36, 0, 0, 0), // stw 2310 OPVCC(37, 0, 0, 0), // stwu 2311 OPVCC(38, 0, 0, 0), // stb 2312 OPVCC(39, 0, 0, 0), // stbu 2313 OPVCC(44, 0, 0, 0), // sth 2314 OPVCC(45, 0, 0, 0), // sthu 2315 OPVCC(47, 0, 0, 0), // stmw 2316 OPVCC(52, 0, 0, 0), // stfs 2317 OPVCC(53, 0, 0, 0), // stfsu 2318 OPVCC(54, 0, 0, 0), // stfd 2319 OPVCC(55, 0, 0, 0): // stfdu 2320 return D_FORM 2321 } 2322 return 0 2323 } 2324 2325 // Encode instructions and create relocation for accessing s+d according to the 2326 // instruction op with source or destination (as appropriate) register reg. 2327 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) { 2328 if c.ctxt.Headtype == objabi.Haix { 2329 // Every symbol access must be made via a TOC anchor. 2330 c.ctxt.Diag("symbolAccess called for %s", s.Name) 2331 } 2332 var base uint32 2333 form := c.opform(op) 2334 if c.ctxt.Flag_shared { 2335 base = REG_R2 2336 } else { 2337 base = REG_R0 2338 } 2339 // If reg can be reused when computing the symbol address, 2340 // use it instead of REGTMP. 
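	// Either way the pair is an addis that supplies the high half of the symbol
	// address followed by the D/DS-form memory op carrying the low half; both
	// immediates are left as 0 here and the single 8-byte relocation recorded
	// below (R_ADDRPOWER*, rel.Siz = 8) patches both instructions. A rough
	// illustration for a load into a GPR in a non-shared build (base is R0,
	// reuse is true):
	//	addis rt, 0, 0     // high 16 bits of s+d filled in by the linker
	//	ld    rt, 0(rt)    // low 16 bits of s+d filled in by the linker
	// Under -shared the base is R2 and the TOC-relative relocations are used.
	// When reuse is false the address is staged in REGTMP so that reg (for
	// example the source register of a store) is left untouched.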
2341 if !reuse { 2342 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) 2343 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) 2344 } else { 2345 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0) 2346 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0) 2347 } 2348 rel := obj.Addrel(c.cursym) 2349 rel.Off = int32(c.pc) 2350 rel.Siz = 8 2351 rel.Sym = s 2352 rel.Add = d 2353 if c.ctxt.Flag_shared { 2354 switch form { 2355 case D_FORM: 2356 rel.Type = objabi.R_ADDRPOWER_TOCREL 2357 case DS_FORM: 2358 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS 2359 } 2360 2361 } else { 2362 switch form { 2363 case D_FORM: 2364 rel.Type = objabi.R_ADDRPOWER 2365 case DS_FORM: 2366 rel.Type = objabi.R_ADDRPOWER_DS 2367 } 2368 } 2369 return 2370 } 2371 2372 /* 2373 * 32-bit masks 2374 */ 2375 func getmask(m []byte, v uint32) bool { 2376 m[1] = 0 2377 m[0] = m[1] 2378 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */ 2379 if getmask(m, ^v) { 2380 i := int(m[0]) 2381 m[0] = m[1] + 1 2382 m[1] = byte(i - 1) 2383 return true 2384 } 2385 2386 return false 2387 } 2388 2389 for i := 0; i < 32; i++ { 2390 if v&(1<<uint(31-i)) != 0 { 2391 m[0] = byte(i) 2392 for { 2393 m[1] = byte(i) 2394 i++ 2395 if i >= 32 || v&(1<<uint(31-i)) == 0 { 2396 break 2397 } 2398 } 2399 2400 for ; i < 32; i++ { 2401 if v&(1<<uint(31-i)) != 0 { 2402 return false 2403 } 2404 } 2405 return true 2406 } 2407 } 2408 2409 return false 2410 } 2411 2412 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) { 2413 if !getmask(m, v) { 2414 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p) 2415 } 2416 } 2417 2418 /* 2419 * 64-bit masks (rldic etc) 2420 */ 2421 func getmask64(m []byte, v uint64) bool { 2422 m[1] = 0 2423 m[0] = m[1] 2424 for i := 0; i < 64; i++ { 2425 if v&(uint64(1)<<uint(63-i)) != 0 { 2426 m[0] = byte(i) 2427 for { 2428 m[1] = byte(i) 2429 i++ 2430 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 { 2431 break 2432 } 2433 } 2434 2435 for ; i < 64; i++ { 2436 if v&(uint64(1)<<uint(63-i)) != 0 { 2437 return false 2438 } 2439 } 2440 return true 2441 } 2442 } 2443 2444 return false 2445 } 2446 2447 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) { 2448 if !getmask64(m, v) { 2449 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p) 2450 } 2451 } 2452 2453 func loadu32(r int, d int64) uint32 { 2454 v := int32(d >> 16) 2455 if isuint32(uint64(d)) { 2456 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v)) 2457 } 2458 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v)) 2459 } 2460 2461 func high16adjusted(d int32) uint16 { 2462 if d&0x8000 != 0 { 2463 return uint16((d >> 16) + 1) 2464 } 2465 return uint16(d >> 16) 2466 } 2467 2468 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { 2469 o1 := uint32(0) 2470 o2 := uint32(0) 2471 o3 := uint32(0) 2472 o4 := uint32(0) 2473 o5 := uint32(0) 2474 2475 //print("%v => case %d\n", p, o->type); 2476 switch o.type_ { 2477 default: 2478 c.ctxt.Diag("unknown type %d", o.type_) 2479 prasm(p) 2480 2481 case 0: /* pseudo ops */ 2482 break 2483 2484 case 2: /* int/cr/fp op Rb,[Ra],Rd */ 2485 r := int(p.Reg) 2486 2487 if r == 0 { 2488 r = int(p.To.Reg) 2489 } 2490 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2491 2492 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */ 2493 d := c.vregoff(&p.From) 2494 2495 v := int32(d) 2496 r := int(p.From.Reg) 2497 if r == 0 { 2498 r = c.getimpliedreg(&p.From, p) 2499 } 2500 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) { 2501 c.ctxt.Diag("literal operation on R0\n%v", p) 2502 } 
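		// A single instruction is chosen below based on how the constant was
		// classified: the common case is addi, whose 16-bit immediate is
		// sign-extended; C_UCON constants (low 16 bits clear) use addis, or
		// oris when the value must not be sign-extended; C_ANDCON constants
		// whose bit 15 is set are built with ori from r0 (rather than addi,
		// which would sign-extend them).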
2503 a := OP_ADDI 2504 if o.a1 == C_UCON { 2505 if d&0xffff != 0 { 2506 log.Fatalf("invalid handling of %v", p) 2507 } 2508 // For UCON operands the value is right shifted 16, using ADDIS if the 2509 // value should be signed, ORIS if unsigned. 2510 v >>= 16 2511 if r == REGZERO && isuint32(uint64(d)) { 2512 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v)) 2513 break 2514 } 2515 2516 a = OP_ADDIS 2517 } else if int64(int16(d)) != d { 2518 // Operand is 16 bit value with sign bit set 2519 if o.a1 == C_ANDCON { 2520 // Needs unsigned 16 bit so use ORI 2521 if r == 0 || r == REGZERO { 2522 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v)) 2523 break 2524 } 2525 // With ADDCON, needs signed 16 bit value, fall through to use ADDI 2526 } else if o.a1 != C_ADDCON { 2527 log.Fatalf("invalid handling of %v", p) 2528 } 2529 } 2530 2531 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v)) 2532 2533 case 4: /* add/mul $scon,[r1],r2 */ 2534 v := c.regoff(&p.From) 2535 2536 r := int(p.Reg) 2537 if r == 0 { 2538 r = int(p.To.Reg) 2539 } 2540 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 { 2541 c.ctxt.Diag("literal operation on R0\n%v", p) 2542 } 2543 if int32(int16(v)) != v { 2544 log.Fatalf("mishandled instruction %v", p) 2545 } 2546 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2547 2548 case 5: /* syscall */ 2549 o1 = c.oprrr(p.As) 2550 2551 case 6: /* logical op Rb,[Rs,]Ra; no literal */ 2552 r := int(p.Reg) 2553 2554 if r == 0 { 2555 r = int(p.To.Reg) 2556 } 2557 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM. 2558 switch p.As { 2559 case AROTL: 2560 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0)) 2561 case AROTLW: 2562 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31) 2563 default: 2564 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 { 2565 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred 2566 // hardware no-op. This happens because $0 matches C_REG before C_ZCON. 2567 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0) 2568 } else { 2569 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2570 } 2571 } 2572 2573 case 7: /* mov r, soreg ==> stw o(r) */ 2574 r := int(p.To.Reg) 2575 2576 if r == 0 { 2577 r = c.getimpliedreg(&p.To, p) 2578 } 2579 v := c.regoff(&p.To) 2580 if int32(int16(v)) != v { 2581 log.Fatalf("mishandled instruction %v", p) 2582 } 2583 // Offsets in DS form stores must be a multiple of 4 2584 inst := c.opstore(p.As) 2585 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 2586 log.Fatalf("invalid offset for DS form load/store %v", p) 2587 } 2588 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v)) 2589 2590 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */ 2591 r := int(p.From.Reg) 2592 2593 if r == 0 { 2594 r = c.getimpliedreg(&p.From, p) 2595 } 2596 v := c.regoff(&p.From) 2597 if int32(int16(v)) != v { 2598 log.Fatalf("mishandled instruction %v", p) 2599 } 2600 // Offsets in DS form loads must be a multiple of 4 2601 inst := c.opload(p.As) 2602 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 2603 log.Fatalf("invalid offset for DS form load/store %v", p) 2604 } 2605 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v)) 2606 2607 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). 
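		// A rough illustration: MOVB 8(R4), R5 has o.size == 8 and expands to
		//	lbz   r5, 8(r4)
		//	extsb r5, r5
		// while MOVBZ 8(R4), R5 has o.size == 4, so only the lbz in o1 is
		// emitted and the extsb below is dropped.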
2608 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 2609 2610 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */ 2611 r := int(p.Reg) 2612 2613 if r == 0 { 2614 r = int(p.To.Reg) 2615 } 2616 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r)) 2617 2618 case 11: /* br/bl lbra */ 2619 v := int32(0) 2620 2621 if p.To.Target() != nil { 2622 v = int32(p.To.Target().Pc - p.Pc) 2623 if v&03 != 0 { 2624 c.ctxt.Diag("odd branch target address\n%v", p) 2625 v &^= 03 2626 } 2627 2628 if v < -(1<<25) || v >= 1<<24 { 2629 c.ctxt.Diag("branch too far\n%v", p) 2630 } 2631 } 2632 2633 o1 = OP_BR(c.opirr(p.As), uint32(v), 0) 2634 if p.To.Sym != nil { 2635 rel := obj.Addrel(c.cursym) 2636 rel.Off = int32(c.pc) 2637 rel.Siz = 4 2638 rel.Sym = p.To.Sym 2639 v += int32(p.To.Offset) 2640 if v&03 != 0 { 2641 c.ctxt.Diag("odd branch target address\n%v", p) 2642 v &^= 03 2643 } 2644 2645 rel.Add = int64(v) 2646 rel.Type = objabi.R_CALLPOWER 2647 } 2648 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking 2649 2650 case 13: /* mov[bhwd]{z,} r,r */ 2651 // This needs to handle "MOV* $0, Rx". This shows up because $0 also 2652 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON 2653 // TODO: fix the above behavior and cleanup this exception. 2654 if p.From.Type == obj.TYPE_CONST { 2655 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0) 2656 break 2657 } 2658 if p.To.Type == obj.TYPE_CONST { 2659 c.ctxt.Diag("cannot move into constant 0\n%v", p) 2660 } 2661 2662 switch p.As { 2663 case AMOVB: 2664 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2665 case AMOVBZ: 2666 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31) 2667 case AMOVH: 2668 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2669 case AMOVHZ: 2670 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31) 2671 case AMOVW: 2672 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2673 case AMOVWZ: 2674 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */ 2675 case AMOVD: 2676 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg)) 2677 default: 2678 c.ctxt.Diag("internal: bad register move/truncation\n%v", p) 2679 } 2680 2681 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */ 2682 r := int(p.Reg) 2683 2684 if r == 0 { 2685 r = int(p.To.Reg) 2686 } 2687 d := c.vregoff(p.GetFrom3()) 2688 var a int 2689 switch p.As { 2690 2691 // These opcodes expect a mask operand that has to be converted into the 2692 // appropriate operand. The way these were defined, not all valid masks are possible. 2693 // Left here for compatibility in case they were used or generated. 
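		// For rldcl the mask must be a run of ones ending at bit 63 and only its
		// starting bit (MB) is encoded; for rldcr the run must start at bit 0 and
		// only its ending bit (ME) is encoded. maskgen64 recovers the run and the
		// checks below reject masks that do not fit the form. For example, a mask
		// of 0x00FFFFFFFFFFFFFF gives MB=8 for rldcl, and 0xFFFFFFFFFFFFFF00
		// gives ME=55 for rldcr.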
2694 case ARLDCL, ARLDCLCC: 2695 var mask [2]uint8 2696 c.maskgen64(p, mask[:], uint64(d)) 2697 2698 a = int(mask[0]) /* MB */ 2699 if mask[1] != 63 { 2700 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p) 2701 } 2702 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2703 o1 |= (uint32(a) & 31) << 6 2704 if a&0x20 != 0 { 2705 o1 |= 1 << 5 /* mb[5] is top bit */ 2706 } 2707 2708 case ARLDCR, ARLDCRCC: 2709 var mask [2]uint8 2710 c.maskgen64(p, mask[:], uint64(d)) 2711 2712 a = int(mask[1]) /* ME */ 2713 if mask[0] != 0 { 2714 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p) 2715 } 2716 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2717 o1 |= (uint32(a) & 31) << 6 2718 if a&0x20 != 0 { 2719 o1 |= 1 << 5 /* mb[5] is top bit */ 2720 } 2721 2722 // These opcodes use a shift count like the ppc64 asm, no mask conversion done 2723 case ARLDICR, ARLDICRCC: 2724 me := int(d) 2725 sh := c.regoff(&p.From) 2726 if me < 0 || me > 63 || sh > 63 { 2727 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p) 2728 } 2729 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me)) 2730 2731 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC: 2732 mb := int(d) 2733 sh := c.regoff(&p.From) 2734 if mb < 0 || mb > 63 || sh > 63 { 2735 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p) 2736 } 2737 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb)) 2738 2739 case ACLRLSLDI: 2740 // This is an extended mnemonic defined in the ISA section C.8.1 2741 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n 2742 // It maps onto RLDIC so is directly generated here based on the operands from 2743 // the clrlsldi. 
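			// For example, with b=16 and n=4 the emitted instruction is
			// rldic ra,rs,4,12: the 16 high-order bits of rs are cleared and
			// the remainder is shifted left by 4. The check below requires
			// n <= b <= 63.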
2744 n := int32(d) 2745 b := c.regoff(&p.From) 2746 if n > b || b > 63 { 2747 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p) 2748 } 2749 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n)) 2750 2751 default: 2752 c.ctxt.Diag("unexpected op in rldc case\n%v", p) 2753 a = 0 2754 } 2755 2756 case 17, /* bc bo,bi,lbra (same for now) */ 2757 16: /* bc bo,bi,sbra */ 2758 a := 0 2759 2760 r := int(p.Reg) 2761 2762 if p.From.Type == obj.TYPE_CONST { 2763 a = int(c.regoff(&p.From)) 2764 } else if p.From.Type == obj.TYPE_REG { 2765 if r != 0 { 2766 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r) 2767 } 2768 // BI values for the CR 2769 switch p.From.Reg { 2770 case REG_CR0: 2771 r = BI_CR0 2772 case REG_CR1: 2773 r = BI_CR1 2774 case REG_CR2: 2775 r = BI_CR2 2776 case REG_CR3: 2777 r = BI_CR3 2778 case REG_CR4: 2779 r = BI_CR4 2780 case REG_CR5: 2781 r = BI_CR5 2782 case REG_CR6: 2783 r = BI_CR6 2784 case REG_CR7: 2785 r = BI_CR7 2786 default: 2787 c.ctxt.Diag("unrecognized register: expecting CR\n") 2788 } 2789 } 2790 v := int32(0) 2791 if p.To.Target() != nil { 2792 v = int32(p.To.Target().Pc - p.Pc) 2793 } 2794 if v&03 != 0 { 2795 c.ctxt.Diag("odd branch target address\n%v", p) 2796 v &^= 03 2797 } 2798 2799 if v < -(1<<16) || v >= 1<<15 { 2800 c.ctxt.Diag("branch too far\n%v", p) 2801 } 2802 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0) 2803 2804 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */ 2805 var v int32 2806 var bh uint32 = 0 2807 if p.As == ABC || p.As == ABCL { 2808 v = c.regoff(&p.From) & 31 2809 } else { 2810 v = 20 /* unconditional */ 2811 } 2812 r := int(p.Reg) 2813 if r == 0 { 2814 r = 0 2815 } 2816 switch oclass(&p.To) { 2817 case C_CTR: 2818 o1 = OPVCC(19, 528, 0, 0) 2819 2820 case C_LR: 2821 o1 = OPVCC(19, 16, 0, 0) 2822 2823 default: 2824 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p) 2825 v = 0 2826 } 2827 2828 // Insert optional branch hint for bclr[l]/bcctr[l] 2829 if p.From3Type() != obj.TYPE_NONE { 2830 bh = uint32(p.GetFrom3().Offset) 2831 if bh == 2 || bh > 3 { 2832 log.Fatalf("BH must be 0,1,3 for %v", p) 2833 } 2834 o1 |= bh << 11 2835 } 2836 2837 if p.As == ABL || p.As == ABCL { 2838 o1 |= 1 2839 } 2840 o1 = OP_BCR(o1, uint32(v), uint32(r)) 2841 2842 case 19: /* mov $lcon,r ==> cau+or */ 2843 d := c.vregoff(&p.From) 2844 o1 = loadu32(int(p.To.Reg), d) 2845 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d))) 2846 2847 case 20: /* add $ucon,,r | addis $addcon,r,r */ 2848 v := c.regoff(&p.From) 2849 2850 r := int(p.Reg) 2851 if r == 0 { 2852 r = int(p.To.Reg) 2853 } 2854 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) { 2855 c.ctxt.Diag("literal operation on R0\n%v", p) 2856 } 2857 if p.As == AADDIS { 2858 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2859 } else { 2860 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) 2861 } 2862 2863 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */ 2864 if p.To.Reg == REGTMP || p.Reg == REGTMP { 2865 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2866 } 2867 d := c.vregoff(&p.From) 2868 r := int(p.Reg) 2869 if r == 0 { 2870 r = int(p.To.Reg) 2871 } 2872 if p.From.Sym != nil { 2873 c.ctxt.Diag("%v is not supported", p) 2874 } 2875 // If operand is ANDCON, generate 2 instructions using 2876 // ORI for unsigned value; with LCON 3 instructions. 
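		// Illustrative expansions, with the constant staged in REGTMP and the
		// operation itself (oprrr(p.As), add here for concreteness) emitted last:
		//	ANDCON, o.size == 8:	ori REGTMP, r0, uimm
		//				add rt, REGTMP, rx
		//	LCON, o.size == 12:	addis/oris REGTMP, r0, high 16 bits
		//				ori REGTMP, REGTMP, low 16 bits
		//				add rt, REGTMP, rx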
2877 if o.size == 8 { 2878 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) 2879 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2880 } else { 2881 o1 = loadu32(REGTMP, d) 2882 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d))) 2883 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2884 } 2885 2886 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */ 2887 if p.To.Reg == REGTMP || p.Reg == REGTMP { 2888 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2889 } 2890 d := c.vregoff(&p.From) 2891 r := int(p.Reg) 2892 if r == 0 { 2893 r = int(p.To.Reg) 2894 } 2895 2896 // With ADDCON operand, generate 2 instructions using ADDI for signed value, 2897 // with LCON operand generate 3 instructions. 2898 if o.size == 8 { 2899 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d))) 2900 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2901 } else { 2902 o1 = loadu32(REGTMP, d) 2903 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d))) 2904 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2905 } 2906 if p.From.Sym != nil { 2907 c.ctxt.Diag("%v is not supported", p) 2908 } 2909 2910 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */ 2911 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0)) 2912 // This is needed for -0. 2913 if o.size == 8 { 2914 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg)) 2915 } 2916 2917 case 25: 2918 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */ 2919 v := c.regoff(&p.From) 2920 2921 if v < 0 { 2922 v = 0 2923 } else if v > 63 { 2924 v = 63 2925 } 2926 r := int(p.Reg) 2927 if r == 0 { 2928 r = int(p.To.Reg) 2929 } 2930 var a int 2931 op := uint32(0) 2932 switch p.As { 2933 case ASLD, ASLDCC: 2934 a = int(63 - v) 2935 op = OP_RLDICR 2936 2937 case ASRD, ASRDCC: 2938 a = int(v) 2939 v = 64 - v 2940 op = OP_RLDICL 2941 case AROTL: 2942 a = int(0) 2943 op = OP_RLDICL 2944 case AEXTSWSLI, AEXTSWSLICC: 2945 a = int(v) 2946 default: 2947 c.ctxt.Diag("unexpected op in sldi case\n%v", p) 2948 a = 0 2949 o1 = 0 2950 } 2951 2952 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC { 2953 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v)) 2954 2955 } else { 2956 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a)) 2957 } 2958 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC { 2959 o1 |= 1 // Set the condition code bit 2960 } 2961 2962 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */ 2963 v := c.vregoff(&p.From) 2964 r := int(p.From.Reg) 2965 2966 switch p.From.Name { 2967 case obj.NAME_EXTERN, obj.NAME_STATIC: 2968 // Load a 32 bit constant, or relocation depending on if a symbol is attached 2969 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true) 2970 default: 2971 if r == 0 { 2972 r = c.getimpliedreg(&p.From, p) 2973 } 2974 // Add a 32 bit offset to a register. 
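			// high16adjusted compensates for the sign extension performed by the
			// addi that follows: when bit 15 of v is set, the addis immediate is
			// bumped by one. For example, v = 0x18000 yields an addis immediate
			// of 2 and an addi immediate of -0x8000, which add back up to 0x18000.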
2975 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v)))) 2976 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) 2977 } 2978 2979 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */ 2980 v := c.regoff(p.GetFrom3()) 2981 2982 r := int(p.From.Reg) 2983 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2984 2985 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */ 2986 if p.To.Reg == REGTMP || p.From.Reg == REGTMP { 2987 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2988 } 2989 v := c.regoff(p.GetFrom3()) 2990 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16) 2991 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v)) 2992 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP) 2993 if p.From.Sym != nil { 2994 c.ctxt.Diag("%v is not supported", p) 2995 } 2996 2997 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */ 2998 v := c.regoff(&p.From) 2999 3000 d := c.vregoff(p.GetFrom3()) 3001 var mask [2]uint8 3002 c.maskgen64(p, mask[:], uint64(d)) 3003 var a int 3004 switch p.As { 3005 case ARLDC, ARLDCCC: 3006 a = int(mask[0]) /* MB */ 3007 if int32(mask[1]) != (63 - v) { 3008 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p) 3009 } 3010 3011 case ARLDCL, ARLDCLCC: 3012 a = int(mask[0]) /* MB */ 3013 if mask[1] != 63 { 3014 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p) 3015 } 3016 3017 case ARLDCR, ARLDCRCC: 3018 a = int(mask[1]) /* ME */ 3019 if mask[0] != 0 { 3020 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p) 3021 } 3022 3023 default: 3024 c.ctxt.Diag("unexpected op in rldic case\n%v", p) 3025 a = 0 3026 } 3027 3028 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F)) 3029 o1 |= (uint32(a) & 31) << 6 3030 if v&0x20 != 0 { 3031 o1 |= 1 << 1 3032 } 3033 if a&0x20 != 0 { 3034 o1 |= 1 << 5 /* mb[5] is top bit */ 3035 } 3036 3037 case 30: /* rldimi $sh,s,$mask,a */ 3038 v := c.regoff(&p.From) 3039 3040 d := c.vregoff(p.GetFrom3()) 3041 3042 // Original opcodes had mask operands which had to be converted to a shift count as expected by 3043 // the ppc64 asm. 3044 switch p.As { 3045 case ARLDMI, ARLDMICC: 3046 var mask [2]uint8 3047 c.maskgen64(p, mask[:], uint64(d)) 3048 if int32(mask[1]) != (63 - v) { 3049 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p) 3050 } 3051 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F)) 3052 o1 |= (uint32(mask[0]) & 31) << 6 3053 if v&0x20 != 0 { 3054 o1 |= 1 << 1 3055 } 3056 if mask[0]&0x20 != 0 { 3057 o1 |= 1 << 5 /* mb[5] is top bit */ 3058 } 3059 3060 // Opcodes with shift count operands. 
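		// rldimi takes the mask-begin (MB) value directly as its third operand,
		// so no maskgen64 conversion is needed. Both sh and mb are 6-bit values
		// split into a low 5-bit field plus a high bit: mb's bit 5 lands at
		// instruction bit 5 and sh's bit 5 at instruction bit 1, matching the
		// MD instruction form.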
3061 case ARLDIMI, ARLDIMICC: 3062 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F)) 3063 o1 |= (uint32(d) & 31) << 6 3064 if d&0x20 != 0 { 3065 o1 |= 1 << 5 3066 } 3067 if v&0x20 != 0 { 3068 o1 |= 1 << 1 3069 } 3070 } 3071 3072 case 31: /* dword */ 3073 d := c.vregoff(&p.From) 3074 3075 if c.ctxt.Arch.ByteOrder == binary.BigEndian { 3076 o1 = uint32(d >> 32) 3077 o2 = uint32(d) 3078 } else { 3079 o1 = uint32(d) 3080 o2 = uint32(d >> 32) 3081 } 3082 3083 if p.From.Sym != nil { 3084 rel := obj.Addrel(c.cursym) 3085 rel.Off = int32(c.pc) 3086 rel.Siz = 8 3087 rel.Sym = p.From.Sym 3088 rel.Add = p.From.Offset 3089 rel.Type = objabi.R_ADDR 3090 o2 = 0 3091 o1 = o2 3092 } 3093 3094 case 32: /* fmul frc,fra,frd */ 3095 r := int(p.Reg) 3096 3097 if r == 0 { 3098 r = int(p.To.Reg) 3099 } 3100 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6 3101 3102 case 33: /* fabs [frb,]frd; fmr. frb,frd */ 3103 r := int(p.From.Reg) 3104 3105 if oclass(&p.From) == C_NONE { 3106 r = int(p.To.Reg) 3107 } 3108 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r)) 3109 3110 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */ 3111 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6 3112 3113 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */ 3114 v := c.regoff(&p.To) 3115 3116 r := int(p.To.Reg) 3117 if r == 0 { 3118 r = c.getimpliedreg(&p.To, p) 3119 } 3120 // Offsets in DS form stores must be a multiple of 4 3121 inst := c.opstore(p.As) 3122 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3123 log.Fatalf("invalid offset for DS form load/store %v", p) 3124 } 3125 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) 3126 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v)) 3127 3128 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */ 3129 v := c.regoff(&p.From) 3130 3131 r := int(p.From.Reg) 3132 if r == 0 { 3133 r = c.getimpliedreg(&p.From, p) 3134 } 3135 if o.a6 == C_REG { 3136 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v))) 3137 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) 3138 } else { 3139 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v))) 3140 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v)) 3141 } 3142 3143 // Sign extend MOVB if needed 3144 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3145 3146 case 40: /* word */ 3147 o1 = uint32(c.regoff(&p.From)) 3148 3149 case 41: /* stswi */ 3150 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 { 3151 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As) 3152 } 3153 3154 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 3155 3156 case 42: /* lswi */ 3157 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 { 3158 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As) 3159 } 3160 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 3161 3162 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */ 3163 /* TH field for dcbt/dcbtst: */ 3164 /* 0 = Block access - program will soon access EA. */ 3165 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. 
*/ 3166 /* 16 = Block access - program will soon make a transient access to EA. */ 3167 /* 17 = Block access - program will not access EA for a long time. */ 3168 3169 /* L field for dcbf: */ 3170 /* 0 = invalidates the block containing EA in all processors. */ 3171 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */ 3172 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */ 3173 if p.To.Type == obj.TYPE_NONE { 3174 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg)) 3175 } else { 3176 th := c.regoff(&p.To) 3177 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg)) 3178 } 3179 3180 case 44: /* indexed store */ 3181 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) 3182 3183 case 45: /* indexed load */ 3184 switch p.As { 3185 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */ 3186 /* The EH field can be used as a lock acquire/release hint as follows: */ 3187 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */ 3188 /* 1 = Exclusive Access (lock acquire and release) */ 3189 case ALBAR, ALHAR, ALWAR, ALDAR: 3190 if p.From3Type() != obj.TYPE_NONE { 3191 eh := int(c.regoff(p.GetFrom3())) 3192 if eh > 1 { 3193 c.ctxt.Diag("illegal EH field\n%v", p) 3194 } 3195 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh)) 3196 } else { 3197 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3198 } 3199 default: 3200 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3201 } 3202 case 46: /* plain op */ 3203 o1 = c.oprrr(p.As) 3204 3205 case 47: /* op Ra, Rd; also op [Ra,] Rd */ 3206 r := int(p.From.Reg) 3207 3208 if r == 0 { 3209 r = int(p.To.Reg) 3210 } 3211 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) 3212 3213 case 48: /* op Rs, Ra */ 3214 r := int(p.From.Reg) 3215 3216 if r == 0 { 3217 r = int(p.To.Reg) 3218 } 3219 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) 3220 3221 case 49: /* op Rb; op $n, Rb */ 3222 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */ 3223 v := c.regoff(&p.From) & 1 3224 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21 3225 } else { 3226 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg)) 3227 } 3228 3229 case 50: /* rem[u] r1[,r2],r3 */ 3230 r := int(p.Reg) 3231 3232 if r == 0 { 3233 r = int(p.To.Reg) 3234 } 3235 v := c.oprrr(p.As) 3236 t := v & (1<<10 | 1) /* OE|Rc */ 3237 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) 3238 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg)) 3239 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) 3240 if p.As == AREMU { 3241 o4 = o3 3242 3243 /* Clear top 32 bits */ 3244 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5 3245 } 3246 3247 case 51: /* remd[u] r1[,r2],r3 */ 3248 r := int(p.Reg) 3249 3250 if r == 0 { 3251 r = int(p.To.Reg) 3252 } 3253 v := c.oprrr(p.As) 3254 t := v & (1<<10 | 1) /* OE|Rc */ 3255 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) 3256 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg)) 3257 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) 3258 /* cases 50,51: removed; can be reused. */ 3259 3260 /* cases 50,51: removed; can be reused. 
*/ 3261 3262 case 52: /* mtfsbNx cr(n) */ 3263 v := c.regoff(&p.From) & 31 3264 3265 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0) 3266 3267 case 53: /* mffsX ,fr1 */ 3268 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0) 3269 3270 case 55: /* op Rb, Rd */ 3271 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg)) 3272 3273 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */ 3274 v := c.regoff(&p.From) 3275 3276 r := int(p.Reg) 3277 if r == 0 { 3278 r = int(p.To.Reg) 3279 } 3280 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31) 3281 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) { 3282 o1 |= 1 << 1 /* mb[5] */ 3283 } 3284 3285 case 57: /* slw $sh,[s,]a -> rlwinm ... */ 3286 v := c.regoff(&p.From) 3287 3288 r := int(p.Reg) 3289 if r == 0 { 3290 r = int(p.To.Reg) 3291 } 3292 3293 /* 3294 * Let user (gs) shoot himself in the foot. 3295 * qc has already complained. 3296 * 3297 if(v < 0 || v > 31) 3298 ctxt->diag("illegal shift %ld\n%v", v, p); 3299 */ 3300 if v < 0 { 3301 v = 0 3302 } else if v > 32 { 3303 v = 32 3304 } 3305 var mask [2]uint8 3306 switch p.As { 3307 case AROTLW: 3308 mask[0], mask[1] = 0, 31 3309 case ASRW, ASRWCC: 3310 mask[0], mask[1] = uint8(v), 31 3311 v = 32 - v 3312 default: 3313 mask[0], mask[1] = 0, uint8(31-v) 3314 } 3315 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1])) 3316 if p.As == ASLWCC || p.As == ASRWCC { 3317 o1 |= 1 // set the condition code 3318 } 3319 3320 case 58: /* logical $andcon,[s],a */ 3321 v := c.regoff(&p.From) 3322 3323 r := int(p.Reg) 3324 if r == 0 { 3325 r = int(p.To.Reg) 3326 } 3327 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 3328 3329 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */ 3330 v := c.regoff(&p.From) 3331 3332 r := int(p.Reg) 3333 if r == 0 { 3334 r = int(p.To.Reg) 3335 } 3336 switch p.As { 3337 case AOR: 3338 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */ 3339 case AXOR: 3340 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) 3341 case AANDCC: 3342 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16) 3343 default: 3344 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 3345 } 3346 3347 case 60: /* tw to,a,b */ 3348 r := int(c.regoff(&p.From) & 31) 3349 3350 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg)) 3351 3352 case 61: /* tw to,a,$simm */ 3353 r := int(c.regoff(&p.From) & 31) 3354 3355 v := c.regoff(&p.To) 3356 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v)) 3357 3358 case 62: /* rlwmi $sh,s,$mask,a */ 3359 v := c.regoff(&p.From) 3360 switch p.As { 3361 case ACLRLSLWI: 3362 n := c.regoff(p.GetFrom3()) 3363 // This is an extended mnemonic described in the ISA C.8.2 3364 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n 3365 // It maps onto rlwinm which is directly generated here. 
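			// For example, with b=16 and n=8 the emitted instruction is
			// rlwinm ra,rs,8,8,23: the 16 high-order bits of the low word are
			// cleared and the remainder is shifted left by 8. The check below
			// requires n <= b < 32.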
3366 if n > v || v >= 32 { 3367 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p) 3368 } 3369 3370 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n)) 3371 default: 3372 var mask [2]uint8 3373 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) 3374 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v)) 3375 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 3376 } 3377 3378 case 63: /* rlwmi b,s,$mask,a */ 3379 var mask [2]uint8 3380 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) 3381 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg)) 3382 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 3383 3384 case 64: /* mtfsf fr[, $m] {,fpcsr} */ 3385 var v int32 3386 if p.From3Type() != obj.TYPE_NONE { 3387 v = c.regoff(p.GetFrom3()) & 255 3388 } else { 3389 v = 255 3390 } 3391 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11 3392 3393 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */ 3394 if p.To.Reg == 0 { 3395 c.ctxt.Diag("must specify FPSCR(n)\n%v", p) 3396 } 3397 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12 3398 3399 case 66: /* mov spr,r1; mov r1,spr */ 3400 var r int 3401 var v int32 3402 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 { 3403 r = int(p.From.Reg) 3404 v = int32(p.To.Reg) 3405 o1 = OPVCC(31, 467, 0, 0) /* mtspr */ 3406 } else { 3407 r = int(p.To.Reg) 3408 v = int32(p.From.Reg) 3409 o1 = OPVCC(31, 339, 0, 0) /* mfspr */ 3410 } 3411 3412 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3413 3414 case 67: /* mcrf crfD,crfS */ 3415 if p.From.Reg == REG_CR || p.To.Reg == REG_CR { 3416 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p) 3417 } 3418 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0) 3419 3420 case 68: /* mfcr rD; mfocrf CRM,rD */ 3421 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */ 3422 if p.From.Reg != REG_CR { 3423 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */ 3424 o1 |= 1<<20 | v<<12 /* new form, mfocrf */ 3425 } 3426 3427 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */ 3428 var v uint32 3429 if p.To.Reg == REG_CR { 3430 v = 0xff 3431 } else if p.To.Offset != 0 { // MOVFL gpr, constant 3432 v = uint32(p.To.Offset) 3433 } else { // p.To.Reg == REG_CRx 3434 v = 1 << uint(7-(p.To.Reg&7)) 3435 } 3436 // Use mtocrf form if only one CR field moved. 
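		// v is the 8-bit FXM mask, which lands in bits 12-19 of the instruction
		// via the <<12 below. Setting bit 8 of v therefore sets instruction bit 20,
		// which turns the mtcrf encoding into mtocrf, mirroring the 1<<20 used for
		// the mfocrf form in the mfcr case above.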
3437 if bits.OnesCount32(v) == 1 { 3438 v |= 1 << 8 3439 } 3440 3441 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12 3442 3443 case 70: /* [f]cmp r,r,cr*/ 3444 var r int 3445 if p.Reg == 0 { 3446 r = 0 3447 } else { 3448 r = (int(p.Reg) & 7) << 2 3449 } 3450 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg)) 3451 3452 case 71: /* cmp[l] r,i,cr*/ 3453 var r int 3454 if p.Reg == 0 { 3455 r = 0 3456 } else { 3457 r = (int(p.Reg) & 7) << 2 3458 } 3459 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff 3460 3461 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */ 3462 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg)) 3463 3464 case 73: /* mcrfs crfD,crfS */ 3465 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg { 3466 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p) 3467 } 3468 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0) 3469 3470 case 77: /* syscall $scon, syscall Rx */ 3471 if p.From.Type == obj.TYPE_CONST { 3472 if p.From.Offset > BIG || p.From.Offset < -BIG { 3473 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p) 3474 } 3475 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset)) 3476 } else if p.From.Type == obj.TYPE_REG { 3477 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg)) 3478 } else { 3479 c.ctxt.Diag("illegal syscall: %v", p) 3480 o1 = 0x7fe00008 // trap always 3481 } 3482 3483 o2 = c.oprrr(p.As) 3484 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0 3485 3486 case 78: /* undef */ 3487 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed 3488 always to be an illegal instruction." */ 3489 3490 /* relocation operations */ 3491 case 74: 3492 v := c.vregoff(&p.To) 3493 // Offsets in DS form stores must be a multiple of 4 3494 inst := c.opstore(p.As) 3495 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3496 log.Fatalf("invalid offset for DS form load/store %v", p) 3497 } 3498 // Can't reuse base for store instructions. 3499 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false) 3500 3501 case 75: // 32 bit offset symbol loads (got/toc/addr) 3502 v := p.From.Offset 3503 3504 // Offsets in DS form loads must be a multiple of 4 3505 inst := c.opload(p.As) 3506 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3507 log.Fatalf("invalid offset for DS form load/store %v", p) 3508 } 3509 switch p.From.Name { 3510 case obj.NAME_GOTREF, obj.NAME_TOCREF: 3511 if v != 0 { 3512 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p) 3513 } 3514 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) 3515 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3516 rel := obj.Addrel(c.cursym) 3517 rel.Off = int32(c.pc) 3518 rel.Siz = 8 3519 rel.Sym = p.From.Sym 3520 switch p.From.Name { 3521 case obj.NAME_GOTREF: 3522 rel.Type = objabi.R_ADDRPOWER_GOT 3523 case obj.NAME_TOCREF: 3524 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS 3525 } 3526 default: 3527 reuseBaseReg := o.a6 == C_REG 3528 // Reuse To.Reg as base register if it is a GPR. 
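			// Reuse is safe for a load into a GPR because the addis result in
			// To.Reg is immediately overwritten by the load itself; when the
			// destination is a floating point or vector register the address
			// must be staged in REGTMP instead. Stores (case 74 above) never
			// reuse, since the register being stored must be preserved.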
3529 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg) 3530 } 3531 3532 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3533 3534 case 79: 3535 if p.From.Offset != 0 { 3536 c.ctxt.Diag("invalid offset against tls var %v", p) 3537 } 3538 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0) 3539 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3540 rel := obj.Addrel(c.cursym) 3541 rel.Off = int32(c.pc) 3542 rel.Siz = 8 3543 rel.Sym = p.From.Sym 3544 rel.Type = objabi.R_POWER_TLS_LE 3545 3546 case 80: 3547 if p.From.Offset != 0 { 3548 c.ctxt.Diag("invalid offset against tls var %v", p) 3549 } 3550 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) 3551 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) 3552 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13) 3553 rel := obj.Addrel(c.cursym) 3554 rel.Off = int32(c.pc) 3555 rel.Siz = 8 3556 rel.Sym = p.From.Sym 3557 rel.Type = objabi.R_POWER_TLS_IE 3558 rel = obj.Addrel(c.cursym) 3559 rel.Off = int32(c.pc) + 8 3560 rel.Siz = 4 3561 rel.Sym = p.From.Sym 3562 rel.Type = objabi.R_POWER_TLS 3563 3564 case 82: /* vector instructions, VX-form and VC-form */ 3565 if p.From.Type == obj.TYPE_REG { 3566 /* reg reg none OR reg reg reg */ 3567 /* 3-register operand order: VRA, VRB, VRT */ 3568 /* 2-register operand order: VRA, VRT */ 3569 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3570 } else if p.From3Type() == obj.TYPE_CONST { 3571 /* imm imm reg reg */ 3572 /* operand order: SIX, VRA, ST, VRT */ 3573 six := int(c.regoff(&p.From)) 3574 st := int(c.regoff(p.GetFrom3())) 3575 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six)) 3576 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 { 3577 /* imm reg reg */ 3578 /* operand order: UIM, VRB, VRT */ 3579 uim := int(c.regoff(&p.From)) 3580 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim)) 3581 } else { 3582 /* imm reg */ 3583 /* operand order: SIM, VRT */ 3584 sim := int(c.regoff(&p.From)) 3585 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim)) 3586 } 3587 3588 case 83: /* vector instructions, VA-form */ 3589 if p.From.Type == obj.TYPE_REG { 3590 /* reg reg reg reg */ 3591 /* 4-register operand order: VRA, VRB, VRC, VRT */ 3592 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) 3593 } else if p.From.Type == obj.TYPE_CONST { 3594 /* imm reg reg reg */ 3595 /* operand order: SHB, VRA, VRB, VRT */ 3596 shb := int(c.regoff(&p.From)) 3597 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb)) 3598 } 3599 3600 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc 3601 bc := c.vregoff(&p.From) 3602 if o.a1 == C_CRBIT { 3603 // CR bit is encoded as a register, not a constant. 
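// Illustrative note (assumption, not in the original source): an operand like CR0EQ is
// classified as C_CRBIT and carries the condition bit in From.Reg, so the BC field is
// recovered from the register number below, whereas a literal BC operand such as
// ISEL $2, R3, R4, R5 is read through vregoff above.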
3604 bc = int64(p.From.Reg) 3605 } 3606 3607 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg 3608 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc)) 3609 3610 case 85: /* vector instructions, VX-form */ 3611 /* reg none reg */ 3612 /* 2-register operand order: VRB, VRT */ 3613 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg)) 3614 3615 case 86: /* VSX indexed store, XX1-form */ 3616 /* reg reg reg */ 3617 /* 3-register operand order: XT, (RB)(RA*1) */ 3618 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) 3619 3620 case 87: /* VSX indexed load, XX1-form */ 3621 /* reg reg reg */ 3622 /* 3-register operand order: (RB)(RA*1), XT */ 3623 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3624 3625 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */ 3626 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3627 3628 case 89: /* VSX instructions, XX2-form */ 3629 /* reg none reg OR reg imm reg */ 3630 /* 2-register operand order: XB, XT or XB, UIM, XT*/ 3631 uim := int(c.regoff(p.GetFrom3())) 3632 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg)) 3633 3634 case 90: /* VSX instructions, XX3-form */ 3635 if p.From3Type() == obj.TYPE_NONE { 3636 /* reg reg reg */ 3637 /* 3-register operand order: XA, XB, XT */ 3638 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3639 } else if p.From3Type() == obj.TYPE_CONST { 3640 /* reg reg reg imm */ 3641 /* operand order: XA, XB, DM, XT */ 3642 dm := int(c.regoff(p.GetFrom3())) 3643 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm)) 3644 } 3645 3646 case 91: /* VSX instructions, XX4-form */ 3647 /* reg reg reg reg */ 3648 /* 3-register operand order: XA, XB, XC, XT */ 3649 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) 3650 3651 case 92: /* X-form instructions, 3-operands */ 3652 if p.To.Type == obj.TYPE_CONST { 3653 /* imm reg reg */ 3654 xf := int32(p.From.Reg) 3655 if REG_F0 <= xf && xf <= REG_F31 { 3656 /* operand order: FRA, FRB, BF */ 3657 bf := int(c.regoff(&p.To)) << 2 3658 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) 3659 } else { 3660 /* operand order: RA, RB, L */ 3661 l := int(c.regoff(&p.To)) 3662 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg)) 3663 } 3664 } else if p.From3Type() == obj.TYPE_CONST { 3665 /* reg reg imm */ 3666 /* operand order: RB, L, RA */ 3667 l := int(c.regoff(p.GetFrom3())) 3668 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg)) 3669 } else if p.To.Type == obj.TYPE_REG { 3670 cr := int32(p.To.Reg) 3671 if REG_CR0 <= cr && cr <= REG_CR7 { 3672 /* cr reg reg */ 3673 /* operand order: RA, RB, BF */ 3674 bf := (int(p.To.Reg) & 7) << 2 3675 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) 3676 } else if p.From.Type == obj.TYPE_CONST { 3677 /* reg imm */ 3678 /* operand order: L, RT */ 3679 l := int(c.regoff(&p.From)) 3680 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg)) 3681 } else { 3682 switch p.As { 3683 case ACOPY, APASTECC: 3684 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg)) 3685 default: 3686 /* reg reg reg */ 3687 /* operand order: RS, RB, RA */ 3688 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3689 } 3690 } 
3691 } 3692 3693 case 93: /* X-form instructions, 2-operands */ 3694 if p.To.Type == obj.TYPE_CONST { 3695 /* imm reg */ 3696 /* operand order: FRB, BF */ 3697 bf := int(c.regoff(&p.To)) << 2 3698 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg)) 3699 } else if p.Reg == 0 { 3700 /* popcnt* r,r, X-form */ 3701 /* operand order: RS, RA */ 3702 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3703 } 3704 3705 case 94: /* Z23-form instructions, 4-operands */ 3706 /* reg reg reg imm */ 3707 /* operand order: RA, RB, CY, RT */ 3708 cy := int(c.regoff(p.GetFrom3())) 3709 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy)) 3710 3711 case 96: /* VSX load, DQ-form */ 3712 /* reg imm reg */ 3713 /* operand order: (RA)(DQ), XT */ 3714 dq := int16(c.regoff(&p.From)) 3715 if (dq & 15) != 0 { 3716 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq) 3717 } 3718 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq)) 3719 3720 case 97: /* VSX store, DQ-form */ 3721 /* reg imm reg */ 3722 /* operand order: XT, (RA)(DQ) */ 3723 dq := int16(c.regoff(&p.To)) 3724 if (dq & 15) != 0 { 3725 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq) 3726 } 3727 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq)) 3728 case 98: /* VSX indexed load or load with length (also left-justified), x-form */ 3729 /* vsreg, reg, reg */ 3730 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3731 case 99: /* VSX store with length (also left-justified) x-form */ 3732 /* reg, reg, vsreg */ 3733 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg)) 3734 case 100: /* VSX X-form XXSPLTIB */ 3735 if p.From.Type == obj.TYPE_CONST { 3736 /* imm reg */ 3737 uim := int(c.regoff(&p.From)) 3738 /* imm reg */ 3739 /* Use AOP_XX1 form with 0 for one of the registers. 
*/ 3740 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim)) 3741 } else { 3742 c.ctxt.Diag("invalid ops for %v", p.As) 3743 } 3744 case 101: 3745 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) 3746 3747 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/ 3748 mb := uint32(c.regoff(&p.RestArgs[0].Addr)) 3749 me := uint32(c.regoff(&p.RestArgs[1].Addr)) 3750 sh := uint32(c.regoff(&p.From)) 3751 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me) 3752 3753 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/ 3754 mb := uint32(c.regoff(&p.RestArgs[0].Addr)) 3755 me := uint32(c.regoff(&p.RestArgs[1].Addr)) 3756 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me) 3757 3758 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */ 3759 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3760 3761 case 106: /* MOVD spr, soreg */ 3762 v := int32(p.From.Reg) 3763 o1 = OPVCC(31, 339, 0, 0) /* mfspr */ 3764 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3765 so := c.regoff(&p.To) 3766 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so)) 3767 if so&0x3 != 0 { 3768 log.Fatalf("invalid offset for DS form load/store %v", p) 3769 } 3770 if p.To.Reg == REGTMP { 3771 log.Fatalf("SPR move to memory will clobber R31 %v", p) 3772 } 3773 3774 case 107: /* MOVD soreg, spr */ 3775 v := int32(p.From.Reg) 3776 so := c.regoff(&p.From) 3777 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so)) 3778 o2 = OPVCC(31, 467, 0, 0) /* mtspr */ 3779 v = int32(p.To.Reg) 3780 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3781 if so&0x3 != 0 { 3782 log.Fatalf("invalid offset for DS form load/store %v", p) 3783 } 3784 3785 case 108: /* mov r, xoreg ==> stwx rx,ry */ 3786 r := int(p.To.Reg) 3787 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r)) 3788 3789 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */ 3790 r := int(p.From.Reg) 3791 3792 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r)) 3793 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). 
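// Illustrative note (assumption, not in the original source): for a sign-extending byte
// load such as MOVB (R4)(R5*1), R6 the optab entry is 8 bytes, so both the lbzx in o1
// and the extsb in o2 below are emitted; zero-extending and wider loads use a 4-byte
// entry and only o1 is written out.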
3794 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3795 } 3796 3797 out[0] = o1 3798 out[1] = o2 3799 out[2] = o3 3800 out[3] = o4 3801 out[4] = o5 3802 } 3803 3804 func (c *ctxt9) vregoff(a *obj.Addr) int64 { 3805 c.instoffset = 0 3806 if a != nil { 3807 c.aclass(a) 3808 } 3809 return c.instoffset 3810 } 3811 3812 func (c *ctxt9) regoff(a *obj.Addr) int32 { 3813 return int32(c.vregoff(a)) 3814 } 3815 3816 func (c *ctxt9) oprrr(a obj.As) uint32 { 3817 switch a { 3818 case AADD: 3819 return OPVCC(31, 266, 0, 0) 3820 case AADDCC: 3821 return OPVCC(31, 266, 0, 1) 3822 case AADDV: 3823 return OPVCC(31, 266, 1, 0) 3824 case AADDVCC: 3825 return OPVCC(31, 266, 1, 1) 3826 case AADDC: 3827 return OPVCC(31, 10, 0, 0) 3828 case AADDCCC: 3829 return OPVCC(31, 10, 0, 1) 3830 case AADDCV: 3831 return OPVCC(31, 10, 1, 0) 3832 case AADDCVCC: 3833 return OPVCC(31, 10, 1, 1) 3834 case AADDE: 3835 return OPVCC(31, 138, 0, 0) 3836 case AADDECC: 3837 return OPVCC(31, 138, 0, 1) 3838 case AADDEV: 3839 return OPVCC(31, 138, 1, 0) 3840 case AADDEVCC: 3841 return OPVCC(31, 138, 1, 1) 3842 case AADDME: 3843 return OPVCC(31, 234, 0, 0) 3844 case AADDMECC: 3845 return OPVCC(31, 234, 0, 1) 3846 case AADDMEV: 3847 return OPVCC(31, 234, 1, 0) 3848 case AADDMEVCC: 3849 return OPVCC(31, 234, 1, 1) 3850 case AADDZE: 3851 return OPVCC(31, 202, 0, 0) 3852 case AADDZECC: 3853 return OPVCC(31, 202, 0, 1) 3854 case AADDZEV: 3855 return OPVCC(31, 202, 1, 0) 3856 case AADDZEVCC: 3857 return OPVCC(31, 202, 1, 1) 3858 case AADDEX: 3859 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */ 3860 3861 case AAND: 3862 return OPVCC(31, 28, 0, 0) 3863 case AANDCC: 3864 return OPVCC(31, 28, 0, 1) 3865 case AANDN: 3866 return OPVCC(31, 60, 0, 0) 3867 case AANDNCC: 3868 return OPVCC(31, 60, 0, 1) 3869 3870 case ACMP: 3871 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */ 3872 case ACMPU: 3873 return OPVCC(31, 32, 0, 0) | 1<<21 3874 case ACMPW: 3875 return OPVCC(31, 0, 0, 0) /* L=0 */ 3876 case ACMPWU: 3877 return OPVCC(31, 32, 0, 0) 3878 case ACMPB: 3879 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */ 3880 case ACMPEQB: 3881 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ 3882 3883 case ACNTLZW: 3884 return OPVCC(31, 26, 0, 0) 3885 case ACNTLZWCC: 3886 return OPVCC(31, 26, 0, 1) 3887 case ACNTLZD: 3888 return OPVCC(31, 58, 0, 0) 3889 case ACNTLZDCC: 3890 return OPVCC(31, 58, 0, 1) 3891 3892 case ACRAND: 3893 return OPVCC(19, 257, 0, 0) 3894 case ACRANDN: 3895 return OPVCC(19, 129, 0, 0) 3896 case ACREQV: 3897 return OPVCC(19, 289, 0, 0) 3898 case ACRNAND: 3899 return OPVCC(19, 225, 0, 0) 3900 case ACRNOR: 3901 return OPVCC(19, 33, 0, 0) 3902 case ACROR: 3903 return OPVCC(19, 449, 0, 0) 3904 case ACRORN: 3905 return OPVCC(19, 417, 0, 0) 3906 case ACRXOR: 3907 return OPVCC(19, 193, 0, 0) 3908 3909 case ADCBF: 3910 return OPVCC(31, 86, 0, 0) 3911 case ADCBI: 3912 return OPVCC(31, 470, 0, 0) 3913 case ADCBST: 3914 return OPVCC(31, 54, 0, 0) 3915 case ADCBT: 3916 return OPVCC(31, 278, 0, 0) 3917 case ADCBTST: 3918 return OPVCC(31, 246, 0, 0) 3919 case ADCBZ: 3920 return OPVCC(31, 1014, 0, 0) 3921 3922 case AMODUD: 3923 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */ 3924 case AMODUW: 3925 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */ 3926 case AMODSD: 3927 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */ 3928 case AMODSW: 3929 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */ 3930 3931 case ADIVW, AREM: 3932 return OPVCC(31, 491, 0, 0) 3933 3934 case ADIVWCC: 3935 return OPVCC(31, 491, 0, 1) 3936 3937 case ADIVWV: 3938 return OPVCC(31, 491, 1, 
0) 3939 3940 case ADIVWVCC: 3941 return OPVCC(31, 491, 1, 1) 3942 3943 case ADIVWU, AREMU: 3944 return OPVCC(31, 459, 0, 0) 3945 3946 case ADIVWUCC: 3947 return OPVCC(31, 459, 0, 1) 3948 3949 case ADIVWUV: 3950 return OPVCC(31, 459, 1, 0) 3951 3952 case ADIVWUVCC: 3953 return OPVCC(31, 459, 1, 1) 3954 3955 case ADIVD, AREMD: 3956 return OPVCC(31, 489, 0, 0) 3957 3958 case ADIVDCC: 3959 return OPVCC(31, 489, 0, 1) 3960 3961 case ADIVDE: 3962 return OPVCC(31, 425, 0, 0) 3963 3964 case ADIVDECC: 3965 return OPVCC(31, 425, 0, 1) 3966 3967 case ADIVDEU: 3968 return OPVCC(31, 393, 0, 0) 3969 3970 case ADIVDEUCC: 3971 return OPVCC(31, 393, 0, 1) 3972 3973 case ADIVDV: 3974 return OPVCC(31, 489, 1, 0) 3975 3976 case ADIVDVCC: 3977 return OPVCC(31, 489, 1, 1) 3978 3979 case ADIVDU, AREMDU: 3980 return OPVCC(31, 457, 0, 0) 3981 3982 case ADIVDUCC: 3983 return OPVCC(31, 457, 0, 1) 3984 3985 case ADIVDUV: 3986 return OPVCC(31, 457, 1, 0) 3987 3988 case ADIVDUVCC: 3989 return OPVCC(31, 457, 1, 1) 3990 3991 case AEIEIO: 3992 return OPVCC(31, 854, 0, 0) 3993 3994 case AEQV: 3995 return OPVCC(31, 284, 0, 0) 3996 case AEQVCC: 3997 return OPVCC(31, 284, 0, 1) 3998 3999 case AEXTSB: 4000 return OPVCC(31, 954, 0, 0) 4001 case AEXTSBCC: 4002 return OPVCC(31, 954, 0, 1) 4003 case AEXTSH: 4004 return OPVCC(31, 922, 0, 0) 4005 case AEXTSHCC: 4006 return OPVCC(31, 922, 0, 1) 4007 case AEXTSW: 4008 return OPVCC(31, 986, 0, 0) 4009 case AEXTSWCC: 4010 return OPVCC(31, 986, 0, 1) 4011 4012 case AFABS: 4013 return OPVCC(63, 264, 0, 0) 4014 case AFABSCC: 4015 return OPVCC(63, 264, 0, 1) 4016 case AFADD: 4017 return OPVCC(63, 21, 0, 0) 4018 case AFADDCC: 4019 return OPVCC(63, 21, 0, 1) 4020 case AFADDS: 4021 return OPVCC(59, 21, 0, 0) 4022 case AFADDSCC: 4023 return OPVCC(59, 21, 0, 1) 4024 case AFCMPO: 4025 return OPVCC(63, 32, 0, 0) 4026 case AFCMPU: 4027 return OPVCC(63, 0, 0, 0) 4028 case AFCFID: 4029 return OPVCC(63, 846, 0, 0) 4030 case AFCFIDCC: 4031 return OPVCC(63, 846, 0, 1) 4032 case AFCFIDU: 4033 return OPVCC(63, 974, 0, 0) 4034 case AFCFIDUCC: 4035 return OPVCC(63, 974, 0, 1) 4036 case AFCFIDS: 4037 return OPVCC(59, 846, 0, 0) 4038 case AFCFIDSCC: 4039 return OPVCC(59, 846, 0, 1) 4040 case AFCTIW: 4041 return OPVCC(63, 14, 0, 0) 4042 case AFCTIWCC: 4043 return OPVCC(63, 14, 0, 1) 4044 case AFCTIWZ: 4045 return OPVCC(63, 15, 0, 0) 4046 case AFCTIWZCC: 4047 return OPVCC(63, 15, 0, 1) 4048 case AFCTID: 4049 return OPVCC(63, 814, 0, 0) 4050 case AFCTIDCC: 4051 return OPVCC(63, 814, 0, 1) 4052 case AFCTIDZ: 4053 return OPVCC(63, 815, 0, 0) 4054 case AFCTIDZCC: 4055 return OPVCC(63, 815, 0, 1) 4056 case AFDIV: 4057 return OPVCC(63, 18, 0, 0) 4058 case AFDIVCC: 4059 return OPVCC(63, 18, 0, 1) 4060 case AFDIVS: 4061 return OPVCC(59, 18, 0, 0) 4062 case AFDIVSCC: 4063 return OPVCC(59, 18, 0, 1) 4064 case AFMADD: 4065 return OPVCC(63, 29, 0, 0) 4066 case AFMADDCC: 4067 return OPVCC(63, 29, 0, 1) 4068 case AFMADDS: 4069 return OPVCC(59, 29, 0, 0) 4070 case AFMADDSCC: 4071 return OPVCC(59, 29, 0, 1) 4072 4073 case AFMOVS, AFMOVD: 4074 return OPVCC(63, 72, 0, 0) /* load */ 4075 case AFMOVDCC: 4076 return OPVCC(63, 72, 0, 1) 4077 case AFMSUB: 4078 return OPVCC(63, 28, 0, 0) 4079 case AFMSUBCC: 4080 return OPVCC(63, 28, 0, 1) 4081 case AFMSUBS: 4082 return OPVCC(59, 28, 0, 0) 4083 case AFMSUBSCC: 4084 return OPVCC(59, 28, 0, 1) 4085 case AFMUL: 4086 return OPVCC(63, 25, 0, 0) 4087 case AFMULCC: 4088 return OPVCC(63, 25, 0, 1) 4089 case AFMULS: 4090 return OPVCC(59, 25, 0, 0) 4091 case AFMULSCC: 4092 return OPVCC(59, 25, 
0, 1) 4093 case AFNABS: 4094 return OPVCC(63, 136, 0, 0) 4095 case AFNABSCC: 4096 return OPVCC(63, 136, 0, 1) 4097 case AFNEG: 4098 return OPVCC(63, 40, 0, 0) 4099 case AFNEGCC: 4100 return OPVCC(63, 40, 0, 1) 4101 case AFNMADD: 4102 return OPVCC(63, 31, 0, 0) 4103 case AFNMADDCC: 4104 return OPVCC(63, 31, 0, 1) 4105 case AFNMADDS: 4106 return OPVCC(59, 31, 0, 0) 4107 case AFNMADDSCC: 4108 return OPVCC(59, 31, 0, 1) 4109 case AFNMSUB: 4110 return OPVCC(63, 30, 0, 0) 4111 case AFNMSUBCC: 4112 return OPVCC(63, 30, 0, 1) 4113 case AFNMSUBS: 4114 return OPVCC(59, 30, 0, 0) 4115 case AFNMSUBSCC: 4116 return OPVCC(59, 30, 0, 1) 4117 case AFCPSGN: 4118 return OPVCC(63, 8, 0, 0) 4119 case AFCPSGNCC: 4120 return OPVCC(63, 8, 0, 1) 4121 case AFRES: 4122 return OPVCC(59, 24, 0, 0) 4123 case AFRESCC: 4124 return OPVCC(59, 24, 0, 1) 4125 case AFRIM: 4126 return OPVCC(63, 488, 0, 0) 4127 case AFRIMCC: 4128 return OPVCC(63, 488, 0, 1) 4129 case AFRIP: 4130 return OPVCC(63, 456, 0, 0) 4131 case AFRIPCC: 4132 return OPVCC(63, 456, 0, 1) 4133 case AFRIZ: 4134 return OPVCC(63, 424, 0, 0) 4135 case AFRIZCC: 4136 return OPVCC(63, 424, 0, 1) 4137 case AFRIN: 4138 return OPVCC(63, 392, 0, 0) 4139 case AFRINCC: 4140 return OPVCC(63, 392, 0, 1) 4141 case AFRSP: 4142 return OPVCC(63, 12, 0, 0) 4143 case AFRSPCC: 4144 return OPVCC(63, 12, 0, 1) 4145 case AFRSQRTE: 4146 return OPVCC(63, 26, 0, 0) 4147 case AFRSQRTECC: 4148 return OPVCC(63, 26, 0, 1) 4149 case AFSEL: 4150 return OPVCC(63, 23, 0, 0) 4151 case AFSELCC: 4152 return OPVCC(63, 23, 0, 1) 4153 case AFSQRT: 4154 return OPVCC(63, 22, 0, 0) 4155 case AFSQRTCC: 4156 return OPVCC(63, 22, 0, 1) 4157 case AFSQRTS: 4158 return OPVCC(59, 22, 0, 0) 4159 case AFSQRTSCC: 4160 return OPVCC(59, 22, 0, 1) 4161 case AFSUB: 4162 return OPVCC(63, 20, 0, 0) 4163 case AFSUBCC: 4164 return OPVCC(63, 20, 0, 1) 4165 case AFSUBS: 4166 return OPVCC(59, 20, 0, 0) 4167 case AFSUBSCC: 4168 return OPVCC(59, 20, 0, 1) 4169 4170 case AICBI: 4171 return OPVCC(31, 982, 0, 0) 4172 case AISYNC: 4173 return OPVCC(19, 150, 0, 0) 4174 4175 case AMTFSB0: 4176 return OPVCC(63, 70, 0, 0) 4177 case AMTFSB0CC: 4178 return OPVCC(63, 70, 0, 1) 4179 case AMTFSB1: 4180 return OPVCC(63, 38, 0, 0) 4181 case AMTFSB1CC: 4182 return OPVCC(63, 38, 0, 1) 4183 4184 case AMULHW: 4185 return OPVCC(31, 75, 0, 0) 4186 case AMULHWCC: 4187 return OPVCC(31, 75, 0, 1) 4188 case AMULHWU: 4189 return OPVCC(31, 11, 0, 0) 4190 case AMULHWUCC: 4191 return OPVCC(31, 11, 0, 1) 4192 case AMULLW: 4193 return OPVCC(31, 235, 0, 0) 4194 case AMULLWCC: 4195 return OPVCC(31, 235, 0, 1) 4196 case AMULLWV: 4197 return OPVCC(31, 235, 1, 0) 4198 case AMULLWVCC: 4199 return OPVCC(31, 235, 1, 1) 4200 4201 case AMULHD: 4202 return OPVCC(31, 73, 0, 0) 4203 case AMULHDCC: 4204 return OPVCC(31, 73, 0, 1) 4205 case AMULHDU: 4206 return OPVCC(31, 9, 0, 0) 4207 case AMULHDUCC: 4208 return OPVCC(31, 9, 0, 1) 4209 case AMULLD: 4210 return OPVCC(31, 233, 0, 0) 4211 case AMULLDCC: 4212 return OPVCC(31, 233, 0, 1) 4213 case AMULLDV: 4214 return OPVCC(31, 233, 1, 0) 4215 case AMULLDVCC: 4216 return OPVCC(31, 233, 1, 1) 4217 4218 case ANAND: 4219 return OPVCC(31, 476, 0, 0) 4220 case ANANDCC: 4221 return OPVCC(31, 476, 0, 1) 4222 case ANEG: 4223 return OPVCC(31, 104, 0, 0) 4224 case ANEGCC: 4225 return OPVCC(31, 104, 0, 1) 4226 case ANEGV: 4227 return OPVCC(31, 104, 1, 0) 4228 case ANEGVCC: 4229 return OPVCC(31, 104, 1, 1) 4230 case ANOR: 4231 return OPVCC(31, 124, 0, 0) 4232 case ANORCC: 4233 return OPVCC(31, 124, 0, 1) 4234 case AOR: 4235 return 
OPVCC(31, 444, 0, 0) 4236 case AORCC: 4237 return OPVCC(31, 444, 0, 1) 4238 case AORN: 4239 return OPVCC(31, 412, 0, 0) 4240 case AORNCC: 4241 return OPVCC(31, 412, 0, 1) 4242 4243 case APOPCNTD: 4244 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */ 4245 case APOPCNTW: 4246 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */ 4247 case APOPCNTB: 4248 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */ 4249 case ACNTTZW: 4250 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */ 4251 case ACNTTZWCC: 4252 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */ 4253 case ACNTTZD: 4254 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */ 4255 case ACNTTZDCC: 4256 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */ 4257 4258 case ARFI: 4259 return OPVCC(19, 50, 0, 0) 4260 case ARFCI: 4261 return OPVCC(19, 51, 0, 0) 4262 case ARFID: 4263 return OPVCC(19, 18, 0, 0) 4264 case AHRFID: 4265 return OPVCC(19, 274, 0, 0) 4266 4267 case ARLWMI: 4268 return OPVCC(20, 0, 0, 0) 4269 case ARLWMICC: 4270 return OPVCC(20, 0, 0, 1) 4271 case ARLWNM: 4272 return OPVCC(23, 0, 0, 0) 4273 case ARLWNMCC: 4274 return OPVCC(23, 0, 0, 1) 4275 4276 case ARLDCL: 4277 return OPVCC(30, 8, 0, 0) 4278 case ARLDCLCC: 4279 return OPVCC(30, 0, 0, 1) 4280 4281 case ARLDCR: 4282 return OPVCC(30, 9, 0, 0) 4283 case ARLDCRCC: 4284 return OPVCC(30, 9, 0, 1) 4285 4286 case ARLDICL: 4287 return OPVCC(30, 0, 0, 0) 4288 case ARLDICLCC: 4289 return OPVCC(30, 0, 0, 1) 4290 case ARLDICR: 4291 return OPMD(30, 1, 0) // rldicr 4292 case ARLDICRCC: 4293 return OPMD(30, 1, 1) // rldicr. 4294 4295 case ARLDIC: 4296 return OPMD(30, 2, 0) // rldic 4297 case ARLDICCC: 4298 return OPMD(30, 2, 1) // rldic. 4299 4300 case ASYSCALL: 4301 return OPVCC(17, 1, 0, 0) 4302 4303 case ASLW: 4304 return OPVCC(31, 24, 0, 0) 4305 case ASLWCC: 4306 return OPVCC(31, 24, 0, 1) 4307 case ASLD: 4308 return OPVCC(31, 27, 0, 0) 4309 case ASLDCC: 4310 return OPVCC(31, 27, 0, 1) 4311 4312 case ASRAW: 4313 return OPVCC(31, 792, 0, 0) 4314 case ASRAWCC: 4315 return OPVCC(31, 792, 0, 1) 4316 case ASRAD: 4317 return OPVCC(31, 794, 0, 0) 4318 case ASRADCC: 4319 return OPVCC(31, 794, 0, 1) 4320 4321 case AEXTSWSLI: 4322 return OPVCC(31, 445, 0, 0) 4323 case AEXTSWSLICC: 4324 return OPVCC(31, 445, 0, 1) 4325 4326 case ASRW: 4327 return OPVCC(31, 536, 0, 0) 4328 case ASRWCC: 4329 return OPVCC(31, 536, 0, 1) 4330 case ASRD: 4331 return OPVCC(31, 539, 0, 0) 4332 case ASRDCC: 4333 return OPVCC(31, 539, 0, 1) 4334 4335 case ASUB: 4336 return OPVCC(31, 40, 0, 0) 4337 case ASUBCC: 4338 return OPVCC(31, 40, 0, 1) 4339 case ASUBV: 4340 return OPVCC(31, 40, 1, 0) 4341 case ASUBVCC: 4342 return OPVCC(31, 40, 1, 1) 4343 case ASUBC: 4344 return OPVCC(31, 8, 0, 0) 4345 case ASUBCCC: 4346 return OPVCC(31, 8, 0, 1) 4347 case ASUBCV: 4348 return OPVCC(31, 8, 1, 0) 4349 case ASUBCVCC: 4350 return OPVCC(31, 8, 1, 1) 4351 case ASUBE: 4352 return OPVCC(31, 136, 0, 0) 4353 case ASUBECC: 4354 return OPVCC(31, 136, 0, 1) 4355 case ASUBEV: 4356 return OPVCC(31, 136, 1, 0) 4357 case ASUBEVCC: 4358 return OPVCC(31, 136, 1, 1) 4359 case ASUBME: 4360 return OPVCC(31, 232, 0, 0) 4361 case ASUBMECC: 4362 return OPVCC(31, 232, 0, 1) 4363 case ASUBMEV: 4364 return OPVCC(31, 232, 1, 0) 4365 case ASUBMEVCC: 4366 return OPVCC(31, 232, 1, 1) 4367 case ASUBZE: 4368 return OPVCC(31, 200, 0, 0) 4369 case ASUBZECC: 4370 return OPVCC(31, 200, 0, 1) 4371 case ASUBZEV: 4372 return OPVCC(31, 200, 1, 0) 4373 case ASUBZEVCC: 4374 return OPVCC(31, 200, 1, 1) 4375 4376 case ASYNC: 4377 return OPVCC(31, 598, 0, 0) 4378 case ALWSYNC: 4379 return 
OPVCC(31, 598, 0, 0) | 1<<21 4380 4381 case APTESYNC: 4382 return OPVCC(31, 598, 0, 0) | 2<<21 4383 4384 case ATLBIE: 4385 return OPVCC(31, 306, 0, 0) 4386 case ATLBIEL: 4387 return OPVCC(31, 274, 0, 0) 4388 case ATLBSYNC: 4389 return OPVCC(31, 566, 0, 0) 4390 case ASLBIA: 4391 return OPVCC(31, 498, 0, 0) 4392 case ASLBIE: 4393 return OPVCC(31, 434, 0, 0) 4394 case ASLBMFEE: 4395 return OPVCC(31, 915, 0, 0) 4396 case ASLBMFEV: 4397 return OPVCC(31, 851, 0, 0) 4398 case ASLBMTE: 4399 return OPVCC(31, 402, 0, 0) 4400 4401 case ATW: 4402 return OPVCC(31, 4, 0, 0) 4403 case ATD: 4404 return OPVCC(31, 68, 0, 0) 4405 4406 /* Vector (VMX/Altivec) instructions */ 4407 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4408 /* are enabled starting at POWER6 (ISA 2.05). */ 4409 case AVAND: 4410 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */ 4411 case AVANDC: 4412 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */ 4413 case AVNAND: 4414 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */ 4415 4416 case AVOR: 4417 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */ 4418 case AVORC: 4419 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */ 4420 case AVNOR: 4421 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */ 4422 case AVXOR: 4423 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */ 4424 case AVEQV: 4425 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */ 4426 4427 case AVADDUBM: 4428 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */ 4429 case AVADDUHM: 4430 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */ 4431 case AVADDUWM: 4432 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */ 4433 case AVADDUDM: 4434 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */ 4435 case AVADDUQM: 4436 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */ 4437 4438 case AVADDCUQ: 4439 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */ 4440 case AVADDCUW: 4441 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */ 4442 4443 case AVADDUBS: 4444 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */ 4445 case AVADDUHS: 4446 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */ 4447 case AVADDUWS: 4448 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */ 4449 4450 case AVADDSBS: 4451 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */ 4452 case AVADDSHS: 4453 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */ 4454 case AVADDSWS: 4455 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */ 4456 4457 case AVADDEUQM: 4458 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */ 4459 case AVADDECUQ: 4460 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */ 4461 4462 case AVMULESB: 4463 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */ 4464 case AVMULOSB: 4465 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */ 4466 case AVMULEUB: 4467 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */ 4468 case AVMULOUB: 4469 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */ 4470 case AVMULESH: 4471 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */ 4472 case AVMULOSH: 4473 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */ 4474 case AVMULEUH: 4475 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */ 4476 case AVMULOUH: 4477 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */ 4478 case AVMULESW: 4479 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */ 4480 case AVMULOSW: 4481 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */ 4482 case AVMULEUW: 4483 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */ 4484 case AVMULOUW: 4485 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */ 4486 case AVMULUWM: 4487 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */ 4488 4489 case AVPMSUMB: 4490 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */ 4491 case AVPMSUMH: 4492 return OPVX(4, 1096, 0, 0) /* 
vpmsumh - v2.07 */ 4493 case AVPMSUMW: 4494 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */ 4495 case AVPMSUMD: 4496 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */ 4497 4498 case AVMSUMUDM: 4499 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */ 4500 4501 case AVSUBUBM: 4502 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */ 4503 case AVSUBUHM: 4504 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */ 4505 case AVSUBUWM: 4506 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */ 4507 case AVSUBUDM: 4508 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */ 4509 case AVSUBUQM: 4510 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */ 4511 4512 case AVSUBCUQ: 4513 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */ 4514 case AVSUBCUW: 4515 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */ 4516 4517 case AVSUBUBS: 4518 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */ 4519 case AVSUBUHS: 4520 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */ 4521 case AVSUBUWS: 4522 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */ 4523 4524 case AVSUBSBS: 4525 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */ 4526 case AVSUBSHS: 4527 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */ 4528 case AVSUBSWS: 4529 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */ 4530 4531 case AVSUBEUQM: 4532 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */ 4533 case AVSUBECUQ: 4534 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */ 4535 4536 case AVRLB: 4537 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */ 4538 case AVRLH: 4539 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */ 4540 case AVRLW: 4541 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */ 4542 case AVRLD: 4543 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */ 4544 4545 case AVMRGOW: 4546 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */ 4547 case AVMRGEW: 4548 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */ 4549 4550 case AVSLB: 4551 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */ 4552 case AVSLH: 4553 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */ 4554 case AVSLW: 4555 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */ 4556 case AVSL: 4557 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */ 4558 case AVSLO: 4559 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */ 4560 case AVSRB: 4561 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */ 4562 case AVSRH: 4563 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */ 4564 case AVSRW: 4565 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */ 4566 case AVSR: 4567 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */ 4568 case AVSRO: 4569 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */ 4570 case AVSLD: 4571 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */ 4572 case AVSRD: 4573 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */ 4574 4575 case AVSRAB: 4576 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */ 4577 case AVSRAH: 4578 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */ 4579 case AVSRAW: 4580 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */ 4581 case AVSRAD: 4582 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */ 4583 4584 case AVBPERMQ: 4585 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */ 4586 case AVBPERMD: 4587 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */ 4588 4589 case AVCLZB: 4590 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */ 4591 case AVCLZH: 4592 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */ 4593 case AVCLZW: 4594 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */ 4595 case AVCLZD: 4596 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */ 4597 4598 case AVPOPCNTB: 4599 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */ 4600 case AVPOPCNTH: 4601 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */ 4602 case AVPOPCNTW: 4603 return OPVX(4, 1923, 0, 0) /* vpopcntw - 
v2.07 */ 4604 case AVPOPCNTD: 4605 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */ 4606 4607 case AVCMPEQUB: 4608 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */ 4609 case AVCMPEQUBCC: 4610 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */ 4611 case AVCMPEQUH: 4612 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */ 4613 case AVCMPEQUHCC: 4614 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */ 4615 case AVCMPEQUW: 4616 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */ 4617 case AVCMPEQUWCC: 4618 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */ 4619 case AVCMPEQUD: 4620 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */ 4621 case AVCMPEQUDCC: 4622 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */ 4623 4624 case AVCMPGTUB: 4625 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */ 4626 case AVCMPGTUBCC: 4627 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */ 4628 case AVCMPGTUH: 4629 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */ 4630 case AVCMPGTUHCC: 4631 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */ 4632 case AVCMPGTUW: 4633 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */ 4634 case AVCMPGTUWCC: 4635 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */ 4636 case AVCMPGTUD: 4637 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */ 4638 case AVCMPGTUDCC: 4639 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */ 4640 case AVCMPGTSB: 4641 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */ 4642 case AVCMPGTSBCC: 4643 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */ 4644 case AVCMPGTSH: 4645 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */ 4646 case AVCMPGTSHCC: 4647 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */ 4648 case AVCMPGTSW: 4649 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */ 4650 case AVCMPGTSWCC: 4651 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */ 4652 case AVCMPGTSD: 4653 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */ 4654 case AVCMPGTSDCC: 4655 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */ 4656 4657 case AVCMPNEZB: 4658 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */ 4659 case AVCMPNEZBCC: 4660 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */ 4661 case AVCMPNEB: 4662 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */ 4663 case AVCMPNEBCC: 4664 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */ 4665 case AVCMPNEH: 4666 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */ 4667 case AVCMPNEHCC: 4668 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */ 4669 case AVCMPNEW: 4670 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */ 4671 case AVCMPNEWCC: 4672 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */ 4673 4674 case AVPERM: 4675 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */ 4676 case AVPERMXOR: 4677 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */ 4678 case AVPERMR: 4679 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */ 4680 4681 case AVSEL: 4682 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */ 4683 4684 case AVCIPHER: 4685 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */ 4686 case AVCIPHERLAST: 4687 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */ 4688 case AVNCIPHER: 4689 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */ 4690 case AVNCIPHERLAST: 4691 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */ 4692 case AVSBOX: 4693 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */ 4694 /* End of vector instructions */ 4695 4696 /* Vector scalar (VSX) instructions */ 4697 /* ISA 2.06 enables these for POWER7. 
*/ 4698 case AMFVSRD, AMFVRD, AMFFPRD: 4699 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */ 4700 case AMFVSRWZ: 4701 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */ 4702 case AMFVSRLD: 4703 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */ 4704 4705 case AMTVSRD, AMTFPRD, AMTVRD: 4706 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */ 4707 case AMTVSRWA: 4708 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */ 4709 case AMTVSRWZ: 4710 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */ 4711 case AMTVSRDD: 4712 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */ 4713 case AMTVSRWS: 4714 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */ 4715 4716 case AXXLAND: 4717 return OPVXX3(60, 130, 0) /* xxland - v2.06 */ 4718 case AXXLANDC: 4719 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */ 4720 case AXXLEQV: 4721 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */ 4722 case AXXLNAND: 4723 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */ 4724 4725 case AXXLORC: 4726 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */ 4727 case AXXLNOR: 4728 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */ 4729 case AXXLOR, AXXLORQ: 4730 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */ 4731 case AXXLXOR: 4732 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */ 4733 4734 case AXXSEL: 4735 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */ 4736 4737 case AXXMRGHW: 4738 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */ 4739 case AXXMRGLW: 4740 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */ 4741 4742 case AXXSPLTW: 4743 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */ 4744 4745 case AXXSPLTIB: 4746 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */ 4747 4748 case AXXPERM: 4749 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */ 4750 case AXXPERMDI: 4751 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */ 4752 4753 case AXXSLDWI: 4754 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */ 4755 4756 case AXXBRQ: 4757 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */ 4758 case AXXBRD: 4759 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */ 4760 case AXXBRW: 4761 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */ 4762 case AXXBRH: 4763 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */ 4764 4765 case AXSCVDPSP: 4766 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */ 4767 case AXSCVSPDP: 4768 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */ 4769 case AXSCVDPSPN: 4770 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */ 4771 case AXSCVSPDPN: 4772 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */ 4773 4774 case AXVCVDPSP: 4775 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */ 4776 case AXVCVSPDP: 4777 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */ 4778 4779 case AXSCVDPSXDS: 4780 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */ 4781 case AXSCVDPSXWS: 4782 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */ 4783 case AXSCVDPUXDS: 4784 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */ 4785 case AXSCVDPUXWS: 4786 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */ 4787 4788 case AXSCVSXDDP: 4789 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */ 4790 case AXSCVUXDDP: 4791 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */ 4792 case AXSCVSXDSP: 4793 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */ 4794 case AXSCVUXDSP: 4795 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */ 4796 4797 case AXVCVDPSXDS: 4798 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */ 4799 case AXVCVDPSXWS: 4800 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */ 4801 case AXVCVDPUXDS: 4802 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */ 4803 case AXVCVDPUXWS: 4804 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */ 4805 case AXVCVSPSXDS: 4806 return 
OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */ 4807 case AXVCVSPSXWS: 4808 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */ 4809 case AXVCVSPUXDS: 4810 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */ 4811 case AXVCVSPUXWS: 4812 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */ 4813 4814 case AXVCVSXDDP: 4815 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */ 4816 case AXVCVSXWDP: 4817 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */ 4818 case AXVCVUXDDP: 4819 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */ 4820 case AXVCVUXWDP: 4821 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */ 4822 case AXVCVSXDSP: 4823 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */ 4824 case AXVCVSXWSP: 4825 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */ 4826 case AXVCVUXDSP: 4827 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */ 4828 case AXVCVUXWSP: 4829 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */ 4830 /* End of VSX instructions */ 4831 4832 case AMADDHD: 4833 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */ 4834 case AMADDHDU: 4835 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */ 4836 case AMADDLD: 4837 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */ 4838 4839 case AXOR: 4840 return OPVCC(31, 316, 0, 0) 4841 case AXORCC: 4842 return OPVCC(31, 316, 0, 1) 4843 } 4844 4845 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a) 4846 return 0 4847 } 4848 4849 func (c *ctxt9) opirrr(a obj.As) uint32 { 4850 switch a { 4851 /* Vector (VMX/Altivec) instructions */ 4852 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4853 /* are enabled starting at POWER6 (ISA 2.05). */ 4854 case AVSLDOI: 4855 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */ 4856 } 4857 4858 c.ctxt.Diag("bad i/r/r/r opcode %v", a) 4859 return 0 4860 } 4861 4862 func (c *ctxt9) opiirr(a obj.As) uint32 { 4863 switch a { 4864 /* Vector (VMX/Altivec) instructions */ 4865 /* ISA 2.07 enables these for POWER8 and beyond. */ 4866 case AVSHASIGMAW: 4867 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */ 4868 case AVSHASIGMAD: 4869 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */ 4870 } 4871 4872 c.ctxt.Diag("bad i/i/r/r opcode %v", a) 4873 return 0 4874 } 4875 4876 func (c *ctxt9) opirr(a obj.As) uint32 { 4877 switch a { 4878 case AADD: 4879 return OPVCC(14, 0, 0, 0) 4880 case AADDC: 4881 return OPVCC(12, 0, 0, 0) 4882 case AADDCCC: 4883 return OPVCC(13, 0, 0, 0) 4884 case AADDIS: 4885 return OPVCC(15, 0, 0, 0) /* ADDIS */ 4886 4887 case AANDCC: 4888 return OPVCC(28, 0, 0, 0) 4889 case AANDISCC: 4890 return OPVCC(29, 0, 0, 0) /* ANDIS. 
*/ 4891 4892 case ABR: 4893 return OPVCC(18, 0, 0, 0) 4894 case ABL: 4895 return OPVCC(18, 0, 0, 0) | 1 4896 case obj.ADUFFZERO: 4897 return OPVCC(18, 0, 0, 0) | 1 4898 case obj.ADUFFCOPY: 4899 return OPVCC(18, 0, 0, 0) | 1 4900 case ABC: 4901 return OPVCC(16, 0, 0, 0) 4902 case ABCL: 4903 return OPVCC(16, 0, 0, 0) | 1 4904 4905 case ABEQ: 4906 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0) 4907 case ABGE: 4908 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0) 4909 case ABGT: 4910 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0) 4911 case ABLE: 4912 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0) 4913 case ABLT: 4914 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0) 4915 case ABNE: 4916 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0) 4917 case ABVC: 4918 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0) 4919 case ABVS: 4920 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0) 4921 case ABDZ: 4922 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0) 4923 case ABDNZ: 4924 return AOP_RRR(16<<26, BO_BCTR, 0, 0) 4925 4926 case ACMP: 4927 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */ 4928 case ACMPU: 4929 return OPVCC(10, 0, 0, 0) | 1<<21 4930 case ACMPW: 4931 return OPVCC(11, 0, 0, 0) /* L=0 */ 4932 case ACMPWU: 4933 return OPVCC(10, 0, 0, 0) 4934 case ACMPEQB: 4935 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ 4936 4937 case ALSW: 4938 return OPVCC(31, 597, 0, 0) 4939 4940 case ACOPY: 4941 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */ 4942 case APASTECC: 4943 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */ 4944 case ADARN: 4945 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */ 4946 4947 case AMULLW, AMULLD: 4948 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */ 4949 4950 case AOR: 4951 return OPVCC(24, 0, 0, 0) 4952 case AORIS: 4953 return OPVCC(25, 0, 0, 0) /* ORIS */ 4954 4955 case ARLWMI: 4956 return OPVCC(20, 0, 0, 0) /* rlwimi */ 4957 case ARLWMICC: 4958 return OPVCC(20, 0, 0, 1) 4959 case ARLDMI: 4960 return OPMD(30, 3, 0) /* rldimi */ 4961 case ARLDMICC: 4962 return OPMD(30, 3, 1) /* rldimi. */ 4963 case ARLDIMI: 4964 return OPMD(30, 3, 0) /* rldimi */ 4965 case ARLDIMICC: 4966 return OPMD(30, 3, 1) /* rldimi. */ 4967 case ARLWNM: 4968 return OPVCC(21, 0, 0, 0) /* rlwinm */ 4969 case ARLWNMCC: 4970 return OPVCC(21, 0, 0, 1) 4971 4972 case ARLDCL: 4973 return OPMD(30, 0, 0) /* rldicl */ 4974 case ARLDCLCC: 4975 return OPMD(30, 0, 1) /* rldicl. */ 4976 case ARLDCR: 4977 return OPMD(30, 1, 0) /* rldicr */ 4978 case ARLDCRCC: 4979 return OPMD(30, 1, 1) /* rldicr. */ 4980 case ARLDC: 4981 return OPMD(30, 2, 0) /* rldic */ 4982 case ARLDCCC: 4983 return OPMD(30, 2, 1) /* rldic. */ 4984 4985 case ASRAW: 4986 return OPVCC(31, 824, 0, 0) 4987 case ASRAWCC: 4988 return OPVCC(31, 824, 0, 1) 4989 case ASRAD: 4990 return OPVCC(31, (413 << 1), 0, 0) 4991 case ASRADCC: 4992 return OPVCC(31, (413 << 1), 0, 1) 4993 case AEXTSWSLI: 4994 return OPVCC(31, 445, 0, 0) 4995 case AEXTSWSLICC: 4996 return OPVCC(31, 445, 0, 1) 4997 4998 case ASTSW: 4999 return OPVCC(31, 725, 0, 0) 5000 5001 case ASUBC: 5002 return OPVCC(8, 0, 0, 0) 5003 5004 case ATW: 5005 return OPVCC(3, 0, 0, 0) 5006 case ATD: 5007 return OPVCC(2, 0, 0, 0) 5008 5009 /* Vector (VMX/Altivec) instructions */ 5010 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 5011 /* are enabled starting at POWER6 (ISA 2.05). 
*/ 5012 case AVSPLTB: 5013 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */ 5014 case AVSPLTH: 5015 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */ 5016 case AVSPLTW: 5017 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */ 5018 5019 case AVSPLTISB: 5020 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */ 5021 case AVSPLTISH: 5022 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */ 5023 case AVSPLTISW: 5024 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */ 5025 /* End of vector instructions */ 5026 5027 case AFTDIV: 5028 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */ 5029 case AFTSQRT: 5030 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */ 5031 5032 case AXOR: 5033 return OPVCC(26, 0, 0, 0) /* XORIL */ 5034 case AXORIS: 5035 return OPVCC(27, 0, 0, 0) /* XORIS */ 5036 } 5037 5038 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a) 5039 return 0 5040 } 5041 5042 /* 5043 * load o(a),d 5044 */ 5045 func (c *ctxt9) opload(a obj.As) uint32 { 5046 switch a { 5047 case AMOVD: 5048 return OPVCC(58, 0, 0, 0) /* ld */ 5049 case AMOVDU: 5050 return OPVCC(58, 0, 0, 1) /* ldu */ 5051 case AMOVWZ: 5052 return OPVCC(32, 0, 0, 0) /* lwz */ 5053 case AMOVWZU: 5054 return OPVCC(33, 0, 0, 0) /* lwzu */ 5055 case AMOVW: 5056 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */ 5057 case ALXV: 5058 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */ 5059 case ALXVL: 5060 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */ 5061 case ALXVLL: 5062 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */ 5063 case ALXVX: 5064 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */ 5065 5066 /* no AMOVWU */ 5067 case AMOVB, AMOVBZ: 5068 return OPVCC(34, 0, 0, 0) 5069 /* load */ 5070 5071 case AMOVBU, AMOVBZU: 5072 return OPVCC(35, 0, 0, 0) 5073 case AFMOVD: 5074 return OPVCC(50, 0, 0, 0) 5075 case AFMOVDU: 5076 return OPVCC(51, 0, 0, 0) 5077 case AFMOVS: 5078 return OPVCC(48, 0, 0, 0) 5079 case AFMOVSU: 5080 return OPVCC(49, 0, 0, 0) 5081 case AMOVH: 5082 return OPVCC(42, 0, 0, 0) 5083 case AMOVHU: 5084 return OPVCC(43, 0, 0, 0) 5085 case AMOVHZ: 5086 return OPVCC(40, 0, 0, 0) 5087 case AMOVHZU: 5088 return OPVCC(41, 0, 0, 0) 5089 case AMOVMW: 5090 return OPVCC(46, 0, 0, 0) /* lmw */ 5091 } 5092 5093 c.ctxt.Diag("bad load opcode %v", a) 5094 return 0 5095 } 5096 5097 /* 5098 * indexed load a(b),d 5099 */ 5100 func (c *ctxt9) oploadx(a obj.As) uint32 { 5101 switch a { 5102 case AMOVWZ: 5103 return OPVCC(31, 23, 0, 0) /* lwzx */ 5104 case AMOVWZU: 5105 return OPVCC(31, 55, 0, 0) /* lwzux */ 5106 case AMOVW: 5107 return OPVCC(31, 341, 0, 0) /* lwax */ 5108 case AMOVWU: 5109 return OPVCC(31, 373, 0, 0) /* lwaux */ 5110 5111 case AMOVB, AMOVBZ: 5112 return OPVCC(31, 87, 0, 0) /* lbzx */ 5113 5114 case AMOVBU, AMOVBZU: 5115 return OPVCC(31, 119, 0, 0) /* lbzux */ 5116 case AFMOVD: 5117 return OPVCC(31, 599, 0, 0) /* lfdx */ 5118 case AFMOVDU: 5119 return OPVCC(31, 631, 0, 0) /* lfdux */ 5120 case AFMOVS: 5121 return OPVCC(31, 535, 0, 0) /* lfsx */ 5122 case AFMOVSU: 5123 return OPVCC(31, 567, 0, 0) /* lfsux */ 5124 case AFMOVSX: 5125 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */ 5126 case AFMOVSZ: 5127 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */ 5128 case AMOVH: 5129 return OPVCC(31, 343, 0, 0) /* lhax */ 5130 case AMOVHU: 5131 return OPVCC(31, 375, 0, 0) /* lhaux */ 5132 case AMOVHBR: 5133 return OPVCC(31, 790, 0, 0) /* lhbrx */ 5134 case AMOVWBR: 5135 return OPVCC(31, 534, 0, 0) /* lwbrx */ 5136 case AMOVDBR: 5137 return OPVCC(31, 532, 0, 0) /* ldbrx */ 5138 case AMOVHZ: 5139 return OPVCC(31, 279, 0, 0) /* lhzx */ 5140 case AMOVHZU: 5141 return 
OPVCC(31, 311, 0, 0) /* lhzux */ 5142 case ALBAR: 5143 return OPVCC(31, 52, 0, 0) /* lbarx */ 5144 case ALHAR: 5145 return OPVCC(31, 116, 0, 0) /* lharx */ 5146 case ALWAR: 5147 return OPVCC(31, 20, 0, 0) /* lwarx */ 5148 case ALDAR: 5149 return OPVCC(31, 84, 0, 0) /* ldarx */ 5150 case ALSW: 5151 return OPVCC(31, 533, 0, 0) /* lswx */ 5152 case AMOVD: 5153 return OPVCC(31, 21, 0, 0) /* ldx */ 5154 case AMOVDU: 5155 return OPVCC(31, 53, 0, 0) /* ldux */ 5156 5157 /* Vector (VMX/Altivec) instructions */ 5158 case ALVEBX: 5159 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */ 5160 case ALVEHX: 5161 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */ 5162 case ALVEWX: 5163 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */ 5164 case ALVX: 5165 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */ 5166 case ALVXL: 5167 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */ 5168 case ALVSL: 5169 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */ 5170 case ALVSR: 5171 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */ 5172 /* End of vector instructions */ 5173 5174 /* Vector scalar (VSX) instructions */ 5175 case ALXVX: 5176 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */ 5177 case ALXVD2X: 5178 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */ 5179 case ALXVW4X: 5180 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */ 5181 case ALXVH8X: 5182 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */ 5183 case ALXVB16X: 5184 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */ 5185 case ALXVDSX: 5186 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */ 5187 case ALXSDX: 5188 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */ 5189 case ALXSIWAX: 5190 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */ 5191 case ALXSIWZX: 5192 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */ 5193 } 5194 5195 c.ctxt.Diag("bad loadx opcode %v", a) 5196 return 0 5197 } 5198 5199 /* 5200 * store s,o(d) 5201 */ 5202 func (c *ctxt9) opstore(a obj.As) uint32 { 5203 switch a { 5204 case AMOVB, AMOVBZ: 5205 return OPVCC(38, 0, 0, 0) /* stb */ 5206 5207 case AMOVBU, AMOVBZU: 5208 return OPVCC(39, 0, 0, 0) /* stbu */ 5209 case AFMOVD: 5210 return OPVCC(54, 0, 0, 0) /* stfd */ 5211 case AFMOVDU: 5212 return OPVCC(55, 0, 0, 0) /* stfdu */ 5213 case AFMOVS: 5214 return OPVCC(52, 0, 0, 0) /* stfs */ 5215 case AFMOVSU: 5216 return OPVCC(53, 0, 0, 0) /* stfsu */ 5217 5218 case AMOVHZ, AMOVH: 5219 return OPVCC(44, 0, 0, 0) /* sth */ 5220 5221 case AMOVHZU, AMOVHU: 5222 return OPVCC(45, 0, 0, 0) /* sthu */ 5223 case AMOVMW: 5224 return OPVCC(47, 0, 0, 0) /* stmw */ 5225 case ASTSW: 5226 return OPVCC(31, 725, 0, 0) /* stswi */ 5227 5228 case AMOVWZ, AMOVW: 5229 return OPVCC(36, 0, 0, 0) /* stw */ 5230 5231 case AMOVWZU, AMOVWU: 5232 return OPVCC(37, 0, 0, 0) /* stwu */ 5233 case AMOVD: 5234 return OPVCC(62, 0, 0, 0) /* std */ 5235 case AMOVDU: 5236 return OPVCC(62, 0, 0, 1) /* stdu */ 5237 case ASTXV: 5238 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */ 5239 case ASTXVL: 5240 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */ 5241 case ASTXVLL: 5242 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */ 5243 case ASTXVX: 5244 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */ 5245 5246 } 5247 5248 c.ctxt.Diag("unknown store opcode %v", a) 5249 return 0 5250 } 5251 5252 /* 5253 * indexed store s,a(b) 5254 */ 5255 func (c *ctxt9) opstorex(a obj.As) uint32 { 5256 switch a { 5257 case AMOVB, AMOVBZ: 5258 return OPVCC(31, 215, 0, 0) /* stbx */ 5259 5260 case AMOVBU, AMOVBZU: 5261 return OPVCC(31, 247, 0, 0) /* stbux */ 5262 case AFMOVD: 5263 return OPVCC(31, 727, 0, 0) /* stfdx */ 5264 case AFMOVDU: 5265 return OPVCC(31, 759, 0, 0) /* stfdux 
*/ 5266 case AFMOVS: 5267 return OPVCC(31, 663, 0, 0) /* stfsx */ 5268 case AFMOVSU: 5269 return OPVCC(31, 695, 0, 0) /* stfsux */ 5270 case AFMOVSX: 5271 return OPVCC(31, 983, 0, 0) /* stfiwx */ 5272 5273 case AMOVHZ, AMOVH: 5274 return OPVCC(31, 407, 0, 0) /* sthx */ 5275 case AMOVHBR: 5276 return OPVCC(31, 918, 0, 0) /* sthbrx */ 5277 5278 case AMOVHZU, AMOVHU: 5279 return OPVCC(31, 439, 0, 0) /* sthux */ 5280 5281 case AMOVWZ, AMOVW: 5282 return OPVCC(31, 151, 0, 0) /* stwx */ 5283 5284 case AMOVWZU, AMOVWU: 5285 return OPVCC(31, 183, 0, 0) /* stwux */ 5286 case ASTSW: 5287 return OPVCC(31, 661, 0, 0) /* stswx */ 5288 case AMOVWBR: 5289 return OPVCC(31, 662, 0, 0) /* stwbrx */ 5290 case AMOVDBR: 5291 return OPVCC(31, 660, 0, 0) /* stdbrx */ 5292 case ASTBCCC: 5293 return OPVCC(31, 694, 0, 1) /* stbcx. */ 5294 case ASTHCCC: 5295 return OPVCC(31, 726, 0, 1) /* sthcx. */ 5296 case ASTWCCC: 5297 return OPVCC(31, 150, 0, 1) /* stwcx. */ 5298 case ASTDCCC: 5299 return OPVCC(31, 214, 0, 1) /* stwdx. */ 5300 case AMOVD: 5301 return OPVCC(31, 149, 0, 0) /* stdx */ 5302 case AMOVDU: 5303 return OPVCC(31, 181, 0, 0) /* stdux */ 5304 5305 /* Vector (VMX/Altivec) instructions */ 5306 case ASTVEBX: 5307 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */ 5308 case ASTVEHX: 5309 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */ 5310 case ASTVEWX: 5311 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */ 5312 case ASTVX: 5313 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */ 5314 case ASTVXL: 5315 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */ 5316 /* End of vector instructions */ 5317 5318 /* Vector scalar (VSX) instructions */ 5319 case ASTXVX: 5320 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */ 5321 case ASTXVD2X: 5322 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */ 5323 case ASTXVW4X: 5324 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */ 5325 case ASTXVH8X: 5326 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */ 5327 case ASTXVB16X: 5328 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */ 5329 5330 case ASTXSDX: 5331 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */ 5332 5333 case ASTXSIWX: 5334 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */ 5335 5336 /* End of vector scalar instructions */ 5337 5338 } 5339 5340 c.ctxt.Diag("unknown storex opcode %v", a) 5341 return 0 5342 }
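// A minimal standalone sketch (illustrative only; opvcc and aopRRR below are local
// stand-ins written under the assumption of the conventional X/XO-form layout, not the
// toolchain's own helpers): the opcode tables above pack a primary opcode together with
// an extended opcode, and AOP_RRR-style helpers then OR the register operands in at
// bits 21/16/11. Hand-assembling "add r3,r4,r5" this way yields the familiar word
// 0x7c642a14.
//
//	package main
//
//	import "fmt"
//
//	// opvcc packs primary opcode o, extended opcode xo and the OE/Rc flags at the
//	// assumed X/XO-form positions (xo<<1, oe<<10, rc in bit 0).
//	func opvcc(o, xo, oe, rc uint32) uint32 { return o<<26 | xo<<1 | oe<<10 | rc&1 }
//
//	// aopRRR ORs the target and two source register numbers in at bits 21, 16 and 11.
//	func aopRRR(op, rt, ra, rb uint32) uint32 { return op | (rt&31)<<21 | (ra&31)<<16 | (rb&31)<<11 }
//
//	func main() {
//		add := opvcc(31, 266, 0, 0)  // primary opcode 31, extended opcode 266: add
//		word := aopRRR(add, 3, 4, 5) // add r3,r4,r5
//		fmt.Printf("%#08x\n", word)  // prints 0x7c642a14
//	}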