github.com/bir3/gocompiler@v0.9.2202/src/cmd/internal/obj/ppc64/asm9.go 1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova. 2 // 3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) 5 // Portions Copyright © 1997-1999 Vita Nuova Limited 6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com) 7 // Portions Copyright © 2004,2006 Bruce Ellis 8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) 9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others 10 // Portions Copyright © 2009 The Go Authors. All rights reserved. 11 // 12 // Permission is hereby granted, free of charge, to any person obtaining a copy 13 // of this software and associated documentation files (the "Software"), to deal 14 // in the Software without restriction, including without limitation the rights 15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 16 // copies of the Software, and to permit persons to whom the Software is 17 // furnished to do so, subject to the following conditions: 18 // 19 // The above copyright notice and this permission notice shall be included in 20 // all copies or substantial portions of the Software. 21 // 22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 28 // THE SOFTWARE. 29 30 package ppc64 31 32 import ( 33 "github.com/bir3/gocompiler/src/cmd/internal/obj" 34 "github.com/bir3/gocompiler/src/cmd/internal/objabi" 35 "encoding/binary" 36 "fmt" 37 "github.com/bir3/gocompiler/src/internal/buildcfg" 38 "log" 39 "math" 40 "math/bits" 41 "sort" 42 ) 43 44 // ctxt9 holds state while assembling a single function. 45 // Each function gets a fresh ctxt9. 46 // This allows multiple functions to be safely assembled concurrently. 47 type ctxt9 struct { 48 ctxt *obj.Link 49 newprog obj.ProgAlloc 50 cursym *obj.LSym 51 autosize int32 52 instoffset int64 53 pc int64 54 } 55 56 // Instruction layout. 57 58 const ( 59 r0iszero = 1 60 ) 61 62 const ( 63 // R bit option in prefixed load/store/add D-form operations 64 PFX_R_ABS = 0 // Offset is absolute 65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0 66 ) 67 68 const ( 69 // The preferred hardware nop instruction. 70 NOP = 0x60000000 71 ) 72 73 type Optab struct { 74 as obj.As // Opcode 75 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog. 76 a2 uint8 // p.Reg argument (int16 Register) 77 a3 uint8 // p.RestArgs[0] (obj.AddrPos) 78 a4 uint8 // p.RestArgs[1] 79 a5 uint8 // p.RestArgs[2] 80 a6 uint8 // p.To (obj.Addr) 81 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r 82 size int8 // Text space in bytes needed to lay out the operation 83 84 // A prefixed instruction is generated by this opcode. It cannot be placed 85 // across a 64B boundary. Opcodes should not translate to more than one 86 // prefixed instruction. The prefixed instruction should be written first 87 // (e.g. when Optab.size > 8).
88 ispfx bool 89 90 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32) 91 } 92 93 // optab contains an array to be sliced of accepted operand combinations for an 94 // instruction. Unused arguments and fields are not explicitly enumerated, and 95 // should not be listed for clarity. Unused arguments and values should always 96 // assume the default value for the given type. 97 // 98 // optab does not list every valid ppc64 opcode, it enumerates representative 99 // operand combinations for a class of instruction. The variable oprange indexes 100 // all valid ppc64 opcodes. 101 // 102 // oprange is initialized to point a slice within optab which contains the valid 103 // operand combinations for a given instruction. This is initialized from buildop. 104 // 105 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface 106 // to arrange entries to minimize text size of each opcode. 107 // 108 // optab is the sorted result of combining optabBase, optabGen, and prefixableOptab. 109 var optab []Optab 110 111 var optabBase = []Optab{ 112 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0}, 113 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0}, 114 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0}, 115 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0}, 116 /* move register */ 117 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, 118 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 119 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 120 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4}, 121 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 122 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, 123 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8}, 124 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8}, 125 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4}, 126 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4}, 127 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, 128 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 129 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 130 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, 131 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, 132 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, 133 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */ 134 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 135 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 136 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 137 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, 138 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 139 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, 140 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, 141 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, 142 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, 143 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, 144 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 145 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, 146 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 147 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 148 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, 149 {as: AMULLW, a1: 
C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 150 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4}, 151 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, 152 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, 153 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, 154 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4}, 155 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4}, 156 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12}, 157 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */ 158 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 159 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, 160 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 161 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, 162 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, 163 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, 164 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, 165 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, 166 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 167 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */ 168 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 169 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */ 170 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4}, 171 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 172 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 173 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 174 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 175 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, 176 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, 177 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, 178 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, 179 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4}, 180 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4}, 181 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 182 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 183 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, 184 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, 185 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 186 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 187 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, 188 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, 189 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4}, 190 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4}, 191 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4}, 192 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4}, 193 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4}, 194 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4}, 195 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, 196 {as: ARLDC, a1: C_REG, a3: C_U8CON, a4: C_U8CON, a6: C_REG, type_: 9, size: 4}, 197 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, 198 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 199 {as: ARLDICL, a1: 
C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 200 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 201 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 202 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4}, 203 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4}, 204 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, 205 {as: AFABS, a6: C_FREG, type_: 33, size: 4}, 206 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4}, 207 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4}, 208 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4}, 209 210 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 211 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 212 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, 213 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8}, 214 215 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 216 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 217 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 218 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 219 220 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 221 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, 222 223 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, 224 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8}, 225 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 226 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 227 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 228 229 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 230 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 231 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 232 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 233 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 234 235 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, 236 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, 237 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, 238 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 239 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 240 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8}, 241 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, 242 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 243 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 244 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8}, 245 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, 246 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 247 248 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, 249 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, 250 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, 251 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4}, 252 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 253 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 254 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, 255 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, 256 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 257 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 258 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, 259 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 260 261 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8}, 262 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4}, 263 {as: AFMOVD, a1: 
C_XOREG, a6: C_FREG, type_: 109, size: 4}, 264 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4}, 265 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, 266 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4}, 267 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4}, 268 269 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4}, 270 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4}, 271 272 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4}, 273 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4}, 274 275 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4}, 276 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4}, 277 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4}, 278 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4}, 279 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4}, 280 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4}, 281 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, 282 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4}, 283 284 {as: ASYSCALL, type_: 5, size: 4}, 285 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12}, 286 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12}, 287 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4}, 288 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4}, 289 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label 290 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop 291 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr 292 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr 293 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label 294 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label 295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi 296 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh 297 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi 298 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4}, 299 {as: ASYNC, type_: 46, size: 4}, 300 {as: AWORD, a1: C_LCON, type_: 40, size: 4}, 301 {as: ADWORD, a1: C_64CON, type_: 31, size: 8}, 302 {as: ADWORD, a1: C_LACON, type_: 31, size: 8}, 303 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4}, 304 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4}, 305 {as: AEXTSB, a6: C_REG, type_: 48, size: 4}, 306 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, 307 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, 308 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4}, 309 {as: ANEG, a6: C_REG, type_: 47, size: 4}, 310 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12}, 311 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12}, 312 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16}, 313 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16}, 314 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12}, 315 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12}, 316 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4}, 317 /* Other ISA 2.05+ instructions */ 318 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */ 319 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */ 320 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */ 321 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 322 {as: 
AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */ 323 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */ 324 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */ 325 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */ 326 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */ 327 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */ 328 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */ 329 330 /* Misc ISA 3.0 instructions */ 331 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4}, 332 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4}, 333 334 /* Vector instructions */ 335 336 /* Vector load */ 337 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */ 338 339 /* Vector store */ 340 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */ 341 342 /* Vector logical */ 343 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */ 344 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */ 345 346 /* Vector add */ 347 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */ 348 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */ 349 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */ 350 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */ 351 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */ 352 353 /* Vector subtract */ 354 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */ 355 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */ 356 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */ 357 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */ 358 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */ 359 360 /* Vector multiply */ 361 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */ 362 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */ 363 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */ 364 365 /* Vector rotate */ 366 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */ 367 368 /* Vector shift */ 369 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */ 370 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */ 371 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 
83, size: 4}, /* vector shift by octet immediate, va-form */ 372 373 /* Vector count */ 374 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */ 375 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */ 376 377 /* Vector compare */ 378 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */ 379 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */ 380 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */ 381 382 /* Vector merge */ 383 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */ 384 385 /* Vector permute */ 386 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */ 387 388 /* Vector bit permute */ 389 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */ 390 391 /* Vector select */ 392 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */ 393 394 /* Vector splat */ 395 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */ 396 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, 397 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */ 398 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4}, 399 400 /* Vector AES */ 401 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */ 402 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */ 403 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */ 404 405 /* Vector SHA */ 406 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */ 407 408 /* VSX vector load */ 409 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */ 410 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */ 411 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */ 412 413 /* VSX vector store */ 414 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */ 415 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */ 416 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */ 417 418 /* VSX scalar load */ 419 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */ 420 421 /* VSX scalar store */ 422 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */ 423 424 /* VSX scalar as integer load */ 425 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */ 426 427 /* VSX scalar store as integer */ 428 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */ 429 430 /* VSX move from VSR */ 431 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, 432 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4}, 433 434 /* VSX move to VSR */ 435 {as: AMTVSRD, a1: C_REG, a6: 
C_VSREG, type_: 104, size: 4}, 436 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4}, 437 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4}, 438 439 /* VSX logical */ 440 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */ 441 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */ 442 443 /* VSX select */ 444 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */ 445 446 /* VSX merge */ 447 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */ 448 449 /* VSX splat */ 450 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */ 451 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */ 452 453 /* VSX permute */ 454 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */ 455 456 /* VSX shift */ 457 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */ 458 459 /* VSX reverse bytes */ 460 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */ 461 462 /* VSX scalar FP-FP conversion */ 463 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */ 464 465 /* VSX vector FP-FP conversion */ 466 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */ 467 468 /* VSX scalar FP-integer conversion */ 469 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */ 470 471 /* VSX scalar integer-FP conversion */ 472 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */ 473 474 /* VSX vector FP-integer conversion */ 475 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */ 476 477 /* VSX vector integer-FP conversion */ 478 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */ 479 480 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 481 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, 482 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4}, 483 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4}, 484 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 485 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, 486 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4}, 487 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4}, 488 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4}, 489 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4}, 490 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4}, 491 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4}, 492 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4}, 493 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4}, 494 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4}, 495 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4}, 496 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4}, 497 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4}, 498 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 499 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, 
size: 4}, 500 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4}, 501 {as: AEIEIO, type_: 46, size: 4}, 502 {as: ATLBIE, a1: C_REG, type_: 49, size: 4}, 503 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4}, 504 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, 505 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, 506 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 507 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4}, 508 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, 509 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4}, 510 511 {as: obj.AUNDEF, type_: 78, size: 4}, 512 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0}, 513 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0}, 514 {as: obj.ANOP, type_: 0, size: 0}, 515 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689 516 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior 517 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0}, 518 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL 519 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL 520 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code 521 } 522 523 // PrefixableOptab describes opcodes which may generate different instruction sequences 524 // depending on whether prefixed opcode support is available. 525 type PrefixableOptab struct { 526 Optab 527 minGOPPC64 int // Minimum GOPPC64 required to support this. 528 pfxsize int8 // Instruction sequence size when prefixed opcodes are used 529 } 530 531 // The prefixable optab entries contain the pseudo-opcodes which generate relocations, or may generate 532 // a more efficient sequence of instructions if a prefixed version exists (e.g. paddi instead of oris/ori/add). 533 // 534 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative 535 // sequence. It also encompasses several transformations which do not involve relocations; those could be 536 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding 537 // restrictions on the offset, so they are also used for static binaries to allow better code generation. E.g., 538 // 539 // MOVD something-byte-aligned(Rx), Ry 540 // MOVD 3(Rx), Ry 541 // 542 // is allowed when the prefixed forms are used. 543 // 544 // This requires an ISA 3.1 compatible CPU (e.g. Power10) and, when linking externally, an ELFv2 1.5 compliant linker.
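// As a rough illustration (not an extra table entry): when buildop below runs
// with prefixed opcodes enabled, each eligible entry is copied into optab with
//
//	entry.ispfx = true
//	entry.size = entry.pfxsize
//
// so a load such as MOVD 3(R4), R5 can be emitted as a single 8-byte pld with a
// 34-bit displacement, instead of a longer sequence built around a DS-form ld
// whose 16-bit offset must be a multiple of 4.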
545 var prefixableOptab = []PrefixableOptab{ 546 {Optab: Optab{as: AMOVD, a1: C_S34CON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8}, 547 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8}, 548 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8}, 549 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12}, 550 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8}, 551 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8}, 552 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, 553 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, 554 555 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8}, 556 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8}, 557 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8}, 558 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8}, 559 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, 560 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, 561 562 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, 563 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12}, 564 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12}, 565 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, 566 567 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8}, 568 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8}, 569 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, 570 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, 571 572 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8}, 573 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8}, 574 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, 575 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, 576 577 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8}, 578 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8}, 579 {Optab: Optab{as: AADD, a1: C_S34CON, a2: C_REG, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8}, 580 {Optab: Optab{as: AADD, a1: C_S34CON, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8}, 581 } 582 583 var oprange [ALAST & obj.AMask][]Optab 584 585 var xcmp [C_NCLASS][C_NCLASS]bool 586 587 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported. 588 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop 589 590 // padding bytes to add to align code as requested. 
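// For example (values are illustrative): PCALIGN $16 at pc=0x28 needs
// 16 - (0x28 & 15) = 8 padding bytes, while a pc that is already 16-byte
// aligned needs none; any requested alignment other than 8, 16, 32 or 64
// is rejected with a diagnostic.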
591 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int { 592 switch a { 593 case 8, 16, 32, 64: 594 // By default function alignment is 16. If an alignment > 16 is 595 // requested then the function alignment must also be promoted. 596 // The function alignment is not promoted on AIX at this time. 597 // TODO: Investigate AIX function alignment. 598 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) { 599 cursym.Func().Align = int32(a) 600 } 601 if pc&(a-1) != 0 { 602 return int(a - (pc & (a - 1))) 603 } 604 default: 605 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a) 606 } 607 return 0 608 } 609 610 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { 611 p := cursym.Func().Text 612 if p == nil || p.Link == nil { // handle external functions and ELF section symbols 613 return 614 } 615 616 if oprange[AANDN&obj.AMask] == nil { 617 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first") 618 } 619 620 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)} 621 622 pc := int64(0) 623 p.Pc = pc 624 625 var m int 626 var o *Optab 627 for p = p.Link; p != nil; p = p.Link { 628 p.Pc = pc 629 o = c.oplook(p) 630 m = int(o.size) 631 if m == 0 { 632 if p.As == obj.APCALIGN { 633 a := c.vregoff(&p.From) 634 m = addpad(pc, a, ctxt, cursym) 635 } else { 636 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { 637 ctxt.Diag("zero-width instruction\n%v", p) 638 } 639 continue 640 } 641 } 642 pc += int64(m) 643 } 644 645 c.cursym.Size = pc 646 647 /* 648 * if any procedure is large enough to 649 * generate a large SBRA branch, then 650 * generate extra passes putting branches 651 * around jmps to fix. this is rare. 652 */ 653 bflag := 1 654 655 var otxt int64 656 var q *obj.Prog 657 var out [5]uint32 658 var falign int32 // Track increased alignment requirements for prefix. 659 for bflag != 0 { 660 bflag = 0 661 pc = 0 662 falign = 0 // Note, linker bumps function symbols to funcAlign. 663 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link { 664 p.Pc = pc 665 o = c.oplook(p) 666 667 // very large conditional branches 668 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil { 669 otxt = p.To.Target().Pc - pc 670 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 { 671 // Assemble the instruction with a target not too far to figure out BI and BO fields. 672 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted, 673 // and only one extra branch is needed to reach the target. 674 tgt := p.To.Target() 675 p.To.SetTarget(p.Link) 676 o.asmout(&c, p, o, &out) 677 p.To.SetTarget(tgt) 678 679 bo := int64(out[0]>>21) & 31 680 bi := int16((out[0] >> 16) & 31) 681 invertible := false 682 683 if bo&0x14 == 0x14 { 684 // A conditional branch that is unconditionally taken. This cannot be inverted. 685 } else if bo&0x10 == 0x10 { 686 // A branch based on the value of CTR. Invert the CTR comparison against zero bit. 687 bo ^= 0x2 688 invertible = true 689 } else if bo&0x04 == 0x04 { 690 // A branch based on CR bit. Invert the BI comparison bit. 
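// For example (BO values per the ISA encoding): BO=12 (0b01100, "branch if
// CR bit BI is set") becomes BO=4 (0b00100, "branch if CR bit BI is clear")
// after the 0x8 flip below, just as the CTR case above flips 0x2 to turn
// "branch if CTR != 0" into "branch if CTR == 0".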
691 bo ^= 0x8 692 invertible = true 693 } 694 695 if invertible { 696 // Rewrite 697 // BC bo,...,far_away_target 698 // NEXT_INSN 699 // to: 700 // BC invert(bo),next_insn 701 // JMP far_away_target 702 // next_insn: 703 // NEXT_INSN 704 p.As = ABC 705 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo} 706 q = c.newprog() 707 q.As = ABR 708 q.To.Type = obj.TYPE_BRANCH 709 q.To.SetTarget(p.To.Target()) 710 q.Link = p.Link 711 p.To.SetTarget(p.Link) 712 p.Link = q 713 p.Reg = REG_CRBIT0 + bi 714 } else { 715 // Rewrite 716 // BC ...,far_away_target 717 // NEXT_INSN 718 // to 719 // BC ...,tmp 720 // JMP next_insn 721 // tmp: 722 // JMP far_away_target 723 // next_insn: 724 // NEXT_INSN 725 q = c.newprog() 726 q.Link = p.Link 727 p.Link = q 728 q.As = ABR 729 q.To.Type = obj.TYPE_BRANCH 730 q.To.SetTarget(p.To.Target()) 731 p.To.SetTarget(q) 732 q = c.newprog() 733 q.Link = p.Link 734 p.Link = q 735 q.As = ABR 736 q.To.Type = obj.TYPE_BRANCH 737 q.To.SetTarget(q.Link.Link) 738 } 739 bflag = 1 740 } 741 } 742 743 m = int(o.size) 744 if m == 0 { 745 if p.As == obj.APCALIGN { 746 a := c.vregoff(&p.From) 747 m = addpad(pc, a, ctxt, cursym) 748 } else { 749 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { 750 ctxt.Diag("zero-width instruction\n%v", p) 751 } 752 continue 753 } 754 } 755 756 // Prefixed instructions cannot be placed across a 64B boundary. 757 // Mark and adjust the PC of those which do. A nop will be 758 // inserted during final assembly. 759 if o.ispfx { 760 mark := p.Mark &^ PFX_X64B 761 if pc&63 == 60 { 762 p.Pc += 4 763 m += 4 764 mark |= PFX_X64B 765 } 766 767 // Marks may be adjusted if a too-far conditional branch is 768 // fixed up above. Likewise, inserting a NOP may cause a 769 // branch target to become too far away. We need to run 770 // another iteration and verify no additional changes 771 // are needed. 772 if mark != p.Mark { 773 bflag = 1 774 p.Mark = mark 775 } 776 777 // Check for 16 or 32B crossing of this prefixed insn. 778 // These do no require padding, but do require increasing 779 // the function alignment to prevent them from potentially 780 // crossing a 64B boundary when the linker assigns the final 781 // PC. 782 switch p.Pc & 31 { 783 case 28: // 32B crossing 784 falign = 64 785 case 12: // 16B crossing 786 if falign < 64 { 787 falign = 32 788 } 789 } 790 } 791 792 pc += int64(m) 793 } 794 795 c.cursym.Size = pc 796 } 797 798 c.cursym.Size = pc 799 c.cursym.Func().Align = falign 800 c.cursym.Grow(c.cursym.Size) 801 802 // lay out the code, emitting code and data relocations. 
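// For example: a prefixed instruction which would have started at pc&63 == 60
// was moved up by 4 bytes and marked PFX_X64B in the sizing loop above; the
// loop below emits one NOP into that 4-byte gap so the 8-byte prefixed
// instruction begins at the 64-byte boundary rather than straddling it.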
803 804 bp := c.cursym.P 805 var i int32 806 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link { 807 c.pc = p.Pc 808 o = c.oplook(p) 809 if int(o.size) > 4*len(out) { 810 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p) 811 } 812 // asmout is not set up to add large amounts of padding 813 if o.type_ == 0 && p.As == obj.APCALIGN { 814 aln := c.vregoff(&p.From) 815 v := addpad(p.Pc, aln, c.ctxt, c.cursym) 816 if v > 0 { 817 // Same padding instruction for all 818 for i = 0; i < int32(v/4); i++ { 819 c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP) 820 bp = bp[4:] 821 } 822 } 823 } else { 824 if p.Mark&PFX_X64B != 0 { 825 c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP) 826 bp = bp[4:] 827 } 828 o.asmout(&c, p, o, &out) 829 for i = 0; i < int32(o.size/4); i++ { 830 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) 831 bp = bp[4:] 832 } 833 } 834 } 835 } 836 837 func isint32(v int64) bool { 838 return int64(int32(v)) == v 839 } 840 841 func isuint32(v uint64) bool { 842 return uint64(uint32(v)) == v 843 } 844 845 func (c *ctxt9) aclassreg(reg int16) int { 846 if REG_R0 <= reg && reg <= REG_R31 { 847 return C_REGP + int(reg&1) 848 } 849 if REG_F0 <= reg && reg <= REG_F31 { 850 return C_FREGP + int(reg&1) 851 } 852 if REG_V0 <= reg && reg <= REG_V31 { 853 return C_VREG 854 } 855 if REG_VS0 <= reg && reg <= REG_VS63 { 856 return C_VSREGP + int(reg&1) 857 } 858 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR { 859 return C_CREG 860 } 861 if REG_CR0LT <= reg && reg <= REG_CR7SO { 862 return C_CRBIT 863 } 864 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 { 865 switch reg { 866 case REG_LR: 867 return C_LR 868 869 case REG_CTR: 870 return C_CTR 871 } 872 873 return C_SPR 874 } 875 if REG_A0 <= reg && reg <= REG_A7 { 876 return C_AREG 877 } 878 if reg == REG_FPSCR { 879 return C_FPSCR 880 } 881 return C_GOK 882 } 883 884 func (c *ctxt9) aclass(a *obj.Addr) int { 885 switch a.Type { 886 case obj.TYPE_NONE: 887 return C_NONE 888 889 case obj.TYPE_REG: 890 return c.aclassreg(a.Reg) 891 892 case obj.TYPE_MEM: 893 if a.Index != 0 { 894 if a.Name != obj.NAME_NONE || a.Offset != 0 { 895 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class) 896 897 } 898 return C_XOREG 899 } 900 switch a.Name { 901 case obj.NAME_GOTREF, obj.NAME_TOCREF: 902 return C_ADDR 903 904 case obj.NAME_EXTERN, 905 obj.NAME_STATIC: 906 c.instoffset = a.Offset 907 if a.Sym == nil { 908 break 909 } else if a.Sym.Type == objabi.STLSBSS { 910 // For PIC builds, use 12 byte got initial-exec TLS accesses. 911 if c.ctxt.Flag_shared { 912 return C_TLS_IE 913 } 914 // Otherwise, use 8 byte local-exec TLS accesses. 
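// (The two sequence sizes correspond to the AMOVD C_TLS_IE and C_TLS_LE
// entries in prefixableOptab above: 12 and 8 bytes respectively.)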
915 return C_TLS_LE 916 } else { 917 return C_ADDR 918 } 919 920 case obj.NAME_AUTO: 921 a.Reg = REGSP 922 c.instoffset = int64(c.autosize) + a.Offset 923 if c.instoffset >= -BIG && c.instoffset < BIG { 924 return C_SOREG 925 } 926 return C_LOREG 927 928 case obj.NAME_PARAM: 929 a.Reg = REGSP 930 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize 931 if c.instoffset >= -BIG && c.instoffset < BIG { 932 return C_SOREG 933 } 934 return C_LOREG 935 936 case obj.NAME_NONE: 937 c.instoffset = a.Offset 938 if a.Offset == 0 && a.Index == 0 { 939 return C_ZOREG 940 } else if c.instoffset >= -BIG && c.instoffset < BIG { 941 return C_SOREG 942 } else { 943 return C_LOREG 944 } 945 } 946 947 return C_GOK 948 949 case obj.TYPE_TEXTSIZE: 950 return C_TEXTSIZE 951 952 case obj.TYPE_FCONST: 953 // The only cases where FCONST will occur are with float64 +/- 0. 954 // All other float constants are generated in memory. 955 f64 := a.Val.(float64) 956 if f64 == 0 { 957 if math.Signbit(f64) { 958 return C_ADDCON 959 } 960 return C_ZCON 961 } 962 log.Fatalf("Unexpected nonzero FCONST operand %v", a) 963 964 case obj.TYPE_CONST, 965 obj.TYPE_ADDR: 966 switch a.Name { 967 case obj.NAME_NONE: 968 c.instoffset = a.Offset 969 if a.Reg != 0 { 970 if -BIG <= c.instoffset && c.instoffset < BIG { 971 return C_SACON 972 } 973 if isint32(c.instoffset) { 974 return C_LACON 975 } 976 return C_DACON 977 } 978 979 case obj.NAME_EXTERN, 980 obj.NAME_STATIC: 981 s := a.Sym 982 if s == nil { 983 return C_GOK 984 } 985 c.instoffset = a.Offset 986 return C_LACON 987 988 case obj.NAME_AUTO: 989 a.Reg = REGSP 990 c.instoffset = int64(c.autosize) + a.Offset 991 if c.instoffset >= -BIG && c.instoffset < BIG { 992 return C_SACON 993 } 994 return C_LACON 995 996 case obj.NAME_PARAM: 997 a.Reg = REGSP 998 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize 999 if c.instoffset >= -BIG && c.instoffset < BIG { 1000 return C_SACON 1001 } 1002 return C_LACON 1003 1004 default: 1005 return C_GOK 1006 } 1007 1008 if c.instoffset >= 0 { 1009 sbits := bits.Len64(uint64(c.instoffset)) 1010 switch { 1011 case sbits <= 5: 1012 return C_ZCON + sbits 1013 case sbits <= 8: 1014 return C_U8CON 1015 case sbits <= 15: 1016 return C_U15CON 1017 case sbits <= 16: 1018 return C_U16CON 1019 case sbits <= 31: 1020 return C_U32CON 1021 case sbits <= 32: 1022 return C_U32CON 1023 case sbits <= 33: 1024 return C_S34CON 1025 default: 1026 return C_64CON 1027 } 1028 } else { 1029 sbits := bits.Len64(uint64(^c.instoffset)) 1030 switch { 1031 case sbits <= 15: 1032 return C_S16CON 1033 case sbits <= 31: 1034 return C_S32CON 1035 case sbits <= 33: 1036 return C_S34CON 1037 default: 1038 return C_64CON 1039 } 1040 } 1041 1042 case obj.TYPE_BRANCH: 1043 if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled { 1044 return C_LBRAPIC 1045 } 1046 return C_SBRA 1047 } 1048 1049 return C_GOK 1050 } 1051 1052 func prasm(p *obj.Prog) { 1053 fmt.Printf("%v\n", p) 1054 } 1055 1056 func (c *ctxt9) oplook(p *obj.Prog) *Optab { 1057 a1 := int(p.Optab) 1058 if a1 != 0 { 1059 return &optab[a1-1] 1060 } 1061 a1 = int(p.From.Class) 1062 if a1 == 0 { 1063 a1 = c.aclass(&p.From) + 1 1064 p.From.Class = int8(a1) 1065 } 1066 a1-- 1067 1068 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1} 1069 for i, ap := range p.RestArgs { 1070 argsv[i] = int(ap.Addr.Class) 1071 if argsv[i] == 0 { 1072 argsv[i] = c.aclass(&ap.Addr) + 1 1073 ap.Addr.Class = int8(argsv[i]) 1074 } 1075 1076 } 1077 a3 := argsv[0] - 1 1078 a4 := argsv[1] - 1 1079 a5 := argsv[2] - 1 
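// Note: operand classes are cached 1-based (in Addr.Class and Prog.Optab
// above) so that the zero value means "not yet classified"; e.g. a C_NONE
// operand is stored as C_NONE+1 and decremented again before the table
// lookup below.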
1080 1081 a6 := int(p.To.Class) 1082 if a6 == 0 { 1083 a6 = c.aclass(&p.To) + 1 1084 p.To.Class = int8(a6) 1085 } 1086 a6-- 1087 1088 a2 := C_NONE 1089 if p.Reg != 0 { 1090 a2 = c.aclassreg(p.Reg) 1091 } 1092 1093 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6) 1094 ops := oprange[p.As&obj.AMask] 1095 c1 := &xcmp[a1] 1096 c2 := &xcmp[a2] 1097 c3 := &xcmp[a3] 1098 c4 := &xcmp[a4] 1099 c5 := &xcmp[a5] 1100 c6 := &xcmp[a6] 1101 for i := range ops { 1102 op := &ops[i] 1103 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] { 1104 p.Optab = uint16(cap(optab) - cap(ops) + i + 1) 1105 return op 1106 } 1107 } 1108 1109 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6)) 1110 prasm(p) 1111 if ops == nil { 1112 ops = optab 1113 } 1114 return &ops[0] 1115 } 1116 1117 // Compare two operand types (e.g. C_REG or C_SCON) 1118 // and return true if b is compatible with a. 1119 // 1120 // Argument comparison isn't symmetric, so care must be taken. 1121 // a is the argument type as found in optab, b is the argument as 1122 // fitted by aclass. 1123 func cmp(a int, b int) bool { 1124 if a == b { 1125 return true 1126 } 1127 switch a { 1128 1129 case C_SPR: 1130 if b == C_LR || b == C_CTR { 1131 return true 1132 } 1133 1134 case C_U1CON: 1135 return cmp(C_ZCON, b) 1136 case C_U2CON: 1137 return cmp(C_U1CON, b) 1138 case C_U3CON: 1139 return cmp(C_U2CON, b) 1140 case C_U4CON: 1141 return cmp(C_U3CON, b) 1142 case C_U5CON: 1143 return cmp(C_U4CON, b) 1144 case C_U8CON: 1145 return cmp(C_U5CON, b) 1146 case C_U15CON: 1147 return cmp(C_U8CON, b) 1148 case C_U16CON: 1149 return cmp(C_U15CON, b) 1150 1151 case C_S16CON: 1152 return cmp(C_U15CON, b) 1153 case C_32CON: 1154 return cmp(C_S16CON, b) || cmp(C_U16CON, b) 1155 case C_S34CON: 1156 return cmp(C_32CON, b) 1157 case C_64CON: 1158 return cmp(C_S34CON, b) 1159 1160 case C_LACON: 1161 return cmp(C_SACON, b) 1162 1163 case C_LBRA: 1164 return cmp(C_SBRA, b) 1165 1166 case C_SOREG: 1167 return cmp(C_ZOREG, b) 1168 1169 case C_LOREG: 1170 return cmp(C_SOREG, b) 1171 1172 case C_XOREG: 1173 return cmp(C_REG, b) || cmp(C_ZOREG, b) 1174 1175 // An even/odd register input always matches the regular register types. 1176 case C_REG: 1177 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0) 1178 case C_FREG: 1179 return cmp(C_FREGP, b) 1180 case C_VSREG: 1181 /* Allow any VR argument as a VSR operand. */ 1182 return cmp(C_VSREGP, b) || cmp(C_VREG, b) 1183 1184 case C_ANY: 1185 return true 1186 } 1187 1188 return false 1189 } 1190 1191 // Used when sorting the optab. Sorting is 1192 // done in a way so that the best choice of 1193 // opcode/operand combination is considered first. 1194 func optabLess(i, j int) bool { 1195 p1 := &optab[i] 1196 p2 := &optab[j] 1197 n := int(p1.as) - int(p2.as) 1198 // same opcode 1199 if n != 0 { 1200 return n < 0 1201 } 1202 // Consider those that generate fewer 1203 // instructions first.
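// For example, the 4-byte AADD C_ADDCON entry sorts ahead of the 12-byte
// AADD C_LCON entry, so oplook tries (and prefers) the shorter encoding
// whenever the constant also fits the smaller operand class.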
1204 n = int(p1.size) - int(p2.size) 1205 if n != 0 { 1206 return n < 0 1207 } 1208 // operand order should match 1209 // better choices first 1210 n = int(p1.a1) - int(p2.a1) 1211 if n != 0 { 1212 return n < 0 1213 } 1214 n = int(p1.a2) - int(p2.a2) 1215 if n != 0 { 1216 return n < 0 1217 } 1218 n = int(p1.a3) - int(p2.a3) 1219 if n != 0 { 1220 return n < 0 1221 } 1222 n = int(p1.a4) - int(p2.a4) 1223 if n != 0 { 1224 return n < 0 1225 } 1226 n = int(p1.a5) - int(p2.a5) 1227 if n != 0 { 1228 return n < 0 1229 } 1230 n = int(p1.a6) - int(p2.a6) 1231 if n != 0 { 1232 return n < 0 1233 } 1234 return false 1235 } 1236 1237 // Add an entry to the opcode table for 1238 // the new opcode a, using the same operand combinations 1239 // as the existing (already masked) opcode b0. 1240 func opset(a, b0 obj.As) { 1241 oprange[a&obj.AMask] = oprange[b0] 1242 } 1243 1244 // Determine if the build configuration requires a TOC pointer. 1245 // It is assumed this is always called after buildop. 1246 func NeedTOCpointer(ctxt *obj.Link) bool { 1247 return !pfxEnabled && ctxt.Flag_shared 1248 } 1249 1250 // Build the opcode table. 1251 func buildop(ctxt *obj.Link) { 1252 // Limit PC-relative prefix instruction usage to supported and tested targets. 1253 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux" 1254 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS) 1255 if cfg == buildOpCfg { 1256 // Already initialized to correct OS/cpu; stop now. 1257 // This happens in the cmd/asm tests, 1258 // each of which re-initializes the arch. 1259 return 1260 } 1261 buildOpCfg = cfg 1262 1263 // Configure the optab entries which may generate prefix opcodes. 1264 prefixOptab := make([]Optab, 0, len(prefixableOptab)) 1265 for _, entry := range prefixableOptab { 1266 entry := entry 1267 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 { 1268 // Enable prefix opcode generation and resize. 1269 entry.ispfx = true 1270 entry.size = entry.pfxsize 1271 } 1272 prefixOptab = append(prefixOptab, entry.Optab) 1273 1274 } 1275 1276 for i := 0; i < C_NCLASS; i++ { 1277 for n := 0; n < C_NCLASS; n++ { 1278 if cmp(n, i) { 1279 xcmp[i][n] = true 1280 } 1281 } 1282 } 1283 1284 // Append the generated entries, sort, and fill out oprange. 1285 optab = make([]Optab, 0, len(optabBase)+len(optabGen)+len(prefixOptab)) 1286 optab = append(optab, optabBase...) 1287 optab = append(optab, optabGen...) 1288 optab = append(optab, prefixOptab...) 1289 sort.Slice(optab, optabLess) 1290 1291 for i := range optab { 1292 // Use the legacy assembler function if none provided.
1293 if optab[i].asmout == nil { 1294 optab[i].asmout = asmout 1295 } 1296 } 1297 1298 for i := 0; i < len(optab); { 1299 r := optab[i].as 1300 r0 := r & obj.AMask 1301 start := i 1302 for i < len(optab) && optab[i].as == r { 1303 i++ 1304 } 1305 oprange[r0] = optab[start:i] 1306 1307 switch r { 1308 default: 1309 if !opsetGen(r) { 1310 ctxt.Diag("unknown op in build: %v", r) 1311 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r) 1312 } 1313 1314 case ADCBF: /* unary indexed: op (b+a); op (b) */ 1315 opset(ADCBI, r0) 1316 1317 opset(ADCBST, r0) 1318 opset(ADCBT, r0) 1319 opset(ADCBTST, r0) 1320 opset(ADCBZ, r0) 1321 opset(AICBI, r0) 1322 1323 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */ 1324 opset(ASTWCCC, r0) 1325 opset(ASTHCCC, r0) 1326 opset(ASTBCCC, r0) 1327 1328 case AREM: /* macro */ 1329 opset(AREM, r0) 1330 1331 case AREMU: 1332 opset(AREMU, r0) 1333 1334 case AREMD: 1335 opset(AREMDU, r0) 1336 1337 case AMULLW: 1338 opset(AMULLD, r0) 1339 1340 case ADIVW: /* op Rb[,Ra],Rd */ 1341 opset(AMULHW, r0) 1342 1343 opset(AMULHWCC, r0) 1344 opset(AMULHWU, r0) 1345 opset(AMULHWUCC, r0) 1346 opset(AMULLWCC, r0) 1347 opset(AMULLWVCC, r0) 1348 opset(AMULLWV, r0) 1349 opset(ADIVWCC, r0) 1350 opset(ADIVWV, r0) 1351 opset(ADIVWVCC, r0) 1352 opset(ADIVWU, r0) 1353 opset(ADIVWUCC, r0) 1354 opset(ADIVWUV, r0) 1355 opset(ADIVWUVCC, r0) 1356 opset(AMODUD, r0) 1357 opset(AMODUW, r0) 1358 opset(AMODSD, r0) 1359 opset(AMODSW, r0) 1360 opset(AADDCC, r0) 1361 opset(AADDCV, r0) 1362 opset(AADDCVCC, r0) 1363 opset(AADDV, r0) 1364 opset(AADDVCC, r0) 1365 opset(AADDE, r0) 1366 opset(AADDECC, r0) 1367 opset(AADDEV, r0) 1368 opset(AADDEVCC, r0) 1369 opset(AMULHD, r0) 1370 opset(AMULHDCC, r0) 1371 opset(AMULHDU, r0) 1372 opset(AMULHDUCC, r0) 1373 opset(AMULLDCC, r0) 1374 opset(AMULLDVCC, r0) 1375 opset(AMULLDV, r0) 1376 opset(ADIVD, r0) 1377 opset(ADIVDCC, r0) 1378 opset(ADIVDE, r0) 1379 opset(ADIVDEU, r0) 1380 opset(ADIVDECC, r0) 1381 opset(ADIVDEUCC, r0) 1382 opset(ADIVDVCC, r0) 1383 opset(ADIVDV, r0) 1384 opset(ADIVDU, r0) 1385 opset(ADIVDUV, r0) 1386 opset(ADIVDUVCC, r0) 1387 opset(ADIVDUCC, r0) 1388 1389 case ACRAND: 1390 opset(ACRANDN, r0) 1391 opset(ACREQV, r0) 1392 opset(ACRNAND, r0) 1393 opset(ACRNOR, r0) 1394 opset(ACROR, r0) 1395 opset(ACRORN, r0) 1396 opset(ACRXOR, r0) 1397 1398 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */ 1399 opset(APOPCNTW, r0) 1400 opset(APOPCNTB, r0) 1401 opset(ACNTTZW, r0) 1402 opset(ACNTTZWCC, r0) 1403 opset(ACNTTZD, r0) 1404 opset(ACNTTZDCC, r0) 1405 1406 case ACOPY: /* copy, paste. 
*/ 1407 opset(APASTECC, r0) 1408 1409 case AMADDHD: /* maddhd, maddhdu, maddld */ 1410 opset(AMADDHDU, r0) 1411 opset(AMADDLD, r0) 1412 1413 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */ 1414 opset(AMOVH, r0) 1415 opset(AMOVHZ, r0) 1416 1417 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */ 1418 opset(AMOVHU, r0) 1419 1420 opset(AMOVHZU, r0) 1421 opset(AMOVWU, r0) 1422 opset(AMOVWZU, r0) 1423 opset(AMOVDU, r0) 1424 opset(AMOVMW, r0) 1425 1426 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */ 1427 opset(ALVEHX, r0) 1428 opset(ALVEWX, r0) 1429 opset(ALVX, r0) 1430 opset(ALVXL, r0) 1431 opset(ALVSL, r0) 1432 opset(ALVSR, r0) 1433 1434 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */ 1435 opset(ASTVEHX, r0) 1436 opset(ASTVEWX, r0) 1437 opset(ASTVX, r0) 1438 opset(ASTVXL, r0) 1439 1440 case AVAND: /* vand, vandc, vnand */ 1441 opset(AVAND, r0) 1442 opset(AVANDC, r0) 1443 opset(AVNAND, r0) 1444 1445 case AVMRGOW: /* vmrgew, vmrgow */ 1446 opset(AVMRGEW, r0) 1447 1448 case AVOR: /* vor, vorc, vxor, vnor, veqv */ 1449 opset(AVOR, r0) 1450 opset(AVORC, r0) 1451 opset(AVXOR, r0) 1452 opset(AVNOR, r0) 1453 opset(AVEQV, r0) 1454 1455 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */ 1456 opset(AVADDUBM, r0) 1457 opset(AVADDUHM, r0) 1458 opset(AVADDUWM, r0) 1459 opset(AVADDUDM, r0) 1460 opset(AVADDUQM, r0) 1461 1462 case AVADDCU: /* vaddcuq, vaddcuw */ 1463 opset(AVADDCUQ, r0) 1464 opset(AVADDCUW, r0) 1465 1466 case AVADDUS: /* vaddubs, vadduhs, vadduws */ 1467 opset(AVADDUBS, r0) 1468 opset(AVADDUHS, r0) 1469 opset(AVADDUWS, r0) 1470 1471 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */ 1472 opset(AVADDSBS, r0) 1473 opset(AVADDSHS, r0) 1474 opset(AVADDSWS, r0) 1475 1476 case AVADDE: /* vaddeuqm, vaddecuq */ 1477 opset(AVADDEUQM, r0) 1478 opset(AVADDECUQ, r0) 1479 1480 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */ 1481 opset(AVSUBUBM, r0) 1482 opset(AVSUBUHM, r0) 1483 opset(AVSUBUWM, r0) 1484 opset(AVSUBUDM, r0) 1485 opset(AVSUBUQM, r0) 1486 1487 case AVSUBCU: /* vsubcuq, vsubcuw */ 1488 opset(AVSUBCUQ, r0) 1489 opset(AVSUBCUW, r0) 1490 1491 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */ 1492 opset(AVSUBUBS, r0) 1493 opset(AVSUBUHS, r0) 1494 opset(AVSUBUWS, r0) 1495 1496 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */ 1497 opset(AVSUBSBS, r0) 1498 opset(AVSUBSHS, r0) 1499 opset(AVSUBSWS, r0) 1500 1501 case AVSUBE: /* vsubeuqm, vsubecuq */ 1502 opset(AVSUBEUQM, r0) 1503 opset(AVSUBECUQ, r0) 1504 1505 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */ 1506 opset(AVMULOSB, r0) 1507 opset(AVMULEUB, r0) 1508 opset(AVMULOUB, r0) 1509 opset(AVMULESH, r0) 1510 opset(AVMULOSH, r0) 1511 opset(AVMULEUH, r0) 1512 opset(AVMULOUH, r0) 1513 opset(AVMULESW, r0) 1514 opset(AVMULOSW, r0) 1515 opset(AVMULEUW, r0) 1516 opset(AVMULOUW, r0) 1517 opset(AVMULUWM, r0) 1518 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */ 1519 opset(AVPMSUMB, r0) 1520 opset(AVPMSUMH, r0) 1521 opset(AVPMSUMW, r0) 1522 opset(AVPMSUMD, r0) 1523 1524 case AVR: /* vrlb, vrlh, vrlw, vrld */ 1525 opset(AVRLB, r0) 1526 opset(AVRLH, r0) 1527 opset(AVRLW, r0) 1528 opset(AVRLD, r0) 1529 1530 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */ 1531 opset(AVSLB, r0) 1532 opset(AVSLH, r0) 1533 opset(AVSLW, r0) 1534 opset(AVSL, r0) 1535 opset(AVSLO, r0) 1536 opset(AVSRB, r0) 1537 opset(AVSRH, r0) 1538 opset(AVSRW, r0) 1539 opset(AVSR, r0) 1540 opset(AVSRO, r0) 1541 
opset(AVSLD, r0) 1542 opset(AVSRD, r0) 1543 1544 case AVSA: /* vsrab, vsrah, vsraw, vsrad */ 1545 opset(AVSRAB, r0) 1546 opset(AVSRAH, r0) 1547 opset(AVSRAW, r0) 1548 opset(AVSRAD, r0) 1549 1550 case AVSOI: /* vsldoi */ 1551 opset(AVSLDOI, r0) 1552 1553 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */ 1554 opset(AVCLZB, r0) 1555 opset(AVCLZH, r0) 1556 opset(AVCLZW, r0) 1557 opset(AVCLZD, r0) 1558 1559 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */ 1560 opset(AVPOPCNTB, r0) 1561 opset(AVPOPCNTH, r0) 1562 opset(AVPOPCNTW, r0) 1563 opset(AVPOPCNTD, r0) 1564 1565 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */ 1566 opset(AVCMPEQUB, r0) 1567 opset(AVCMPEQUBCC, r0) 1568 opset(AVCMPEQUH, r0) 1569 opset(AVCMPEQUHCC, r0) 1570 opset(AVCMPEQUW, r0) 1571 opset(AVCMPEQUWCC, r0) 1572 opset(AVCMPEQUD, r0) 1573 opset(AVCMPEQUDCC, r0) 1574 1575 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */ 1576 opset(AVCMPGTUB, r0) 1577 opset(AVCMPGTUBCC, r0) 1578 opset(AVCMPGTUH, r0) 1579 opset(AVCMPGTUHCC, r0) 1580 opset(AVCMPGTUW, r0) 1581 opset(AVCMPGTUWCC, r0) 1582 opset(AVCMPGTUD, r0) 1583 opset(AVCMPGTUDCC, r0) 1584 opset(AVCMPGTSB, r0) 1585 opset(AVCMPGTSBCC, r0) 1586 opset(AVCMPGTSH, r0) 1587 opset(AVCMPGTSHCC, r0) 1588 opset(AVCMPGTSW, r0) 1589 opset(AVCMPGTSWCC, r0) 1590 opset(AVCMPGTSD, r0) 1591 opset(AVCMPGTSDCC, r0) 1592 1593 case AVCMPNEZB: /* vcmpnezb[.] */ 1594 opset(AVCMPNEZBCC, r0) 1595 opset(AVCMPNEB, r0) 1596 opset(AVCMPNEBCC, r0) 1597 opset(AVCMPNEH, r0) 1598 opset(AVCMPNEHCC, r0) 1599 opset(AVCMPNEW, r0) 1600 opset(AVCMPNEWCC, r0) 1601 1602 case AVPERM: /* vperm */ 1603 opset(AVPERMXOR, r0) 1604 opset(AVPERMR, r0) 1605 1606 case AVBPERMQ: /* vbpermq, vbpermd */ 1607 opset(AVBPERMD, r0) 1608 1609 case AVSEL: /* vsel */ 1610 opset(AVSEL, r0) 1611 1612 case AVSPLTB: /* vspltb, vsplth, vspltw */ 1613 opset(AVSPLTH, r0) 1614 opset(AVSPLTW, r0) 1615 1616 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */ 1617 opset(AVSPLTISH, r0) 1618 opset(AVSPLTISW, r0) 1619 1620 case AVCIPH: /* vcipher, vcipherlast */ 1621 opset(AVCIPHER, r0) 1622 opset(AVCIPHERLAST, r0) 1623 1624 case AVNCIPH: /* vncipher, vncipherlast */ 1625 opset(AVNCIPHER, r0) 1626 opset(AVNCIPHERLAST, r0) 1627 1628 case AVSBOX: /* vsbox */ 1629 opset(AVSBOX, r0) 1630 1631 case AVSHASIGMA: /* vshasigmaw, vshasigmad */ 1632 opset(AVSHASIGMAW, r0) 1633 opset(AVSHASIGMAD, r0) 1634 1635 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */ 1636 opset(ALXVDSX, r0) 1637 opset(ALXVW4X, r0) 1638 opset(ALXVH8X, r0) 1639 opset(ALXVB16X, r0) 1640 1641 case ALXV: /* lxv */ 1642 opset(ALXV, r0) 1643 1644 case ALXVL: /* lxvl, lxvll, lxvx */ 1645 opset(ALXVLL, r0) 1646 opset(ALXVX, r0) 1647 1648 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */ 1649 opset(ASTXVW4X, r0) 1650 opset(ASTXVH8X, r0) 1651 opset(ASTXVB16X, r0) 1652 1653 case ASTXV: /* stxv */ 1654 opset(ASTXV, r0) 1655 1656 case ASTXVL: /* stxvl, stxvll, stvx */ 1657 opset(ASTXVLL, r0) 1658 opset(ASTXVX, r0) 1659 1660 case ALXSDX: /* lxsdx */ 1661 opset(ALXSDX, r0) 1662 1663 case ASTXSDX: /* stxsdx */ 1664 opset(ASTXSDX, r0) 1665 1666 case ALXSIWAX: /* lxsiwax, lxsiwzx */ 1667 opset(ALXSIWZX, r0) 1668 1669 case ASTXSIWX: /* stxsiwx */ 1670 opset(ASTXSIWX, r0) 1671 1672 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */ 1673 opset(AMFFPRD, r0) 1674 opset(AMFVRD, r0) 1675 opset(AMFVSRWZ, r0) 1676 opset(AMFVSRLD, r0) 1677 1678 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), 
mtvsrdd, mtvsrws */ 1679 opset(AMTFPRD, r0) 1680 opset(AMTVRD, r0) 1681 opset(AMTVSRWA, r0) 1682 opset(AMTVSRWZ, r0) 1683 opset(AMTVSRWS, r0) 1684 1685 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */ 1686 opset(AXXLANDC, r0) 1687 opset(AXXLEQV, r0) 1688 opset(AXXLNAND, r0) 1689 1690 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */ 1691 opset(AXXLORC, r0) 1692 opset(AXXLNOR, r0) 1693 opset(AXXLORQ, r0) 1694 opset(AXXLXOR, r0) 1695 1696 case AXXSEL: /* xxsel */ 1697 opset(AXXSEL, r0) 1698 1699 case AXXMRGHW: /* xxmrghw, xxmrglw */ 1700 opset(AXXMRGLW, r0) 1701 1702 case AXXSPLTW: /* xxspltw */ 1703 opset(AXXSPLTW, r0) 1704 1705 case AXXSPLTIB: /* xxspltib */ 1706 opset(AXXSPLTIB, r0) 1707 1708 case AXXPERM: /* xxpermdi */ 1709 opset(AXXPERM, r0) 1710 1711 case AXXSLDWI: /* xxsldwi */ 1712 opset(AXXPERMDI, r0) 1713 opset(AXXSLDWI, r0) 1714 1715 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */ 1716 opset(AXXBRD, r0) 1717 opset(AXXBRW, r0) 1718 opset(AXXBRH, r0) 1719 1720 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */ 1721 opset(AXSCVSPDP, r0) 1722 opset(AXSCVDPSPN, r0) 1723 opset(AXSCVSPDPN, r0) 1724 1725 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */ 1726 opset(AXVCVSPDP, r0) 1727 1728 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */ 1729 opset(AXSCVDPSXWS, r0) 1730 opset(AXSCVDPUXDS, r0) 1731 opset(AXSCVDPUXWS, r0) 1732 1733 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */ 1734 opset(AXSCVUXDDP, r0) 1735 opset(AXSCVSXDSP, r0) 1736 opset(AXSCVUXDSP, r0) 1737 1738 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */ 1739 opset(AXVCVDPSXDS, r0) 1740 opset(AXVCVDPSXWS, r0) 1741 opset(AXVCVDPUXDS, r0) 1742 opset(AXVCVDPUXWS, r0) 1743 opset(AXVCVSPSXDS, r0) 1744 opset(AXVCVSPSXWS, r0) 1745 opset(AXVCVSPUXDS, r0) 1746 opset(AXVCVSPUXWS, r0) 1747 1748 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */ 1749 opset(AXVCVSXWDP, r0) 1750 opset(AXVCVUXDDP, r0) 1751 opset(AXVCVUXWDP, r0) 1752 opset(AXVCVSXDSP, r0) 1753 opset(AXVCVSXWSP, r0) 1754 opset(AXVCVUXDSP, r0) 1755 opset(AXVCVUXWSP, r0) 1756 1757 case AAND: /* logical op Rb,Rs,Ra; no literal */ 1758 opset(AANDN, r0) 1759 opset(AANDNCC, r0) 1760 opset(AEQV, r0) 1761 opset(AEQVCC, r0) 1762 opset(ANAND, r0) 1763 opset(ANANDCC, r0) 1764 opset(ANOR, r0) 1765 opset(ANORCC, r0) 1766 opset(AORCC, r0) 1767 opset(AORN, r0) 1768 opset(AORNCC, r0) 1769 opset(AXORCC, r0) 1770 1771 case AADDME: /* op Ra, Rd */ 1772 opset(AADDMECC, r0) 1773 1774 opset(AADDMEV, r0) 1775 opset(AADDMEVCC, r0) 1776 opset(AADDZE, r0) 1777 opset(AADDZECC, r0) 1778 opset(AADDZEV, r0) 1779 opset(AADDZEVCC, r0) 1780 opset(ASUBME, r0) 1781 opset(ASUBMECC, r0) 1782 opset(ASUBMEV, r0) 1783 opset(ASUBMEVCC, r0) 1784 opset(ASUBZE, r0) 1785 opset(ASUBZECC, r0) 1786 opset(ASUBZEV, r0) 1787 opset(ASUBZEVCC, r0) 1788 1789 case AADDC: 1790 opset(AADDCCC, r0) 1791 1792 case ABEQ: 1793 opset(ABGE, r0) 1794 opset(ABGT, r0) 1795 opset(ABLE, r0) 1796 opset(ABLT, r0) 1797 opset(ABNE, r0) 1798 opset(ABVC, r0) 1799 opset(ABVS, r0) 1800 1801 case ABR: 1802 opset(ABL, r0) 1803 1804 case ABC: 1805 opset(ABCL, r0) 1806 1807 case ABDNZ: 1808 opset(ABDZ, r0) 1809 1810 case AEXTSB: /* op Rs, Ra */ 1811 opset(AEXTSBCC, r0) 1812 1813 opset(AEXTSH, r0) 1814 opset(AEXTSHCC, r0) 1815 opset(ACNTLZW, r0) 1816 opset(ACNTLZWCC, r0) 1817 opset(ACNTLZD, r0) 1818 opset(AEXTSW, r0) 1819 opset(AEXTSWCC, r0) 1820 opset(ACNTLZDCC, r0) 1821 1822 case AFABS: 
/* fop [s,]d */ 1823 opset(AFABSCC, r0) 1824 1825 opset(AFNABS, r0) 1826 opset(AFNABSCC, r0) 1827 opset(AFNEG, r0) 1828 opset(AFNEGCC, r0) 1829 opset(AFRSP, r0) 1830 opset(AFRSPCC, r0) 1831 opset(AFCTIW, r0) 1832 opset(AFCTIWCC, r0) 1833 opset(AFCTIWZ, r0) 1834 opset(AFCTIWZCC, r0) 1835 opset(AFCTID, r0) 1836 opset(AFCTIDCC, r0) 1837 opset(AFCTIDZ, r0) 1838 opset(AFCTIDZCC, r0) 1839 opset(AFCFID, r0) 1840 opset(AFCFIDCC, r0) 1841 opset(AFCFIDU, r0) 1842 opset(AFCFIDUCC, r0) 1843 opset(AFCFIDS, r0) 1844 opset(AFCFIDSCC, r0) 1845 opset(AFRES, r0) 1846 opset(AFRESCC, r0) 1847 opset(AFRIM, r0) 1848 opset(AFRIMCC, r0) 1849 opset(AFRIP, r0) 1850 opset(AFRIPCC, r0) 1851 opset(AFRIZ, r0) 1852 opset(AFRIZCC, r0) 1853 opset(AFRIN, r0) 1854 opset(AFRINCC, r0) 1855 opset(AFRSQRTE, r0) 1856 opset(AFRSQRTECC, r0) 1857 opset(AFSQRT, r0) 1858 opset(AFSQRTCC, r0) 1859 opset(AFSQRTS, r0) 1860 opset(AFSQRTSCC, r0) 1861 1862 case AFADD: 1863 opset(AFADDS, r0) 1864 opset(AFADDCC, r0) 1865 opset(AFADDSCC, r0) 1866 opset(AFCPSGN, r0) 1867 opset(AFCPSGNCC, r0) 1868 opset(AFDIV, r0) 1869 opset(AFDIVS, r0) 1870 opset(AFDIVCC, r0) 1871 opset(AFDIVSCC, r0) 1872 opset(AFSUB, r0) 1873 opset(AFSUBS, r0) 1874 opset(AFSUBCC, r0) 1875 opset(AFSUBSCC, r0) 1876 1877 case AFMADD: 1878 opset(AFMADDCC, r0) 1879 opset(AFMADDS, r0) 1880 opset(AFMADDSCC, r0) 1881 opset(AFMSUB, r0) 1882 opset(AFMSUBCC, r0) 1883 opset(AFMSUBS, r0) 1884 opset(AFMSUBSCC, r0) 1885 opset(AFNMADD, r0) 1886 opset(AFNMADDCC, r0) 1887 opset(AFNMADDS, r0) 1888 opset(AFNMADDSCC, r0) 1889 opset(AFNMSUB, r0) 1890 opset(AFNMSUBCC, r0) 1891 opset(AFNMSUBS, r0) 1892 opset(AFNMSUBSCC, r0) 1893 opset(AFSEL, r0) 1894 opset(AFSELCC, r0) 1895 1896 case AFMUL: 1897 opset(AFMULS, r0) 1898 opset(AFMULCC, r0) 1899 opset(AFMULSCC, r0) 1900 1901 case AFCMPO: 1902 opset(AFCMPU, r0) 1903 1904 case AMTFSB0: 1905 opset(AMTFSB0CC, r0) 1906 opset(AMTFSB1, r0) 1907 opset(AMTFSB1CC, r0) 1908 1909 case ANEG: /* op [Ra,] Rd */ 1910 opset(ANEGCC, r0) 1911 1912 opset(ANEGV, r0) 1913 opset(ANEGVCC, r0) 1914 1915 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */ 1916 opset(AXOR, r0) 1917 1918 case AORIS: /* oris/xoris $uimm,Rs,Ra */ 1919 opset(AXORIS, r0) 1920 1921 case ASLW: 1922 opset(ASLWCC, r0) 1923 opset(ASRW, r0) 1924 opset(ASRWCC, r0) 1925 opset(AROTLW, r0) 1926 1927 case ASLD: 1928 opset(ASLDCC, r0) 1929 opset(ASRD, r0) 1930 opset(ASRDCC, r0) 1931 opset(AROTL, r0) 1932 1933 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ 1934 opset(ASRAWCC, r0) 1935 1936 case AEXTSWSLI: 1937 opset(AEXTSWSLICC, r0) 1938 1939 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ 1940 opset(ASRADCC, r0) 1941 1942 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */ 1943 opset(ASUB, r0) 1944 1945 opset(ASUBCC, r0) 1946 opset(ASUBV, r0) 1947 opset(ASUBVCC, r0) 1948 opset(ASUBCCC, r0) 1949 opset(ASUBCV, r0) 1950 opset(ASUBCVCC, r0) 1951 opset(ASUBE, r0) 1952 opset(ASUBECC, r0) 1953 opset(ASUBEV, r0) 1954 opset(ASUBEVCC, r0) 1955 1956 case ASYNC: 1957 opset(AISYNC, r0) 1958 opset(ALWSYNC, r0) 1959 opset(APTESYNC, r0) 1960 opset(ATLBSYNC, r0) 1961 1962 case ARLWNM: 1963 opset(ARLWNMCC, r0) 1964 opset(ARLWMI, r0) 1965 opset(ARLWMICC, r0) 1966 1967 case ARLDMI: 1968 opset(ARLDMICC, r0) 1969 opset(ARLDIMI, r0) 1970 opset(ARLDIMICC, r0) 1971 1972 case ARLDC: 1973 opset(ARLDCCC, r0) 1974 1975 case ARLDCL: 1976 opset(ARLDCR, r0) 1977 opset(ARLDCLCC, r0) 1978 opset(ARLDCRCC, r0) 1979 1980 case ARLDICL: 1981 opset(ARLDICLCC, r0) 1982 opset(ARLDICR, r0) 1983 opset(ARLDICRCC, r0) 1984 opset(ARLDIC, r0) 1985 opset(ARLDICCC, 
r0) 1986 opset(ACLRLSLDI, r0) 1987 1988 case AFMOVD: 1989 opset(AFMOVDCC, r0) 1990 opset(AFMOVDU, r0) 1991 opset(AFMOVS, r0) 1992 opset(AFMOVSU, r0) 1993 1994 case ALDAR: 1995 opset(ALBAR, r0) 1996 opset(ALHAR, r0) 1997 opset(ALWAR, r0) 1998 1999 case ASYSCALL: /* just the op; flow of control */ 2000 opset(ARFI, r0) 2001 2002 opset(ARFCI, r0) 2003 opset(ARFID, r0) 2004 opset(AHRFID, r0) 2005 2006 case AMOVHBR: 2007 opset(AMOVWBR, r0) 2008 opset(AMOVDBR, r0) 2009 2010 case ASLBMFEE: 2011 opset(ASLBMFEV, r0) 2012 2013 case ATW: 2014 opset(ATD, r0) 2015 2016 case ATLBIE: 2017 opset(ASLBIE, r0) 2018 opset(ATLBIEL, r0) 2019 2020 case AEIEIO: 2021 opset(ASLBIA, r0) 2022 2023 case ACMP: 2024 opset(ACMPW, r0) 2025 2026 case ACMPU: 2027 opset(ACMPWU, r0) 2028 2029 case ACMPB: 2030 opset(ACMPB, r0) 2031 2032 case AFTDIV: 2033 opset(AFTDIV, r0) 2034 2035 case AFTSQRT: 2036 opset(AFTSQRT, r0) 2037 2038 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */ 2039 opset(AMOVWZ, r0) /* Same as above, but zero extended */ 2040 2041 case AVCLZLSBB: 2042 opset(AVCTZLSBB, r0) 2043 2044 case AADD, 2045 AADDIS, 2046 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */ 2047 AANDISCC, 2048 AFMOVSX, 2049 AFMOVSZ, 2050 ALSW, 2051 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */ 2052 AMOVB, /* macro: move byte with sign extension */ 2053 AMOVBU, /* macro: move byte with sign extension & update */ 2054 AMOVFL, 2055 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */ 2056 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */ 2057 ASTSW, 2058 ASLBMTE, 2059 AWORD, 2060 ADWORD, 2061 ADARN, 2062 AVMSUMUDM, 2063 AADDEX, 2064 ACMPEQB, 2065 ACLRLSLWI, 2066 AMTVSRDD, 2067 APNOP, 2068 AISEL, 2069 ASETB, 2070 obj.ANOP, 2071 obj.ATEXT, 2072 obj.AUNDEF, 2073 obj.AFUNCDATA, 2074 obj.APCALIGN, 2075 obj.APCDATA, 2076 obj.ADUFFZERO, 2077 obj.ADUFFCOPY: 2078 break 2079 } 2080 } 2081 } 2082 2083 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 { 2084 return o<<26 | xo<<1 | oe<<11 2085 } 2086 2087 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 { 2088 return o<<26 | xo<<2 | oe<<11 2089 } 2090 2091 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 { 2092 return o<<26 | xo<<2 | oe<<16 2093 } 2094 2095 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 { 2096 return o<<26 | xo<<3 | oe<<11 2097 } 2098 2099 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 { 2100 return o<<26 | xo<<4 | oe<<11 2101 } 2102 2103 func OPDQ(o uint32, xo uint32, oe uint32) uint32 { 2104 return o<<26 | xo | oe<<4 2105 } 2106 2107 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2108 return o<<26 | xo | oe<<11 | rc&1 2109 } 2110 2111 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2112 return o<<26 | xo | oe<<11 | (rc&1)<<10 2113 } 2114 2115 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2116 return o<<26 | xo<<1 | oe<<10 | rc&1 2117 } 2118 2119 func OPCC(o uint32, xo uint32, rc uint32) uint32 { 2120 return OPVCC(o, xo, 0, rc) 2121 } 2122 2123 /* Generate MD-form opcode */ 2124 func OPMD(o, xo, rc uint32) uint32 { 2125 return o<<26 | xo<<2 | rc&1 2126 } 2127 2128 /* the order is dest, a/s, b/imm for both arithmetic and logical operations. 
*/ 2129 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 { 2130 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 2131 } 2132 2133 /* VX-form 2-register operands, r/none/r */ 2134 func AOP_RR(op uint32, d uint32, a uint32) uint32 { 2135 return op | (d&31)<<21 | (a&31)<<11 2136 } 2137 2138 /* VA-form 4-register operands */ 2139 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 2140 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6 2141 } 2142 2143 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 { 2144 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF 2145 } 2146 2147 /* VX-form 2-register + UIM operands */ 2148 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 { 2149 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11 2150 } 2151 2152 /* VX-form 2-register + ST + SIX operands */ 2153 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 { 2154 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11 2155 } 2156 2157 /* VA-form 3-register + SHB operands */ 2158 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 { 2159 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6 2160 } 2161 2162 /* VX-form 1-register + SIM operands */ 2163 func AOP_IR(op uint32, d uint32, simm uint32) uint32 { 2164 return op | (d&31)<<21 | (simm&31)<<16 2165 } 2166 2167 /* XX1-form 3-register operands, 1 VSR operand */ 2168 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 { 2169 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5 2170 } 2171 2172 /* XX2-form 3-register operands, 2 VSR operands */ 2173 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 { 2174 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5 2175 } 2176 2177 /* XX3-form 3 VSR operands */ 2178 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 { 2179 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 2180 } 2181 2182 /* XX3-form 3 VSR operands + immediate */ 2183 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 { 2184 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 2185 } 2186 2187 /* XX4-form, 4 VSR operands */ 2188 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 { 2189 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 2190 } 2191 2192 /* DQ-form, VSR register, register + offset operands */ 2193 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 { 2194 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */ 2195 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */ 2196 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */ 2197 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */ 2198 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */ 2199 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. 
*/ 2200 dq := b >> 4 2201 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2 2202 } 2203 2204 /* Z23-form, 3-register operands + CY field */ 2205 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 2206 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9 2207 } 2208 2209 /* X-form, 3-register operands + EH field */ 2210 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 2211 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1) 2212 } 2213 2214 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 { 2215 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11 2216 } 2217 2218 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 { 2219 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF 2220 } 2221 2222 func OP_BR(op uint32, li uint32, aa uint32) uint32 { 2223 return op | li&0x03FFFFFC | aa<<1 2224 } 2225 2226 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 { 2227 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1 2228 } 2229 2230 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 { 2231 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 2232 } 2233 2234 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 { 2235 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1 2236 } 2237 2238 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 { 2239 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 2240 } 2241 2242 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 { 2243 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6 2244 } 2245 2246 /* MD-form 2-register, 2 6-bit immediate operands */ 2247 func AOP_MD(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 { 2248 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5 2249 } 2250 2251 /* MDS-form 3-register, 1 6-bit immediate operands. rsh argument is a register. 
*/ 2252 func AOP_MDS(op, to, from, rsh, m uint32) uint32 { 2253 return AOP_MD(op, to, from, rsh&31, m) 2254 } 2255 2256 func AOP_PFX_00_8LS(r, ie uint32) uint32 { 2257 return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF) 2258 } 2259 func AOP_PFX_10_MLS(r, ie uint32) uint32 { 2260 return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF) 2261 } 2262 2263 const ( 2264 /* each rhs is OPVCC(_, _, _, _) */ 2265 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0 2266 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0 2267 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0 2268 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0 2269 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0 2270 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0 2271 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0 2272 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0 2273 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0 2274 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0 2275 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0 2276 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0 2277 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0 2278 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0 2279 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0 2280 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0 2281 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0 2282 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0 2283 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0 2284 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0 2285 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0 2286 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0 2287 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0 2288 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0 2289 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0 2290 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0 2291 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0 2292 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0 2293 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0 2294 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0 2295 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0 2296 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0 2297 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0 2298 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0 2299 OP_EXTSWSLI = 31<<26 | 445<<2 2300 OP_SETB = 31<<26 | 128<<1 2301 ) 2302 2303 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) { 2304 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32)) 2305 } 2306 2307 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) { 2308 switch a { 2309 case AMOVH: 2310 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0) 2311 case AMOVW: 2312 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0) 2313 case AMOVD: 2314 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0) 2315 case AMOVBZ, AMOVB: 2316 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0) 2317 case AMOVHZ: 2318 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0) 2319 case AMOVWZ: 2320 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0) 2321 case AFMOVS: 2322 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0) 2323 case AFMOVD: 2324 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0) 2325 } 2326 log.Fatalf("Error no pfxload for %v\n", a) 2327 return 0, 0 2328 } 2329 2330 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) { 2331 switch a { 2332 case AMOVD: 2333 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0) 2334 case AMOVBZ, AMOVB: 2335 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0) 2336 case AMOVHZ, AMOVH: 2337 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0) 2338 case AMOVWZ, AMOVW: 2339 return 
AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0) 2340 case AFMOVS: 2341 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0) 2342 case AFMOVD: 2343 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0) 2344 } 2345 log.Fatalf("Error no pfxstore for %v\n", a) 2346 return 0, 0 2347 } 2348 2349 func oclass(a *obj.Addr) int { 2350 return int(a.Class) - 1 2351 } 2352 2353 const ( 2354 D_FORM = iota 2355 DS_FORM 2356 ) 2357 2358 // This function determines when a non-indexed load or store is D or 2359 // DS form for use in finding the size of the offset field in the instruction. 2360 // The size is needed when setting the offset value in the instruction 2361 // and when generating relocation for that field. 2362 // DS form instructions include: ld, ldu, lwa, std, stdu. All other 2363 // loads and stores with an offset field are D form. This function should 2364 // only be called with the same opcodes as are handled by opstore and opload. 2365 func (c *ctxt9) opform(insn uint32) int { 2366 switch insn { 2367 default: 2368 c.ctxt.Diag("bad insn in loadform: %x", insn) 2369 case OPVCC(58, 0, 0, 0), // ld 2370 OPVCC(58, 0, 0, 1), // ldu 2371 OPVCC(58, 0, 0, 0) | 1<<1, // lwa 2372 OPVCC(62, 0, 0, 0), // std 2373 OPVCC(62, 0, 0, 1): //stdu 2374 return DS_FORM 2375 case OP_ADDI, // add 2376 OPVCC(32, 0, 0, 0), // lwz 2377 OPVCC(33, 0, 0, 0), // lwzu 2378 OPVCC(34, 0, 0, 0), // lbz 2379 OPVCC(35, 0, 0, 0), // lbzu 2380 OPVCC(40, 0, 0, 0), // lhz 2381 OPVCC(41, 0, 0, 0), // lhzu 2382 OPVCC(42, 0, 0, 0), // lha 2383 OPVCC(43, 0, 0, 0), // lhau 2384 OPVCC(46, 0, 0, 0), // lmw 2385 OPVCC(48, 0, 0, 0), // lfs 2386 OPVCC(49, 0, 0, 0), // lfsu 2387 OPVCC(50, 0, 0, 0), // lfd 2388 OPVCC(51, 0, 0, 0), // lfdu 2389 OPVCC(36, 0, 0, 0), // stw 2390 OPVCC(37, 0, 0, 0), // stwu 2391 OPVCC(38, 0, 0, 0), // stb 2392 OPVCC(39, 0, 0, 0), // stbu 2393 OPVCC(44, 0, 0, 0), // sth 2394 OPVCC(45, 0, 0, 0), // sthu 2395 OPVCC(47, 0, 0, 0), // stmw 2396 OPVCC(52, 0, 0, 0), // stfs 2397 OPVCC(53, 0, 0, 0), // stfsu 2398 OPVCC(54, 0, 0, 0), // stfd 2399 OPVCC(55, 0, 0, 0): // stfdu 2400 return D_FORM 2401 } 2402 return 0 2403 } 2404 2405 // Encode instructions and create relocation for accessing s+d according to the 2406 // instruction op with source or destination (as appropriate) register reg. 2407 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) { 2408 if c.ctxt.Headtype == objabi.Haix { 2409 // Every symbol access must be made via a TOC anchor. 2410 c.ctxt.Diag("symbolAccess called for %s", s.Name) 2411 } 2412 var base uint32 2413 form := c.opform(op) 2414 if c.ctxt.Flag_shared { 2415 base = REG_R2 2416 } else { 2417 base = REG_R0 2418 } 2419 // If reg can be reused when computing the symbol address, 2420 // use it instead of REGTMP. 
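	// Illustrative sketch (not part of the original source): for a non-shared build
	// (base = REG_R0) and a D/DS-form op, the two paths below assemble roughly as
	//
	//	reuse=false:  addis r31, r0, 0     // REGTMP (R31) holds the high half
	//	              op    reg, 0(r31)
	//	reuse=true:   addis reg, r0, 0     // reg doubles as the scratch register
	//	              op    reg, 0(reg)
	//
	// with the 8-byte relocation emitted afterwards patching the high/low halves of
	// sym+d into both instructions.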
2421 if !reuse { 2422 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) 2423 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) 2424 } else { 2425 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0) 2426 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0) 2427 } 2428 rel = obj.Addrel(c.cursym) 2429 rel.Off = int32(c.pc) 2430 rel.Siz = 8 2431 rel.Sym = s 2432 rel.Add = d 2433 if c.ctxt.Flag_shared { 2434 switch form { 2435 case D_FORM: 2436 rel.Type = objabi.R_ADDRPOWER_TOCREL 2437 case DS_FORM: 2438 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS 2439 } 2440 2441 } else { 2442 switch form { 2443 case D_FORM: 2444 rel.Type = objabi.R_ADDRPOWER 2445 case DS_FORM: 2446 rel.Type = objabi.R_ADDRPOWER_DS 2447 } 2448 } 2449 return 2450 } 2451 2452 // Determine the mask begin (mb) and mask end (me) values 2453 // for a valid word rotate mask. A valid 32 bit mask is of 2454 // the form 1+0*1+ or 0*1+0*. 2455 // 2456 // Note, me is inclusive. 2457 func decodeMask32(mask uint32) (mb, me uint32, valid bool) { 2458 mb = uint32(bits.LeadingZeros32(mask)) 2459 me = uint32(32 - bits.TrailingZeros32(mask)) 2460 mbn := uint32(bits.LeadingZeros32(^mask)) 2461 men := uint32(32 - bits.TrailingZeros32(^mask)) 2462 // Check for a wrapping mask (e.g bits at 0 and 31) 2463 if mb == 0 && me == 32 { 2464 // swap the inverted values 2465 mb, me = men, mbn 2466 } 2467 2468 // Validate mask is of the binary form 1+0*1+ or 0*1+0* 2469 // Isolate rightmost 1 (if none 0) and add. 2470 v := mask 2471 vp := (v & -v) + v 2472 // Likewise, check for the wrapping (inverted) case. 2473 vn := ^v 2474 vpn := (vn & -vn) + vn 2475 return mb, (me - 1) & 31, (v&vp == 0 || vn&vpn == 0) && v != 0 2476 } 2477 2478 // Decompose a mask of contiguous bits into a begin (mb) and 2479 // end (me) value. 2480 // 2481 // 64b mask values cannot wrap on any valid PPC64 instruction. 2482 // Only masks of the form 0*1+0* are valid. 2483 // 2484 // Note, me is inclusive. 2485 func decodeMask64(mask int64) (mb, me uint32, valid bool) { 2486 m := uint64(mask) 2487 mb = uint32(bits.LeadingZeros64(m)) 2488 me = uint32(64 - bits.TrailingZeros64(m)) 2489 valid = ((m&-m)+m)&m == 0 && m != 0 2490 return mb, (me - 1) & 63, valid 2491 } 2492 2493 // Load the lower 16 bits of a constant into register r. 2494 func loadl16(r int, d int64) uint32 { 2495 v := uint16(d) 2496 if v == 0 { 2497 // Avoid generating "ori r,r,0", r != 0. Instead, generate the architectually preferred nop. 2498 // For example, "ori r31,r31,0" is a special execution serializing nop on Power10 called "exser". 2499 return NOP 2500 } 2501 return LOP_IRR(OP_ORI, uint32(r), uint32(r), uint32(v)) 2502 } 2503 2504 // Load the upper 16 bits of a 32b constant into register r. 
2505 func loadu32(r int, d int64) uint32 { 2506 v := int32(d >> 16) 2507 if isuint32(uint64(d)) { 2508 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v)) 2509 } 2510 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v)) 2511 } 2512 2513 func high16adjusted(d int32) uint16 { 2514 if d&0x8000 != 0 { 2515 return uint16((d >> 16) + 1) 2516 } 2517 return uint16(d >> 16) 2518 } 2519 2520 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { 2521 o1 := uint32(0) 2522 o2 := uint32(0) 2523 o3 := uint32(0) 2524 o4 := uint32(0) 2525 o5 := uint32(0) 2526 2527 //print("%v => case %d\n", p, o->type); 2528 switch o.type_ { 2529 default: 2530 c.ctxt.Diag("unknown type %d", o.type_) 2531 prasm(p) 2532 2533 case 0: /* pseudo ops */ 2534 break 2535 2536 case 2: /* int/cr/fp op Rb,[Ra],Rd */ 2537 r := int(p.Reg) 2538 2539 if r == 0 { 2540 r = int(p.To.Reg) 2541 } 2542 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2543 2544 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */ 2545 d := c.vregoff(&p.From) 2546 2547 v := int32(d) 2548 r := int(p.From.Reg) 2549 // p.From may be a constant value or an offset(reg) type argument. 2550 isZeroOrR0 := r&0x1f == 0 2551 2552 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) { 2553 c.ctxt.Diag("literal operation on R0\n%v", p) 2554 } 2555 a := OP_ADDI 2556 if int64(int16(d)) != d { 2557 // Operand is 16 bit value with sign bit set 2558 if o.a1 == C_ANDCON { 2559 // Needs unsigned 16 bit so use ORI 2560 if isZeroOrR0 { 2561 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v)) 2562 break 2563 } 2564 // With ADDCON, needs signed 16 bit value, fall through to use ADDI 2565 } else if o.a1 != C_ADDCON { 2566 log.Fatalf("invalid handling of %v", p) 2567 } 2568 } 2569 2570 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v)) 2571 2572 case 4: /* add/mul $scon,[r1],r2 */ 2573 v := c.regoff(&p.From) 2574 2575 r := int(p.Reg) 2576 if r == 0 { 2577 r = int(p.To.Reg) 2578 } 2579 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 { 2580 c.ctxt.Diag("literal operation on R0\n%v", p) 2581 } 2582 if int32(int16(v)) != v { 2583 log.Fatalf("mishandled instruction %v", p) 2584 } 2585 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2586 2587 case 5: /* syscall */ 2588 o1 = c.oprrr(p.As) 2589 2590 case 6: /* logical op Rb,[Rs,]Ra; no literal */ 2591 r := int(p.Reg) 2592 2593 if r == 0 { 2594 r = int(p.To.Reg) 2595 } 2596 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM. 2597 switch p.As { 2598 case AROTL: 2599 o1 = AOP_MD(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0)) 2600 case AROTLW: 2601 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31) 2602 default: 2603 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 { 2604 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred 2605 // hardware no-op. This happens because $0 matches C_REG before C_ZCON. 
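				// For example (illustrative): "OR $0, R0, R0" takes this path and is
				// emitted as LOP_IRR(OP_ORI, 0, 0, 0) = 0x60000000, i.e. "ori r0,r0,0",
				// the preferred hardware nop encoding.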
2606 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0) 2607 } else { 2608 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2609 } 2610 } 2611 2612 case 7: /* mov r, soreg ==> stw o(r) */ 2613 r := int(p.To.Reg) 2614 v := c.regoff(&p.To) 2615 if int32(int16(v)) != v { 2616 log.Fatalf("mishandled instruction %v", p) 2617 } 2618 // Offsets in DS form stores must be a multiple of 4 2619 inst := c.opstore(p.As) 2620 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 2621 log.Fatalf("invalid offset for DS form load/store %v", p) 2622 } 2623 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v)) 2624 2625 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */ 2626 r := int(p.From.Reg) 2627 v := c.regoff(&p.From) 2628 if int32(int16(v)) != v { 2629 log.Fatalf("mishandled instruction %v", p) 2630 } 2631 // Offsets in DS form loads must be a multiple of 4 2632 inst := c.opload(p.As) 2633 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 2634 log.Fatalf("invalid offset for DS form load/store %v", p) 2635 } 2636 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v)) 2637 2638 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). 2639 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 2640 2641 case 9: /* RLDC Ra, $sh, $mb, Rb */ 2642 sh := uint32(p.RestArgs[0].Addr.Offset) & 0x3F 2643 mb := uint32(p.RestArgs[1].Addr.Offset) & 0x3F 2644 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (uint32(sh) & 0x1F)) 2645 o1 |= (sh & 0x20) >> 4 // sh[5] is placed in bit 1. 2646 o1 |= (mb & 0x1F) << 6 // mb[0:4] is placed in bits 6-10. 2647 o1 |= (mb & 0x20) // mb[5] is placed in bit 5 2648 2649 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */ 2650 r := int(p.Reg) 2651 2652 if r == 0 { 2653 r = int(p.To.Reg) 2654 } 2655 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r)) 2656 2657 case 11: /* br/bl lbra */ 2658 v := int32(0) 2659 2660 if p.To.Target() != nil { 2661 v = int32(p.To.Target().Pc - p.Pc) 2662 if v&03 != 0 { 2663 c.ctxt.Diag("odd branch target address\n%v", p) 2664 v &^= 03 2665 } 2666 2667 if v < -(1<<25) || v >= 1<<24 { 2668 c.ctxt.Diag("branch too far\n%v", p) 2669 } 2670 } 2671 2672 o1 = OP_BR(c.opirr(p.As), uint32(v), 0) 2673 if p.To.Sym != nil { 2674 rel := obj.Addrel(c.cursym) 2675 rel.Off = int32(c.pc) 2676 rel.Siz = 4 2677 rel.Sym = p.To.Sym 2678 v += int32(p.To.Offset) 2679 if v&03 != 0 { 2680 c.ctxt.Diag("odd branch target address\n%v", p) 2681 v &^= 03 2682 } 2683 2684 rel.Add = int64(v) 2685 rel.Type = objabi.R_CALLPOWER 2686 } 2687 o2 = NOP // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking 2688 2689 case 13: /* mov[bhwd]{z,} r,r */ 2690 // This needs to handle "MOV* $0, Rx". This shows up because $0 also 2691 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON 2692 // TODO: fix the above behavior and cleanup this exception. 
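		// Illustrative example (not in the original source): "MOVD $0, R5" reaches this
		// case and is emitted as LOP_IRR(OP_ADDI, REGZERO, 5, 0), i.e. "addi r5,r0,0"
		// ("li r5,0"), since RA=0 in addi reads as the literal value 0.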
2693 if p.From.Type == obj.TYPE_CONST { 2694 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0) 2695 break 2696 } 2697 if p.To.Type == obj.TYPE_CONST { 2698 c.ctxt.Diag("cannot move into constant 0\n%v", p) 2699 } 2700 2701 switch p.As { 2702 case AMOVB: 2703 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2704 case AMOVBZ: 2705 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31) 2706 case AMOVH: 2707 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2708 case AMOVHZ: 2709 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31) 2710 case AMOVW: 2711 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2712 case AMOVWZ: 2713 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */ 2714 case AMOVD: 2715 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg)) 2716 default: 2717 c.ctxt.Diag("internal: bad register move/truncation\n%v", p) 2718 } 2719 2720 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */ 2721 r := uint32(p.Reg) 2722 2723 if r == 0 { 2724 r = uint32(p.To.Reg) 2725 } 2726 d := c.vregoff(p.GetFrom3()) 2727 switch p.As { 2728 2729 // These opcodes expect a mask operand that has to be converted into the 2730 // appropriate operand. The way these were defined, not all valid masks are possible. 2731 // Left here for compatibility in case they were used or generated. 2732 case ARLDCL, ARLDCLCC: 2733 mb, me, valid := decodeMask64(d) 2734 if me != 63 || !valid { 2735 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p) 2736 } 2737 o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), mb) 2738 2739 case ARLDCR, ARLDCRCC: 2740 mb, me, valid := decodeMask64(d) 2741 if mb != 0 || !valid { 2742 c.ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p) 2743 } 2744 o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), me) 2745 2746 // These opcodes use a shift count like the ppc64 asm, no mask conversion done 2747 case ARLDICR, ARLDICRCC: 2748 me := uint32(d) 2749 sh := c.regoff(&p.From) 2750 if me < 0 || me > 63 || sh > 63 { 2751 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p) 2752 } 2753 o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), me) 2754 2755 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC: 2756 mb := uint32(d) 2757 sh := c.regoff(&p.From) 2758 if mb < 0 || mb > 63 || sh > 63 { 2759 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p) 2760 } 2761 o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), mb) 2762 2763 case ACLRLSLDI: 2764 // This is an extended mnemonic defined in the ISA section C.8.1 2765 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n 2766 // It maps onto RLDIC so is directly generated here based on the operands from 2767 // the clrlsldi. 
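			// A worked example of the mapping above: CLRLSLDI with b=16, n=4 (clear the
			// leftmost 16 bits, then shift left by 4) is generated below as RLDIC with
			// sh=n=4 and mb=b-n=12, i.e. "clrlsldi rA,rS,16,4" == "rldic rA,rS,4,12".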
2768 n := int32(d) 2769 b := c.regoff(&p.From) 2770 if n > b || b > 63 { 2771 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p) 2772 } 2773 o1 = AOP_MD(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n)) 2774 2775 default: 2776 c.ctxt.Diag("unexpected op in rldc case\n%v", p) 2777 } 2778 2779 case 17, /* bc bo,bi,lbra (same for now) */ 2780 16: /* bc bo,bi,sbra */ 2781 a := 0 2782 2783 r := int(p.Reg) 2784 2785 if p.From.Type == obj.TYPE_CONST { 2786 a = int(c.regoff(&p.From)) 2787 } else if p.From.Type == obj.TYPE_REG { 2788 if r != 0 { 2789 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r) 2790 } 2791 // BI values for the CR 2792 switch p.From.Reg { 2793 case REG_CR0: 2794 r = BI_CR0 2795 case REG_CR1: 2796 r = BI_CR1 2797 case REG_CR2: 2798 r = BI_CR2 2799 case REG_CR3: 2800 r = BI_CR3 2801 case REG_CR4: 2802 r = BI_CR4 2803 case REG_CR5: 2804 r = BI_CR5 2805 case REG_CR6: 2806 r = BI_CR6 2807 case REG_CR7: 2808 r = BI_CR7 2809 default: 2810 c.ctxt.Diag("unrecognized register: expecting CR\n") 2811 } 2812 } 2813 v := int32(0) 2814 if p.To.Target() != nil { 2815 v = int32(p.To.Target().Pc - p.Pc) 2816 } 2817 if v&03 != 0 { 2818 c.ctxt.Diag("odd branch target address\n%v", p) 2819 v &^= 03 2820 } 2821 2822 if v < -(1<<16) || v >= 1<<15 { 2823 c.ctxt.Diag("branch too far\n%v", p) 2824 } 2825 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0) 2826 2827 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */ 2828 var v int32 2829 var bh uint32 = 0 2830 if p.As == ABC || p.As == ABCL { 2831 v = c.regoff(&p.From) & 31 2832 } else { 2833 v = 20 /* unconditional */ 2834 } 2835 r := int(p.Reg) 2836 if r == 0 { 2837 r = 0 2838 } 2839 switch oclass(&p.To) { 2840 case C_CTR: 2841 o1 = OPVCC(19, 528, 0, 0) 2842 2843 case C_LR: 2844 o1 = OPVCC(19, 16, 0, 0) 2845 2846 default: 2847 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p) 2848 v = 0 2849 } 2850 2851 // Insert optional branch hint for bclr[l]/bcctr[l] 2852 if p.From3Type() != obj.TYPE_NONE { 2853 bh = uint32(p.GetFrom3().Offset) 2854 if bh == 2 || bh > 3 { 2855 log.Fatalf("BH must be 0,1,3 for %v", p) 2856 } 2857 o1 |= bh << 11 2858 } 2859 2860 if p.As == ABL || p.As == ABCL { 2861 o1 |= 1 2862 } 2863 o1 = OP_BCR(o1, uint32(v), uint32(r)) 2864 2865 case 19: /* mov $lcon,r ==> cau+or */ 2866 d := c.vregoff(&p.From) 2867 if o.ispfx { 2868 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d) 2869 } else { 2870 o1 = loadu32(int(p.To.Reg), d) 2871 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d))) 2872 } 2873 2874 case 20: /* add $ucon,,r | addis $addcon,r,r */ 2875 v := c.regoff(&p.From) 2876 2877 r := int(p.Reg) 2878 if r == 0 { 2879 r = int(p.To.Reg) 2880 } 2881 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2882 2883 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add, add $s34con,r1 ==> addis+ori+slw+ori+add */ 2884 if p.To.Reg == REGTMP || p.Reg == REGTMP { 2885 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2886 } 2887 d := c.vregoff(&p.From) 2888 r := int(p.Reg) 2889 if r == 0 { 2890 r = int(p.To.Reg) 2891 } 2892 if p.From.Sym != nil { 2893 c.ctxt.Diag("%v is not supported", p) 2894 } 2895 if o.ispfx { 2896 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d) 2897 } else if o.size == 8 { 2898 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) // tmp = uint16(d) 2899 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = tmp + from 2900 } else if o.size == 12 { 2901 // Note, o1 is ADDIS if d 
is negative, ORIS otherwise. 2902 o1 = loadu32(REGTMP, d) // tmp = d & 0xFFFF0000 2903 o2 = loadl16(REGTMP, d) // tmp |= d & 0xFFFF 2904 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = from + tmp 2905 } else { 2906 // For backwards compatibility with GOPPC64 < 10, generate 34b constants in register. 2907 o1 = LOP_IRR(OP_ADDIS, REGZERO, REGTMP, uint32(d>>32)) // tmp = sign_extend((d>>32)&0xFFFF0000) 2908 o2 = loadl16(REGTMP, int64(d>>16)) // tmp |= (d>>16)&0xFFFF 2909 o3 = AOP_MD(OP_RLDICR, REGTMP, REGTMP, 16, 63-16) // tmp <<= 16 2910 o4 = loadl16(REGTMP, int64(uint16(d))) // tmp |= d&0xFFFF 2911 o5 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2912 } 2913 2914 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */ 2915 if p.To.Reg == REGTMP || p.Reg == REGTMP { 2916 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2917 } 2918 d := c.vregoff(&p.From) 2919 r := int(p.Reg) 2920 if r == 0 { 2921 r = int(p.To.Reg) 2922 } 2923 2924 // With ADDCON operand, generate 2 instructions using ADDI for signed value, 2925 // with LCON operand generate 3 instructions. 2926 if o.size == 8 { 2927 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d))) 2928 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2929 } else { 2930 o1 = loadu32(REGTMP, d) 2931 o2 = loadl16(REGTMP, d) 2932 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2933 } 2934 if p.From.Sym != nil { 2935 c.ctxt.Diag("%v is not supported", p) 2936 } 2937 2938 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */ 2939 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0)) 2940 // This is needed for -0. 2941 if o.size == 8 { 2942 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg)) 2943 } 2944 2945 case 25: 2946 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */ 2947 v := c.regoff(&p.From) 2948 2949 if v < 0 { 2950 v = 0 2951 } else if v > 63 { 2952 v = 63 2953 } 2954 r := int(p.Reg) 2955 if r == 0 { 2956 r = int(p.To.Reg) 2957 } 2958 var a int 2959 op := uint32(0) 2960 switch p.As { 2961 case ASLD, ASLDCC: 2962 a = int(63 - v) 2963 op = OP_RLDICR 2964 2965 case ASRD, ASRDCC: 2966 a = int(v) 2967 v = 64 - v 2968 op = OP_RLDICL 2969 case AROTL: 2970 a = int(0) 2971 op = OP_RLDICL 2972 case AEXTSWSLI, AEXTSWSLICC: 2973 a = int(v) 2974 default: 2975 c.ctxt.Diag("unexpected op in sldi case\n%v", p) 2976 a = 0 2977 o1 = 0 2978 } 2979 2980 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC { 2981 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v)) 2982 2983 } else { 2984 o1 = AOP_MD(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a)) 2985 } 2986 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC { 2987 o1 |= 1 // Set the condition code bit 2988 } 2989 2990 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */ 2991 v := c.vregoff(&p.From) 2992 r := int(p.From.Reg) 2993 var rel *obj.Reloc 2994 2995 switch p.From.Name { 2996 case obj.NAME_EXTERN, obj.NAME_STATIC: 2997 // Load a 32 bit constant, or relocation depending on if a symbol is attached 2998 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true) 2999 default: 3000 // Add a 32 bit offset to a register. 
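			// Illustrative example: for v = 0x12348000, high16adjusted returns 0x1235
			// (the +1 compensates for addi sign-extending the low half), so the pair
			// below computes (0x1235<<16) + (-0x8000) = 0x12348000 as intended.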
3001 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v)))) 3002 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) 3003 } 3004 3005 if o.ispfx { 3006 if rel == nil { 3007 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v) 3008 } else { 3009 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0) 3010 rel.Type = objabi.R_ADDRPOWER_PCREL34 3011 } 3012 } 3013 3014 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */ 3015 v := c.regoff(p.GetFrom3()) 3016 3017 r := int(p.From.Reg) 3018 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 3019 3020 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */ 3021 if p.To.Reg == REGTMP || p.From.Reg == REGTMP { 3022 c.ctxt.Diag("can't synthesize large constant\n%v", p) 3023 } 3024 v := c.vregoff(p.GetFrom3()) 3025 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16) 3026 o2 = loadl16(REGTMP, v) 3027 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP) 3028 if p.From.Sym != nil { 3029 c.ctxt.Diag("%v is not supported", p) 3030 } 3031 3032 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */ 3033 sh := uint32(c.regoff(&p.From)) 3034 d := c.vregoff(p.GetFrom3()) 3035 mb, me, valid := decodeMask64(d) 3036 var a uint32 3037 switch p.As { 3038 case ARLDC, ARLDCCC: 3039 a = mb 3040 if me != (63-sh) || !valid { 3041 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p) 3042 } 3043 3044 case ARLDCL, ARLDCLCC: 3045 a = mb 3046 if mb != 63 || !valid { 3047 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p) 3048 } 3049 3050 case ARLDCR, ARLDCRCC: 3051 a = me 3052 if mb != 0 || !valid { 3053 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p) 3054 } 3055 3056 default: 3057 c.ctxt.Diag("unexpected op in rldic case\n%v", p) 3058 } 3059 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, a) 3060 3061 case 30: /* rldimi $sh,s,$mask,a */ 3062 sh := uint32(c.regoff(&p.From)) 3063 d := c.vregoff(p.GetFrom3()) 3064 3065 // Original opcodes had mask operands which had to be converted to a shift count as expected by 3066 // the ppc64 asm. 3067 switch p.As { 3068 case ARLDMI, ARLDMICC: 3069 mb, me, valid := decodeMask64(d) 3070 if me != (63-sh) || !valid { 3071 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), me, sh, p) 3072 } 3073 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb) 3074 3075 // Opcodes with shift count operands. 3076 case ARLDIMI, ARLDIMICC: 3077 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, uint32(d)) 3078 } 3079 3080 case 31: /* dword */ 3081 d := c.vregoff(&p.From) 3082 3083 if c.ctxt.Arch.ByteOrder == binary.BigEndian { 3084 o1 = uint32(d >> 32) 3085 o2 = uint32(d) 3086 } else { 3087 o1 = uint32(d) 3088 o2 = uint32(d >> 32) 3089 } 3090 3091 if p.From.Sym != nil { 3092 rel := obj.Addrel(c.cursym) 3093 rel.Off = int32(c.pc) 3094 rel.Siz = 8 3095 rel.Sym = p.From.Sym 3096 rel.Add = p.From.Offset 3097 rel.Type = objabi.R_ADDR 3098 o2 = 0 3099 o1 = o2 3100 } 3101 3102 case 32: /* fmul frc,fra,frd */ 3103 r := int(p.Reg) 3104 3105 if r == 0 { 3106 r = int(p.To.Reg) 3107 } 3108 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6 3109 3110 case 33: /* fabs [frb,]frd; fmr. 
frb,frd */ 3111 r := int(p.From.Reg) 3112 3113 if oclass(&p.From) == C_NONE { 3114 r = int(p.To.Reg) 3115 } 3116 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r)) 3117 3118 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */ 3119 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6 3120 3121 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */ 3122 v := c.regoff(&p.To) 3123 r := int(p.To.Reg) 3124 // Offsets in DS form stores must be a multiple of 4 3125 if o.ispfx { 3126 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS) 3127 o1 |= uint32((v >> 16) & 0x3FFFF) 3128 o2 |= uint32(v & 0xFFFF) 3129 } else { 3130 inst := c.opstore(p.As) 3131 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3132 log.Fatalf("invalid offset for DS form load/store %v", p) 3133 } 3134 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) 3135 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v)) 3136 } 3137 3138 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */ 3139 v := c.regoff(&p.From) 3140 r := int(p.From.Reg) 3141 3142 if o.ispfx { 3143 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS) 3144 o1 |= uint32((v >> 16) & 0x3FFFF) 3145 o2 |= uint32(v & 0xFFFF) 3146 } else { 3147 if o.a6 == C_REG { 3148 // Reuse the base register when loading a GPR (C_REG) to avoid 3149 // using REGTMP (R31) when possible. 3150 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v))) 3151 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) 3152 } else { 3153 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v))) 3154 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v)) 3155 } 3156 } 3157 3158 // Sign extend MOVB if needed 3159 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3160 3161 case 40: /* word */ 3162 o1 = uint32(c.regoff(&p.From)) 3163 3164 case 41: /* stswi */ 3165 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 { 3166 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As) 3167 } 3168 3169 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 3170 3171 case 42: /* lswi */ 3172 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 { 3173 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As) 3174 } 3175 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 3176 3177 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */ 3178 /* TH field for dcbt/dcbtst: */ 3179 /* 0 = Block access - program will soon access EA. */ 3180 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */ 3181 /* 16 = Block access - program will soon make a transient access to EA. */ 3182 /* 17 = Block access - program will not access EA for a long time. */ 3183 3184 /* L field for dcbf: */ 3185 /* 0 = invalidates the block containing EA in all processors. */ 3186 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */ 3187 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). 
*/ 3188 if p.To.Type == obj.TYPE_NONE { 3189 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg)) 3190 } else { 3191 th := c.regoff(&p.To) 3192 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg)) 3193 } 3194 3195 case 44: /* indexed store */ 3196 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) 3197 3198 case 45: /* indexed load */ 3199 switch p.As { 3200 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */ 3201 /* The EH field can be used as a lock acquire/release hint as follows: */ 3202 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */ 3203 /* 1 = Exclusive Access (lock acquire and release) */ 3204 case ALBAR, ALHAR, ALWAR, ALDAR: 3205 if p.From3Type() != obj.TYPE_NONE { 3206 eh := int(c.regoff(p.GetFrom3())) 3207 if eh > 1 { 3208 c.ctxt.Diag("illegal EH field\n%v", p) 3209 } 3210 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh)) 3211 } else { 3212 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3213 } 3214 default: 3215 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3216 } 3217 case 46: /* plain op */ 3218 o1 = c.oprrr(p.As) 3219 3220 case 47: /* op Ra, Rd; also op [Ra,] Rd */ 3221 r := int(p.From.Reg) 3222 3223 if r == 0 { 3224 r = int(p.To.Reg) 3225 } 3226 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) 3227 3228 case 48: /* op Rs, Ra */ 3229 r := int(p.From.Reg) 3230 3231 if r == 0 { 3232 r = int(p.To.Reg) 3233 } 3234 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) 3235 3236 case 49: /* op Rb; op $n, Rb */ 3237 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */ 3238 v := c.regoff(&p.From) & 1 3239 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21 3240 } else { 3241 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg)) 3242 } 3243 3244 case 50: /* rem[u] r1[,r2],r3 */ 3245 r := int(p.Reg) 3246 3247 if r == 0 { 3248 r = int(p.To.Reg) 3249 } 3250 v := c.oprrr(p.As) 3251 t := v & (1<<10 | 1) /* OE|Rc */ 3252 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) 3253 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg)) 3254 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) 3255 if p.As == AREMU { 3256 o4 = o3 3257 3258 /* Clear top 32 bits */ 3259 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5 3260 } 3261 3262 case 51: /* remd[u] r1[,r2],r3 */ 3263 r := int(p.Reg) 3264 3265 if r == 0 { 3266 r = int(p.To.Reg) 3267 } 3268 v := c.oprrr(p.As) 3269 t := v & (1<<10 | 1) /* OE|Rc */ 3270 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) 3271 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg)) 3272 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) 3273 /* cases 50,51: removed; can be reused. */ 3274 3275 /* cases 50,51: removed; can be reused. 
*/ 3276 3277 case 52: /* mtfsbNx cr(n) */ 3278 v := c.regoff(&p.From) & 31 3279 3280 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0) 3281 3282 case 53: /* mffsX ,fr1 */ 3283 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0) 3284 3285 case 55: /* op Rb, Rd */ 3286 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg)) 3287 3288 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */ 3289 v := c.regoff(&p.From) 3290 3291 r := int(p.Reg) 3292 if r == 0 { 3293 r = int(p.To.Reg) 3294 } 3295 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31) 3296 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) { 3297 o1 |= 1 << 1 /* mb[5] */ 3298 } 3299 3300 case 57: /* slw $sh,[s,]a -> rlwinm ... */ 3301 v := c.regoff(&p.From) 3302 3303 r := int(p.Reg) 3304 if r == 0 { 3305 r = int(p.To.Reg) 3306 } 3307 3308 /* 3309 * Let user (gs) shoot himself in the foot. 3310 * qc has already complained. 3311 * 3312 if(v < 0 || v > 31) 3313 ctxt->diag("illegal shift %ld\n%v", v, p); 3314 */ 3315 if v < 0 { 3316 v = 0 3317 } else if v > 32 { 3318 v = 32 3319 } 3320 var mask [2]uint8 3321 switch p.As { 3322 case AROTLW: 3323 mask[0], mask[1] = 0, 31 3324 case ASRW, ASRWCC: 3325 mask[0], mask[1] = uint8(v), 31 3326 v = 32 - v 3327 default: 3328 mask[0], mask[1] = 0, uint8(31-v) 3329 } 3330 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1])) 3331 if p.As == ASLWCC || p.As == ASRWCC { 3332 o1 |= 1 // set the condition code 3333 } 3334 3335 case 58: /* logical $andcon,[s],a */ 3336 v := c.regoff(&p.From) 3337 3338 r := int(p.Reg) 3339 if r == 0 { 3340 r = int(p.To.Reg) 3341 } 3342 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 3343 3344 case 60: /* tw to,a,b */ 3345 r := int(c.regoff(&p.From) & 31) 3346 3347 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg)) 3348 3349 case 61: /* tw to,a,$simm */ 3350 r := int(c.regoff(&p.From) & 31) 3351 3352 v := c.regoff(&p.To) 3353 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v)) 3354 3355 case 62: /* clrlslwi $sh,s,$mask,a */ 3356 v := c.regoff(&p.From) 3357 n := c.regoff(p.GetFrom3()) 3358 // This is an extended mnemonic described in the ISA C.8.2 3359 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n 3360 // It maps onto rlwinm which is directly generated here. 3361 if n > v || v >= 32 { 3362 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p) 3363 } 3364 3365 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n)) 3366 3367 case 63: /* rlwimi/rlwnm/rlwinm [$sh,b],s,[$mask or mb,me],a*/ 3368 var mb, me uint32 3369 if len(p.RestArgs) == 1 { // Mask needs decomposed into mb and me. 3370 var valid bool 3371 // Note, optab rules ensure $mask is a 32b constant. 3372 mb, me, valid = decodeMask32(uint32(p.RestArgs[0].Addr.Offset)) 3373 if !valid { 3374 c.ctxt.Diag("cannot generate mask #%x\n%v", uint64(p.RestArgs[0].Addr.Offset), p) 3375 } 3376 } else { // Otherwise, mask is already passed as mb and me in RestArgs. 
3377 mb, me = uint32(p.RestArgs[0].Addr.Offset), uint32(p.RestArgs[1].Addr.Offset) 3378 } 3379 if p.From.Type == obj.TYPE_CONST { 3380 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Offset), mb, me) 3381 } else { 3382 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me) 3383 } 3384 3385 case 64: /* mtfsf fr[, $m] {,fpcsr} */ 3386 var v int32 3387 if p.From3Type() != obj.TYPE_NONE { 3388 v = c.regoff(p.GetFrom3()) & 255 3389 } else { 3390 v = 255 3391 } 3392 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11 3393 3394 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */ 3395 if p.To.Reg == 0 { 3396 c.ctxt.Diag("must specify FPSCR(n)\n%v", p) 3397 } 3398 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12 3399 3400 case 66: /* mov spr,r1; mov r1,spr */ 3401 var r int 3402 var v int32 3403 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 { 3404 r = int(p.From.Reg) 3405 v = int32(p.To.Reg) 3406 o1 = OPVCC(31, 467, 0, 0) /* mtspr */ 3407 } else { 3408 r = int(p.To.Reg) 3409 v = int32(p.From.Reg) 3410 o1 = OPVCC(31, 339, 0, 0) /* mfspr */ 3411 } 3412 3413 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3414 3415 case 67: /* mcrf crfD,crfS */ 3416 if p.From.Reg == REG_CR || p.To.Reg == REG_CR { 3417 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p) 3418 } 3419 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0) 3420 3421 case 68: /* mfcr rD; mfocrf CRM,rD */ 3422 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */ 3423 if p.From.Reg != REG_CR { 3424 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */ 3425 o1 |= 1<<20 | v<<12 /* new form, mfocrf */ 3426 } 3427 3428 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */ 3429 var v uint32 3430 if p.To.Reg == REG_CR { 3431 v = 0xff 3432 } else if p.To.Offset != 0 { // MOVFL gpr, constant 3433 v = uint32(p.To.Offset) 3434 } else { // p.To.Reg == REG_CRx 3435 v = 1 << uint(7-(p.To.Reg&7)) 3436 } 3437 // Use mtocrf form if only one CR field moved. 
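		// Illustrative example (based on the encoding below): moving to CR2 gives
		// v = 1<<(7-2) = 0x20; since only one field is selected, bit 8 of v is set,
		// which lands in instruction bit 20 after the <<12 shift and selects the
		// single-field mtocrf form rather than mtcrf.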
3438 if bits.OnesCount32(v) == 1 { 3439 v |= 1 << 8 3440 } 3441 3442 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12 3443 3444 case 70: /* [f]cmp r,r,cr*/ 3445 var r int 3446 if p.Reg == 0 { 3447 r = 0 3448 } else { 3449 r = (int(p.Reg) & 7) << 2 3450 } 3451 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg)) 3452 3453 case 71: /* cmp[l] r,i,cr*/ 3454 var r int 3455 if p.Reg == 0 { 3456 r = 0 3457 } else { 3458 r = (int(p.Reg) & 7) << 2 3459 } 3460 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff 3461 3462 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */ 3463 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg)) 3464 3465 case 73: /* mcrfs crfD,crfS */ 3466 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg { 3467 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p) 3468 } 3469 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0) 3470 3471 case 77: /* syscall $scon, syscall Rx */ 3472 if p.From.Type == obj.TYPE_CONST { 3473 if p.From.Offset > BIG || p.From.Offset < -BIG { 3474 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p) 3475 } 3476 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset)) 3477 } else if p.From.Type == obj.TYPE_REG { 3478 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg)) 3479 } else { 3480 c.ctxt.Diag("illegal syscall: %v", p) 3481 o1 = 0x7fe00008 // trap always 3482 } 3483 3484 o2 = c.oprrr(p.As) 3485 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0 3486 3487 case 78: /* undef */ 3488 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed 3489 always to be an illegal instruction." */ 3490 3491 /* relocation operations */ 3492 case 74: 3493 var rel *obj.Reloc 3494 v := c.vregoff(&p.To) 3495 // Offsets in DS form stores must be a multiple of 4 3496 inst := c.opstore(p.As) 3497 3498 // Can't reuse base for store instructions. 3499 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false) 3500 3501 // Rewrite as a prefixed store if supported. 3502 if o.ispfx { 3503 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL) 3504 rel.Type = objabi.R_ADDRPOWER_PCREL34 3505 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3506 log.Fatalf("invalid offset for DS form load/store %v", p) 3507 } 3508 3509 case 75: // 32 bit offset symbol loads (got/toc/addr) 3510 var rel *obj.Reloc 3511 v := p.From.Offset 3512 3513 // Offsets in DS form loads must be a multiple of 4 3514 inst := c.opload(p.As) 3515 switch p.From.Name { 3516 case obj.NAME_GOTREF, obj.NAME_TOCREF: 3517 if v != 0 { 3518 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p) 3519 } 3520 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) 3521 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3522 rel = obj.Addrel(c.cursym) 3523 rel.Off = int32(c.pc) 3524 rel.Siz = 8 3525 rel.Sym = p.From.Sym 3526 switch p.From.Name { 3527 case obj.NAME_GOTREF: 3528 rel.Type = objabi.R_ADDRPOWER_GOT 3529 case obj.NAME_TOCREF: 3530 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS 3531 } 3532 default: 3533 reuseBaseReg := o.a6 == C_REG 3534 // Reuse To.Reg as base register if it is a GPR. 3535 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg) 3536 } 3537 3538 // Convert to prefixed forms if supported. 
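// When o.ispfx is set, the addis+load pair built above is replaced by a single
// prefixed load with a 34-bit PC-relative offset, and the relocation is
// rewritten to the matching _PCREL34 type below.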
3539 if o.ispfx { 3540 switch rel.Type { 3541 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS, 3542 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS: 3543 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3544 rel.Type = objabi.R_ADDRPOWER_PCREL34 3545 case objabi.R_POWER_TLS_IE: 3546 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3547 rel.Type = objabi.R_POWER_TLS_IE_PCREL34 3548 case objabi.R_ADDRPOWER_GOT: 3549 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3550 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34 3551 default: 3552 // We've failed to convert a TOC-relative relocation to a PC-relative one. 3553 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type) 3554 } 3555 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3556 log.Fatalf("invalid offset for DS form load/store %v", p) 3557 } 3558 3559 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3560 3561 case 79: 3562 if p.From.Offset != 0 { 3563 c.ctxt.Diag("invalid offset against tls var %v", p) 3564 } 3565 rel := obj.Addrel(c.cursym) 3566 rel.Off = int32(c.pc) 3567 rel.Siz = 8 3568 rel.Sym = p.From.Sym 3569 if !o.ispfx { 3570 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0) 3571 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3572 rel.Type = objabi.R_POWER_TLS_LE 3573 } else { 3574 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0) 3575 rel.Type = objabi.R_POWER_TLS_LE_TPREL34 3576 } 3577 3578 case 80: 3579 if p.From.Offset != 0 { 3580 c.ctxt.Diag("invalid offset against tls var %v", p) 3581 } 3582 rel := obj.Addrel(c.cursym) 3583 rel.Off = int32(c.pc) 3584 rel.Siz = 8 3585 rel.Sym = p.From.Sym 3586 rel.Type = objabi.R_POWER_TLS_IE 3587 if !o.ispfx { 3588 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) 3589 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) 3590 } else { 3591 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3592 rel.Type = objabi.R_POWER_TLS_IE_PCREL34 3593 } 3594 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13) 3595 rel = obj.Addrel(c.cursym) 3596 rel.Off = int32(c.pc) + 8 3597 rel.Siz = 4 3598 rel.Sym = p.From.Sym 3599 rel.Type = objabi.R_POWER_TLS 3600 3601 case 82: /* vector instructions, VX-form and VC-form */ 3602 if p.From.Type == obj.TYPE_REG { 3603 /* reg reg none OR reg reg reg */ 3604 /* 3-register operand order: VRA, VRB, VRT */ 3605 /* 2-register operand order: VRA, VRT */ 3606 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3607 } else if p.From3Type() == obj.TYPE_CONST { 3608 /* imm imm reg reg */ 3609 /* operand order: SIX, VRA, ST, VRT */ 3610 six := int(c.regoff(&p.From)) 3611 st := int(c.regoff(p.GetFrom3())) 3612 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six)) 3613 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 { 3614 /* imm reg reg */ 3615 /* operand order: UIM, VRB, VRT */ 3616 uim := int(c.regoff(&p.From)) 3617 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim)) 3618 } else { 3619 /* imm reg */ 3620 /* operand order: SIM, VRT */ 3621 sim := int(c.regoff(&p.From)) 3622 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim)) 3623 } 3624 3625 case 83: /* vector instructions, VA-form */ 3626 if p.From.Type == obj.TYPE_REG { 3627 /* reg reg reg reg */ 3628 /* 4-register operand order: VRA, VRB, VRC, VRT */ 3629 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) 3630 } else if p.From.Type == obj.TYPE_CONST { 3631 /* imm 
reg reg reg */ 3632 /* operand order: SHB, VRA, VRB, VRT */ 3633 shb := int(c.regoff(&p.From)) 3634 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb)) 3635 } 3636 3637 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc 3638 bc := c.vregoff(&p.From) 3639 if o.a1 == C_CRBIT { 3640 // CR bit is encoded as a register, not a constant. 3641 bc = int64(p.From.Reg) 3642 } 3643 3644 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg 3645 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc)) 3646 3647 case 85: /* vector instructions, VX-form */ 3648 /* reg none reg */ 3649 /* 2-register operand order: VRB, VRT */ 3650 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg)) 3651 3652 case 86: /* VSX indexed store, XX1-form */ 3653 /* reg reg reg */ 3654 /* 3-register operand order: XT, (RB)(RA*1) */ 3655 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) 3656 3657 case 87: /* VSX indexed load, XX1-form */ 3658 /* reg reg reg */ 3659 /* 3-register operand order: (RB)(RA*1), XT */ 3660 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3661 3662 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */ 3663 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3664 3665 case 89: /* VSX instructions, XX2-form */ 3666 /* reg none reg OR reg imm reg */ 3667 /* 2-register operand order: XB, XT or XB, UIM, XT*/ 3668 uim := int(c.regoff(p.GetFrom3())) 3669 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg)) 3670 3671 case 90: /* VSX instructions, XX3-form */ 3672 if p.From3Type() == obj.TYPE_NONE { 3673 /* reg reg reg */ 3674 /* 3-register operand order: XA, XB, XT */ 3675 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3676 } else if p.From3Type() == obj.TYPE_CONST { 3677 /* reg reg reg imm */ 3678 /* operand order: XA, XB, DM, XT */ 3679 dm := int(c.regoff(p.GetFrom3())) 3680 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm)) 3681 } 3682 3683 case 91: /* VSX instructions, XX4-form */ 3684 /* reg reg reg reg */ 3685 /* 3-register operand order: XA, XB, XC, XT */ 3686 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) 3687 3688 case 92: /* X-form instructions, 3-operands */ 3689 if p.To.Type == obj.TYPE_CONST { 3690 /* imm reg reg */ 3691 xf := int32(p.From.Reg) 3692 if REG_F0 <= xf && xf <= REG_F31 { 3693 /* operand order: FRA, FRB, BF */ 3694 bf := int(c.regoff(&p.To)) << 2 3695 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) 3696 } else { 3697 /* operand order: RA, RB, L */ 3698 l := int(c.regoff(&p.To)) 3699 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg)) 3700 } 3701 } else if p.From3Type() == obj.TYPE_CONST { 3702 /* reg reg imm */ 3703 /* operand order: RB, L, RA */ 3704 l := int(c.regoff(p.GetFrom3())) 3705 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg)) 3706 } else if p.To.Type == obj.TYPE_REG { 3707 cr := int32(p.To.Reg) 3708 if REG_CR0 <= cr && cr <= REG_CR7 { 3709 /* cr reg reg */ 3710 /* operand order: RA, RB, BF */ 3711 bf := (int(p.To.Reg) & 7) << 2 3712 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) 3713 } else if p.From.Type == obj.TYPE_CONST { 3714 /* reg imm */ 3715 /* operand order: L, RT */ 3716 l := int(c.regoff(&p.From)) 3717 o1 = 
AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg)) 3718 } else { 3719 switch p.As { 3720 case ACOPY, APASTECC: 3721 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg)) 3722 default: 3723 /* reg reg reg */ 3724 /* operand order: RS, RB, RA */ 3725 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3726 } 3727 } 3728 } 3729 3730 case 93: /* X-form instructions, 2-operands */ 3731 if p.To.Type == obj.TYPE_CONST { 3732 /* imm reg */ 3733 /* operand order: FRB, BF */ 3734 bf := int(c.regoff(&p.To)) << 2 3735 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg)) 3736 } else if p.Reg == 0 { 3737 /* popcnt* r,r, X-form */ 3738 /* operand order: RS, RA */ 3739 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3740 } 3741 3742 case 94: /* Z23-form instructions, 4-operands */ 3743 /* reg reg reg imm */ 3744 /* operand order: RA, RB, CY, RT */ 3745 cy := int(c.regoff(p.GetFrom3())) 3746 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy)) 3747 3748 case 96: /* VSX load, DQ-form */ 3749 /* reg imm reg */ 3750 /* operand order: (RA)(DQ), XT */ 3751 dq := int16(c.regoff(&p.From)) 3752 if (dq & 15) != 0 { 3753 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq) 3754 } 3755 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq)) 3756 3757 case 97: /* VSX store, DQ-form */ 3758 /* reg imm reg */ 3759 /* operand order: XT, (RA)(DQ) */ 3760 dq := int16(c.regoff(&p.To)) 3761 if (dq & 15) != 0 { 3762 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq) 3763 } 3764 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq)) 3765 case 98: /* VSX indexed load or load with length (also left-justified), x-form */ 3766 /* vsreg, reg, reg */ 3767 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3768 case 99: /* VSX store with length (also left-justified) x-form */ 3769 /* reg, reg, vsreg */ 3770 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg)) 3771 case 100: /* VSX X-form XXSPLTIB */ 3772 if p.From.Type == obj.TYPE_CONST { 3773 /* imm reg */ 3774 uim := int(c.regoff(&p.From)) 3775 /* imm reg */ 3776 /* Use AOP_XX1 form with 0 for one of the registers. 
*/ 3777 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim)) 3778 } else { 3779 c.ctxt.Diag("invalid ops for %v", p.As) 3780 } 3781 case 101: 3782 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) 3783 3784 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */ 3785 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3786 3787 case 106: /* MOVD spr, soreg */ 3788 v := int32(p.From.Reg) 3789 o1 = OPVCC(31, 339, 0, 0) /* mfspr */ 3790 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3791 so := c.regoff(&p.To) 3792 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so)) 3793 if so&0x3 != 0 { 3794 log.Fatalf("invalid offset for DS form load/store %v", p) 3795 } 3796 if p.To.Reg == REGTMP { 3797 log.Fatalf("SPR move to memory will clobber R31 %v", p) 3798 } 3799 3800 case 107: /* MOVD soreg, spr */ 3801 v := int32(p.From.Reg) 3802 so := c.regoff(&p.From) 3803 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so)) 3804 o2 = OPVCC(31, 467, 0, 0) /* mtspr */ 3805 v = int32(p.To.Reg) 3806 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3807 if so&0x3 != 0 { 3808 log.Fatalf("invalid offset for DS form load/store %v", p) 3809 } 3810 3811 case 108: /* mov r, xoreg ==> stwx rx,ry */ 3812 r := int(p.To.Reg) 3813 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r)) 3814 3815 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */ 3816 r := int(p.From.Reg) 3817 3818 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r)) 3819 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). 
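// o2 is only emitted for the 8-byte optab entries (the sign-extending MOVB
// forms); for the 4-byte entries it is computed here but dropped when the
// instruction words are written out.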
3820 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3821 3822 case 110: /* SETB creg, rt */ 3823 bfa := uint32(p.From.Reg) << 2 3824 rt := uint32(p.To.Reg) 3825 o1 = LOP_RRR(OP_SETB, bfa, rt, 0) 3826 } 3827 3828 out[0] = o1 3829 out[1] = o2 3830 out[2] = o3 3831 out[3] = o4 3832 out[4] = o5 3833 } 3834 3835 func (c *ctxt9) vregoff(a *obj.Addr) int64 { 3836 c.instoffset = 0 3837 if a != nil { 3838 c.aclass(a) 3839 } 3840 return c.instoffset 3841 } 3842 3843 func (c *ctxt9) regoff(a *obj.Addr) int32 { 3844 return int32(c.vregoff(a)) 3845 } 3846 3847 func (c *ctxt9) oprrr(a obj.As) uint32 { 3848 switch a { 3849 case AADD: 3850 return OPVCC(31, 266, 0, 0) 3851 case AADDCC: 3852 return OPVCC(31, 266, 0, 1) 3853 case AADDV: 3854 return OPVCC(31, 266, 1, 0) 3855 case AADDVCC: 3856 return OPVCC(31, 266, 1, 1) 3857 case AADDC: 3858 return OPVCC(31, 10, 0, 0) 3859 case AADDCCC: 3860 return OPVCC(31, 10, 0, 1) 3861 case AADDCV: 3862 return OPVCC(31, 10, 1, 0) 3863 case AADDCVCC: 3864 return OPVCC(31, 10, 1, 1) 3865 case AADDE: 3866 return OPVCC(31, 138, 0, 0) 3867 case AADDECC: 3868 return OPVCC(31, 138, 0, 1) 3869 case AADDEV: 3870 return OPVCC(31, 138, 1, 0) 3871 case AADDEVCC: 3872 return OPVCC(31, 138, 1, 1) 3873 case AADDME: 3874 return OPVCC(31, 234, 0, 0) 3875 case AADDMECC: 3876 return OPVCC(31, 234, 0, 1) 3877 case AADDMEV: 3878 return OPVCC(31, 234, 1, 0) 3879 case AADDMEVCC: 3880 return OPVCC(31, 234, 1, 1) 3881 case AADDZE: 3882 return OPVCC(31, 202, 0, 0) 3883 case AADDZECC: 3884 return OPVCC(31, 202, 0, 1) 3885 case AADDZEV: 3886 return OPVCC(31, 202, 1, 0) 3887 case AADDZEVCC: 3888 return OPVCC(31, 202, 1, 1) 3889 case AADDEX: 3890 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */ 3891 3892 case AAND: 3893 return OPVCC(31, 28, 0, 0) 3894 case AANDCC: 3895 return OPVCC(31, 28, 0, 1) 3896 case AANDN: 3897 return OPVCC(31, 60, 0, 0) 3898 case AANDNCC: 3899 return OPVCC(31, 60, 0, 1) 3900 3901 case ACMP: 3902 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */ 3903 case ACMPU: 3904 return OPVCC(31, 32, 0, 0) | 1<<21 3905 case ACMPW: 3906 return OPVCC(31, 0, 0, 0) /* L=0 */ 3907 case ACMPWU: 3908 return OPVCC(31, 32, 0, 0) 3909 case ACMPB: 3910 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */ 3911 case ACMPEQB: 3912 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ 3913 3914 case ACNTLZW: 3915 return OPVCC(31, 26, 0, 0) 3916 case ACNTLZWCC: 3917 return OPVCC(31, 26, 0, 1) 3918 case ACNTLZD: 3919 return OPVCC(31, 58, 0, 0) 3920 case ACNTLZDCC: 3921 return OPVCC(31, 58, 0, 1) 3922 3923 case ACRAND: 3924 return OPVCC(19, 257, 0, 0) 3925 case ACRANDN: 3926 return OPVCC(19, 129, 0, 0) 3927 case ACREQV: 3928 return OPVCC(19, 289, 0, 0) 3929 case ACRNAND: 3930 return OPVCC(19, 225, 0, 0) 3931 case ACRNOR: 3932 return OPVCC(19, 33, 0, 0) 3933 case ACROR: 3934 return OPVCC(19, 449, 0, 0) 3935 case ACRORN: 3936 return OPVCC(19, 417, 0, 0) 3937 case ACRXOR: 3938 return OPVCC(19, 193, 0, 0) 3939 3940 case ADCBF: 3941 return OPVCC(31, 86, 0, 0) 3942 case ADCBI: 3943 return OPVCC(31, 470, 0, 0) 3944 case ADCBST: 3945 return OPVCC(31, 54, 0, 0) 3946 case ADCBT: 3947 return OPVCC(31, 278, 0, 0) 3948 case ADCBTST: 3949 return OPVCC(31, 246, 0, 0) 3950 case ADCBZ: 3951 return OPVCC(31, 1014, 0, 0) 3952 3953 case AMODUD: 3954 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */ 3955 case AMODUW: 3956 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */ 3957 case AMODSD: 3958 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */ 3959 case AMODSW: 3960 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */ 3961 3962 case ADIVW, AREM: 
3963 return OPVCC(31, 491, 0, 0) 3964 3965 case ADIVWCC: 3966 return OPVCC(31, 491, 0, 1) 3967 3968 case ADIVWV: 3969 return OPVCC(31, 491, 1, 0) 3970 3971 case ADIVWVCC: 3972 return OPVCC(31, 491, 1, 1) 3973 3974 case ADIVWU, AREMU: 3975 return OPVCC(31, 459, 0, 0) 3976 3977 case ADIVWUCC: 3978 return OPVCC(31, 459, 0, 1) 3979 3980 case ADIVWUV: 3981 return OPVCC(31, 459, 1, 0) 3982 3983 case ADIVWUVCC: 3984 return OPVCC(31, 459, 1, 1) 3985 3986 case ADIVD, AREMD: 3987 return OPVCC(31, 489, 0, 0) 3988 3989 case ADIVDCC: 3990 return OPVCC(31, 489, 0, 1) 3991 3992 case ADIVDE: 3993 return OPVCC(31, 425, 0, 0) 3994 3995 case ADIVDECC: 3996 return OPVCC(31, 425, 0, 1) 3997 3998 case ADIVDEU: 3999 return OPVCC(31, 393, 0, 0) 4000 4001 case ADIVDEUCC: 4002 return OPVCC(31, 393, 0, 1) 4003 4004 case ADIVDV: 4005 return OPVCC(31, 489, 1, 0) 4006 4007 case ADIVDVCC: 4008 return OPVCC(31, 489, 1, 1) 4009 4010 case ADIVDU, AREMDU: 4011 return OPVCC(31, 457, 0, 0) 4012 4013 case ADIVDUCC: 4014 return OPVCC(31, 457, 0, 1) 4015 4016 case ADIVDUV: 4017 return OPVCC(31, 457, 1, 0) 4018 4019 case ADIVDUVCC: 4020 return OPVCC(31, 457, 1, 1) 4021 4022 case AEIEIO: 4023 return OPVCC(31, 854, 0, 0) 4024 4025 case AEQV: 4026 return OPVCC(31, 284, 0, 0) 4027 case AEQVCC: 4028 return OPVCC(31, 284, 0, 1) 4029 4030 case AEXTSB: 4031 return OPVCC(31, 954, 0, 0) 4032 case AEXTSBCC: 4033 return OPVCC(31, 954, 0, 1) 4034 case AEXTSH: 4035 return OPVCC(31, 922, 0, 0) 4036 case AEXTSHCC: 4037 return OPVCC(31, 922, 0, 1) 4038 case AEXTSW: 4039 return OPVCC(31, 986, 0, 0) 4040 case AEXTSWCC: 4041 return OPVCC(31, 986, 0, 1) 4042 4043 case AFABS: 4044 return OPVCC(63, 264, 0, 0) 4045 case AFABSCC: 4046 return OPVCC(63, 264, 0, 1) 4047 case AFADD: 4048 return OPVCC(63, 21, 0, 0) 4049 case AFADDCC: 4050 return OPVCC(63, 21, 0, 1) 4051 case AFADDS: 4052 return OPVCC(59, 21, 0, 0) 4053 case AFADDSCC: 4054 return OPVCC(59, 21, 0, 1) 4055 case AFCMPO: 4056 return OPVCC(63, 32, 0, 0) 4057 case AFCMPU: 4058 return OPVCC(63, 0, 0, 0) 4059 case AFCFID: 4060 return OPVCC(63, 846, 0, 0) 4061 case AFCFIDCC: 4062 return OPVCC(63, 846, 0, 1) 4063 case AFCFIDU: 4064 return OPVCC(63, 974, 0, 0) 4065 case AFCFIDUCC: 4066 return OPVCC(63, 974, 0, 1) 4067 case AFCFIDS: 4068 return OPVCC(59, 846, 0, 0) 4069 case AFCFIDSCC: 4070 return OPVCC(59, 846, 0, 1) 4071 case AFCTIW: 4072 return OPVCC(63, 14, 0, 0) 4073 case AFCTIWCC: 4074 return OPVCC(63, 14, 0, 1) 4075 case AFCTIWZ: 4076 return OPVCC(63, 15, 0, 0) 4077 case AFCTIWZCC: 4078 return OPVCC(63, 15, 0, 1) 4079 case AFCTID: 4080 return OPVCC(63, 814, 0, 0) 4081 case AFCTIDCC: 4082 return OPVCC(63, 814, 0, 1) 4083 case AFCTIDZ: 4084 return OPVCC(63, 815, 0, 0) 4085 case AFCTIDZCC: 4086 return OPVCC(63, 815, 0, 1) 4087 case AFDIV: 4088 return OPVCC(63, 18, 0, 0) 4089 case AFDIVCC: 4090 return OPVCC(63, 18, 0, 1) 4091 case AFDIVS: 4092 return OPVCC(59, 18, 0, 0) 4093 case AFDIVSCC: 4094 return OPVCC(59, 18, 0, 1) 4095 case AFMADD: 4096 return OPVCC(63, 29, 0, 0) 4097 case AFMADDCC: 4098 return OPVCC(63, 29, 0, 1) 4099 case AFMADDS: 4100 return OPVCC(59, 29, 0, 0) 4101 case AFMADDSCC: 4102 return OPVCC(59, 29, 0, 1) 4103 4104 case AFMOVS, AFMOVD: 4105 return OPVCC(63, 72, 0, 0) /* load */ 4106 case AFMOVDCC: 4107 return OPVCC(63, 72, 0, 1) 4108 case AFMSUB: 4109 return OPVCC(63, 28, 0, 0) 4110 case AFMSUBCC: 4111 return OPVCC(63, 28, 0, 1) 4112 case AFMSUBS: 4113 return OPVCC(59, 28, 0, 0) 4114 case AFMSUBSCC: 4115 return OPVCC(59, 28, 0, 1) 4116 case AFMUL: 4117 return OPVCC(63, 25, 0, 0) 4118 
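// Note: the OPVCC(o, xo, oe, rc) calls in this switch build only the bare
// opcode template (primary opcode, extended opcode, and the OE/Rc flag bits);
// asmout ORs the operand register fields in afterwards. For example,
// OPVCC(63, 25, 0, 0) above encodes fmul, and OPVCC(63, 25, 0, 1) just below
// is its record form fmul. (Rc=1).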
case AFMULCC: 4119 return OPVCC(63, 25, 0, 1) 4120 case AFMULS: 4121 return OPVCC(59, 25, 0, 0) 4122 case AFMULSCC: 4123 return OPVCC(59, 25, 0, 1) 4124 case AFNABS: 4125 return OPVCC(63, 136, 0, 0) 4126 case AFNABSCC: 4127 return OPVCC(63, 136, 0, 1) 4128 case AFNEG: 4129 return OPVCC(63, 40, 0, 0) 4130 case AFNEGCC: 4131 return OPVCC(63, 40, 0, 1) 4132 case AFNMADD: 4133 return OPVCC(63, 31, 0, 0) 4134 case AFNMADDCC: 4135 return OPVCC(63, 31, 0, 1) 4136 case AFNMADDS: 4137 return OPVCC(59, 31, 0, 0) 4138 case AFNMADDSCC: 4139 return OPVCC(59, 31, 0, 1) 4140 case AFNMSUB: 4141 return OPVCC(63, 30, 0, 0) 4142 case AFNMSUBCC: 4143 return OPVCC(63, 30, 0, 1) 4144 case AFNMSUBS: 4145 return OPVCC(59, 30, 0, 0) 4146 case AFNMSUBSCC: 4147 return OPVCC(59, 30, 0, 1) 4148 case AFCPSGN: 4149 return OPVCC(63, 8, 0, 0) 4150 case AFCPSGNCC: 4151 return OPVCC(63, 8, 0, 1) 4152 case AFRES: 4153 return OPVCC(59, 24, 0, 0) 4154 case AFRESCC: 4155 return OPVCC(59, 24, 0, 1) 4156 case AFRIM: 4157 return OPVCC(63, 488, 0, 0) 4158 case AFRIMCC: 4159 return OPVCC(63, 488, 0, 1) 4160 case AFRIP: 4161 return OPVCC(63, 456, 0, 0) 4162 case AFRIPCC: 4163 return OPVCC(63, 456, 0, 1) 4164 case AFRIZ: 4165 return OPVCC(63, 424, 0, 0) 4166 case AFRIZCC: 4167 return OPVCC(63, 424, 0, 1) 4168 case AFRIN: 4169 return OPVCC(63, 392, 0, 0) 4170 case AFRINCC: 4171 return OPVCC(63, 392, 0, 1) 4172 case AFRSP: 4173 return OPVCC(63, 12, 0, 0) 4174 case AFRSPCC: 4175 return OPVCC(63, 12, 0, 1) 4176 case AFRSQRTE: 4177 return OPVCC(63, 26, 0, 0) 4178 case AFRSQRTECC: 4179 return OPVCC(63, 26, 0, 1) 4180 case AFSEL: 4181 return OPVCC(63, 23, 0, 0) 4182 case AFSELCC: 4183 return OPVCC(63, 23, 0, 1) 4184 case AFSQRT: 4185 return OPVCC(63, 22, 0, 0) 4186 case AFSQRTCC: 4187 return OPVCC(63, 22, 0, 1) 4188 case AFSQRTS: 4189 return OPVCC(59, 22, 0, 0) 4190 case AFSQRTSCC: 4191 return OPVCC(59, 22, 0, 1) 4192 case AFSUB: 4193 return OPVCC(63, 20, 0, 0) 4194 case AFSUBCC: 4195 return OPVCC(63, 20, 0, 1) 4196 case AFSUBS: 4197 return OPVCC(59, 20, 0, 0) 4198 case AFSUBSCC: 4199 return OPVCC(59, 20, 0, 1) 4200 4201 case AICBI: 4202 return OPVCC(31, 982, 0, 0) 4203 case AISYNC: 4204 return OPVCC(19, 150, 0, 0) 4205 4206 case AMTFSB0: 4207 return OPVCC(63, 70, 0, 0) 4208 case AMTFSB0CC: 4209 return OPVCC(63, 70, 0, 1) 4210 case AMTFSB1: 4211 return OPVCC(63, 38, 0, 0) 4212 case AMTFSB1CC: 4213 return OPVCC(63, 38, 0, 1) 4214 4215 case AMULHW: 4216 return OPVCC(31, 75, 0, 0) 4217 case AMULHWCC: 4218 return OPVCC(31, 75, 0, 1) 4219 case AMULHWU: 4220 return OPVCC(31, 11, 0, 0) 4221 case AMULHWUCC: 4222 return OPVCC(31, 11, 0, 1) 4223 case AMULLW: 4224 return OPVCC(31, 235, 0, 0) 4225 case AMULLWCC: 4226 return OPVCC(31, 235, 0, 1) 4227 case AMULLWV: 4228 return OPVCC(31, 235, 1, 0) 4229 case AMULLWVCC: 4230 return OPVCC(31, 235, 1, 1) 4231 4232 case AMULHD: 4233 return OPVCC(31, 73, 0, 0) 4234 case AMULHDCC: 4235 return OPVCC(31, 73, 0, 1) 4236 case AMULHDU: 4237 return OPVCC(31, 9, 0, 0) 4238 case AMULHDUCC: 4239 return OPVCC(31, 9, 0, 1) 4240 case AMULLD: 4241 return OPVCC(31, 233, 0, 0) 4242 case AMULLDCC: 4243 return OPVCC(31, 233, 0, 1) 4244 case AMULLDV: 4245 return OPVCC(31, 233, 1, 0) 4246 case AMULLDVCC: 4247 return OPVCC(31, 233, 1, 1) 4248 4249 case ANAND: 4250 return OPVCC(31, 476, 0, 0) 4251 case ANANDCC: 4252 return OPVCC(31, 476, 0, 1) 4253 case ANEG: 4254 return OPVCC(31, 104, 0, 0) 4255 case ANEGCC: 4256 return OPVCC(31, 104, 0, 1) 4257 case ANEGV: 4258 return OPVCC(31, 104, 1, 0) 4259 case ANEGVCC: 4260 return OPVCC(31, 
104, 1, 1) 4261 case ANOR: 4262 return OPVCC(31, 124, 0, 0) 4263 case ANORCC: 4264 return OPVCC(31, 124, 0, 1) 4265 case AOR: 4266 return OPVCC(31, 444, 0, 0) 4267 case AORCC: 4268 return OPVCC(31, 444, 0, 1) 4269 case AORN: 4270 return OPVCC(31, 412, 0, 0) 4271 case AORNCC: 4272 return OPVCC(31, 412, 0, 1) 4273 4274 case APOPCNTD: 4275 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */ 4276 case APOPCNTW: 4277 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */ 4278 case APOPCNTB: 4279 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */ 4280 case ACNTTZW: 4281 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */ 4282 case ACNTTZWCC: 4283 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */ 4284 case ACNTTZD: 4285 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */ 4286 case ACNTTZDCC: 4287 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */ 4288 4289 case ARFI: 4290 return OPVCC(19, 50, 0, 0) 4291 case ARFCI: 4292 return OPVCC(19, 51, 0, 0) 4293 case ARFID: 4294 return OPVCC(19, 18, 0, 0) 4295 case AHRFID: 4296 return OPVCC(19, 274, 0, 0) 4297 4298 case ARLWNM: 4299 return OPVCC(23, 0, 0, 0) 4300 case ARLWNMCC: 4301 return OPVCC(23, 0, 0, 1) 4302 4303 case ARLDCL: 4304 return OPVCC(30, 8, 0, 0) 4305 case ARLDCLCC: 4306 return OPVCC(30, 0, 0, 1) 4307 4308 case ARLDCR: 4309 return OPVCC(30, 9, 0, 0) 4310 case ARLDCRCC: 4311 return OPVCC(30, 9, 0, 1) 4312 4313 case ARLDICL: 4314 return OPVCC(30, 0, 0, 0) 4315 case ARLDICLCC: 4316 return OPVCC(30, 0, 0, 1) 4317 case ARLDICR: 4318 return OPMD(30, 1, 0) // rldicr 4319 case ARLDICRCC: 4320 return OPMD(30, 1, 1) // rldicr. 4321 4322 case ARLDIC: 4323 return OPMD(30, 2, 0) // rldic 4324 case ARLDICCC: 4325 return OPMD(30, 2, 1) // rldic. 4326 4327 case ASYSCALL: 4328 return OPVCC(17, 1, 0, 0) 4329 4330 case ASLW: 4331 return OPVCC(31, 24, 0, 0) 4332 case ASLWCC: 4333 return OPVCC(31, 24, 0, 1) 4334 case ASLD: 4335 return OPVCC(31, 27, 0, 0) 4336 case ASLDCC: 4337 return OPVCC(31, 27, 0, 1) 4338 4339 case ASRAW: 4340 return OPVCC(31, 792, 0, 0) 4341 case ASRAWCC: 4342 return OPVCC(31, 792, 0, 1) 4343 case ASRAD: 4344 return OPVCC(31, 794, 0, 0) 4345 case ASRADCC: 4346 return OPVCC(31, 794, 0, 1) 4347 4348 case AEXTSWSLI: 4349 return OPVCC(31, 445, 0, 0) 4350 case AEXTSWSLICC: 4351 return OPVCC(31, 445, 0, 1) 4352 4353 case ASRW: 4354 return OPVCC(31, 536, 0, 0) 4355 case ASRWCC: 4356 return OPVCC(31, 536, 0, 1) 4357 case ASRD: 4358 return OPVCC(31, 539, 0, 0) 4359 case ASRDCC: 4360 return OPVCC(31, 539, 0, 1) 4361 4362 case ASUB: 4363 return OPVCC(31, 40, 0, 0) 4364 case ASUBCC: 4365 return OPVCC(31, 40, 0, 1) 4366 case ASUBV: 4367 return OPVCC(31, 40, 1, 0) 4368 case ASUBVCC: 4369 return OPVCC(31, 40, 1, 1) 4370 case ASUBC: 4371 return OPVCC(31, 8, 0, 0) 4372 case ASUBCCC: 4373 return OPVCC(31, 8, 0, 1) 4374 case ASUBCV: 4375 return OPVCC(31, 8, 1, 0) 4376 case ASUBCVCC: 4377 return OPVCC(31, 8, 1, 1) 4378 case ASUBE: 4379 return OPVCC(31, 136, 0, 0) 4380 case ASUBECC: 4381 return OPVCC(31, 136, 0, 1) 4382 case ASUBEV: 4383 return OPVCC(31, 136, 1, 0) 4384 case ASUBEVCC: 4385 return OPVCC(31, 136, 1, 1) 4386 case ASUBME: 4387 return OPVCC(31, 232, 0, 0) 4388 case ASUBMECC: 4389 return OPVCC(31, 232, 0, 1) 4390 case ASUBMEV: 4391 return OPVCC(31, 232, 1, 0) 4392 case ASUBMEVCC: 4393 return OPVCC(31, 232, 1, 1) 4394 case ASUBZE: 4395 return OPVCC(31, 200, 0, 0) 4396 case ASUBZECC: 4397 return OPVCC(31, 200, 0, 1) 4398 case ASUBZEV: 4399 return OPVCC(31, 200, 1, 0) 4400 case ASUBZEVCC: 4401 return OPVCC(31, 200, 1, 1) 4402 4403 case ASYNC: 4404 return OPVCC(31, 598, 
0, 0) 4405 case ALWSYNC: 4406 return OPVCC(31, 598, 0, 0) | 1<<21 4407 4408 case APTESYNC: 4409 return OPVCC(31, 598, 0, 0) | 2<<21 4410 4411 case ATLBIE: 4412 return OPVCC(31, 306, 0, 0) 4413 case ATLBIEL: 4414 return OPVCC(31, 274, 0, 0) 4415 case ATLBSYNC: 4416 return OPVCC(31, 566, 0, 0) 4417 case ASLBIA: 4418 return OPVCC(31, 498, 0, 0) 4419 case ASLBIE: 4420 return OPVCC(31, 434, 0, 0) 4421 case ASLBMFEE: 4422 return OPVCC(31, 915, 0, 0) 4423 case ASLBMFEV: 4424 return OPVCC(31, 851, 0, 0) 4425 case ASLBMTE: 4426 return OPVCC(31, 402, 0, 0) 4427 4428 case ATW: 4429 return OPVCC(31, 4, 0, 0) 4430 case ATD: 4431 return OPVCC(31, 68, 0, 0) 4432 4433 /* Vector (VMX/Altivec) instructions */ 4434 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4435 /* are enabled starting at POWER6 (ISA 2.05). */ 4436 case AVAND: 4437 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */ 4438 case AVANDC: 4439 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */ 4440 case AVNAND: 4441 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */ 4442 4443 case AVOR: 4444 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */ 4445 case AVORC: 4446 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */ 4447 case AVNOR: 4448 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */ 4449 case AVXOR: 4450 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */ 4451 case AVEQV: 4452 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */ 4453 4454 case AVADDUBM: 4455 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */ 4456 case AVADDUHM: 4457 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */ 4458 case AVADDUWM: 4459 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */ 4460 case AVADDUDM: 4461 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */ 4462 case AVADDUQM: 4463 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */ 4464 4465 case AVADDCUQ: 4466 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */ 4467 case AVADDCUW: 4468 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */ 4469 4470 case AVADDUBS: 4471 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */ 4472 case AVADDUHS: 4473 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */ 4474 case AVADDUWS: 4475 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */ 4476 4477 case AVADDSBS: 4478 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */ 4479 case AVADDSHS: 4480 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */ 4481 case AVADDSWS: 4482 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */ 4483 4484 case AVADDEUQM: 4485 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */ 4486 case AVADDECUQ: 4487 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */ 4488 4489 case AVMULESB: 4490 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */ 4491 case AVMULOSB: 4492 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */ 4493 case AVMULEUB: 4494 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */ 4495 case AVMULOUB: 4496 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */ 4497 case AVMULESH: 4498 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */ 4499 case AVMULOSH: 4500 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */ 4501 case AVMULEUH: 4502 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */ 4503 case AVMULOUH: 4504 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */ 4505 case AVMULESW: 4506 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */ 4507 case AVMULOSW: 4508 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */ 4509 case AVMULEUW: 4510 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */ 4511 case AVMULOUW: 4512 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */ 4513 case AVMULUWM: 4514 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */ 4515 4516 case AVPMSUMB: 4517 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */ 4518 case AVPMSUMH: 4519 
return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */ 4520 case AVPMSUMW: 4521 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */ 4522 case AVPMSUMD: 4523 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */ 4524 4525 case AVMSUMUDM: 4526 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */ 4527 4528 case AVSUBUBM: 4529 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */ 4530 case AVSUBUHM: 4531 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */ 4532 case AVSUBUWM: 4533 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */ 4534 case AVSUBUDM: 4535 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */ 4536 case AVSUBUQM: 4537 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */ 4538 4539 case AVSUBCUQ: 4540 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */ 4541 case AVSUBCUW: 4542 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */ 4543 4544 case AVSUBUBS: 4545 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */ 4546 case AVSUBUHS: 4547 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */ 4548 case AVSUBUWS: 4549 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */ 4550 4551 case AVSUBSBS: 4552 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */ 4553 case AVSUBSHS: 4554 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */ 4555 case AVSUBSWS: 4556 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */ 4557 4558 case AVSUBEUQM: 4559 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */ 4560 case AVSUBECUQ: 4561 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */ 4562 4563 case AVRLB: 4564 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */ 4565 case AVRLH: 4566 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */ 4567 case AVRLW: 4568 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */ 4569 case AVRLD: 4570 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */ 4571 4572 case AVMRGOW: 4573 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */ 4574 case AVMRGEW: 4575 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */ 4576 4577 case AVSLB: 4578 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */ 4579 case AVSLH: 4580 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */ 4581 case AVSLW: 4582 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */ 4583 case AVSL: 4584 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */ 4585 case AVSLO: 4586 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */ 4587 case AVSRB: 4588 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */ 4589 case AVSRH: 4590 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */ 4591 case AVSRW: 4592 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */ 4593 case AVSR: 4594 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */ 4595 case AVSRO: 4596 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */ 4597 case AVSLD: 4598 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */ 4599 case AVSRD: 4600 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */ 4601 4602 case AVSRAB: 4603 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */ 4604 case AVSRAH: 4605 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */ 4606 case AVSRAW: 4607 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */ 4608 case AVSRAD: 4609 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */ 4610 4611 case AVBPERMQ: 4612 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */ 4613 case AVBPERMD: 4614 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */ 4615 4616 case AVCLZB: 4617 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */ 4618 case AVCLZH: 4619 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */ 4620 case AVCLZW: 4621 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */ 4622 case AVCLZD: 4623 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */ 4624 4625 case AVCLZLSBB: 4626 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */ 4627 case AVCTZLSBB: 4628 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */ 4629 4630 case AVPOPCNTB: 4631
return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */ 4632 case AVPOPCNTH: 4633 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */ 4634 case AVPOPCNTW: 4635 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */ 4636 case AVPOPCNTD: 4637 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */ 4638 4639 case AVCMPEQUB: 4640 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */ 4641 case AVCMPEQUBCC: 4642 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */ 4643 case AVCMPEQUH: 4644 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */ 4645 case AVCMPEQUHCC: 4646 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */ 4647 case AVCMPEQUW: 4648 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */ 4649 case AVCMPEQUWCC: 4650 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */ 4651 case AVCMPEQUD: 4652 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */ 4653 case AVCMPEQUDCC: 4654 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */ 4655 4656 case AVCMPGTUB: 4657 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */ 4658 case AVCMPGTUBCC: 4659 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */ 4660 case AVCMPGTUH: 4661 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */ 4662 case AVCMPGTUHCC: 4663 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */ 4664 case AVCMPGTUW: 4665 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */ 4666 case AVCMPGTUWCC: 4667 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */ 4668 case AVCMPGTUD: 4669 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */ 4670 case AVCMPGTUDCC: 4671 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */ 4672 case AVCMPGTSB: 4673 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */ 4674 case AVCMPGTSBCC: 4675 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */ 4676 case AVCMPGTSH: 4677 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */ 4678 case AVCMPGTSHCC: 4679 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */ 4680 case AVCMPGTSW: 4681 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */ 4682 case AVCMPGTSWCC: 4683 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */ 4684 case AVCMPGTSD: 4685 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */ 4686 case AVCMPGTSDCC: 4687 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */ 4688 4689 case AVCMPNEZB: 4690 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */ 4691 case AVCMPNEZBCC: 4692 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */ 4693 case AVCMPNEB: 4694 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */ 4695 case AVCMPNEBCC: 4696 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */ 4697 case AVCMPNEH: 4698 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */ 4699 case AVCMPNEHCC: 4700 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */ 4701 case AVCMPNEW: 4702 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */ 4703 case AVCMPNEWCC: 4704 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */ 4705 4706 case AVPERM: 4707 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */ 4708 case AVPERMXOR: 4709 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */ 4710 case AVPERMR: 4711 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */ 4712 4713 case AVSEL: 4714 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */ 4715 4716 case AVCIPHER: 4717 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */ 4718 case AVCIPHERLAST: 4719 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */ 4720 case AVNCIPHER: 4721 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */ 4722 case AVNCIPHERLAST: 4723 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */ 4724 case AVSBOX: 4725 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */ 4726 /* End of vector instructions */ 4727 4728 /* Vector scalar (VSX) instructions */ 4729 /* ISA 2.06 enables these for POWER7. 
*/ 4730 case AMFVSRD, AMFVRD, AMFFPRD: 4731 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */ 4732 case AMFVSRWZ: 4733 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */ 4734 case AMFVSRLD: 4735 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */ 4736 4737 case AMTVSRD, AMTFPRD, AMTVRD: 4738 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */ 4739 case AMTVSRWA: 4740 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */ 4741 case AMTVSRWZ: 4742 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */ 4743 case AMTVSRDD: 4744 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */ 4745 case AMTVSRWS: 4746 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */ 4747 4748 case AXXLAND: 4749 return OPVXX3(60, 130, 0) /* xxland - v2.06 */ 4750 case AXXLANDC: 4751 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */ 4752 case AXXLEQV: 4753 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */ 4754 case AXXLNAND: 4755 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */ 4756 4757 case AXXLORC: 4758 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */ 4759 case AXXLNOR: 4760 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */ 4761 case AXXLOR, AXXLORQ: 4762 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */ 4763 case AXXLXOR: 4764 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */ 4765 4766 case AXXSEL: 4767 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */ 4768 4769 case AXXMRGHW: 4770 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */ 4771 case AXXMRGLW: 4772 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */ 4773 4774 case AXXSPLTW: 4775 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */ 4776 4777 case AXXSPLTIB: 4778 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */ 4779 4780 case AXXPERM: 4781 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */ 4782 case AXXPERMDI: 4783 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */ 4784 4785 case AXXSLDWI: 4786 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */ 4787 4788 case AXXBRQ: 4789 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */ 4790 case AXXBRD: 4791 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */ 4792 case AXXBRW: 4793 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */ 4794 case AXXBRH: 4795 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */ 4796 4797 case AXSCVDPSP: 4798 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */ 4799 case AXSCVSPDP: 4800 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */ 4801 case AXSCVDPSPN: 4802 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */ 4803 case AXSCVSPDPN: 4804 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */ 4805 4806 case AXVCVDPSP: 4807 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */ 4808 case AXVCVSPDP: 4809 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */ 4810 4811 case AXSCVDPSXDS: 4812 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */ 4813 case AXSCVDPSXWS: 4814 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */ 4815 case AXSCVDPUXDS: 4816 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */ 4817 case AXSCVDPUXWS: 4818 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */ 4819 4820 case AXSCVSXDDP: 4821 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */ 4822 case AXSCVUXDDP: 4823 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */ 4824 case AXSCVSXDSP: 4825 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */ 4826 case AXSCVUXDSP: 4827 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */ 4828 4829 case AXVCVDPSXDS: 4830 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */ 4831 case AXVCVDPSXWS: 4832 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */ 4833 case AXVCVDPUXDS: 4834 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */ 4835 case AXVCVDPUXWS: 4836 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */ 4837 case AXVCVSPSXDS: 4838 return 
OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */ 4839 case AXVCVSPSXWS: 4840 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */ 4841 case AXVCVSPUXDS: 4842 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */ 4843 case AXVCVSPUXWS: 4844 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */ 4845 4846 case AXVCVSXDDP: 4847 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */ 4848 case AXVCVSXWDP: 4849 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */ 4850 case AXVCVUXDDP: 4851 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */ 4852 case AXVCVUXWDP: 4853 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */ 4854 case AXVCVSXDSP: 4855 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */ 4856 case AXVCVSXWSP: 4857 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */ 4858 case AXVCVUXDSP: 4859 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */ 4860 case AXVCVUXWSP: 4861 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */ 4862 /* End of VSX instructions */ 4863 4864 case AMADDHD: 4865 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */ 4866 case AMADDHDU: 4867 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */ 4868 case AMADDLD: 4869 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */ 4870 4871 case AXOR: 4872 return OPVCC(31, 316, 0, 0) 4873 case AXORCC: 4874 return OPVCC(31, 316, 0, 1) 4875 } 4876 4877 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a) 4878 return 0 4879 } 4880 4881 func (c *ctxt9) opirrr(a obj.As) uint32 { 4882 switch a { 4883 /* Vector (VMX/Altivec) instructions */ 4884 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4885 /* are enabled starting at POWER6 (ISA 2.05). */ 4886 case AVSLDOI: 4887 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */ 4888 } 4889 4890 c.ctxt.Diag("bad i/r/r/r opcode %v", a) 4891 return 0 4892 } 4893 4894 func (c *ctxt9) opiirr(a obj.As) uint32 { 4895 switch a { 4896 /* Vector (VMX/Altivec) instructions */ 4897 /* ISA 2.07 enables these for POWER8 and beyond. */ 4898 case AVSHASIGMAW: 4899 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */ 4900 case AVSHASIGMAD: 4901 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */ 4902 } 4903 4904 c.ctxt.Diag("bad i/i/r/r opcode %v", a) 4905 return 0 4906 } 4907 4908 func (c *ctxt9) opirr(a obj.As) uint32 { 4909 switch a { 4910 case AADD: 4911 return OPVCC(14, 0, 0, 0) 4912 case AADDC: 4913 return OPVCC(12, 0, 0, 0) 4914 case AADDCCC: 4915 return OPVCC(13, 0, 0, 0) 4916 case AADDIS: 4917 return OPVCC(15, 0, 0, 0) /* ADDIS */ 4918 4919 case AANDCC: 4920 return OPVCC(28, 0, 0, 0) 4921 case AANDISCC: 4922 return OPVCC(29, 0, 0, 0) /* ANDIS. 
*/ 4923 4924 case ABR: 4925 return OPVCC(18, 0, 0, 0) 4926 case ABL: 4927 return OPVCC(18, 0, 0, 0) | 1 4928 case obj.ADUFFZERO: 4929 return OPVCC(18, 0, 0, 0) | 1 4930 case obj.ADUFFCOPY: 4931 return OPVCC(18, 0, 0, 0) | 1 4932 case ABC: 4933 return OPVCC(16, 0, 0, 0) 4934 case ABCL: 4935 return OPVCC(16, 0, 0, 0) | 1 4936 4937 case ABEQ: 4938 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0) 4939 case ABGE: 4940 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0) 4941 case ABGT: 4942 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0) 4943 case ABLE: 4944 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0) 4945 case ABLT: 4946 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0) 4947 case ABNE: 4948 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0) 4949 case ABVC: 4950 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0) 4951 case ABVS: 4952 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0) 4953 case ABDZ: 4954 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0) 4955 case ABDNZ: 4956 return AOP_RRR(16<<26, BO_BCTR, 0, 0) 4957 4958 case ACMP: 4959 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */ 4960 case ACMPU: 4961 return OPVCC(10, 0, 0, 0) | 1<<21 4962 case ACMPW: 4963 return OPVCC(11, 0, 0, 0) /* L=0 */ 4964 case ACMPWU: 4965 return OPVCC(10, 0, 0, 0) 4966 case ACMPEQB: 4967 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ 4968 4969 case ALSW: 4970 return OPVCC(31, 597, 0, 0) 4971 4972 case ACOPY: 4973 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */ 4974 case APASTECC: 4975 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */ 4976 case ADARN: 4977 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */ 4978 4979 case AMULLW, AMULLD: 4980 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */ 4981 4982 case AOR: 4983 return OPVCC(24, 0, 0, 0) 4984 case AORIS: 4985 return OPVCC(25, 0, 0, 0) /* ORIS */ 4986 4987 case ARLWMI: 4988 return OPVCC(20, 0, 0, 0) /* rlwimi */ 4989 case ARLWMICC: 4990 return OPVCC(20, 0, 0, 1) 4991 case ARLDMI: 4992 return OPMD(30, 3, 0) /* rldimi */ 4993 case ARLDMICC: 4994 return OPMD(30, 3, 1) /* rldimi. */ 4995 case ARLDIMI: 4996 return OPMD(30, 3, 0) /* rldimi */ 4997 case ARLDIMICC: 4998 return OPMD(30, 3, 1) /* rldimi. */ 4999 case ARLWNM: 5000 return OPVCC(21, 0, 0, 0) /* rlwinm */ 5001 case ARLWNMCC: 5002 return OPVCC(21, 0, 0, 1) 5003 5004 case ARLDCL: 5005 return OPMD(30, 0, 0) /* rldicl */ 5006 case ARLDCLCC: 5007 return OPMD(30, 0, 1) /* rldicl. */ 5008 case ARLDCR: 5009 return OPMD(30, 1, 0) /* rldicr */ 5010 case ARLDCRCC: 5011 return OPMD(30, 1, 1) /* rldicr. */ 5012 case ARLDC: 5013 return OPMD(30, 2, 0) /* rldic */ 5014 case ARLDCCC: 5015 return OPMD(30, 2, 1) /* rldic. */ 5016 5017 case ASRAW: 5018 return OPVCC(31, 824, 0, 0) 5019 case ASRAWCC: 5020 return OPVCC(31, 824, 0, 1) 5021 case ASRAD: 5022 return OPVCC(31, (413 << 1), 0, 0) 5023 case ASRADCC: 5024 return OPVCC(31, (413 << 1), 0, 1) 5025 case AEXTSWSLI: 5026 return OPVCC(31, 445, 0, 0) 5027 case AEXTSWSLICC: 5028 return OPVCC(31, 445, 0, 1) 5029 5030 case ASTSW: 5031 return OPVCC(31, 725, 0, 0) 5032 5033 case ASUBC: 5034 return OPVCC(8, 0, 0, 0) 5035 5036 case ATW: 5037 return OPVCC(3, 0, 0, 0) 5038 case ATD: 5039 return OPVCC(2, 0, 0, 0) 5040 5041 /* Vector (VMX/Altivec) instructions */ 5042 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 5043 /* are enabled starting at POWER6 (ISA 2.05). 
*/ 5044 case AVSPLTB: 5045 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */ 5046 case AVSPLTH: 5047 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */ 5048 case AVSPLTW: 5049 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */ 5050 5051 case AVSPLTISB: 5052 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */ 5053 case AVSPLTISH: 5054 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */ 5055 case AVSPLTISW: 5056 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */ 5057 /* End of vector instructions */ 5058 5059 case AFTDIV: 5060 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */ 5061 case AFTSQRT: 5062 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */ 5063 5064 case AXOR: 5065 return OPVCC(26, 0, 0, 0) /* XORIL */ 5066 case AXORIS: 5067 return OPVCC(27, 0, 0, 0) /* XORIS */ 5068 } 5069 5070 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a) 5071 return 0 5072 } 5073 5074 /* 5075 * load o(a),d 5076 */ 5077 func (c *ctxt9) opload(a obj.As) uint32 { 5078 switch a { 5079 case AMOVD: 5080 return OPVCC(58, 0, 0, 0) /* ld */ 5081 case AMOVDU: 5082 return OPVCC(58, 0, 0, 1) /* ldu */ 5083 case AMOVWZ: 5084 return OPVCC(32, 0, 0, 0) /* lwz */ 5085 case AMOVWZU: 5086 return OPVCC(33, 0, 0, 0) /* lwzu */ 5087 case AMOVW: 5088 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */ 5089 case ALXV: 5090 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */ 5091 case ALXVL: 5092 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */ 5093 case ALXVLL: 5094 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */ 5095 case ALXVX: 5096 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */ 5097 5098 /* no AMOVWU */ 5099 case AMOVB, AMOVBZ: 5100 return OPVCC(34, 0, 0, 0) 5101 /* load */ 5102 5103 case AMOVBU, AMOVBZU: 5104 return OPVCC(35, 0, 0, 0) 5105 case AFMOVD: 5106 return OPVCC(50, 0, 0, 0) 5107 case AFMOVDU: 5108 return OPVCC(51, 0, 0, 0) 5109 case AFMOVS: 5110 return OPVCC(48, 0, 0, 0) 5111 case AFMOVSU: 5112 return OPVCC(49, 0, 0, 0) 5113 case AMOVH: 5114 return OPVCC(42, 0, 0, 0) 5115 case AMOVHU: 5116 return OPVCC(43, 0, 0, 0) 5117 case AMOVHZ: 5118 return OPVCC(40, 0, 0, 0) 5119 case AMOVHZU: 5120 return OPVCC(41, 0, 0, 0) 5121 case AMOVMW: 5122 return OPVCC(46, 0, 0, 0) /* lmw */ 5123 } 5124 5125 c.ctxt.Diag("bad load opcode %v", a) 5126 return 0 5127 } 5128 5129 /* 5130 * indexed load a(b),d 5131 */ 5132 func (c *ctxt9) oploadx(a obj.As) uint32 { 5133 switch a { 5134 case AMOVWZ: 5135 return OPVCC(31, 23, 0, 0) /* lwzx */ 5136 case AMOVWZU: 5137 return OPVCC(31, 55, 0, 0) /* lwzux */ 5138 case AMOVW: 5139 return OPVCC(31, 341, 0, 0) /* lwax */ 5140 case AMOVWU: 5141 return OPVCC(31, 373, 0, 0) /* lwaux */ 5142 5143 case AMOVB, AMOVBZ: 5144 return OPVCC(31, 87, 0, 0) /* lbzx */ 5145 5146 case AMOVBU, AMOVBZU: 5147 return OPVCC(31, 119, 0, 0) /* lbzux */ 5148 case AFMOVD: 5149 return OPVCC(31, 599, 0, 0) /* lfdx */ 5150 case AFMOVDU: 5151 return OPVCC(31, 631, 0, 0) /* lfdux */ 5152 case AFMOVS: 5153 return OPVCC(31, 535, 0, 0) /* lfsx */ 5154 case AFMOVSU: 5155 return OPVCC(31, 567, 0, 0) /* lfsux */ 5156 case AFMOVSX: 5157 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */ 5158 case AFMOVSZ: 5159 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */ 5160 case AMOVH: 5161 return OPVCC(31, 343, 0, 0) /* lhax */ 5162 case AMOVHU: 5163 return OPVCC(31, 375, 0, 0) /* lhaux */ 5164 case AMOVHBR: 5165 return OPVCC(31, 790, 0, 0) /* lhbrx */ 5166 case AMOVWBR: 5167 return OPVCC(31, 534, 0, 0) /* lwbrx */ 5168 case AMOVDBR: 5169 return OPVCC(31, 532, 0, 0) /* ldbrx */ 5170 case AMOVHZ: 5171 return OPVCC(31, 279, 0, 0) /* lhzx */ 5172 case AMOVHZU: 5173 return 
OPVCC(31, 311, 0, 0) /* lhzux */ 5174 case ALBAR: 5175 return OPVCC(31, 52, 0, 0) /* lbarx */ 5176 case ALHAR: 5177 return OPVCC(31, 116, 0, 0) /* lharx */ 5178 case ALWAR: 5179 return OPVCC(31, 20, 0, 0) /* lwarx */ 5180 case ALDAR: 5181 return OPVCC(31, 84, 0, 0) /* ldarx */ 5182 case ALSW: 5183 return OPVCC(31, 533, 0, 0) /* lswx */ 5184 case AMOVD: 5185 return OPVCC(31, 21, 0, 0) /* ldx */ 5186 case AMOVDU: 5187 return OPVCC(31, 53, 0, 0) /* ldux */ 5188 5189 /* Vector (VMX/Altivec) instructions */ 5190 case ALVEBX: 5191 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */ 5192 case ALVEHX: 5193 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */ 5194 case ALVEWX: 5195 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */ 5196 case ALVX: 5197 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */ 5198 case ALVXL: 5199 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */ 5200 case ALVSL: 5201 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */ 5202 case ALVSR: 5203 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */ 5204 /* End of vector instructions */ 5205 5206 /* Vector scalar (VSX) instructions */ 5207 case ALXVX: 5208 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */ 5209 case ALXVD2X: 5210 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */ 5211 case ALXVW4X: 5212 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */ 5213 case ALXVH8X: 5214 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */ 5215 case ALXVB16X: 5216 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */ 5217 case ALXVDSX: 5218 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */ 5219 case ALXSDX: 5220 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */ 5221 case ALXSIWAX: 5222 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */ 5223 case ALXSIWZX: 5224 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */ 5225 } 5226 5227 c.ctxt.Diag("bad loadx opcode %v", a) 5228 return 0 5229 } 5230 5231 /* 5232 * store s,o(d) 5233 */ 5234 func (c *ctxt9) opstore(a obj.As) uint32 { 5235 switch a { 5236 case AMOVB, AMOVBZ: 5237 return OPVCC(38, 0, 0, 0) /* stb */ 5238 5239 case AMOVBU, AMOVBZU: 5240 return OPVCC(39, 0, 0, 0) /* stbu */ 5241 case AFMOVD: 5242 return OPVCC(54, 0, 0, 0) /* stfd */ 5243 case AFMOVDU: 5244 return OPVCC(55, 0, 0, 0) /* stfdu */ 5245 case AFMOVS: 5246 return OPVCC(52, 0, 0, 0) /* stfs */ 5247 case AFMOVSU: 5248 return OPVCC(53, 0, 0, 0) /* stfsu */ 5249 5250 case AMOVHZ, AMOVH: 5251 return OPVCC(44, 0, 0, 0) /* sth */ 5252 5253 case AMOVHZU, AMOVHU: 5254 return OPVCC(45, 0, 0, 0) /* sthu */ 5255 case AMOVMW: 5256 return OPVCC(47, 0, 0, 0) /* stmw */ 5257 case ASTSW: 5258 return OPVCC(31, 725, 0, 0) /* stswi */ 5259 5260 case AMOVWZ, AMOVW: 5261 return OPVCC(36, 0, 0, 0) /* stw */ 5262 5263 case AMOVWZU, AMOVWU: 5264 return OPVCC(37, 0, 0, 0) /* stwu */ 5265 case AMOVD: 5266 return OPVCC(62, 0, 0, 0) /* std */ 5267 case AMOVDU: 5268 return OPVCC(62, 0, 0, 1) /* stdu */ 5269 case ASTXV: 5270 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */ 5271 case ASTXVL: 5272 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */ 5273 case ASTXVLL: 5274 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */ 5275 case ASTXVX: 5276 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */ 5277 5278 } 5279 5280 c.ctxt.Diag("unknown store opcode %v", a) 5281 return 0 5282 } 5283 5284 /* 5285 * indexed store s,a(b) 5286 */ 5287 func (c *ctxt9) opstorex(a obj.As) uint32 { 5288 switch a { 5289 case AMOVB, AMOVBZ: 5290 return OPVCC(31, 215, 0, 0) /* stbx */ 5291 5292 case AMOVBU, AMOVBZU: 5293 return OPVCC(31, 247, 0, 0) /* stbux */ 5294 case AFMOVD: 5295 return OPVCC(31, 727, 0, 0) /* stfdx */ 5296 case AFMOVDU: 5297 return OPVCC(31, 759, 0, 0) /* stfdux 
*/ 5298 case AFMOVS: 5299 return OPVCC(31, 663, 0, 0) /* stfsx */ 5300 case AFMOVSU: 5301 return OPVCC(31, 695, 0, 0) /* stfsux */ 5302 case AFMOVSX: 5303 return OPVCC(31, 983, 0, 0) /* stfiwx */ 5304 5305 case AMOVHZ, AMOVH: 5306 return OPVCC(31, 407, 0, 0) /* sthx */ 5307 case AMOVHBR: 5308 return OPVCC(31, 918, 0, 0) /* sthbrx */ 5309 5310 case AMOVHZU, AMOVHU: 5311 return OPVCC(31, 439, 0, 0) /* sthux */ 5312 5313 case AMOVWZ, AMOVW: 5314 return OPVCC(31, 151, 0, 0) /* stwx */ 5315 5316 case AMOVWZU, AMOVWU: 5317 return OPVCC(31, 183, 0, 0) /* stwux */ 5318 case ASTSW: 5319 return OPVCC(31, 661, 0, 0) /* stswx */ 5320 case AMOVWBR: 5321 return OPVCC(31, 662, 0, 0) /* stwbrx */ 5322 case AMOVDBR: 5323 return OPVCC(31, 660, 0, 0) /* stdbrx */ 5324 case ASTBCCC: 5325 return OPVCC(31, 694, 0, 1) /* stbcx. */ 5326 case ASTHCCC: 5327 return OPVCC(31, 726, 0, 1) /* sthcx. */ 5328 case ASTWCCC: 5329 return OPVCC(31, 150, 0, 1) /* stwcx. */ 5330 case ASTDCCC: 5331 return OPVCC(31, 214, 0, 1) /* stdcx. */ 5332 case AMOVD: 5333 return OPVCC(31, 149, 0, 0) /* stdx */ 5334 case AMOVDU: 5335 return OPVCC(31, 181, 0, 0) /* stdux */ 5336 5337 /* Vector (VMX/Altivec) instructions */ 5338 case ASTVEBX: 5339 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */ 5340 case ASTVEHX: 5341 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */ 5342 case ASTVEWX: 5343 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */ 5344 case ASTVX: 5345 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */ 5346 case ASTVXL: 5347 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */ 5348 /* End of vector instructions */ 5349 5350 /* Vector scalar (VSX) instructions */ 5351 case ASTXVX: 5352 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */ 5353 case ASTXVD2X: 5354 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */ 5355 case ASTXVW4X: 5356 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */ 5357 case ASTXVH8X: 5358 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */ 5359 case ASTXVB16X: 5360 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */ 5361 5362 case ASTXSDX: 5363 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */ 5364 5365 case ASTXSIWX: 5366 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */ 5367 5368 /* End of vector scalar instructions */ 5369 5370 } 5371 5372 c.ctxt.Diag("unknown storex opcode %v", a) 5373 return 0 5374 }
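// Editorial sketch (not part of the original assembler and not referenced by
// it): a minimal illustration of how the opcode templates above compose with
// the field packers. opstore(AMOVD) returns the bare "std" template
// OPVCC(62, 0, 0, 0); asmout then ORs the register and offset fields into it.
// The raw field values below (RS=4, RA=3, offset 8) are illustrative; the
// assembler itself passes REG_Rx register constants and offsets taken from
// the Prog operands.
func exampleEncodeStd() uint32 {
	op := OPVCC(62, 0, 0, 0) // same template opstore(AMOVD) returns: std
	// Roughly MOVD R4, 8(R3), i.e. std r4,8(r3). The offset must be a
	// multiple of 4 because the low two bits of a DS-form displacement
	// overlap the std/ld extended opcode, which is what the offset checks
	// in cases 74, 75, 106 and 107 above enforce.
	return AOP_IRR(op, 4, 3, 8)
}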