github.com/go-asm/go@v1.21.1-0.20240213172139-40c5ead50c48/cmd/obj/ppc64/asm9.go

// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package ppc64

import (
	"encoding/binary"
	"fmt"
	"log"
	"math"
	"math/bits"
	"sort"

	"github.com/go-asm/go/buildcfg"
	"github.com/go-asm/go/cmd/obj"
	"github.com/go-asm/go/cmd/objabi"
)

// ctxt9 holds state while assembling a single function.
// Each function gets a fresh ctxt9.
// This allows multiple functions to be safely assembled concurrently.
type ctxt9 struct {
	ctxt       *obj.Link
	newprog    obj.ProgAlloc
	cursym     *obj.LSym
	autosize   int32
	instoffset int64
	pc         int64
}

// Instruction layout.

const (
	r0iszero = 1
)

const (
	// R bit option in prefixed load/store/add D-form operations
	PFX_R_ABS   = 0 // Offset is absolute
	PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
)

const (
	// The preferred hardware nop instruction.
	NOP = 0x60000000
)

type Optab struct {
	as    obj.As // Opcode
	a1    uint8  // p.From argument (obj.Addr). p is of type obj.Prog.
	a2    uint8  // p.Reg argument (int16 Register)
	a3    uint8  // p.RestArgs[0] (obj.AddrPos)
	a4    uint8  // p.RestArgs[1]
	a5    uint8  // p.RestArgs[2]
	a6    uint8  // p.To (obj.Addr)
	type_ int8   // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
	size  int8   // Text space in bytes to lay operation

	// A prefixed instruction is generated by this opcode. This cannot be placed
	// across a 64B PC address. Opcodes should not translate to more than one
	// prefixed instruction. The prefixed instruction should be written first
	// (e.g. when Optab.size > 8).
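	// For example (illustrative): if the first word of a prefixed instruction
	// would start at pc%64 == 60, the 8-byte instruction would straddle a
	// 64-byte boundary, so span9 marks the Prog with PFX_X64B and a hardware
	// NOP is emitted ahead of it during final assembly.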
89 ispfx bool 90 91 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32) 92 } 93 94 // optab contains an array to be sliced of accepted operand combinations for an 95 // instruction. Unused arguments and fields are not explicitly enumerated, and 96 // should not be listed for clarity. Unused arguments and values should always 97 // assume the default value for the given type. 98 // 99 // optab does not list every valid ppc64 opcode, it enumerates representative 100 // operand combinations for a class of instruction. The variable oprange indexes 101 // all valid ppc64 opcodes. 102 // 103 // oprange is initialized to point a slice within optab which contains the valid 104 // operand combinations for a given instruction. This is initialized from buildop. 105 // 106 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface 107 // to arrange entries to minimize text size of each opcode. 108 // 109 // optab is the sorted result of combining optabBase, optabGen, and prefixableOptab. 110 var optab []Optab 111 112 var optabBase = []Optab{ 113 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0}, 114 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0}, 115 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0}, 116 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0}, 117 /* move register */ 118 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, 119 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 120 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 121 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4}, 122 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 123 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, 124 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8}, 125 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8}, 126 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4}, 127 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4}, 128 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, 129 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 130 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 131 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, 132 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, 133 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, 134 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */ 135 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 136 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 137 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 138 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, 139 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 140 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, 141 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, 142 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, 143 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, 144 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, 145 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 146 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, 147 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 148 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 149 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, 150 {as: AMULLW, a1: 
C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, 151 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4}, 152 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, 153 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, 154 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, 155 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4}, 156 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4}, 157 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12}, 158 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */ 159 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 160 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, 161 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 162 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, 163 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, 164 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, 165 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, 166 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, 167 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, 168 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */ 169 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, 170 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */ 171 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4}, 172 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 173 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 174 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 175 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 176 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, 177 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, 178 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, 179 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, 180 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4}, 181 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4}, 182 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 183 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 184 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, 185 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, 186 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, 187 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, 188 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, 189 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, 190 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4}, 191 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4}, 192 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4}, 193 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4}, 194 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4}, 195 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4}, 196 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, 197 {as: ARLDC, a1: C_REG, a3: C_U8CON, a4: C_U8CON, a6: C_REG, type_: 9, size: 4}, 198 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, 199 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 200 {as: ARLDICL, a1: 
C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 201 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 202 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, 203 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4}, 204 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4}, 205 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, 206 {as: AFABS, a6: C_FREG, type_: 33, size: 4}, 207 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4}, 208 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4}, 209 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4}, 210 211 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 212 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 213 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, 214 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8}, 215 216 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 217 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 218 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 219 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 220 221 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 222 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, 223 224 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, 225 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8}, 226 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 227 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 228 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 229 230 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 231 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 232 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 233 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 234 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 235 236 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, 237 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, 238 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, 239 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 240 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 241 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8}, 242 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, 243 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 244 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 245 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8}, 246 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, 247 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 248 249 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, 250 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, 251 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, 252 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4}, 253 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, 254 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, 255 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, 256 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, 257 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, 258 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, 259 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, 260 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4}, 261 262 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8}, 263 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4}, 264 {as: AFMOVD, a1: 
C_XOREG, a6: C_FREG, type_: 109, size: 4}, 265 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4}, 266 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, 267 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4}, 268 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4}, 269 270 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4}, 271 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4}, 272 273 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4}, 274 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4}, 275 276 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4}, 277 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4}, 278 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4}, 279 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4}, 280 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4}, 281 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4}, 282 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, 283 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4}, 284 285 {as: ASYSCALL, type_: 5, size: 4}, 286 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12}, 287 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12}, 288 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4}, 289 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4}, 290 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label 291 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop 292 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr 293 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr 294 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label 295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label 296 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi 297 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh 298 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi 299 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4}, 300 {as: ASYNC, type_: 46, size: 4}, 301 {as: AWORD, a1: C_LCON, type_: 40, size: 4}, 302 {as: ADWORD, a1: C_64CON, type_: 31, size: 8}, 303 {as: ADWORD, a1: C_LACON, type_: 31, size: 8}, 304 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4}, 305 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4}, 306 {as: AEXTSB, a6: C_REG, type_: 48, size: 4}, 307 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, 308 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, 309 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4}, 310 {as: ANEG, a6: C_REG, type_: 47, size: 4}, 311 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12}, 312 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12}, 313 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16}, 314 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16}, 315 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12}, 316 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12}, 317 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4}, 318 /* Other ISA 2.05+ instructions */ 319 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */ 320 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */ 321 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */ 322 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 323 {as: 
AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */ 324 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */ 325 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */ 326 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */ 327 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */ 328 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */ 329 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */ 330 331 /* Misc ISA 3.0 instructions */ 332 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4}, 333 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4}, 334 335 /* Vector instructions */ 336 337 /* Vector load */ 338 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */ 339 340 /* Vector store */ 341 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */ 342 343 /* Vector logical */ 344 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */ 345 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */ 346 347 /* Vector add */ 348 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */ 349 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */ 350 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */ 351 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */ 352 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */ 353 354 /* Vector subtract */ 355 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */ 356 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */ 357 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */ 358 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */ 359 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */ 360 361 /* Vector multiply */ 362 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */ 363 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */ 364 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */ 365 366 /* Vector rotate */ 367 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */ 368 369 /* Vector shift */ 370 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */ 371 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */ 372 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 
83, size: 4}, /* vector shift by octet immediate, va-form */ 373 374 /* Vector count */ 375 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */ 376 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */ 377 378 /* Vector compare */ 379 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */ 380 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */ 381 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */ 382 383 /* Vector merge */ 384 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */ 385 386 /* Vector permute */ 387 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */ 388 389 /* Vector bit permute */ 390 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */ 391 392 /* Vector select */ 393 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */ 394 395 /* Vector splat */ 396 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */ 397 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, 398 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */ 399 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4}, 400 401 /* Vector AES */ 402 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */ 403 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */ 404 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */ 405 406 /* Vector SHA */ 407 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */ 408 409 /* VSX vector load */ 410 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */ 411 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */ 412 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */ 413 414 /* VSX vector store */ 415 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */ 416 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */ 417 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */ 418 419 /* VSX scalar load */ 420 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */ 421 422 /* VSX scalar store */ 423 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */ 424 425 /* VSX scalar as integer load */ 426 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */ 427 428 /* VSX scalar store as integer */ 429 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */ 430 431 /* VSX move from VSR */ 432 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, 433 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4}, 434 435 /* VSX move to VSR */ 436 {as: AMTVSRD, a1: C_REG, a6: 
C_VSREG, type_: 104, size: 4}, 437 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4}, 438 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4}, 439 440 /* VSX logical */ 441 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */ 442 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */ 443 444 /* VSX select */ 445 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */ 446 447 /* VSX merge */ 448 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */ 449 450 /* VSX splat */ 451 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */ 452 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */ 453 454 /* VSX permute */ 455 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */ 456 457 /* VSX shift */ 458 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */ 459 460 /* VSX reverse bytes */ 461 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */ 462 463 /* VSX scalar FP-FP conversion */ 464 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */ 465 466 /* VSX vector FP-FP conversion */ 467 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */ 468 469 /* VSX scalar FP-integer conversion */ 470 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */ 471 472 /* VSX scalar integer-FP conversion */ 473 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */ 474 475 /* VSX vector FP-integer conversion */ 476 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */ 477 478 /* VSX vector integer-FP conversion */ 479 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */ 480 481 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 482 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, 483 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4}, 484 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4}, 485 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4}, 486 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, 487 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4}, 488 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4}, 489 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4}, 490 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4}, 491 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4}, 492 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4}, 493 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4}, 494 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4}, 495 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4}, 496 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4}, 497 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4}, 498 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4}, 499 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, 500 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, 
	size: 4},
	{as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
	{as: AEIEIO, type_: 46, size: 4},
	{as: ATLBIE, a1: C_REG, type_: 49, size: 4},
	{as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
	{as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
	{as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
	{as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
	{as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
	{as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
	{as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},

	{as: obj.AUNDEF, type_: 78, size: 4},
	{as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
	{as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
	{as: obj.ANOP, type_: 0, size: 0},
	{as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
	{as: obj.ANOP, a1: C_REG, type_: 0, size: 0},  // to preserve previous behavior
	{as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
	{as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
	{as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
	{as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0},   // align code
}

// PrefixableOptab entries may assemble to different sequences depending on
// whether prefix opcode support is available.
type PrefixableOptab struct {
	Optab
	minGOPPC64 int  // Minimum GOPPC64 required to support this.
	pfxsize    int8 // Instruction sequence size when prefixed opcodes are used
}

// The prefixable optab contains the pseudo-opcodes which generate relocations, or which may generate
// a more efficient sequence of instructions if a prefixed version exists (e.g. paddi instead of oris/ori/add).
//
// This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
// sequence. It also encompasses several transformations which do not involve relocations; those could be
// separated out and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
// restrictions on the offset, so they are also used for static binaries to allow better code generation, e.g.
//
//	MOVD something-byte-aligned(Rx), Ry
//	MOVD 3(Rx), Ry
//
// is allowed when the prefixed forms are used.
//
// This requires an ISA 3.1 compatible CPU (e.g. Power10) and, when linking externally, an ELFv2 1.5 compliant linker.
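// A rough sketch of the effect on code generation (illustrative; the exact
// sequences are chosen by asmout):
//
//	MOVD 0x12345678(R4), R5
//
// assembles to an addis of the high adjusted half of the offset followed by
// an ld of the low half when prefixed opcodes are unavailable, but becomes a
// single 8-byte pld carrying the full displacement when ispfx is set.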
546 var prefixableOptab = []PrefixableOptab{ 547 {Optab: Optab{as: AMOVD, a1: C_S34CON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8}, 548 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8}, 549 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8}, 550 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12}, 551 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8}, 552 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8}, 553 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, 554 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, 555 556 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8}, 557 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8}, 558 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8}, 559 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8}, 560 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, 561 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, 562 563 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, 564 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12}, 565 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12}, 566 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, 567 568 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8}, 569 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8}, 570 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, 571 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, 572 573 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8}, 574 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8}, 575 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, 576 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, 577 578 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8}, 579 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8}, 580 {Optab: Optab{as: AADD, a1: C_S34CON, a2: C_REG, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8}, 581 {Optab: Optab{as: AADD, a1: C_S34CON, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8}, 582 } 583 584 var oprange [ALAST & obj.AMask][]Optab 585 586 var xcmp [C_NCLASS][C_NCLASS]bool 587 588 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported. 589 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop 590 591 // padding bytes to add to align code as requested. 
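// For example, addpad(60, 8, ctxt, cursym) returns 4 so the next instruction
// starts on an 8-byte boundary, while addpad(64, 8, ctxt, cursym) returns 0;
// any alignment other than 8, 16, 32 or 64 is diagnosed.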
592 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int { 593 switch a { 594 case 8, 16, 32, 64: 595 // By default function alignment is 16. If an alignment > 16 is 596 // requested then the function alignment must also be promoted. 597 // The function alignment is not promoted on AIX at this time. 598 // TODO: Investigate AIX function alignment. 599 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) { 600 cursym.Func().Align = int32(a) 601 } 602 if pc&(a-1) != 0 { 603 return int(a - (pc & (a - 1))) 604 } 605 default: 606 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a) 607 } 608 return 0 609 } 610 611 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { 612 p := cursym.Func().Text 613 if p == nil || p.Link == nil { // handle external functions and ELF section symbols 614 return 615 } 616 617 if oprange[AANDN&obj.AMask] == nil { 618 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first") 619 } 620 621 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)} 622 623 pc := int64(0) 624 p.Pc = pc 625 626 var m int 627 var o *Optab 628 for p = p.Link; p != nil; p = p.Link { 629 p.Pc = pc 630 o = c.oplook(p) 631 m = int(o.size) 632 if m == 0 { 633 if p.As == obj.APCALIGN { 634 a := c.vregoff(&p.From) 635 m = addpad(pc, a, ctxt, cursym) 636 } else { 637 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { 638 ctxt.Diag("zero-width instruction\n%v", p) 639 } 640 continue 641 } 642 } 643 pc += int64(m) 644 } 645 646 c.cursym.Size = pc 647 648 /* 649 * if any procedure is large enough to 650 * generate a large SBRA branch, then 651 * generate extra passes putting branches 652 * around jmps to fix. this is rare. 653 */ 654 bflag := 1 655 656 var otxt int64 657 var q *obj.Prog 658 var out [5]uint32 659 var falign int32 // Track increased alignment requirements for prefix. 660 for bflag != 0 { 661 bflag = 0 662 pc = 0 663 falign = 0 // Note, linker bumps function symbols to funcAlign. 664 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link { 665 p.Pc = pc 666 o = c.oplook(p) 667 668 // very large conditional branches 669 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil { 670 otxt = p.To.Target().Pc - pc 671 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 { 672 // Assemble the instruction with a target not too far to figure out BI and BO fields. 673 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted, 674 // and only one extra branch is needed to reach the target. 675 tgt := p.To.Target() 676 p.To.SetTarget(p.Link) 677 o.asmout(&c, p, o, &out) 678 p.To.SetTarget(tgt) 679 680 bo := int64(out[0]>>21) & 31 681 bi := int16((out[0] >> 16) & 31) 682 invertible := false 683 684 if bo&0x14 == 0x14 { 685 // A conditional branch that is unconditionally taken. This cannot be inverted. 686 } else if bo&0x10 == 0x10 { 687 // A branch based on the value of CTR. Invert the CTR comparison against zero bit. 688 bo ^= 0x2 689 invertible = true 690 } else if bo&0x04 == 0x04 { 691 // A branch based on CR bit. Invert the BI comparison bit. 
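						// For example, a BEQ assembled as BC with BO=12 ("branch if the CR
						// bit is set") becomes BO=4 ("branch if the CR bit is clear"),
						// selecting the inverse condition.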
692 bo ^= 0x8 693 invertible = true 694 } 695 696 if invertible { 697 // Rewrite 698 // BC bo,...,far_away_target 699 // NEXT_INSN 700 // to: 701 // BC invert(bo),next_insn 702 // JMP far_away_target 703 // next_insn: 704 // NEXT_INSN 705 p.As = ABC 706 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo} 707 q = c.newprog() 708 q.As = ABR 709 q.To.Type = obj.TYPE_BRANCH 710 q.To.SetTarget(p.To.Target()) 711 q.Link = p.Link 712 p.To.SetTarget(p.Link) 713 p.Link = q 714 p.Reg = REG_CRBIT0 + bi 715 } else { 716 // Rewrite 717 // BC ...,far_away_target 718 // NEXT_INSN 719 // to 720 // BC ...,tmp 721 // JMP next_insn 722 // tmp: 723 // JMP far_away_target 724 // next_insn: 725 // NEXT_INSN 726 q = c.newprog() 727 q.Link = p.Link 728 p.Link = q 729 q.As = ABR 730 q.To.Type = obj.TYPE_BRANCH 731 q.To.SetTarget(p.To.Target()) 732 p.To.SetTarget(q) 733 q = c.newprog() 734 q.Link = p.Link 735 p.Link = q 736 q.As = ABR 737 q.To.Type = obj.TYPE_BRANCH 738 q.To.SetTarget(q.Link.Link) 739 } 740 bflag = 1 741 } 742 } 743 744 m = int(o.size) 745 if m == 0 { 746 if p.As == obj.APCALIGN { 747 a := c.vregoff(&p.From) 748 m = addpad(pc, a, ctxt, cursym) 749 } else { 750 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { 751 ctxt.Diag("zero-width instruction\n%v", p) 752 } 753 continue 754 } 755 } 756 757 // Prefixed instructions cannot be placed across a 64B boundary. 758 // Mark and adjust the PC of those which do. A nop will be 759 // inserted during final assembly. 760 if o.ispfx { 761 mark := p.Mark &^ PFX_X64B 762 if pc&63 == 60 { 763 p.Pc += 4 764 m += 4 765 mark |= PFX_X64B 766 } 767 768 // Marks may be adjusted if a too-far conditional branch is 769 // fixed up above. Likewise, inserting a NOP may cause a 770 // branch target to become too far away. We need to run 771 // another iteration and verify no additional changes 772 // are needed. 773 if mark != p.Mark { 774 bflag = 1 775 p.Mark = mark 776 } 777 778 // Check for 16 or 32B crossing of this prefixed insn. 779 // These do no require padding, but do require increasing 780 // the function alignment to prevent them from potentially 781 // crossing a 64B boundary when the linker assigns the final 782 // PC. 783 switch p.Pc & 31 { 784 case 28: // 32B crossing 785 falign = 64 786 case 12: // 16B crossing 787 if falign < 64 { 788 falign = 32 789 } 790 } 791 } 792 793 pc += int64(m) 794 } 795 796 c.cursym.Size = pc 797 } 798 799 c.cursym.Size = pc 800 c.cursym.Func().Align = falign 801 c.cursym.Grow(c.cursym.Size) 802 803 // lay out the code, emitting code and data relocations. 
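	// Each Prog is emitted at its assigned PC: PCALIGN directives become runs of
	// the preferred NOP, and any instruction marked PFX_X64B is preceded by one
	// extra NOP so its prefixed form does not cross a 64-byte boundary.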
804 805 bp := c.cursym.P 806 var i int32 807 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link { 808 c.pc = p.Pc 809 o = c.oplook(p) 810 if int(o.size) > 4*len(out) { 811 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p) 812 } 813 // asmout is not set up to add large amounts of padding 814 if o.type_ == 0 && p.As == obj.APCALIGN { 815 aln := c.vregoff(&p.From) 816 v := addpad(p.Pc, aln, c.ctxt, c.cursym) 817 if v > 0 { 818 // Same padding instruction for all 819 for i = 0; i < int32(v/4); i++ { 820 c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP) 821 bp = bp[4:] 822 } 823 } 824 } else { 825 if p.Mark&PFX_X64B != 0 { 826 c.ctxt.Arch.ByteOrder.PutUint32(bp, NOP) 827 bp = bp[4:] 828 } 829 o.asmout(&c, p, o, &out) 830 for i = 0; i < int32(o.size/4); i++ { 831 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) 832 bp = bp[4:] 833 } 834 } 835 } 836 } 837 838 func isint32(v int64) bool { 839 return int64(int32(v)) == v 840 } 841 842 func isuint32(v uint64) bool { 843 return uint64(uint32(v)) == v 844 } 845 846 func (c *ctxt9) aclassreg(reg int16) int { 847 if REG_R0 <= reg && reg <= REG_R31 { 848 return C_REGP + int(reg&1) 849 } 850 if REG_F0 <= reg && reg <= REG_F31 { 851 return C_FREGP + int(reg&1) 852 } 853 if REG_V0 <= reg && reg <= REG_V31 { 854 return C_VREG 855 } 856 if REG_VS0 <= reg && reg <= REG_VS63 { 857 return C_VSREGP + int(reg&1) 858 } 859 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR { 860 return C_CREG 861 } 862 if REG_CR0LT <= reg && reg <= REG_CR7SO { 863 return C_CRBIT 864 } 865 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 { 866 switch reg { 867 case REG_LR: 868 return C_LR 869 870 case REG_CTR: 871 return C_CTR 872 } 873 874 return C_SPR 875 } 876 if REG_A0 <= reg && reg <= REG_A7 { 877 return C_AREG 878 } 879 if reg == REG_FPSCR { 880 return C_FPSCR 881 } 882 return C_GOK 883 } 884 885 func (c *ctxt9) aclass(a *obj.Addr) int { 886 switch a.Type { 887 case obj.TYPE_NONE: 888 return C_NONE 889 890 case obj.TYPE_REG: 891 return c.aclassreg(a.Reg) 892 893 case obj.TYPE_MEM: 894 if a.Index != 0 { 895 if a.Name != obj.NAME_NONE || a.Offset != 0 { 896 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class) 897 898 } 899 return C_XOREG 900 } 901 switch a.Name { 902 case obj.NAME_GOTREF, obj.NAME_TOCREF: 903 return C_ADDR 904 905 case obj.NAME_EXTERN, 906 obj.NAME_STATIC: 907 c.instoffset = a.Offset 908 if a.Sym == nil { 909 break 910 } else if a.Sym.Type == objabi.STLSBSS { 911 // For PIC builds, use 12 byte got initial-exec TLS accesses. 912 if c.ctxt.Flag_shared { 913 return C_TLS_IE 914 } 915 // Otherwise, use 8 byte local-exec TLS accesses. 
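				// (Roughly: the 12-byte initial-exec form loads the symbol's
				// thread-pointer offset from the GOT and adds it to R13, while
				// the 8-byte local-exec form materializes the offset against
				// R13 directly.)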
916 return C_TLS_LE 917 } else { 918 return C_ADDR 919 } 920 921 case obj.NAME_AUTO: 922 a.Reg = REGSP 923 c.instoffset = int64(c.autosize) + a.Offset 924 if c.instoffset >= -BIG && c.instoffset < BIG { 925 return C_SOREG 926 } 927 return C_LOREG 928 929 case obj.NAME_PARAM: 930 a.Reg = REGSP 931 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize 932 if c.instoffset >= -BIG && c.instoffset < BIG { 933 return C_SOREG 934 } 935 return C_LOREG 936 937 case obj.NAME_NONE: 938 c.instoffset = a.Offset 939 if a.Offset == 0 && a.Index == 0 { 940 return C_ZOREG 941 } else if c.instoffset >= -BIG && c.instoffset < BIG { 942 return C_SOREG 943 } else { 944 return C_LOREG 945 } 946 } 947 948 return C_GOK 949 950 case obj.TYPE_TEXTSIZE: 951 return C_TEXTSIZE 952 953 case obj.TYPE_FCONST: 954 // The only cases where FCONST will occur are with float64 +/- 0. 955 // All other float constants are generated in memory. 956 f64 := a.Val.(float64) 957 if f64 == 0 { 958 if math.Signbit(f64) { 959 return C_ADDCON 960 } 961 return C_ZCON 962 } 963 log.Fatalf("Unexpected nonzero FCONST operand %v", a) 964 965 case obj.TYPE_CONST, 966 obj.TYPE_ADDR: 967 switch a.Name { 968 case obj.NAME_NONE: 969 c.instoffset = a.Offset 970 if a.Reg != 0 { 971 if -BIG <= c.instoffset && c.instoffset < BIG { 972 return C_SACON 973 } 974 if isint32(c.instoffset) { 975 return C_LACON 976 } 977 return C_DACON 978 } 979 980 case obj.NAME_EXTERN, 981 obj.NAME_STATIC: 982 s := a.Sym 983 if s == nil { 984 return C_GOK 985 } 986 c.instoffset = a.Offset 987 return C_LACON 988 989 case obj.NAME_AUTO: 990 a.Reg = REGSP 991 c.instoffset = int64(c.autosize) + a.Offset 992 if c.instoffset >= -BIG && c.instoffset < BIG { 993 return C_SACON 994 } 995 return C_LACON 996 997 case obj.NAME_PARAM: 998 a.Reg = REGSP 999 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize 1000 if c.instoffset >= -BIG && c.instoffset < BIG { 1001 return C_SACON 1002 } 1003 return C_LACON 1004 1005 default: 1006 return C_GOK 1007 } 1008 1009 if c.instoffset >= 0 { 1010 sbits := bits.Len64(uint64(c.instoffset)) 1011 switch { 1012 case sbits <= 5: 1013 return C_ZCON + sbits 1014 case sbits <= 8: 1015 return C_U8CON 1016 case sbits <= 15: 1017 return C_U15CON 1018 case sbits <= 16: 1019 return C_U16CON 1020 case sbits <= 31: 1021 return C_U32CON 1022 case sbits <= 32: 1023 return C_U32CON 1024 case sbits <= 33: 1025 return C_S34CON 1026 default: 1027 return C_64CON 1028 } 1029 } else { 1030 sbits := bits.Len64(uint64(^c.instoffset)) 1031 switch { 1032 case sbits <= 15: 1033 return C_S16CON 1034 case sbits <= 31: 1035 return C_S32CON 1036 case sbits <= 33: 1037 return C_S34CON 1038 default: 1039 return C_64CON 1040 } 1041 } 1042 1043 case obj.TYPE_BRANCH: 1044 if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled { 1045 return C_LBRAPIC 1046 } 1047 return C_SBRA 1048 } 1049 1050 return C_GOK 1051 } 1052 1053 func prasm(p *obj.Prog) { 1054 fmt.Printf("%v\n", p) 1055 } 1056 1057 func (c *ctxt9) oplook(p *obj.Prog) *Optab { 1058 a1 := int(p.Optab) 1059 if a1 != 0 { 1060 return &optab[a1-1] 1061 } 1062 a1 = int(p.From.Class) 1063 if a1 == 0 { 1064 a1 = c.aclass(&p.From) + 1 1065 p.From.Class = int8(a1) 1066 } 1067 a1-- 1068 1069 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1} 1070 for i, ap := range p.RestArgs { 1071 argsv[i] = int(ap.Addr.Class) 1072 if argsv[i] == 0 { 1073 argsv[i] = c.aclass(&ap.Addr) + 1 1074 ap.Addr.Class = int8(argsv[i]) 1075 } 1076 1077 } 1078 a3 := argsv[0] - 1 1079 a4 := argsv[1] - 1 1080 a5 := argsv[2] - 1 
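	// p.To is classified the same way, with the result cached in p.To.Class so
	// the work is not repeated on later passes over the same Prog.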

	a6 := int(p.To.Class)
	if a6 == 0 {
		a6 = c.aclass(&p.To) + 1
		p.To.Class = int8(a6)
	}
	a6--

	a2 := C_NONE
	if p.Reg != 0 {
		a2 = c.aclassreg(p.Reg)
	}

	// c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
	ops := oprange[p.As&obj.AMask]
	c1 := &xcmp[a1]
	c2 := &xcmp[a2]
	c3 := &xcmp[a3]
	c4 := &xcmp[a4]
	c5 := &xcmp[a5]
	c6 := &xcmp[a6]
	for i := range ops {
		op := &ops[i]
		if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
			return op
		}
	}

	c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
	prasm(p)
	if ops == nil {
		ops = optab
	}
	return &ops[0]
}

// Compare two operand types (e.g. C_REG or C_SCON)
// and return true if b is compatible with a.
//
// Argument comparison isn't symmetric, so care must be taken:
// a is the argument type as found in optab, b is the argument as
// fitted by aclass. For example, cmp(C_SOREG, C_ZOREG) is true,
// but cmp(C_ZOREG, C_SOREG) is not.
func cmp(a int, b int) bool {
	if a == b {
		return true
	}
	switch a {

	case C_SPR:
		if b == C_LR || b == C_CTR {
			return true
		}

	case C_U1CON:
		return cmp(C_ZCON, b)
	case C_U2CON:
		return cmp(C_U1CON, b)
	case C_U3CON:
		return cmp(C_U2CON, b)
	case C_U4CON:
		return cmp(C_U3CON, b)
	case C_U5CON:
		return cmp(C_U4CON, b)
	case C_U8CON:
		return cmp(C_U5CON, b)
	case C_U15CON:
		return cmp(C_U8CON, b)
	case C_U16CON:
		return cmp(C_U15CON, b)

	case C_S16CON:
		return cmp(C_U15CON, b)
	case C_32CON:
		return cmp(C_S16CON, b) || cmp(C_U16CON, b)
	case C_S34CON:
		return cmp(C_32CON, b)
	case C_64CON:
		return cmp(C_S34CON, b)

	case C_LACON:
		return cmp(C_SACON, b)

	case C_LBRA:
		return cmp(C_SBRA, b)

	case C_SOREG:
		return cmp(C_ZOREG, b)

	case C_LOREG:
		return cmp(C_SOREG, b)

	case C_XOREG:
		return cmp(C_REG, b) || cmp(C_ZOREG, b)

	// An even/odd register input always matches the regular register types.
	case C_REG:
		return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
	case C_FREG:
		return cmp(C_FREGP, b)
	case C_VSREG:
		/* Allow any VR argument as a VSR operand. */
		return cmp(C_VSREGP, b) || cmp(C_VREG, b)

	case C_ANY:
		return true
	}

	return false
}

// Used when sorting the optab. Sorting is
// done in a way so that the best choice of
// opcode/operand combination is considered first.
func optabLess(i, j int) bool {
	p1 := &optab[i]
	p2 := &optab[j]
	n := int(p1.as) - int(p2.as)
	// same opcode
	if n != 0 {
		return n < 0
	}
	// Consider those that generate fewer
	// instructions first.
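	// (Smaller sizes sort earlier, so oplook, which scans each oprange slice in
	// order and returns the first match, picks the shortest encoding available
	// for a given operand combination.)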
1205 n = int(p1.size) - int(p2.size) 1206 if n != 0 { 1207 return n < 0 1208 } 1209 // operand order should match 1210 // better choices first 1211 n = int(p1.a1) - int(p2.a1) 1212 if n != 0 { 1213 return n < 0 1214 } 1215 n = int(p1.a2) - int(p2.a2) 1216 if n != 0 { 1217 return n < 0 1218 } 1219 n = int(p1.a3) - int(p2.a3) 1220 if n != 0 { 1221 return n < 0 1222 } 1223 n = int(p1.a4) - int(p2.a4) 1224 if n != 0 { 1225 return n < 0 1226 } 1227 n = int(p1.a5) - int(p2.a5) 1228 if n != 0 { 1229 return n < 0 1230 } 1231 n = int(p1.a6) - int(p2.a6) 1232 if n != 0 { 1233 return n < 0 1234 } 1235 return false 1236 } 1237 1238 // Add an entry to the opcode table for 1239 // a new opcode b0 with the same operand combinations 1240 // as opcode a. 1241 func opset(a, b0 obj.As) { 1242 oprange[a&obj.AMask] = oprange[b0] 1243 } 1244 1245 // Determine if the build configuration requires a TOC pointer. 1246 // It is assumed this always called after buildop. 1247 func NeedTOCpointer(ctxt *obj.Link) bool { 1248 return !pfxEnabled && ctxt.Flag_shared 1249 } 1250 1251 // Build the opcode table 1252 func buildop(ctxt *obj.Link) { 1253 // Limit PC-relative prefix instruction usage to supported and tested targets. 1254 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux" 1255 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS) 1256 if cfg == buildOpCfg { 1257 // Already initialized to correct OS/cpu; stop now. 1258 // This happens in the cmd/asm tests, 1259 // each of which re-initializes the arch. 1260 return 1261 } 1262 buildOpCfg = cfg 1263 1264 // Configure the optab entries which may generate prefix opcodes. 1265 prefixOptab := make([]Optab, 0, len(prefixableOptab)) 1266 for _, entry := range prefixableOptab { 1267 entry := entry 1268 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 { 1269 // Enable prefix opcode generation and resize. 1270 entry.ispfx = true 1271 entry.size = entry.pfxsize 1272 } 1273 prefixOptab = append(prefixOptab, entry.Optab) 1274 1275 } 1276 1277 for i := 0; i < C_NCLASS; i++ { 1278 for n := 0; n < C_NCLASS; n++ { 1279 if cmp(n, i) { 1280 xcmp[i][n] = true 1281 } 1282 } 1283 } 1284 1285 // Append the generated entries, sort, and fill out oprange. 1286 optab = make([]Optab, 0, len(optabBase)+len(optabGen)+len(prefixOptab)) 1287 optab = append(optab, optabBase...) 1288 optab = append(optab, optabGen...) 1289 optab = append(optab, prefixOptab...) 1290 sort.Slice(optab, optabLess) 1291 1292 for i := range optab { 1293 // Use the legacy assembler function if none provided. 
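		// (The generated optabGen entries are expected to supply their own asmout
		// helpers; every other entry falls back to the large legacy asmout switch.)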
1294 if optab[i].asmout == nil { 1295 optab[i].asmout = asmout 1296 } 1297 } 1298 1299 for i := 0; i < len(optab); { 1300 r := optab[i].as 1301 r0 := r & obj.AMask 1302 start := i 1303 for i < len(optab) && optab[i].as == r { 1304 i++ 1305 } 1306 oprange[r0] = optab[start:i] 1307 1308 switch r { 1309 default: 1310 if !opsetGen(r) { 1311 ctxt.Diag("unknown op in build: %v", r) 1312 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r) 1313 } 1314 1315 case ADCBF: /* unary indexed: op (b+a); op (b) */ 1316 opset(ADCBI, r0) 1317 1318 opset(ADCBST, r0) 1319 opset(ADCBT, r0) 1320 opset(ADCBTST, r0) 1321 opset(ADCBZ, r0) 1322 opset(AICBI, r0) 1323 1324 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */ 1325 opset(ASTWCCC, r0) 1326 opset(ASTHCCC, r0) 1327 opset(ASTBCCC, r0) 1328 1329 case AREM: /* macro */ 1330 opset(AREM, r0) 1331 1332 case AREMU: 1333 opset(AREMU, r0) 1334 1335 case AREMD: 1336 opset(AREMDU, r0) 1337 1338 case AMULLW: 1339 opset(AMULLD, r0) 1340 1341 case ADIVW: /* op Rb[,Ra],Rd */ 1342 opset(AMULHW, r0) 1343 1344 opset(AMULHWCC, r0) 1345 opset(AMULHWU, r0) 1346 opset(AMULHWUCC, r0) 1347 opset(AMULLWCC, r0) 1348 opset(AMULLWVCC, r0) 1349 opset(AMULLWV, r0) 1350 opset(ADIVWCC, r0) 1351 opset(ADIVWV, r0) 1352 opset(ADIVWVCC, r0) 1353 opset(ADIVWU, r0) 1354 opset(ADIVWUCC, r0) 1355 opset(ADIVWUV, r0) 1356 opset(ADIVWUVCC, r0) 1357 opset(AMODUD, r0) 1358 opset(AMODUW, r0) 1359 opset(AMODSD, r0) 1360 opset(AMODSW, r0) 1361 opset(AADDCC, r0) 1362 opset(AADDCV, r0) 1363 opset(AADDCVCC, r0) 1364 opset(AADDV, r0) 1365 opset(AADDVCC, r0) 1366 opset(AADDE, r0) 1367 opset(AADDECC, r0) 1368 opset(AADDEV, r0) 1369 opset(AADDEVCC, r0) 1370 opset(AMULHD, r0) 1371 opset(AMULHDCC, r0) 1372 opset(AMULHDU, r0) 1373 opset(AMULHDUCC, r0) 1374 opset(AMULLDCC, r0) 1375 opset(AMULLDVCC, r0) 1376 opset(AMULLDV, r0) 1377 opset(ADIVD, r0) 1378 opset(ADIVDCC, r0) 1379 opset(ADIVDE, r0) 1380 opset(ADIVDEU, r0) 1381 opset(ADIVDECC, r0) 1382 opset(ADIVDEUCC, r0) 1383 opset(ADIVDVCC, r0) 1384 opset(ADIVDV, r0) 1385 opset(ADIVDU, r0) 1386 opset(ADIVDUV, r0) 1387 opset(ADIVDUVCC, r0) 1388 opset(ADIVDUCC, r0) 1389 1390 case ACRAND: 1391 opset(ACRANDN, r0) 1392 opset(ACREQV, r0) 1393 opset(ACRNAND, r0) 1394 opset(ACRNOR, r0) 1395 opset(ACROR, r0) 1396 opset(ACRORN, r0) 1397 opset(ACRXOR, r0) 1398 1399 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */ 1400 opset(APOPCNTW, r0) 1401 opset(APOPCNTB, r0) 1402 opset(ACNTTZW, r0) 1403 opset(ACNTTZWCC, r0) 1404 opset(ACNTTZD, r0) 1405 opset(ACNTTZDCC, r0) 1406 1407 case ACOPY: /* copy, paste. 
*/ 1408 opset(APASTECC, r0) 1409 1410 case AMADDHD: /* maddhd, maddhdu, maddld */ 1411 opset(AMADDHDU, r0) 1412 opset(AMADDLD, r0) 1413 1414 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */ 1415 opset(AMOVH, r0) 1416 opset(AMOVHZ, r0) 1417 1418 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */ 1419 opset(AMOVHU, r0) 1420 1421 opset(AMOVHZU, r0) 1422 opset(AMOVWU, r0) 1423 opset(AMOVWZU, r0) 1424 opset(AMOVDU, r0) 1425 opset(AMOVMW, r0) 1426 1427 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */ 1428 opset(ALVEHX, r0) 1429 opset(ALVEWX, r0) 1430 opset(ALVX, r0) 1431 opset(ALVXL, r0) 1432 opset(ALVSL, r0) 1433 opset(ALVSR, r0) 1434 1435 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */ 1436 opset(ASTVEHX, r0) 1437 opset(ASTVEWX, r0) 1438 opset(ASTVX, r0) 1439 opset(ASTVXL, r0) 1440 1441 case AVAND: /* vand, vandc, vnand */ 1442 opset(AVAND, r0) 1443 opset(AVANDC, r0) 1444 opset(AVNAND, r0) 1445 1446 case AVMRGOW: /* vmrgew, vmrgow */ 1447 opset(AVMRGEW, r0) 1448 1449 case AVOR: /* vor, vorc, vxor, vnor, veqv */ 1450 opset(AVOR, r0) 1451 opset(AVORC, r0) 1452 opset(AVXOR, r0) 1453 opset(AVNOR, r0) 1454 opset(AVEQV, r0) 1455 1456 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */ 1457 opset(AVADDUBM, r0) 1458 opset(AVADDUHM, r0) 1459 opset(AVADDUWM, r0) 1460 opset(AVADDUDM, r0) 1461 opset(AVADDUQM, r0) 1462 1463 case AVADDCU: /* vaddcuq, vaddcuw */ 1464 opset(AVADDCUQ, r0) 1465 opset(AVADDCUW, r0) 1466 1467 case AVADDUS: /* vaddubs, vadduhs, vadduws */ 1468 opset(AVADDUBS, r0) 1469 opset(AVADDUHS, r0) 1470 opset(AVADDUWS, r0) 1471 1472 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */ 1473 opset(AVADDSBS, r0) 1474 opset(AVADDSHS, r0) 1475 opset(AVADDSWS, r0) 1476 1477 case AVADDE: /* vaddeuqm, vaddecuq */ 1478 opset(AVADDEUQM, r0) 1479 opset(AVADDECUQ, r0) 1480 1481 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */ 1482 opset(AVSUBUBM, r0) 1483 opset(AVSUBUHM, r0) 1484 opset(AVSUBUWM, r0) 1485 opset(AVSUBUDM, r0) 1486 opset(AVSUBUQM, r0) 1487 1488 case AVSUBCU: /* vsubcuq, vsubcuw */ 1489 opset(AVSUBCUQ, r0) 1490 opset(AVSUBCUW, r0) 1491 1492 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */ 1493 opset(AVSUBUBS, r0) 1494 opset(AVSUBUHS, r0) 1495 opset(AVSUBUWS, r0) 1496 1497 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */ 1498 opset(AVSUBSBS, r0) 1499 opset(AVSUBSHS, r0) 1500 opset(AVSUBSWS, r0) 1501 1502 case AVSUBE: /* vsubeuqm, vsubecuq */ 1503 opset(AVSUBEUQM, r0) 1504 opset(AVSUBECUQ, r0) 1505 1506 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */ 1507 opset(AVMULOSB, r0) 1508 opset(AVMULEUB, r0) 1509 opset(AVMULOUB, r0) 1510 opset(AVMULESH, r0) 1511 opset(AVMULOSH, r0) 1512 opset(AVMULEUH, r0) 1513 opset(AVMULOUH, r0) 1514 opset(AVMULESW, r0) 1515 opset(AVMULOSW, r0) 1516 opset(AVMULEUW, r0) 1517 opset(AVMULOUW, r0) 1518 opset(AVMULUWM, r0) 1519 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */ 1520 opset(AVPMSUMB, r0) 1521 opset(AVPMSUMH, r0) 1522 opset(AVPMSUMW, r0) 1523 opset(AVPMSUMD, r0) 1524 1525 case AVR: /* vrlb, vrlh, vrlw, vrld */ 1526 opset(AVRLB, r0) 1527 opset(AVRLH, r0) 1528 opset(AVRLW, r0) 1529 opset(AVRLD, r0) 1530 1531 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */ 1532 opset(AVSLB, r0) 1533 opset(AVSLH, r0) 1534 opset(AVSLW, r0) 1535 opset(AVSL, r0) 1536 opset(AVSLO, r0) 1537 opset(AVSRB, r0) 1538 opset(AVSRH, r0) 1539 opset(AVSRW, r0) 1540 opset(AVSR, r0) 1541 opset(AVSRO, r0) 1542 
opset(AVSLD, r0) 1543 opset(AVSRD, r0) 1544 1545 case AVSA: /* vsrab, vsrah, vsraw, vsrad */ 1546 opset(AVSRAB, r0) 1547 opset(AVSRAH, r0) 1548 opset(AVSRAW, r0) 1549 opset(AVSRAD, r0) 1550 1551 case AVSOI: /* vsldoi */ 1552 opset(AVSLDOI, r0) 1553 1554 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */ 1555 opset(AVCLZB, r0) 1556 opset(AVCLZH, r0) 1557 opset(AVCLZW, r0) 1558 opset(AVCLZD, r0) 1559 1560 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */ 1561 opset(AVPOPCNTB, r0) 1562 opset(AVPOPCNTH, r0) 1563 opset(AVPOPCNTW, r0) 1564 opset(AVPOPCNTD, r0) 1565 1566 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */ 1567 opset(AVCMPEQUB, r0) 1568 opset(AVCMPEQUBCC, r0) 1569 opset(AVCMPEQUH, r0) 1570 opset(AVCMPEQUHCC, r0) 1571 opset(AVCMPEQUW, r0) 1572 opset(AVCMPEQUWCC, r0) 1573 opset(AVCMPEQUD, r0) 1574 opset(AVCMPEQUDCC, r0) 1575 1576 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */ 1577 opset(AVCMPGTUB, r0) 1578 opset(AVCMPGTUBCC, r0) 1579 opset(AVCMPGTUH, r0) 1580 opset(AVCMPGTUHCC, r0) 1581 opset(AVCMPGTUW, r0) 1582 opset(AVCMPGTUWCC, r0) 1583 opset(AVCMPGTUD, r0) 1584 opset(AVCMPGTUDCC, r0) 1585 opset(AVCMPGTSB, r0) 1586 opset(AVCMPGTSBCC, r0) 1587 opset(AVCMPGTSH, r0) 1588 opset(AVCMPGTSHCC, r0) 1589 opset(AVCMPGTSW, r0) 1590 opset(AVCMPGTSWCC, r0) 1591 opset(AVCMPGTSD, r0) 1592 opset(AVCMPGTSDCC, r0) 1593 1594 case AVCMPNEZB: /* vcmpnezb[.] */ 1595 opset(AVCMPNEZBCC, r0) 1596 opset(AVCMPNEB, r0) 1597 opset(AVCMPNEBCC, r0) 1598 opset(AVCMPNEH, r0) 1599 opset(AVCMPNEHCC, r0) 1600 opset(AVCMPNEW, r0) 1601 opset(AVCMPNEWCC, r0) 1602 1603 case AVPERM: /* vperm */ 1604 opset(AVPERMXOR, r0) 1605 opset(AVPERMR, r0) 1606 1607 case AVBPERMQ: /* vbpermq, vbpermd */ 1608 opset(AVBPERMD, r0) 1609 1610 case AVSEL: /* vsel */ 1611 opset(AVSEL, r0) 1612 1613 case AVSPLTB: /* vspltb, vsplth, vspltw */ 1614 opset(AVSPLTH, r0) 1615 opset(AVSPLTW, r0) 1616 1617 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */ 1618 opset(AVSPLTISH, r0) 1619 opset(AVSPLTISW, r0) 1620 1621 case AVCIPH: /* vcipher, vcipherlast */ 1622 opset(AVCIPHER, r0) 1623 opset(AVCIPHERLAST, r0) 1624 1625 case AVNCIPH: /* vncipher, vncipherlast */ 1626 opset(AVNCIPHER, r0) 1627 opset(AVNCIPHERLAST, r0) 1628 1629 case AVSBOX: /* vsbox */ 1630 opset(AVSBOX, r0) 1631 1632 case AVSHASIGMA: /* vshasigmaw, vshasigmad */ 1633 opset(AVSHASIGMAW, r0) 1634 opset(AVSHASIGMAD, r0) 1635 1636 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */ 1637 opset(ALXVDSX, r0) 1638 opset(ALXVW4X, r0) 1639 opset(ALXVH8X, r0) 1640 opset(ALXVB16X, r0) 1641 1642 case ALXV: /* lxv */ 1643 opset(ALXV, r0) 1644 1645 case ALXVL: /* lxvl, lxvll, lxvx */ 1646 opset(ALXVLL, r0) 1647 opset(ALXVX, r0) 1648 1649 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */ 1650 opset(ASTXVW4X, r0) 1651 opset(ASTXVH8X, r0) 1652 opset(ASTXVB16X, r0) 1653 1654 case ASTXV: /* stxv */ 1655 opset(ASTXV, r0) 1656 1657 case ASTXVL: /* stxvl, stxvll, stvx */ 1658 opset(ASTXVLL, r0) 1659 opset(ASTXVX, r0) 1660 1661 case ALXSDX: /* lxsdx */ 1662 opset(ALXSDX, r0) 1663 1664 case ASTXSDX: /* stxsdx */ 1665 opset(ASTXSDX, r0) 1666 1667 case ALXSIWAX: /* lxsiwax, lxsiwzx */ 1668 opset(ALXSIWZX, r0) 1669 1670 case ASTXSIWX: /* stxsiwx */ 1671 opset(ASTXSIWX, r0) 1672 1673 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */ 1674 opset(AMFFPRD, r0) 1675 opset(AMFVRD, r0) 1676 opset(AMFVSRWZ, r0) 1677 opset(AMFVSRLD, r0) 1678 1679 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), 
mtvsrdd, mtvsrws */ 1680 opset(AMTFPRD, r0) 1681 opset(AMTVRD, r0) 1682 opset(AMTVSRWA, r0) 1683 opset(AMTVSRWZ, r0) 1684 opset(AMTVSRWS, r0) 1685 1686 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */ 1687 opset(AXXLANDC, r0) 1688 opset(AXXLEQV, r0) 1689 opset(AXXLNAND, r0) 1690 1691 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */ 1692 opset(AXXLORC, r0) 1693 opset(AXXLNOR, r0) 1694 opset(AXXLORQ, r0) 1695 opset(AXXLXOR, r0) 1696 1697 case AXXSEL: /* xxsel */ 1698 opset(AXXSEL, r0) 1699 1700 case AXXMRGHW: /* xxmrghw, xxmrglw */ 1701 opset(AXXMRGLW, r0) 1702 1703 case AXXSPLTW: /* xxspltw */ 1704 opset(AXXSPLTW, r0) 1705 1706 case AXXSPLTIB: /* xxspltib */ 1707 opset(AXXSPLTIB, r0) 1708 1709 case AXXPERM: /* xxpermdi */ 1710 opset(AXXPERM, r0) 1711 1712 case AXXSLDWI: /* xxsldwi */ 1713 opset(AXXPERMDI, r0) 1714 opset(AXXSLDWI, r0) 1715 1716 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */ 1717 opset(AXXBRD, r0) 1718 opset(AXXBRW, r0) 1719 opset(AXXBRH, r0) 1720 1721 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */ 1722 opset(AXSCVSPDP, r0) 1723 opset(AXSCVDPSPN, r0) 1724 opset(AXSCVSPDPN, r0) 1725 1726 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */ 1727 opset(AXVCVSPDP, r0) 1728 1729 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */ 1730 opset(AXSCVDPSXWS, r0) 1731 opset(AXSCVDPUXDS, r0) 1732 opset(AXSCVDPUXWS, r0) 1733 1734 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */ 1735 opset(AXSCVUXDDP, r0) 1736 opset(AXSCVSXDSP, r0) 1737 opset(AXSCVUXDSP, r0) 1738 1739 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */ 1740 opset(AXVCVDPSXDS, r0) 1741 opset(AXVCVDPSXWS, r0) 1742 opset(AXVCVDPUXDS, r0) 1743 opset(AXVCVDPUXWS, r0) 1744 opset(AXVCVSPSXDS, r0) 1745 opset(AXVCVSPSXWS, r0) 1746 opset(AXVCVSPUXDS, r0) 1747 opset(AXVCVSPUXWS, r0) 1748 1749 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */ 1750 opset(AXVCVSXWDP, r0) 1751 opset(AXVCVUXDDP, r0) 1752 opset(AXVCVUXWDP, r0) 1753 opset(AXVCVSXDSP, r0) 1754 opset(AXVCVSXWSP, r0) 1755 opset(AXVCVUXDSP, r0) 1756 opset(AXVCVUXWSP, r0) 1757 1758 case AAND: /* logical op Rb,Rs,Ra; no literal */ 1759 opset(AANDN, r0) 1760 opset(AANDNCC, r0) 1761 opset(AEQV, r0) 1762 opset(AEQVCC, r0) 1763 opset(ANAND, r0) 1764 opset(ANANDCC, r0) 1765 opset(ANOR, r0) 1766 opset(ANORCC, r0) 1767 opset(AORCC, r0) 1768 opset(AORN, r0) 1769 opset(AORNCC, r0) 1770 opset(AXORCC, r0) 1771 1772 case AADDME: /* op Ra, Rd */ 1773 opset(AADDMECC, r0) 1774 1775 opset(AADDMEV, r0) 1776 opset(AADDMEVCC, r0) 1777 opset(AADDZE, r0) 1778 opset(AADDZECC, r0) 1779 opset(AADDZEV, r0) 1780 opset(AADDZEVCC, r0) 1781 opset(ASUBME, r0) 1782 opset(ASUBMECC, r0) 1783 opset(ASUBMEV, r0) 1784 opset(ASUBMEVCC, r0) 1785 opset(ASUBZE, r0) 1786 opset(ASUBZECC, r0) 1787 opset(ASUBZEV, r0) 1788 opset(ASUBZEVCC, r0) 1789 1790 case AADDC: 1791 opset(AADDCCC, r0) 1792 1793 case ABEQ: 1794 opset(ABGE, r0) 1795 opset(ABGT, r0) 1796 opset(ABLE, r0) 1797 opset(ABLT, r0) 1798 opset(ABNE, r0) 1799 opset(ABVC, r0) 1800 opset(ABVS, r0) 1801 1802 case ABR: 1803 opset(ABL, r0) 1804 1805 case ABC: 1806 opset(ABCL, r0) 1807 1808 case ABDNZ: 1809 opset(ABDZ, r0) 1810 1811 case AEXTSB: /* op Rs, Ra */ 1812 opset(AEXTSBCC, r0) 1813 1814 opset(AEXTSH, r0) 1815 opset(AEXTSHCC, r0) 1816 opset(ACNTLZW, r0) 1817 opset(ACNTLZWCC, r0) 1818 opset(ACNTLZD, r0) 1819 opset(AEXTSW, r0) 1820 opset(AEXTSWCC, r0) 1821 opset(ACNTLZDCC, r0) 1822 1823 case AFABS: 
/* fop [s,]d */ 1824 opset(AFABSCC, r0) 1825 1826 opset(AFNABS, r0) 1827 opset(AFNABSCC, r0) 1828 opset(AFNEG, r0) 1829 opset(AFNEGCC, r0) 1830 opset(AFRSP, r0) 1831 opset(AFRSPCC, r0) 1832 opset(AFCTIW, r0) 1833 opset(AFCTIWCC, r0) 1834 opset(AFCTIWZ, r0) 1835 opset(AFCTIWZCC, r0) 1836 opset(AFCTID, r0) 1837 opset(AFCTIDCC, r0) 1838 opset(AFCTIDZ, r0) 1839 opset(AFCTIDZCC, r0) 1840 opset(AFCFID, r0) 1841 opset(AFCFIDCC, r0) 1842 opset(AFCFIDU, r0) 1843 opset(AFCFIDUCC, r0) 1844 opset(AFCFIDS, r0) 1845 opset(AFCFIDSCC, r0) 1846 opset(AFRES, r0) 1847 opset(AFRESCC, r0) 1848 opset(AFRIM, r0) 1849 opset(AFRIMCC, r0) 1850 opset(AFRIP, r0) 1851 opset(AFRIPCC, r0) 1852 opset(AFRIZ, r0) 1853 opset(AFRIZCC, r0) 1854 opset(AFRIN, r0) 1855 opset(AFRINCC, r0) 1856 opset(AFRSQRTE, r0) 1857 opset(AFRSQRTECC, r0) 1858 opset(AFSQRT, r0) 1859 opset(AFSQRTCC, r0) 1860 opset(AFSQRTS, r0) 1861 opset(AFSQRTSCC, r0) 1862 1863 case AFADD: 1864 opset(AFADDS, r0) 1865 opset(AFADDCC, r0) 1866 opset(AFADDSCC, r0) 1867 opset(AFCPSGN, r0) 1868 opset(AFCPSGNCC, r0) 1869 opset(AFDIV, r0) 1870 opset(AFDIVS, r0) 1871 opset(AFDIVCC, r0) 1872 opset(AFDIVSCC, r0) 1873 opset(AFSUB, r0) 1874 opset(AFSUBS, r0) 1875 opset(AFSUBCC, r0) 1876 opset(AFSUBSCC, r0) 1877 1878 case AFMADD: 1879 opset(AFMADDCC, r0) 1880 opset(AFMADDS, r0) 1881 opset(AFMADDSCC, r0) 1882 opset(AFMSUB, r0) 1883 opset(AFMSUBCC, r0) 1884 opset(AFMSUBS, r0) 1885 opset(AFMSUBSCC, r0) 1886 opset(AFNMADD, r0) 1887 opset(AFNMADDCC, r0) 1888 opset(AFNMADDS, r0) 1889 opset(AFNMADDSCC, r0) 1890 opset(AFNMSUB, r0) 1891 opset(AFNMSUBCC, r0) 1892 opset(AFNMSUBS, r0) 1893 opset(AFNMSUBSCC, r0) 1894 opset(AFSEL, r0) 1895 opset(AFSELCC, r0) 1896 1897 case AFMUL: 1898 opset(AFMULS, r0) 1899 opset(AFMULCC, r0) 1900 opset(AFMULSCC, r0) 1901 1902 case AFCMPO: 1903 opset(AFCMPU, r0) 1904 1905 case AMTFSB0: 1906 opset(AMTFSB0CC, r0) 1907 opset(AMTFSB1, r0) 1908 opset(AMTFSB1CC, r0) 1909 1910 case ANEG: /* op [Ra,] Rd */ 1911 opset(ANEGCC, r0) 1912 1913 opset(ANEGV, r0) 1914 opset(ANEGVCC, r0) 1915 1916 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */ 1917 opset(AXOR, r0) 1918 1919 case AORIS: /* oris/xoris $uimm,Rs,Ra */ 1920 opset(AXORIS, r0) 1921 1922 case ASLW: 1923 opset(ASLWCC, r0) 1924 opset(ASRW, r0) 1925 opset(ASRWCC, r0) 1926 opset(AROTLW, r0) 1927 1928 case ASLD: 1929 opset(ASLDCC, r0) 1930 opset(ASRD, r0) 1931 opset(ASRDCC, r0) 1932 opset(AROTL, r0) 1933 1934 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ 1935 opset(ASRAWCC, r0) 1936 1937 case AEXTSWSLI: 1938 opset(AEXTSWSLICC, r0) 1939 1940 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ 1941 opset(ASRADCC, r0) 1942 1943 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */ 1944 opset(ASUB, r0) 1945 1946 opset(ASUBCC, r0) 1947 opset(ASUBV, r0) 1948 opset(ASUBVCC, r0) 1949 opset(ASUBCCC, r0) 1950 opset(ASUBCV, r0) 1951 opset(ASUBCVCC, r0) 1952 opset(ASUBE, r0) 1953 opset(ASUBECC, r0) 1954 opset(ASUBEV, r0) 1955 opset(ASUBEVCC, r0) 1956 1957 case ASYNC: 1958 opset(AISYNC, r0) 1959 opset(ALWSYNC, r0) 1960 opset(APTESYNC, r0) 1961 opset(ATLBSYNC, r0) 1962 1963 case ARLWNM: 1964 opset(ARLWNMCC, r0) 1965 opset(ARLWMI, r0) 1966 opset(ARLWMICC, r0) 1967 1968 case ARLDMI: 1969 opset(ARLDMICC, r0) 1970 opset(ARLDIMI, r0) 1971 opset(ARLDIMICC, r0) 1972 1973 case ARLDC: 1974 opset(ARLDCCC, r0) 1975 1976 case ARLDCL: 1977 opset(ARLDCR, r0) 1978 opset(ARLDCLCC, r0) 1979 opset(ARLDCRCC, r0) 1980 1981 case ARLDICL: 1982 opset(ARLDICLCC, r0) 1983 opset(ARLDICR, r0) 1984 opset(ARLDICRCC, r0) 1985 opset(ARLDIC, r0) 1986 opset(ARLDICCC, 
r0) 1987 opset(ACLRLSLDI, r0) 1988 1989 case AFMOVD: 1990 opset(AFMOVDCC, r0) 1991 opset(AFMOVDU, r0) 1992 opset(AFMOVS, r0) 1993 opset(AFMOVSU, r0) 1994 1995 case ALDAR: 1996 opset(ALBAR, r0) 1997 opset(ALHAR, r0) 1998 opset(ALWAR, r0) 1999 2000 case ASYSCALL: /* just the op; flow of control */ 2001 opset(ARFI, r0) 2002 2003 opset(ARFCI, r0) 2004 opset(ARFID, r0) 2005 opset(AHRFID, r0) 2006 2007 case AMOVHBR: 2008 opset(AMOVWBR, r0) 2009 opset(AMOVDBR, r0) 2010 2011 case ASLBMFEE: 2012 opset(ASLBMFEV, r0) 2013 2014 case ATW: 2015 opset(ATD, r0) 2016 2017 case ATLBIE: 2018 opset(ASLBIE, r0) 2019 opset(ATLBIEL, r0) 2020 2021 case AEIEIO: 2022 opset(ASLBIA, r0) 2023 2024 case ACMP: 2025 opset(ACMPW, r0) 2026 2027 case ACMPU: 2028 opset(ACMPWU, r0) 2029 2030 case ACMPB: 2031 opset(ACMPB, r0) 2032 2033 case AFTDIV: 2034 opset(AFTDIV, r0) 2035 2036 case AFTSQRT: 2037 opset(AFTSQRT, r0) 2038 2039 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */ 2040 opset(AMOVWZ, r0) /* Same as above, but zero extended */ 2041 2042 case AVCLZLSBB: 2043 opset(AVCTZLSBB, r0) 2044 2045 case AADD, 2046 AADDIS, 2047 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */ 2048 AANDISCC, 2049 AFMOVSX, 2050 AFMOVSZ, 2051 ALSW, 2052 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */ 2053 AMOVB, /* macro: move byte with sign extension */ 2054 AMOVBU, /* macro: move byte with sign extension & update */ 2055 AMOVFL, 2056 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */ 2057 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */ 2058 ASTSW, 2059 ASLBMTE, 2060 AWORD, 2061 ADWORD, 2062 ADARN, 2063 AVMSUMUDM, 2064 AADDEX, 2065 ACMPEQB, 2066 ACLRLSLWI, 2067 AMTVSRDD, 2068 APNOP, 2069 AISEL, 2070 ASETB, 2071 obj.ANOP, 2072 obj.ATEXT, 2073 obj.AUNDEF, 2074 obj.AFUNCDATA, 2075 obj.APCALIGN, 2076 obj.APCDATA, 2077 obj.ADUFFZERO, 2078 obj.ADUFFCOPY: 2079 break 2080 } 2081 } 2082 } 2083 2084 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 { 2085 return o<<26 | xo<<1 | oe<<11 2086 } 2087 2088 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 { 2089 return o<<26 | xo<<2 | oe<<11 2090 } 2091 2092 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 { 2093 return o<<26 | xo<<2 | oe<<16 2094 } 2095 2096 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 { 2097 return o<<26 | xo<<3 | oe<<11 2098 } 2099 2100 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 { 2101 return o<<26 | xo<<4 | oe<<11 2102 } 2103 2104 func OPDQ(o uint32, xo uint32, oe uint32) uint32 { 2105 return o<<26 | xo | oe<<4 2106 } 2107 2108 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2109 return o<<26 | xo | oe<<11 | rc&1 2110 } 2111 2112 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2113 return o<<26 | xo | oe<<11 | (rc&1)<<10 2114 } 2115 2116 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 2117 return o<<26 | xo<<1 | oe<<10 | rc&1 2118 } 2119 2120 func OPCC(o uint32, xo uint32, rc uint32) uint32 { 2121 return OPVCC(o, xo, 0, rc) 2122 } 2123 2124 /* Generate MD-form opcode */ 2125 func OPMD(o, xo, rc uint32) uint32 { 2126 return o<<26 | xo<<2 | rc&1 2127 } 2128 2129 /* the order is dest, a/s, b/imm for both arithmetic and logical operations. 
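For example, AOP_RRR(OP_ADD, rt, ra, rb) assembles add rt,ra,rb: rt fills the RT field, ra the RA field and rb the RB field.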
*/ 2130 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 { 2131 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 2132 } 2133 2134 /* VX-form 2-register operands, r/none/r */ 2135 func AOP_RR(op uint32, d uint32, a uint32) uint32 { 2136 return op | (d&31)<<21 | (a&31)<<11 2137 } 2138 2139 /* VA-form 4-register operands */ 2140 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 2141 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6 2142 } 2143 2144 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 { 2145 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF 2146 } 2147 2148 /* VX-form 2-register + UIM operands */ 2149 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 { 2150 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11 2151 } 2152 2153 /* VX-form 2-register + ST + SIX operands */ 2154 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 { 2155 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11 2156 } 2157 2158 /* VA-form 3-register + SHB operands */ 2159 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 { 2160 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6 2161 } 2162 2163 /* VX-form 1-register + SIM operands */ 2164 func AOP_IR(op uint32, d uint32, simm uint32) uint32 { 2165 return op | (d&31)<<21 | (simm&31)<<16 2166 } 2167 2168 /* XX1-form 3-register operands, 1 VSR operand */ 2169 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 { 2170 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5 2171 } 2172 2173 /* XX2-form 3-register operands, 2 VSR operands */ 2174 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 { 2175 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5 2176 } 2177 2178 /* XX3-form 3 VSR operands */ 2179 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 { 2180 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 2181 } 2182 2183 /* XX3-form 3 VSR operands + immediate */ 2184 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 { 2185 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 2186 } 2187 2188 /* XX4-form, 4 VSR operands */ 2189 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 { 2190 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 2191 } 2192 2193 /* DQ-form, VSR register, register + offset operands */ 2194 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 { 2195 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */ 2196 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */ 2197 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */ 2198 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */ 2199 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */ 2200 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. 
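For example, a byte offset of 32752 passed in as 'b' is encoded as DQ = 32752>>4 = 2047; an offset whose low four bits are nonzero cannot be represented in this form.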
*/ 2201 dq := b >> 4 2202 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2 2203 } 2204 2205 /* Z23-form, 3-register operands + CY field */ 2206 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 2207 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9 2208 } 2209 2210 /* X-form, 3-register operands + EH field */ 2211 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 2212 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1) 2213 } 2214 2215 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 { 2216 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11 2217 } 2218 2219 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 { 2220 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF 2221 } 2222 2223 func OP_BR(op uint32, li uint32, aa uint32) uint32 { 2224 return op | li&0x03FFFFFC | aa<<1 2225 } 2226 2227 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 { 2228 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1 2229 } 2230 2231 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 { 2232 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 2233 } 2234 2235 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 { 2236 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1 2237 } 2238 2239 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 { 2240 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 2241 } 2242 2243 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 { 2244 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6 2245 } 2246 2247 /* MD-form 2-register, 2 6-bit immediate operands */ 2248 func AOP_MD(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 { 2249 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5 2250 } 2251 2252 /* MDS-form 3-register, 1 6-bit immediate operands. rsh argument is a register. 
*/ 2253 func AOP_MDS(op, to, from, rsh, m uint32) uint32 { 2254 return AOP_MD(op, to, from, rsh&31, m) 2255 } 2256 2257 func AOP_PFX_00_8LS(r, ie uint32) uint32 { 2258 return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF) 2259 } 2260 func AOP_PFX_10_MLS(r, ie uint32) uint32 { 2261 return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF) 2262 } 2263 2264 const ( 2265 /* each rhs is OPVCC(_, _, _, _) */ 2266 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0 2267 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0 2268 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0 2269 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0 2270 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0 2271 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0 2272 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0 2273 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0 2274 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0 2275 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0 2276 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0 2277 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0 2278 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0 2279 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0 2280 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0 2281 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0 2282 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0 2283 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0 2284 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0 2285 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0 2286 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0 2287 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0 2288 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0 2289 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0 2290 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0 2291 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0 2292 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0 2293 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0 2294 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0 2295 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0 2296 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0 2297 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0 2298 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0 2299 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0 2300 OP_EXTSWSLI = 31<<26 | 445<<2 2301 OP_SETB = 31<<26 | 128<<1 2302 ) 2303 2304 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) { 2305 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32)) 2306 } 2307 2308 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) { 2309 switch a { 2310 case AMOVH: 2311 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0) 2312 case AMOVW: 2313 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0) 2314 case AMOVD: 2315 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0) 2316 case AMOVBZ, AMOVB: 2317 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0) 2318 case AMOVHZ: 2319 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0) 2320 case AMOVWZ: 2321 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0) 2322 case AFMOVS: 2323 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0) 2324 case AFMOVD: 2325 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0) 2326 } 2327 log.Fatalf("Error no pfxload for %v\n", a) 2328 return 0, 0 2329 } 2330 2331 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) { 2332 switch a { 2333 case AMOVD: 2334 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0) 2335 case AMOVBZ, AMOVB: 2336 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0) 2337 case AMOVHZ, AMOVH: 2338 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0) 2339 case AMOVWZ, AMOVW: 2340 return 
AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0) 2341 case AFMOVS: 2342 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0) 2343 case AFMOVD: 2344 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0) 2345 } 2346 log.Fatalf("Error no pfxstore for %v\n", a) 2347 return 0, 0 2348 } 2349 2350 func oclass(a *obj.Addr) int { 2351 return int(a.Class) - 1 2352 } 2353 2354 const ( 2355 D_FORM = iota 2356 DS_FORM 2357 ) 2358 2359 // This function determines when a non-indexed load or store is D or 2360 // DS form for use in finding the size of the offset field in the instruction. 2361 // The size is needed when setting the offset value in the instruction 2362 // and when generating relocation for that field. 2363 // DS form instructions include: ld, ldu, lwa, std, stdu. All other 2364 // loads and stores with an offset field are D form. This function should 2365 // only be called with the same opcodes as are handled by opstore and opload. 2366 func (c *ctxt9) opform(insn uint32) int { 2367 switch insn { 2368 default: 2369 c.ctxt.Diag("bad insn in loadform: %x", insn) 2370 case OPVCC(58, 0, 0, 0), // ld 2371 OPVCC(58, 0, 0, 1), // ldu 2372 OPVCC(58, 0, 0, 0) | 1<<1, // lwa 2373 OPVCC(62, 0, 0, 0), // std 2374 OPVCC(62, 0, 0, 1): //stdu 2375 return DS_FORM 2376 case OP_ADDI, // add 2377 OPVCC(32, 0, 0, 0), // lwz 2378 OPVCC(33, 0, 0, 0), // lwzu 2379 OPVCC(34, 0, 0, 0), // lbz 2380 OPVCC(35, 0, 0, 0), // lbzu 2381 OPVCC(40, 0, 0, 0), // lhz 2382 OPVCC(41, 0, 0, 0), // lhzu 2383 OPVCC(42, 0, 0, 0), // lha 2384 OPVCC(43, 0, 0, 0), // lhau 2385 OPVCC(46, 0, 0, 0), // lmw 2386 OPVCC(48, 0, 0, 0), // lfs 2387 OPVCC(49, 0, 0, 0), // lfsu 2388 OPVCC(50, 0, 0, 0), // lfd 2389 OPVCC(51, 0, 0, 0), // lfdu 2390 OPVCC(36, 0, 0, 0), // stw 2391 OPVCC(37, 0, 0, 0), // stwu 2392 OPVCC(38, 0, 0, 0), // stb 2393 OPVCC(39, 0, 0, 0), // stbu 2394 OPVCC(44, 0, 0, 0), // sth 2395 OPVCC(45, 0, 0, 0), // sthu 2396 OPVCC(47, 0, 0, 0), // stmw 2397 OPVCC(52, 0, 0, 0), // stfs 2398 OPVCC(53, 0, 0, 0), // stfsu 2399 OPVCC(54, 0, 0, 0), // stfd 2400 OPVCC(55, 0, 0, 0): // stfdu 2401 return D_FORM 2402 } 2403 return 0 2404 } 2405 2406 // Encode instructions and create relocation for accessing s+d according to the 2407 // instruction op with source or destination (as appropriate) register reg. 2408 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) { 2409 if c.ctxt.Headtype == objabi.Haix { 2410 // Every symbol access must be made via a TOC anchor. 2411 c.ctxt.Diag("symbolAccess called for %s", s.Name) 2412 } 2413 var base uint32 2414 form := c.opform(op) 2415 if c.ctxt.Flag_shared { 2416 base = REG_R2 2417 } else { 2418 base = REG_R0 2419 } 2420 // If reg can be reused when computing the symbol address, 2421 // use it instead of REGTMP. 
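// Either way the pair is an addis with a zero immediate followed by the D/DS-form op with a
// zero displacement; the 8-byte relocation created below fills in the high and low halves of s+d.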
2422 if !reuse { 2423 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) 2424 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) 2425 } else { 2426 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0) 2427 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0) 2428 } 2429 rel = obj.Addrel(c.cursym) 2430 rel.Off = int32(c.pc) 2431 rel.Siz = 8 2432 rel.Sym = s 2433 rel.Add = d 2434 if c.ctxt.Flag_shared { 2435 switch form { 2436 case D_FORM: 2437 rel.Type = objabi.R_ADDRPOWER_TOCREL 2438 case DS_FORM: 2439 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS 2440 } 2441 2442 } else { 2443 switch form { 2444 case D_FORM: 2445 rel.Type = objabi.R_ADDRPOWER 2446 case DS_FORM: 2447 rel.Type = objabi.R_ADDRPOWER_DS 2448 } 2449 } 2450 return 2451 } 2452 2453 // Determine the mask begin (mb) and mask end (me) values 2454 // for a valid word rotate mask. A valid 32 bit mask is of 2455 // the form 1+0*1+ or 0*1+0*. 2456 // 2457 // Note, me is inclusive. 2458 func decodeMask32(mask uint32) (mb, me uint32, valid bool) { 2459 mb = uint32(bits.LeadingZeros32(mask)) 2460 me = uint32(32 - bits.TrailingZeros32(mask)) 2461 mbn := uint32(bits.LeadingZeros32(^mask)) 2462 men := uint32(32 - bits.TrailingZeros32(^mask)) 2463 // Check for a wrapping mask (e.g bits at 0 and 31) 2464 if mb == 0 && me == 32 { 2465 // swap the inverted values 2466 mb, me = men, mbn 2467 } 2468 2469 // Validate mask is of the binary form 1+0*1+ or 0*1+0* 2470 // Isolate rightmost 1 (if none 0) and add. 2471 v := mask 2472 vp := (v & -v) + v 2473 // Likewise, check for the wrapping (inverted) case. 2474 vn := ^v 2475 vpn := (vn & -vn) + vn 2476 return mb, (me - 1) & 31, (v&vp == 0 || vn&vpn == 0) && v != 0 2477 } 2478 2479 // Decompose a mask of contiguous bits into a begin (mb) and 2480 // end (me) value. 2481 // 2482 // 64b mask values cannot wrap on any valid PPC64 instruction. 2483 // Only masks of the form 0*1+0* are valid. 2484 // 2485 // Note, me is inclusive. 2486 func decodeMask64(mask int64) (mb, me uint32, valid bool) { 2487 m := uint64(mask) 2488 mb = uint32(bits.LeadingZeros64(m)) 2489 me = uint32(64 - bits.TrailingZeros64(m)) 2490 valid = ((m&-m)+m)&m == 0 && m != 0 2491 return mb, (me - 1) & 63, valid 2492 } 2493 2494 // Load the lower 16 bits of a constant into register r. 2495 func loadl16(r int, d int64) uint32 { 2496 v := uint16(d) 2497 if v == 0 { 2498 // Avoid generating "ori r,r,0", r != 0. Instead, generate the architectually preferred nop. 2499 // For example, "ori r31,r31,0" is a special execution serializing nop on Power10 called "exser". 2500 return NOP 2501 } 2502 return LOP_IRR(OP_ORI, uint32(r), uint32(r), uint32(v)) 2503 } 2504 2505 // Load the upper 16 bits of a 32b constant into register r. 
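// When d fits in 32 bits unsigned, oris against R0 is used so the upper word stays zero;
// otherwise addis sign-extends the upper halfword.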
2506 func loadu32(r int, d int64) uint32 { 2507 v := int32(d >> 16) 2508 if isuint32(uint64(d)) { 2509 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v)) 2510 } 2511 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v)) 2512 } 2513 2514 func high16adjusted(d int32) uint16 { 2515 if d&0x8000 != 0 { 2516 return uint16((d >> 16) + 1) 2517 } 2518 return uint16(d >> 16) 2519 } 2520 2521 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { 2522 o1 := uint32(0) 2523 o2 := uint32(0) 2524 o3 := uint32(0) 2525 o4 := uint32(0) 2526 o5 := uint32(0) 2527 2528 //print("%v => case %d\n", p, o->type); 2529 switch o.type_ { 2530 default: 2531 c.ctxt.Diag("unknown type %d", o.type_) 2532 prasm(p) 2533 2534 case 0: /* pseudo ops */ 2535 break 2536 2537 case 2: /* int/cr/fp op Rb,[Ra],Rd */ 2538 r := int(p.Reg) 2539 2540 if r == 0 { 2541 r = int(p.To.Reg) 2542 } 2543 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2544 2545 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */ 2546 d := c.vregoff(&p.From) 2547 2548 v := int32(d) 2549 r := int(p.From.Reg) 2550 // p.From may be a constant value or an offset(reg) type argument. 2551 isZeroOrR0 := r&0x1f == 0 2552 2553 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) { 2554 c.ctxt.Diag("literal operation on R0\n%v", p) 2555 } 2556 a := OP_ADDI 2557 if int64(int16(d)) != d { 2558 // Operand is 16 bit value with sign bit set 2559 if o.a1 == C_ANDCON { 2560 // Needs unsigned 16 bit so use ORI 2561 if isZeroOrR0 { 2562 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v)) 2563 break 2564 } 2565 // With ADDCON, needs signed 16 bit value, fall through to use ADDI 2566 } else if o.a1 != C_ADDCON { 2567 log.Fatalf("invalid handling of %v", p) 2568 } 2569 } 2570 2571 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v)) 2572 2573 case 4: /* add/mul $scon,[r1],r2 */ 2574 v := c.regoff(&p.From) 2575 2576 r := int(p.Reg) 2577 if r == 0 { 2578 r = int(p.To.Reg) 2579 } 2580 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 { 2581 c.ctxt.Diag("literal operation on R0\n%v", p) 2582 } 2583 if int32(int16(v)) != v { 2584 log.Fatalf("mishandled instruction %v", p) 2585 } 2586 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2587 2588 case 5: /* syscall */ 2589 o1 = c.oprrr(p.As) 2590 2591 case 6: /* logical op Rb,[Rs,]Ra; no literal */ 2592 r := int(p.Reg) 2593 2594 if r == 0 { 2595 r = int(p.To.Reg) 2596 } 2597 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM. 2598 switch p.As { 2599 case AROTL: 2600 o1 = AOP_MD(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0)) 2601 case AROTLW: 2602 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31) 2603 default: 2604 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 { 2605 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred 2606 // hardware no-op. This happens because $0 matches C_REG before C_ZCON. 
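// If both registers end up being R0, this is ori 0,0,0 (0x60000000), the preferred nop
// (see the NOP constant above).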
2607 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0) 2608 } else { 2609 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2610 } 2611 } 2612 2613 case 7: /* mov r, soreg ==> stw o(r) */ 2614 r := int(p.To.Reg) 2615 v := c.regoff(&p.To) 2616 if int32(int16(v)) != v { 2617 log.Fatalf("mishandled instruction %v", p) 2618 } 2619 // Offsets in DS form stores must be a multiple of 4 2620 inst := c.opstore(p.As) 2621 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 2622 log.Fatalf("invalid offset for DS form load/store %v", p) 2623 } 2624 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v)) 2625 2626 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */ 2627 r := int(p.From.Reg) 2628 v := c.regoff(&p.From) 2629 if int32(int16(v)) != v { 2630 log.Fatalf("mishandled instruction %v", p) 2631 } 2632 // Offsets in DS form loads must be a multiple of 4 2633 inst := c.opload(p.As) 2634 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 2635 log.Fatalf("invalid offset for DS form load/store %v", p) 2636 } 2637 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v)) 2638 2639 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). 2640 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 2641 2642 case 9: /* RLDC Ra, $sh, $mb, Rb */ 2643 sh := uint32(p.RestArgs[0].Addr.Offset) & 0x3F 2644 mb := uint32(p.RestArgs[1].Addr.Offset) & 0x3F 2645 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), (uint32(sh) & 0x1F)) 2646 o1 |= (sh & 0x20) >> 4 // sh[5] is placed in bit 1. 2647 o1 |= (mb & 0x1F) << 6 // mb[0:4] is placed in bits 6-10. 2648 o1 |= (mb & 0x20) // mb[5] is placed in bit 5 2649 2650 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */ 2651 r := int(p.Reg) 2652 2653 if r == 0 { 2654 r = int(p.To.Reg) 2655 } 2656 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r)) 2657 2658 case 11: /* br/bl lbra */ 2659 v := int32(0) 2660 2661 if p.To.Target() != nil { 2662 v = int32(p.To.Target().Pc - p.Pc) 2663 if v&03 != 0 { 2664 c.ctxt.Diag("odd branch target address\n%v", p) 2665 v &^= 03 2666 } 2667 2668 if v < -(1<<25) || v >= 1<<24 { 2669 c.ctxt.Diag("branch too far\n%v", p) 2670 } 2671 } 2672 2673 o1 = OP_BR(c.opirr(p.As), uint32(v), 0) 2674 if p.To.Sym != nil { 2675 rel := obj.Addrel(c.cursym) 2676 rel.Off = int32(c.pc) 2677 rel.Siz = 4 2678 rel.Sym = p.To.Sym 2679 v += int32(p.To.Offset) 2680 if v&03 != 0 { 2681 c.ctxt.Diag("odd branch target address\n%v", p) 2682 v &^= 03 2683 } 2684 2685 rel.Add = int64(v) 2686 rel.Type = objabi.R_CALLPOWER 2687 } 2688 o2 = NOP // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking 2689 2690 case 13: /* mov[bhwd]{z,} r,r */ 2691 // This needs to handle "MOV* $0, Rx". This shows up because $0 also 2692 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON 2693 // TODO: fix the above behavior and cleanup this exception. 
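// The constant case materializes $0 as addi Rx,R0,0, i.e. li Rx,0.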
2694 if p.From.Type == obj.TYPE_CONST { 2695 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0) 2696 break 2697 } 2698 if p.To.Type == obj.TYPE_CONST { 2699 c.ctxt.Diag("cannot move into constant 0\n%v", p) 2700 } 2701 2702 switch p.As { 2703 case AMOVB: 2704 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2705 case AMOVBZ: 2706 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31) 2707 case AMOVH: 2708 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2709 case AMOVHZ: 2710 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31) 2711 case AMOVW: 2712 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2713 case AMOVWZ: 2714 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */ 2715 case AMOVD: 2716 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg)) 2717 default: 2718 c.ctxt.Diag("internal: bad register move/truncation\n%v", p) 2719 } 2720 2721 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */ 2722 r := uint32(p.Reg) 2723 2724 if r == 0 { 2725 r = uint32(p.To.Reg) 2726 } 2727 d := c.vregoff(p.GetFrom3()) 2728 switch p.As { 2729 2730 // These opcodes expect a mask operand that has to be converted into the 2731 // appropriate operand. The way these were defined, not all valid masks are possible. 2732 // Left here for compatibility in case they were used or generated. 2733 case ARLDCL, ARLDCLCC: 2734 mb, me, valid := decodeMask64(d) 2735 if me != 63 || !valid { 2736 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p) 2737 } 2738 o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), mb) 2739 2740 case ARLDCR, ARLDCRCC: 2741 mb, me, valid := decodeMask64(d) 2742 if mb != 0 || !valid { 2743 c.ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p) 2744 } 2745 o1 = AOP_MDS(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(p.From.Reg), me) 2746 2747 // These opcodes use a shift count like the ppc64 asm, no mask conversion done 2748 case ARLDICR, ARLDICRCC: 2749 me := uint32(d) 2750 sh := c.regoff(&p.From) 2751 if me < 0 || me > 63 || sh > 63 { 2752 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p) 2753 } 2754 o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), me) 2755 2756 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC: 2757 mb := uint32(d) 2758 sh := c.regoff(&p.From) 2759 if mb < 0 || mb > 63 || sh > 63 { 2760 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p) 2761 } 2762 o1 = AOP_MD(c.oprrr(p.As), uint32(p.To.Reg), r, uint32(sh), mb) 2763 2764 case ACLRLSLDI: 2765 // This is an extended mnemonic defined in the ISA section C.8.1 2766 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n 2767 // It maps onto RLDIC so is directly generated here based on the operands from 2768 // the clrlsldi. 
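// For example, operands b=16 and n=3 emit rldic ra,rs,3,13 (shift n, mask begin b-n).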
2769 n := int32(d) 2770 b := c.regoff(&p.From) 2771 if n > b || b > 63 { 2772 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p) 2773 } 2774 o1 = AOP_MD(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n)) 2775 2776 default: 2777 c.ctxt.Diag("unexpected op in rldc case\n%v", p) 2778 } 2779 2780 case 17, /* bc bo,bi,lbra (same for now) */ 2781 16: /* bc bo,bi,sbra */ 2782 a := 0 2783 2784 r := int(p.Reg) 2785 2786 if p.From.Type == obj.TYPE_CONST { 2787 a = int(c.regoff(&p.From)) 2788 } else if p.From.Type == obj.TYPE_REG { 2789 if r != 0 { 2790 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r) 2791 } 2792 // BI values for the CR 2793 switch p.From.Reg { 2794 case REG_CR0: 2795 r = BI_CR0 2796 case REG_CR1: 2797 r = BI_CR1 2798 case REG_CR2: 2799 r = BI_CR2 2800 case REG_CR3: 2801 r = BI_CR3 2802 case REG_CR4: 2803 r = BI_CR4 2804 case REG_CR5: 2805 r = BI_CR5 2806 case REG_CR6: 2807 r = BI_CR6 2808 case REG_CR7: 2809 r = BI_CR7 2810 default: 2811 c.ctxt.Diag("unrecognized register: expecting CR\n") 2812 } 2813 } 2814 v := int32(0) 2815 if p.To.Target() != nil { 2816 v = int32(p.To.Target().Pc - p.Pc) 2817 } 2818 if v&03 != 0 { 2819 c.ctxt.Diag("odd branch target address\n%v", p) 2820 v &^= 03 2821 } 2822 2823 if v < -(1<<16) || v >= 1<<15 { 2824 c.ctxt.Diag("branch too far\n%v", p) 2825 } 2826 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0) 2827 2828 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */ 2829 var v int32 2830 var bh uint32 = 0 2831 if p.As == ABC || p.As == ABCL { 2832 v = c.regoff(&p.From) & 31 2833 } else { 2834 v = 20 /* unconditional */ 2835 } 2836 r := int(p.Reg) 2837 if r == 0 { 2838 r = 0 2839 } 2840 switch oclass(&p.To) { 2841 case C_CTR: 2842 o1 = OPVCC(19, 528, 0, 0) 2843 2844 case C_LR: 2845 o1 = OPVCC(19, 16, 0, 0) 2846 2847 default: 2848 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p) 2849 v = 0 2850 } 2851 2852 // Insert optional branch hint for bclr[l]/bcctr[l] 2853 if p.From3Type() != obj.TYPE_NONE { 2854 bh = uint32(p.GetFrom3().Offset) 2855 if bh == 2 || bh > 3 { 2856 log.Fatalf("BH must be 0,1,3 for %v", p) 2857 } 2858 o1 |= bh << 11 2859 } 2860 2861 if p.As == ABL || p.As == ABCL { 2862 o1 |= 1 2863 } 2864 o1 = OP_BCR(o1, uint32(v), uint32(r)) 2865 2866 case 19: /* mov $lcon,r ==> cau+or */ 2867 d := c.vregoff(&p.From) 2868 if o.ispfx { 2869 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d) 2870 } else { 2871 o1 = loadu32(int(p.To.Reg), d) 2872 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d))) 2873 } 2874 2875 case 20: /* add $ucon,,r | addis $addcon,r,r */ 2876 v := c.regoff(&p.From) 2877 2878 r := int(p.Reg) 2879 if r == 0 { 2880 r = int(p.To.Reg) 2881 } 2882 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2883 2884 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add, add $s34con,r1 ==> addis+ori+slw+ori+add */ 2885 if p.To.Reg == REGTMP || p.Reg == REGTMP { 2886 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2887 } 2888 d := c.vregoff(&p.From) 2889 r := int(p.Reg) 2890 if r == 0 { 2891 r = int(p.To.Reg) 2892 } 2893 if p.From.Sym != nil { 2894 c.ctxt.Diag("%v is not supported", p) 2895 } 2896 if o.ispfx { 2897 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d) 2898 } else if o.size == 8 { 2899 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) // tmp = uint16(d) 2900 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = tmp + from 2901 } else if o.size == 12 { 2902 // Note, o1 is ADDIS if d 
is negative, ORIS otherwise. 2903 o1 = loadu32(REGTMP, d) // tmp = d & 0xFFFF0000 2904 o2 = loadl16(REGTMP, d) // tmp |= d & 0xFFFF 2905 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) // to = from + tmp 2906 } else { 2907 // For backwards compatibility with GOPPC64 < 10, generate 34b constants in register. 2908 o1 = LOP_IRR(OP_ADDIS, REGZERO, REGTMP, uint32(d>>32)) // tmp = sign_extend((d>>32)&0xFFFF0000) 2909 o2 = loadl16(REGTMP, int64(d>>16)) // tmp |= (d>>16)&0xFFFF 2910 o3 = AOP_MD(OP_RLDICR, REGTMP, REGTMP, 16, 63-16) // tmp <<= 16 2911 o4 = loadl16(REGTMP, int64(uint16(d))) // tmp |= d&0xFFFF 2912 o5 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2913 } 2914 2915 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */ 2916 if p.To.Reg == REGTMP || p.Reg == REGTMP { 2917 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2918 } 2919 d := c.vregoff(&p.From) 2920 r := int(p.Reg) 2921 if r == 0 { 2922 r = int(p.To.Reg) 2923 } 2924 2925 // With ADDCON operand, generate 2 instructions using ADDI for signed value, 2926 // with LCON operand generate 3 instructions. 2927 if o.size == 8 { 2928 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d))) 2929 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2930 } else { 2931 o1 = loadu32(REGTMP, d) 2932 o2 = loadl16(REGTMP, d) 2933 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2934 } 2935 if p.From.Sym != nil { 2936 c.ctxt.Diag("%v is not supported", p) 2937 } 2938 2939 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */ 2940 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0)) 2941 // This is needed for -0. 2942 if o.size == 8 { 2943 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg)) 2944 } 2945 2946 case 25: 2947 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */ 2948 v := c.regoff(&p.From) 2949 2950 if v < 0 { 2951 v = 0 2952 } else if v > 63 { 2953 v = 63 2954 } 2955 r := int(p.Reg) 2956 if r == 0 { 2957 r = int(p.To.Reg) 2958 } 2959 var a int 2960 op := uint32(0) 2961 switch p.As { 2962 case ASLD, ASLDCC: 2963 a = int(63 - v) 2964 op = OP_RLDICR 2965 2966 case ASRD, ASRDCC: 2967 a = int(v) 2968 v = 64 - v 2969 op = OP_RLDICL 2970 case AROTL: 2971 a = int(0) 2972 op = OP_RLDICL 2973 case AEXTSWSLI, AEXTSWSLICC: 2974 a = int(v) 2975 default: 2976 c.ctxt.Diag("unexpected op in sldi case\n%v", p) 2977 a = 0 2978 o1 = 0 2979 } 2980 2981 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC { 2982 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v)) 2983 2984 } else { 2985 o1 = AOP_MD(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a)) 2986 } 2987 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC { 2988 o1 |= 1 // Set the condition code bit 2989 } 2990 2991 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */ 2992 v := c.vregoff(&p.From) 2993 r := int(p.From.Reg) 2994 var rel *obj.Reloc 2995 2996 switch p.From.Name { 2997 case obj.NAME_EXTERN, obj.NAME_STATIC: 2998 // Load a 32 bit constant, or relocation depending on if a symbol is attached 2999 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true) 3000 default: 3001 // Add a 32 bit offset to a register. 
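// high16adjusted adds one to the upper halfword when bit 15 of v is set, compensating for
// the sign extension done by the addi that follows.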
3002 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v)))) 3003 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) 3004 } 3005 3006 if o.ispfx { 3007 if rel == nil { 3008 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v) 3009 } else { 3010 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0) 3011 rel.Type = objabi.R_ADDRPOWER_PCREL34 3012 } 3013 } 3014 3015 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */ 3016 v := c.regoff(p.GetFrom3()) 3017 3018 r := int(p.From.Reg) 3019 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 3020 3021 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */ 3022 if p.To.Reg == REGTMP || p.From.Reg == REGTMP { 3023 c.ctxt.Diag("can't synthesize large constant\n%v", p) 3024 } 3025 v := c.vregoff(p.GetFrom3()) 3026 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16) 3027 o2 = loadl16(REGTMP, v) 3028 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP) 3029 if p.From.Sym != nil { 3030 c.ctxt.Diag("%v is not supported", p) 3031 } 3032 3033 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */ 3034 sh := uint32(c.regoff(&p.From)) 3035 d := c.vregoff(p.GetFrom3()) 3036 mb, me, valid := decodeMask64(d) 3037 var a uint32 3038 switch p.As { 3039 case ARLDC, ARLDCCC: 3040 a = mb 3041 if me != (63-sh) || !valid { 3042 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p) 3043 } 3044 3045 case ARLDCL, ARLDCLCC: 3046 a = mb 3047 if mb != 63 || !valid { 3048 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p) 3049 } 3050 3051 case ARLDCR, ARLDCRCC: 3052 a = me 3053 if mb != 0 || !valid { 3054 c.ctxt.Diag("invalid mask for shift: %016x (mb=%d,me=%d) (shift %d)\n%v", uint64(d), mb, me, sh, p) 3055 } 3056 3057 default: 3058 c.ctxt.Diag("unexpected op in rldic case\n%v", p) 3059 } 3060 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, a) 3061 3062 case 30: /* rldimi $sh,s,$mask,a */ 3063 sh := uint32(c.regoff(&p.From)) 3064 d := c.vregoff(p.GetFrom3()) 3065 3066 // Original opcodes had mask operands which had to be converted to a shift count as expected by 3067 // the ppc64 asm. 3068 switch p.As { 3069 case ARLDMI, ARLDMICC: 3070 mb, me, valid := decodeMask64(d) 3071 if me != (63-sh) || !valid { 3072 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), me, sh, p) 3073 } 3074 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb) 3075 3076 // Opcodes with shift count operands. 3077 case ARLDIMI, ARLDIMICC: 3078 o1 = AOP_MD(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, uint32(d)) 3079 } 3080 3081 case 31: /* dword */ 3082 d := c.vregoff(&p.From) 3083 3084 if c.ctxt.Arch.ByteOrder == binary.BigEndian { 3085 o1 = uint32(d >> 32) 3086 o2 = uint32(d) 3087 } else { 3088 o1 = uint32(d) 3089 o2 = uint32(d >> 32) 3090 } 3091 3092 if p.From.Sym != nil { 3093 rel := obj.Addrel(c.cursym) 3094 rel.Off = int32(c.pc) 3095 rel.Siz = 8 3096 rel.Sym = p.From.Sym 3097 rel.Add = p.From.Offset 3098 rel.Type = objabi.R_ADDR 3099 o2 = 0 3100 o1 = o2 3101 } 3102 3103 case 32: /* fmul frc,fra,frd */ 3104 r := int(p.Reg) 3105 3106 if r == 0 { 3107 r = int(p.To.Reg) 3108 } 3109 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6 3110 3111 case 33: /* fabs [frb,]frd; fmr. 
frb,frd */ 3112 r := int(p.From.Reg) 3113 3114 if oclass(&p.From) == C_NONE { 3115 r = int(p.To.Reg) 3116 } 3117 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r)) 3118 3119 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */ 3120 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6 3121 3122 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */ 3123 v := c.regoff(&p.To) 3124 r := int(p.To.Reg) 3125 // Offsets in DS form stores must be a multiple of 4 3126 if o.ispfx { 3127 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS) 3128 o1 |= uint32((v >> 16) & 0x3FFFF) 3129 o2 |= uint32(v & 0xFFFF) 3130 } else { 3131 inst := c.opstore(p.As) 3132 if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3133 log.Fatalf("invalid offset for DS form load/store %v", p) 3134 } 3135 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) 3136 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v)) 3137 } 3138 3139 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */ 3140 v := c.regoff(&p.From) 3141 r := int(p.From.Reg) 3142 3143 if o.ispfx { 3144 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS) 3145 o1 |= uint32((v >> 16) & 0x3FFFF) 3146 o2 |= uint32(v & 0xFFFF) 3147 } else { 3148 if o.a6 == C_REG { 3149 // Reuse the base register when loading a GPR (C_REG) to avoid 3150 // using REGTMP (R31) when possible. 3151 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v))) 3152 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) 3153 } else { 3154 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v))) 3155 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v)) 3156 } 3157 } 3158 3159 // Sign extend MOVB if needed 3160 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3161 3162 case 40: /* word */ 3163 o1 = uint32(c.regoff(&p.From)) 3164 3165 case 41: /* stswi */ 3166 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 { 3167 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As) 3168 } 3169 3170 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 3171 3172 case 42: /* lswi */ 3173 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 { 3174 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As) 3175 } 3176 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 3177 3178 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */ 3179 /* TH field for dcbt/dcbtst: */ 3180 /* 0 = Block access - program will soon access EA. */ 3181 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */ 3182 /* 16 = Block access - program will soon make a transient access to EA. */ 3183 /* 17 = Block access - program will not access EA for a long time. */ 3184 3185 /* L field for dcbf: */ 3186 /* 0 = invalidates the block containing EA in all processors. */ 3187 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */ 3188 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). 
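When no TH/L operand is supplied, the field is simply encoded as 0.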
*/ 3189 if p.To.Type == obj.TYPE_NONE { 3190 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg)) 3191 } else { 3192 th := c.regoff(&p.To) 3193 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg)) 3194 } 3195 3196 case 44: /* indexed store */ 3197 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) 3198 3199 case 45: /* indexed load */ 3200 switch p.As { 3201 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */ 3202 /* The EH field can be used as a lock acquire/release hint as follows: */ 3203 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */ 3204 /* 1 = Exclusive Access (lock acquire and release) */ 3205 case ALBAR, ALHAR, ALWAR, ALDAR: 3206 if p.From3Type() != obj.TYPE_NONE { 3207 eh := int(c.regoff(p.GetFrom3())) 3208 if eh > 1 { 3209 c.ctxt.Diag("illegal EH field\n%v", p) 3210 } 3211 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh)) 3212 } else { 3213 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3214 } 3215 default: 3216 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3217 } 3218 case 46: /* plain op */ 3219 o1 = c.oprrr(p.As) 3220 3221 case 47: /* op Ra, Rd; also op [Ra,] Rd */ 3222 r := int(p.From.Reg) 3223 3224 if r == 0 { 3225 r = int(p.To.Reg) 3226 } 3227 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) 3228 3229 case 48: /* op Rs, Ra */ 3230 r := int(p.From.Reg) 3231 3232 if r == 0 { 3233 r = int(p.To.Reg) 3234 } 3235 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) 3236 3237 case 49: /* op Rb; op $n, Rb */ 3238 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */ 3239 v := c.regoff(&p.From) & 1 3240 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21 3241 } else { 3242 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg)) 3243 } 3244 3245 case 50: /* rem[u] r1[,r2],r3 */ 3246 r := int(p.Reg) 3247 3248 if r == 0 { 3249 r = int(p.To.Reg) 3250 } 3251 v := c.oprrr(p.As) 3252 t := v & (1<<10 | 1) /* OE|Rc */ 3253 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) 3254 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg)) 3255 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) 3256 if p.As == AREMU { 3257 o4 = o3 3258 3259 /* Clear top 32 bits */ 3260 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5 3261 } 3262 3263 case 51: /* remd[u] r1[,r2],r3 */ 3264 r := int(p.Reg) 3265 3266 if r == 0 { 3267 r = int(p.To.Reg) 3268 } 3269 v := c.oprrr(p.As) 3270 t := v & (1<<10 | 1) /* OE|Rc */ 3271 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) 3272 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg)) 3273 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) 3274 /* cases 50,51: removed; can be reused. */ 3275 3276 /* cases 50,51: removed; can be reused. 
*/ 3277 3278 case 52: /* mtfsbNx cr(n) */ 3279 v := c.regoff(&p.From) & 31 3280 3281 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0) 3282 3283 case 53: /* mffsX ,fr1 */ 3284 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0) 3285 3286 case 55: /* op Rb, Rd */ 3287 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg)) 3288 3289 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */ 3290 v := c.regoff(&p.From) 3291 3292 r := int(p.Reg) 3293 if r == 0 { 3294 r = int(p.To.Reg) 3295 } 3296 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31) 3297 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) { 3298 o1 |= 1 << 1 /* mb[5] */ 3299 } 3300 3301 case 57: /* slw $sh,[s,]a -> rlwinm ... */ 3302 v := c.regoff(&p.From) 3303 3304 r := int(p.Reg) 3305 if r == 0 { 3306 r = int(p.To.Reg) 3307 } 3308 3309 /* 3310 * Let user (gs) shoot himself in the foot. 3311 * qc has already complained. 3312 * 3313 if(v < 0 || v > 31) 3314 ctxt->diag("illegal shift %ld\n%v", v, p); 3315 */ 3316 if v < 0 { 3317 v = 0 3318 } else if v > 32 { 3319 v = 32 3320 } 3321 var mask [2]uint8 3322 switch p.As { 3323 case AROTLW: 3324 mask[0], mask[1] = 0, 31 3325 case ASRW, ASRWCC: 3326 mask[0], mask[1] = uint8(v), 31 3327 v = 32 - v 3328 default: 3329 mask[0], mask[1] = 0, uint8(31-v) 3330 } 3331 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1])) 3332 if p.As == ASLWCC || p.As == ASRWCC { 3333 o1 |= 1 // set the condition code 3334 } 3335 3336 case 58: /* logical $andcon,[s],a */ 3337 v := c.regoff(&p.From) 3338 3339 r := int(p.Reg) 3340 if r == 0 { 3341 r = int(p.To.Reg) 3342 } 3343 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 3344 3345 case 60: /* tw to,a,b */ 3346 r := int(c.regoff(&p.From) & 31) 3347 3348 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg)) 3349 3350 case 61: /* tw to,a,$simm */ 3351 r := int(c.regoff(&p.From) & 31) 3352 3353 v := c.regoff(&p.To) 3354 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v)) 3355 3356 case 62: /* clrlslwi $sh,s,$mask,a */ 3357 v := c.regoff(&p.From) 3358 n := c.regoff(p.GetFrom3()) 3359 // This is an extended mnemonic described in the ISA C.8.2 3360 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n 3361 // It maps onto rlwinm which is directly generated here. 3362 if n > v || v >= 32 { 3363 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p) 3364 } 3365 3366 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n)) 3367 3368 case 63: /* rlwimi/rlwnm/rlwinm [$sh,b],s,[$mask or mb,me],a*/ 3369 var mb, me uint32 3370 if len(p.RestArgs) == 1 { // Mask needs decomposed into mb and me. 3371 var valid bool 3372 // Note, optab rules ensure $mask is a 32b constant. 3373 mb, me, valid = decodeMask32(uint32(p.RestArgs[0].Addr.Offset)) 3374 if !valid { 3375 c.ctxt.Diag("cannot generate mask #%x\n%v", uint64(p.RestArgs[0].Addr.Offset), p) 3376 } 3377 } else { // Otherwise, mask is already passed as mb and me in RestArgs. 
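// RestArgs[0] carries mb and RestArgs[1] carries me; they are used as given.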
3378 mb, me = uint32(p.RestArgs[0].Addr.Offset), uint32(p.RestArgs[1].Addr.Offset) 3379 } 3380 if p.From.Type == obj.TYPE_CONST { 3381 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Offset), mb, me) 3382 } else { 3383 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me) 3384 } 3385 3386 case 64: /* mtfsf fr[, $m] {,fpcsr} */ 3387 var v int32 3388 if p.From3Type() != obj.TYPE_NONE { 3389 v = c.regoff(p.GetFrom3()) & 255 3390 } else { 3391 v = 255 3392 } 3393 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11 3394 3395 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */ 3396 if p.To.Reg == 0 { 3397 c.ctxt.Diag("must specify FPSCR(n)\n%v", p) 3398 } 3399 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12 3400 3401 case 66: /* mov spr,r1; mov r1,spr */ 3402 var r int 3403 var v int32 3404 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 { 3405 r = int(p.From.Reg) 3406 v = int32(p.To.Reg) 3407 o1 = OPVCC(31, 467, 0, 0) /* mtspr */ 3408 } else { 3409 r = int(p.To.Reg) 3410 v = int32(p.From.Reg) 3411 o1 = OPVCC(31, 339, 0, 0) /* mfspr */ 3412 } 3413 3414 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3415 3416 case 67: /* mcrf crfD,crfS */ 3417 if p.From.Reg == REG_CR || p.To.Reg == REG_CR { 3418 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p) 3419 } 3420 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0) 3421 3422 case 68: /* mfcr rD; mfocrf CRM,rD */ 3423 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */ 3424 if p.From.Reg != REG_CR { 3425 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */ 3426 o1 |= 1<<20 | v<<12 /* new form, mfocrf */ 3427 } 3428 3429 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */ 3430 var v uint32 3431 if p.To.Reg == REG_CR { 3432 v = 0xff 3433 } else if p.To.Offset != 0 { // MOVFL gpr, constant 3434 v = uint32(p.To.Offset) 3435 } else { // p.To.Reg == REG_CRx 3436 v = 1 << uint(7-(p.To.Reg&7)) 3437 } 3438 // Use mtocrf form if only one CR field moved. 
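// After the <<12 below, the bit set here becomes instruction bit 11 (big-endian numbering),
// which selects the mtocrf encoding instead of mtcrf.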
3439 if bits.OnesCount32(v) == 1 { 3440 v |= 1 << 8 3441 } 3442 3443 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12 3444 3445 case 70: /* [f]cmp r,r,cr*/ 3446 var r int 3447 if p.Reg == 0 { 3448 r = 0 3449 } else { 3450 r = (int(p.Reg) & 7) << 2 3451 } 3452 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg)) 3453 3454 case 71: /* cmp[l] r,i,cr*/ 3455 var r int 3456 if p.Reg == 0 { 3457 r = 0 3458 } else { 3459 r = (int(p.Reg) & 7) << 2 3460 } 3461 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff 3462 3463 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */ 3464 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg)) 3465 3466 case 73: /* mcrfs crfD,crfS */ 3467 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg { 3468 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p) 3469 } 3470 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0) 3471 3472 case 77: /* syscall $scon, syscall Rx */ 3473 if p.From.Type == obj.TYPE_CONST { 3474 if p.From.Offset > BIG || p.From.Offset < -BIG { 3475 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p) 3476 } 3477 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset)) 3478 } else if p.From.Type == obj.TYPE_REG { 3479 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg)) 3480 } else { 3481 c.ctxt.Diag("illegal syscall: %v", p) 3482 o1 = 0x7fe00008 // trap always 3483 } 3484 3485 o2 = c.oprrr(p.As) 3486 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0 3487 3488 case 78: /* undef */ 3489 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed 3490 always to be an illegal instruction." */ 3491 3492 /* relocation operations */ 3493 case 74: 3494 var rel *obj.Reloc 3495 v := c.vregoff(&p.To) 3496 // Offsets in DS form stores must be a multiple of 4 3497 inst := c.opstore(p.As) 3498 3499 // Can't reuse base for store instructions. 3500 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false) 3501 3502 // Rewrite as a prefixed store if supported. 3503 if o.ispfx { 3504 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL) 3505 rel.Type = objabi.R_ADDRPOWER_PCREL34 3506 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3507 log.Fatalf("invalid offset for DS form load/store %v", p) 3508 } 3509 3510 case 75: // 32 bit offset symbol loads (got/toc/addr) 3511 var rel *obj.Reloc 3512 v := p.From.Offset 3513 3514 // Offsets in DS form loads must be a multiple of 4 3515 inst := c.opload(p.As) 3516 switch p.From.Name { 3517 case obj.NAME_GOTREF, obj.NAME_TOCREF: 3518 if v != 0 { 3519 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p) 3520 } 3521 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) 3522 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3523 rel = obj.Addrel(c.cursym) 3524 rel.Off = int32(c.pc) 3525 rel.Siz = 8 3526 rel.Sym = p.From.Sym 3527 switch p.From.Name { 3528 case obj.NAME_GOTREF: 3529 rel.Type = objabi.R_ADDRPOWER_GOT 3530 case obj.NAME_TOCREF: 3531 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS 3532 } 3533 default: 3534 reuseBaseReg := o.a6 == C_REG 3535 // Reuse To.Reg as base register if it is a GPR. 3536 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg) 3537 } 3538 3539 // Convert to prefixed forms if supported. 
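// When the matched optab entry is a prefixed form (o.ispfx), the addis+load
// pair built above is replaced by a single 8-byte prefixed load addressed by a
// 34-bit PC-relative displacement, and the relocation is retyped to the
// corresponding *_PCREL34 variant.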
3540 if o.ispfx { 3541 switch rel.Type { 3542 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS, 3543 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS: 3544 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3545 rel.Type = objabi.R_ADDRPOWER_PCREL34 3546 case objabi.R_POWER_TLS_IE: 3547 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3548 rel.Type = objabi.R_POWER_TLS_IE_PCREL34 3549 case objabi.R_ADDRPOWER_GOT: 3550 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3551 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34 3552 default: 3553 // We've failed to convert a TOC-relative relocation to a PC-relative one. 3554 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type) 3555 } 3556 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 { 3557 log.Fatalf("invalid offset for DS form load/store %v", p) 3558 } 3559 3560 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3561 3562 case 79: 3563 if p.From.Offset != 0 { 3564 c.ctxt.Diag("invalid offset against tls var %v", p) 3565 } 3566 rel := obj.Addrel(c.cursym) 3567 rel.Off = int32(c.pc) 3568 rel.Siz = 8 3569 rel.Sym = p.From.Sym 3570 if !o.ispfx { 3571 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0) 3572 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3573 rel.Type = objabi.R_POWER_TLS_LE 3574 } else { 3575 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0) 3576 rel.Type = objabi.R_POWER_TLS_LE_TPREL34 3577 } 3578 3579 case 80: 3580 if p.From.Offset != 0 { 3581 c.ctxt.Diag("invalid offset against tls var %v", p) 3582 } 3583 rel := obj.Addrel(c.cursym) 3584 rel.Off = int32(c.pc) 3585 rel.Siz = 8 3586 rel.Sym = p.From.Sym 3587 rel.Type = objabi.R_POWER_TLS_IE 3588 if !o.ispfx { 3589 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) 3590 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) 3591 } else { 3592 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL) 3593 rel.Type = objabi.R_POWER_TLS_IE_PCREL34 3594 } 3595 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13) 3596 rel = obj.Addrel(c.cursym) 3597 rel.Off = int32(c.pc) + 8 3598 rel.Siz = 4 3599 rel.Sym = p.From.Sym 3600 rel.Type = objabi.R_POWER_TLS 3601 3602 case 82: /* vector instructions, VX-form and VC-form */ 3603 if p.From.Type == obj.TYPE_REG { 3604 /* reg reg none OR reg reg reg */ 3605 /* 3-register operand order: VRA, VRB, VRT */ 3606 /* 2-register operand order: VRA, VRT */ 3607 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3608 } else if p.From3Type() == obj.TYPE_CONST { 3609 /* imm imm reg reg */ 3610 /* operand order: SIX, VRA, ST, VRT */ 3611 six := int(c.regoff(&p.From)) 3612 st := int(c.regoff(p.GetFrom3())) 3613 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six)) 3614 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 { 3615 /* imm reg reg */ 3616 /* operand order: UIM, VRB, VRT */ 3617 uim := int(c.regoff(&p.From)) 3618 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim)) 3619 } else { 3620 /* imm reg */ 3621 /* operand order: SIM, VRT */ 3622 sim := int(c.regoff(&p.From)) 3623 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim)) 3624 } 3625 3626 case 83: /* vector instructions, VA-form */ 3627 if p.From.Type == obj.TYPE_REG { 3628 /* reg reg reg reg */ 3629 /* 4-register operand order: VRA, VRB, VRC, VRT */ 3630 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) 3631 } else if p.From.Type == obj.TYPE_CONST { 3632 /* imm 
reg reg reg */ 3633 /* operand order: SHB, VRA, VRB, VRT */ 3634 shb := int(c.regoff(&p.From)) 3635 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb)) 3636 } 3637 3638 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc 3639 bc := c.vregoff(&p.From) 3640 if o.a1 == C_CRBIT { 3641 // CR bit is encoded as a register, not a constant. 3642 bc = int64(p.From.Reg) 3643 } 3644 3645 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg 3646 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc)) 3647 3648 case 85: /* vector instructions, VX-form */ 3649 /* reg none reg */ 3650 /* 2-register operand order: VRB, VRT */ 3651 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg)) 3652 3653 case 86: /* VSX indexed store, XX1-form */ 3654 /* reg reg reg */ 3655 /* 3-register operand order: XT, (RB)(RA*1) */ 3656 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) 3657 3658 case 87: /* VSX indexed load, XX1-form */ 3659 /* reg reg reg */ 3660 /* 3-register operand order: (RB)(RA*1), XT */ 3661 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3662 3663 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */ 3664 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3665 3666 case 89: /* VSX instructions, XX2-form */ 3667 /* reg none reg OR reg imm reg */ 3668 /* 2-register operand order: XB, XT or XB, UIM, XT*/ 3669 uim := int(c.regoff(p.GetFrom3())) 3670 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg)) 3671 3672 case 90: /* VSX instructions, XX3-form */ 3673 if p.From3Type() == obj.TYPE_NONE { 3674 /* reg reg reg */ 3675 /* 3-register operand order: XA, XB, XT */ 3676 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3677 } else if p.From3Type() == obj.TYPE_CONST { 3678 /* reg reg reg imm */ 3679 /* operand order: XA, XB, DM, XT */ 3680 dm := int(c.regoff(p.GetFrom3())) 3681 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm)) 3682 } 3683 3684 case 91: /* VSX instructions, XX4-form */ 3685 /* reg reg reg reg */ 3686 /* 3-register operand order: XA, XB, XC, XT */ 3687 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) 3688 3689 case 92: /* X-form instructions, 3-operands */ 3690 if p.To.Type == obj.TYPE_CONST { 3691 /* imm reg reg */ 3692 xf := int32(p.From.Reg) 3693 if REG_F0 <= xf && xf <= REG_F31 { 3694 /* operand order: FRA, FRB, BF */ 3695 bf := int(c.regoff(&p.To)) << 2 3696 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) 3697 } else { 3698 /* operand order: RA, RB, L */ 3699 l := int(c.regoff(&p.To)) 3700 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg)) 3701 } 3702 } else if p.From3Type() == obj.TYPE_CONST { 3703 /* reg reg imm */ 3704 /* operand order: RB, L, RA */ 3705 l := int(c.regoff(p.GetFrom3())) 3706 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg)) 3707 } else if p.To.Type == obj.TYPE_REG { 3708 cr := int32(p.To.Reg) 3709 if REG_CR0 <= cr && cr <= REG_CR7 { 3710 /* cr reg reg */ 3711 /* operand order: RA, RB, BF */ 3712 bf := (int(p.To.Reg) & 7) << 2 3713 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) 3714 } else if p.From.Type == obj.TYPE_CONST { 3715 /* reg imm */ 3716 /* operand order: L, RT */ 3717 l := int(c.regoff(&p.From)) 3718 o1 = 
AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg)) 3719 } else { 3720 switch p.As { 3721 case ACOPY, APASTECC: 3722 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg)) 3723 default: 3724 /* reg reg reg */ 3725 /* operand order: RS, RB, RA */ 3726 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3727 } 3728 } 3729 } 3730 3731 case 93: /* X-form instructions, 2-operands */ 3732 if p.To.Type == obj.TYPE_CONST { 3733 /* imm reg */ 3734 /* operand order: FRB, BF */ 3735 bf := int(c.regoff(&p.To)) << 2 3736 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg)) 3737 } else if p.Reg == 0 { 3738 /* popcnt* r,r, X-form */ 3739 /* operand order: RS, RA */ 3740 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3741 } 3742 3743 case 94: /* Z23-form instructions, 4-operands */ 3744 /* reg reg reg imm */ 3745 /* operand order: RA, RB, CY, RT */ 3746 cy := int(c.regoff(p.GetFrom3())) 3747 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy)) 3748 3749 case 96: /* VSX load, DQ-form */ 3750 /* reg imm reg */ 3751 /* operand order: (RA)(DQ), XT */ 3752 dq := int16(c.regoff(&p.From)) 3753 if (dq & 15) != 0 { 3754 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq) 3755 } 3756 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq)) 3757 3758 case 97: /* VSX store, DQ-form */ 3759 /* reg imm reg */ 3760 /* operand order: XT, (RA)(DQ) */ 3761 dq := int16(c.regoff(&p.To)) 3762 if (dq & 15) != 0 { 3763 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq) 3764 } 3765 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq)) 3766 case 98: /* VSX indexed load or load with length (also left-justified), x-form */ 3767 /* vsreg, reg, reg */ 3768 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3769 case 99: /* VSX store with length (also left-justified) x-form */ 3770 /* reg, reg, vsreg */ 3771 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg)) 3772 case 100: /* VSX X-form XXSPLTIB */ 3773 if p.From.Type == obj.TYPE_CONST { 3774 /* imm reg */ 3775 uim := int(c.regoff(&p.From)) 3776 /* imm reg */ 3777 /* Use AOP_XX1 form with 0 for one of the registers. 
*/ 3778 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim)) 3779 } else { 3780 c.ctxt.Diag("invalid ops for %v", p.As) 3781 } 3782 case 101: 3783 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) 3784 3785 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */ 3786 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3787 3788 case 106: /* MOVD spr, soreg */ 3789 v := int32(p.From.Reg) 3790 o1 = OPVCC(31, 339, 0, 0) /* mfspr */ 3791 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3792 so := c.regoff(&p.To) 3793 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so)) 3794 if so&0x3 != 0 { 3795 log.Fatalf("invalid offset for DS form load/store %v", p) 3796 } 3797 if p.To.Reg == REGTMP { 3798 log.Fatalf("SPR move to memory will clobber R31 %v", p) 3799 } 3800 3801 case 107: /* MOVD soreg, spr */ 3802 v := int32(p.From.Reg) 3803 so := c.regoff(&p.From) 3804 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so)) 3805 o2 = OPVCC(31, 467, 0, 0) /* mtspr */ 3806 v = int32(p.To.Reg) 3807 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3808 if so&0x3 != 0 { 3809 log.Fatalf("invalid offset for DS form load/store %v", p) 3810 } 3811 3812 case 108: /* mov r, xoreg ==> stwx rx,ry */ 3813 r := int(p.To.Reg) 3814 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r)) 3815 3816 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */ 3817 r := int(p.From.Reg) 3818 3819 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r)) 3820 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). 
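// Only o.size/4 words of out[] are emitted by the caller, so for the 4-byte
// (o.size == 4) variants of this case the extsb assembled into o2 below is
// simply never written out.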
3821 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3822 3823 case 110: /* SETB creg, rt */ 3824 bfa := uint32(p.From.Reg) << 2 3825 rt := uint32(p.To.Reg) 3826 o1 = LOP_RRR(OP_SETB, bfa, rt, 0) 3827 } 3828 3829 out[0] = o1 3830 out[1] = o2 3831 out[2] = o3 3832 out[3] = o4 3833 out[4] = o5 3834 } 3835 3836 func (c *ctxt9) vregoff(a *obj.Addr) int64 { 3837 c.instoffset = 0 3838 if a != nil { 3839 c.aclass(a) 3840 } 3841 return c.instoffset 3842 } 3843 3844 func (c *ctxt9) regoff(a *obj.Addr) int32 { 3845 return int32(c.vregoff(a)) 3846 } 3847 3848 func (c *ctxt9) oprrr(a obj.As) uint32 { 3849 switch a { 3850 case AADD: 3851 return OPVCC(31, 266, 0, 0) 3852 case AADDCC: 3853 return OPVCC(31, 266, 0, 1) 3854 case AADDV: 3855 return OPVCC(31, 266, 1, 0) 3856 case AADDVCC: 3857 return OPVCC(31, 266, 1, 1) 3858 case AADDC: 3859 return OPVCC(31, 10, 0, 0) 3860 case AADDCCC: 3861 return OPVCC(31, 10, 0, 1) 3862 case AADDCV: 3863 return OPVCC(31, 10, 1, 0) 3864 case AADDCVCC: 3865 return OPVCC(31, 10, 1, 1) 3866 case AADDE: 3867 return OPVCC(31, 138, 0, 0) 3868 case AADDECC: 3869 return OPVCC(31, 138, 0, 1) 3870 case AADDEV: 3871 return OPVCC(31, 138, 1, 0) 3872 case AADDEVCC: 3873 return OPVCC(31, 138, 1, 1) 3874 case AADDME: 3875 return OPVCC(31, 234, 0, 0) 3876 case AADDMECC: 3877 return OPVCC(31, 234, 0, 1) 3878 case AADDMEV: 3879 return OPVCC(31, 234, 1, 0) 3880 case AADDMEVCC: 3881 return OPVCC(31, 234, 1, 1) 3882 case AADDZE: 3883 return OPVCC(31, 202, 0, 0) 3884 case AADDZECC: 3885 return OPVCC(31, 202, 0, 1) 3886 case AADDZEV: 3887 return OPVCC(31, 202, 1, 0) 3888 case AADDZEVCC: 3889 return OPVCC(31, 202, 1, 1) 3890 case AADDEX: 3891 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */ 3892 3893 case AAND: 3894 return OPVCC(31, 28, 0, 0) 3895 case AANDCC: 3896 return OPVCC(31, 28, 0, 1) 3897 case AANDN: 3898 return OPVCC(31, 60, 0, 0) 3899 case AANDNCC: 3900 return OPVCC(31, 60, 0, 1) 3901 3902 case ACMP: 3903 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */ 3904 case ACMPU: 3905 return OPVCC(31, 32, 0, 0) | 1<<21 3906 case ACMPW: 3907 return OPVCC(31, 0, 0, 0) /* L=0 */ 3908 case ACMPWU: 3909 return OPVCC(31, 32, 0, 0) 3910 case ACMPB: 3911 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */ 3912 case ACMPEQB: 3913 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ 3914 3915 case ACNTLZW: 3916 return OPVCC(31, 26, 0, 0) 3917 case ACNTLZWCC: 3918 return OPVCC(31, 26, 0, 1) 3919 case ACNTLZD: 3920 return OPVCC(31, 58, 0, 0) 3921 case ACNTLZDCC: 3922 return OPVCC(31, 58, 0, 1) 3923 3924 case ACRAND: 3925 return OPVCC(19, 257, 0, 0) 3926 case ACRANDN: 3927 return OPVCC(19, 129, 0, 0) 3928 case ACREQV: 3929 return OPVCC(19, 289, 0, 0) 3930 case ACRNAND: 3931 return OPVCC(19, 225, 0, 0) 3932 case ACRNOR: 3933 return OPVCC(19, 33, 0, 0) 3934 case ACROR: 3935 return OPVCC(19, 449, 0, 0) 3936 case ACRORN: 3937 return OPVCC(19, 417, 0, 0) 3938 case ACRXOR: 3939 return OPVCC(19, 193, 0, 0) 3940 3941 case ADCBF: 3942 return OPVCC(31, 86, 0, 0) 3943 case ADCBI: 3944 return OPVCC(31, 470, 0, 0) 3945 case ADCBST: 3946 return OPVCC(31, 54, 0, 0) 3947 case ADCBT: 3948 return OPVCC(31, 278, 0, 0) 3949 case ADCBTST: 3950 return OPVCC(31, 246, 0, 0) 3951 case ADCBZ: 3952 return OPVCC(31, 1014, 0, 0) 3953 3954 case AMODUD: 3955 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */ 3956 case AMODUW: 3957 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */ 3958 case AMODSD: 3959 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */ 3960 case AMODSW: 3961 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */ 3962 3963 case ADIVW, AREM: 
3964 return OPVCC(31, 491, 0, 0) 3965 3966 case ADIVWCC: 3967 return OPVCC(31, 491, 0, 1) 3968 3969 case ADIVWV: 3970 return OPVCC(31, 491, 1, 0) 3971 3972 case ADIVWVCC: 3973 return OPVCC(31, 491, 1, 1) 3974 3975 case ADIVWU, AREMU: 3976 return OPVCC(31, 459, 0, 0) 3977 3978 case ADIVWUCC: 3979 return OPVCC(31, 459, 0, 1) 3980 3981 case ADIVWUV: 3982 return OPVCC(31, 459, 1, 0) 3983 3984 case ADIVWUVCC: 3985 return OPVCC(31, 459, 1, 1) 3986 3987 case ADIVD, AREMD: 3988 return OPVCC(31, 489, 0, 0) 3989 3990 case ADIVDCC: 3991 return OPVCC(31, 489, 0, 1) 3992 3993 case ADIVDE: 3994 return OPVCC(31, 425, 0, 0) 3995 3996 case ADIVDECC: 3997 return OPVCC(31, 425, 0, 1) 3998 3999 case ADIVDEU: 4000 return OPVCC(31, 393, 0, 0) 4001 4002 case ADIVDEUCC: 4003 return OPVCC(31, 393, 0, 1) 4004 4005 case ADIVDV: 4006 return OPVCC(31, 489, 1, 0) 4007 4008 case ADIVDVCC: 4009 return OPVCC(31, 489, 1, 1) 4010 4011 case ADIVDU, AREMDU: 4012 return OPVCC(31, 457, 0, 0) 4013 4014 case ADIVDUCC: 4015 return OPVCC(31, 457, 0, 1) 4016 4017 case ADIVDUV: 4018 return OPVCC(31, 457, 1, 0) 4019 4020 case ADIVDUVCC: 4021 return OPVCC(31, 457, 1, 1) 4022 4023 case AEIEIO: 4024 return OPVCC(31, 854, 0, 0) 4025 4026 case AEQV: 4027 return OPVCC(31, 284, 0, 0) 4028 case AEQVCC: 4029 return OPVCC(31, 284, 0, 1) 4030 4031 case AEXTSB: 4032 return OPVCC(31, 954, 0, 0) 4033 case AEXTSBCC: 4034 return OPVCC(31, 954, 0, 1) 4035 case AEXTSH: 4036 return OPVCC(31, 922, 0, 0) 4037 case AEXTSHCC: 4038 return OPVCC(31, 922, 0, 1) 4039 case AEXTSW: 4040 return OPVCC(31, 986, 0, 0) 4041 case AEXTSWCC: 4042 return OPVCC(31, 986, 0, 1) 4043 4044 case AFABS: 4045 return OPVCC(63, 264, 0, 0) 4046 case AFABSCC: 4047 return OPVCC(63, 264, 0, 1) 4048 case AFADD: 4049 return OPVCC(63, 21, 0, 0) 4050 case AFADDCC: 4051 return OPVCC(63, 21, 0, 1) 4052 case AFADDS: 4053 return OPVCC(59, 21, 0, 0) 4054 case AFADDSCC: 4055 return OPVCC(59, 21, 0, 1) 4056 case AFCMPO: 4057 return OPVCC(63, 32, 0, 0) 4058 case AFCMPU: 4059 return OPVCC(63, 0, 0, 0) 4060 case AFCFID: 4061 return OPVCC(63, 846, 0, 0) 4062 case AFCFIDCC: 4063 return OPVCC(63, 846, 0, 1) 4064 case AFCFIDU: 4065 return OPVCC(63, 974, 0, 0) 4066 case AFCFIDUCC: 4067 return OPVCC(63, 974, 0, 1) 4068 case AFCFIDS: 4069 return OPVCC(59, 846, 0, 0) 4070 case AFCFIDSCC: 4071 return OPVCC(59, 846, 0, 1) 4072 case AFCTIW: 4073 return OPVCC(63, 14, 0, 0) 4074 case AFCTIWCC: 4075 return OPVCC(63, 14, 0, 1) 4076 case AFCTIWZ: 4077 return OPVCC(63, 15, 0, 0) 4078 case AFCTIWZCC: 4079 return OPVCC(63, 15, 0, 1) 4080 case AFCTID: 4081 return OPVCC(63, 814, 0, 0) 4082 case AFCTIDCC: 4083 return OPVCC(63, 814, 0, 1) 4084 case AFCTIDZ: 4085 return OPVCC(63, 815, 0, 0) 4086 case AFCTIDZCC: 4087 return OPVCC(63, 815, 0, 1) 4088 case AFDIV: 4089 return OPVCC(63, 18, 0, 0) 4090 case AFDIVCC: 4091 return OPVCC(63, 18, 0, 1) 4092 case AFDIVS: 4093 return OPVCC(59, 18, 0, 0) 4094 case AFDIVSCC: 4095 return OPVCC(59, 18, 0, 1) 4096 case AFMADD: 4097 return OPVCC(63, 29, 0, 0) 4098 case AFMADDCC: 4099 return OPVCC(63, 29, 0, 1) 4100 case AFMADDS: 4101 return OPVCC(59, 29, 0, 0) 4102 case AFMADDSCC: 4103 return OPVCC(59, 29, 0, 1) 4104 4105 case AFMOVS, AFMOVD: 4106 return OPVCC(63, 72, 0, 0) /* load */ 4107 case AFMOVDCC: 4108 return OPVCC(63, 72, 0, 1) 4109 case AFMSUB: 4110 return OPVCC(63, 28, 0, 0) 4111 case AFMSUBCC: 4112 return OPVCC(63, 28, 0, 1) 4113 case AFMSUBS: 4114 return OPVCC(59, 28, 0, 0) 4115 case AFMSUBSCC: 4116 return OPVCC(59, 28, 0, 1) 4117 case AFMUL: 4118 return OPVCC(63, 25, 0, 0) 4119 
case AFMULCC: 4120 return OPVCC(63, 25, 0, 1) 4121 case AFMULS: 4122 return OPVCC(59, 25, 0, 0) 4123 case AFMULSCC: 4124 return OPVCC(59, 25, 0, 1) 4125 case AFNABS: 4126 return OPVCC(63, 136, 0, 0) 4127 case AFNABSCC: 4128 return OPVCC(63, 136, 0, 1) 4129 case AFNEG: 4130 return OPVCC(63, 40, 0, 0) 4131 case AFNEGCC: 4132 return OPVCC(63, 40, 0, 1) 4133 case AFNMADD: 4134 return OPVCC(63, 31, 0, 0) 4135 case AFNMADDCC: 4136 return OPVCC(63, 31, 0, 1) 4137 case AFNMADDS: 4138 return OPVCC(59, 31, 0, 0) 4139 case AFNMADDSCC: 4140 return OPVCC(59, 31, 0, 1) 4141 case AFNMSUB: 4142 return OPVCC(63, 30, 0, 0) 4143 case AFNMSUBCC: 4144 return OPVCC(63, 30, 0, 1) 4145 case AFNMSUBS: 4146 return OPVCC(59, 30, 0, 0) 4147 case AFNMSUBSCC: 4148 return OPVCC(59, 30, 0, 1) 4149 case AFCPSGN: 4150 return OPVCC(63, 8, 0, 0) 4151 case AFCPSGNCC: 4152 return OPVCC(63, 8, 0, 1) 4153 case AFRES: 4154 return OPVCC(59, 24, 0, 0) 4155 case AFRESCC: 4156 return OPVCC(59, 24, 0, 1) 4157 case AFRIM: 4158 return OPVCC(63, 488, 0, 0) 4159 case AFRIMCC: 4160 return OPVCC(63, 488, 0, 1) 4161 case AFRIP: 4162 return OPVCC(63, 456, 0, 0) 4163 case AFRIPCC: 4164 return OPVCC(63, 456, 0, 1) 4165 case AFRIZ: 4166 return OPVCC(63, 424, 0, 0) 4167 case AFRIZCC: 4168 return OPVCC(63, 424, 0, 1) 4169 case AFRIN: 4170 return OPVCC(63, 392, 0, 0) 4171 case AFRINCC: 4172 return OPVCC(63, 392, 0, 1) 4173 case AFRSP: 4174 return OPVCC(63, 12, 0, 0) 4175 case AFRSPCC: 4176 return OPVCC(63, 12, 0, 1) 4177 case AFRSQRTE: 4178 return OPVCC(63, 26, 0, 0) 4179 case AFRSQRTECC: 4180 return OPVCC(63, 26, 0, 1) 4181 case AFSEL: 4182 return OPVCC(63, 23, 0, 0) 4183 case AFSELCC: 4184 return OPVCC(63, 23, 0, 1) 4185 case AFSQRT: 4186 return OPVCC(63, 22, 0, 0) 4187 case AFSQRTCC: 4188 return OPVCC(63, 22, 0, 1) 4189 case AFSQRTS: 4190 return OPVCC(59, 22, 0, 0) 4191 case AFSQRTSCC: 4192 return OPVCC(59, 22, 0, 1) 4193 case AFSUB: 4194 return OPVCC(63, 20, 0, 0) 4195 case AFSUBCC: 4196 return OPVCC(63, 20, 0, 1) 4197 case AFSUBS: 4198 return OPVCC(59, 20, 0, 0) 4199 case AFSUBSCC: 4200 return OPVCC(59, 20, 0, 1) 4201 4202 case AICBI: 4203 return OPVCC(31, 982, 0, 0) 4204 case AISYNC: 4205 return OPVCC(19, 150, 0, 0) 4206 4207 case AMTFSB0: 4208 return OPVCC(63, 70, 0, 0) 4209 case AMTFSB0CC: 4210 return OPVCC(63, 70, 0, 1) 4211 case AMTFSB1: 4212 return OPVCC(63, 38, 0, 0) 4213 case AMTFSB1CC: 4214 return OPVCC(63, 38, 0, 1) 4215 4216 case AMULHW: 4217 return OPVCC(31, 75, 0, 0) 4218 case AMULHWCC: 4219 return OPVCC(31, 75, 0, 1) 4220 case AMULHWU: 4221 return OPVCC(31, 11, 0, 0) 4222 case AMULHWUCC: 4223 return OPVCC(31, 11, 0, 1) 4224 case AMULLW: 4225 return OPVCC(31, 235, 0, 0) 4226 case AMULLWCC: 4227 return OPVCC(31, 235, 0, 1) 4228 case AMULLWV: 4229 return OPVCC(31, 235, 1, 0) 4230 case AMULLWVCC: 4231 return OPVCC(31, 235, 1, 1) 4232 4233 case AMULHD: 4234 return OPVCC(31, 73, 0, 0) 4235 case AMULHDCC: 4236 return OPVCC(31, 73, 0, 1) 4237 case AMULHDU: 4238 return OPVCC(31, 9, 0, 0) 4239 case AMULHDUCC: 4240 return OPVCC(31, 9, 0, 1) 4241 case AMULLD: 4242 return OPVCC(31, 233, 0, 0) 4243 case AMULLDCC: 4244 return OPVCC(31, 233, 0, 1) 4245 case AMULLDV: 4246 return OPVCC(31, 233, 1, 0) 4247 case AMULLDVCC: 4248 return OPVCC(31, 233, 1, 1) 4249 4250 case ANAND: 4251 return OPVCC(31, 476, 0, 0) 4252 case ANANDCC: 4253 return OPVCC(31, 476, 0, 1) 4254 case ANEG: 4255 return OPVCC(31, 104, 0, 0) 4256 case ANEGCC: 4257 return OPVCC(31, 104, 0, 1) 4258 case ANEGV: 4259 return OPVCC(31, 104, 1, 0) 4260 case ANEGVCC: 4261 return OPVCC(31, 
104, 1, 1) 4262 case ANOR: 4263 return OPVCC(31, 124, 0, 0) 4264 case ANORCC: 4265 return OPVCC(31, 124, 0, 1) 4266 case AOR: 4267 return OPVCC(31, 444, 0, 0) 4268 case AORCC: 4269 return OPVCC(31, 444, 0, 1) 4270 case AORN: 4271 return OPVCC(31, 412, 0, 0) 4272 case AORNCC: 4273 return OPVCC(31, 412, 0, 1) 4274 4275 case APOPCNTD: 4276 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */ 4277 case APOPCNTW: 4278 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */ 4279 case APOPCNTB: 4280 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */ 4281 case ACNTTZW: 4282 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */ 4283 case ACNTTZWCC: 4284 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */ 4285 case ACNTTZD: 4286 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */ 4287 case ACNTTZDCC: 4288 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */ 4289 4290 case ARFI: 4291 return OPVCC(19, 50, 0, 0) 4292 case ARFCI: 4293 return OPVCC(19, 51, 0, 0) 4294 case ARFID: 4295 return OPVCC(19, 18, 0, 0) 4296 case AHRFID: 4297 return OPVCC(19, 274, 0, 0) 4298 4299 case ARLWNM: 4300 return OPVCC(23, 0, 0, 0) 4301 case ARLWNMCC: 4302 return OPVCC(23, 0, 0, 1) 4303 4304 case ARLDCL: 4305 return OPVCC(30, 8, 0, 0) 4306 case ARLDCLCC: 4307 return OPVCC(30, 0, 0, 1) 4308 4309 case ARLDCR: 4310 return OPVCC(30, 9, 0, 0) 4311 case ARLDCRCC: 4312 return OPVCC(30, 9, 0, 1) 4313 4314 case ARLDICL: 4315 return OPVCC(30, 0, 0, 0) 4316 case ARLDICLCC: 4317 return OPVCC(30, 0, 0, 1) 4318 case ARLDICR: 4319 return OPMD(30, 1, 0) // rldicr 4320 case ARLDICRCC: 4321 return OPMD(30, 1, 1) // rldicr. 4322 4323 case ARLDIC: 4324 return OPMD(30, 2, 0) // rldic 4325 case ARLDICCC: 4326 return OPMD(30, 2, 1) // rldic. 4327 4328 case ASYSCALL: 4329 return OPVCC(17, 1, 0, 0) 4330 4331 case ASLW: 4332 return OPVCC(31, 24, 0, 0) 4333 case ASLWCC: 4334 return OPVCC(31, 24, 0, 1) 4335 case ASLD: 4336 return OPVCC(31, 27, 0, 0) 4337 case ASLDCC: 4338 return OPVCC(31, 27, 0, 1) 4339 4340 case ASRAW: 4341 return OPVCC(31, 792, 0, 0) 4342 case ASRAWCC: 4343 return OPVCC(31, 792, 0, 1) 4344 case ASRAD: 4345 return OPVCC(31, 794, 0, 0) 4346 case ASRADCC: 4347 return OPVCC(31, 794, 0, 1) 4348 4349 case AEXTSWSLI: 4350 return OPVCC(31, 445, 0, 0) 4351 case AEXTSWSLICC: 4352 return OPVCC(31, 445, 0, 1) 4353 4354 case ASRW: 4355 return OPVCC(31, 536, 0, 0) 4356 case ASRWCC: 4357 return OPVCC(31, 536, 0, 1) 4358 case ASRD: 4359 return OPVCC(31, 539, 0, 0) 4360 case ASRDCC: 4361 return OPVCC(31, 539, 0, 1) 4362 4363 case ASUB: 4364 return OPVCC(31, 40, 0, 0) 4365 case ASUBCC: 4366 return OPVCC(31, 40, 0, 1) 4367 case ASUBV: 4368 return OPVCC(31, 40, 1, 0) 4369 case ASUBVCC: 4370 return OPVCC(31, 40, 1, 1) 4371 case ASUBC: 4372 return OPVCC(31, 8, 0, 0) 4373 case ASUBCCC: 4374 return OPVCC(31, 8, 0, 1) 4375 case ASUBCV: 4376 return OPVCC(31, 8, 1, 0) 4377 case ASUBCVCC: 4378 return OPVCC(31, 8, 1, 1) 4379 case ASUBE: 4380 return OPVCC(31, 136, 0, 0) 4381 case ASUBECC: 4382 return OPVCC(31, 136, 0, 1) 4383 case ASUBEV: 4384 return OPVCC(31, 136, 1, 0) 4385 case ASUBEVCC: 4386 return OPVCC(31, 136, 1, 1) 4387 case ASUBME: 4388 return OPVCC(31, 232, 0, 0) 4389 case ASUBMECC: 4390 return OPVCC(31, 232, 0, 1) 4391 case ASUBMEV: 4392 return OPVCC(31, 232, 1, 0) 4393 case ASUBMEVCC: 4394 return OPVCC(31, 232, 1, 1) 4395 case ASUBZE: 4396 return OPVCC(31, 200, 0, 0) 4397 case ASUBZECC: 4398 return OPVCC(31, 200, 0, 1) 4399 case ASUBZEV: 4400 return OPVCC(31, 200, 1, 0) 4401 case ASUBZEVCC: 4402 return OPVCC(31, 200, 1, 1) 4403 4404 case ASYNC: 4405 return OPVCC(31, 598, 
0, 0) 4406 case ALWSYNC: 4407 return OPVCC(31, 598, 0, 0) | 1<<21 4408 4409 case APTESYNC: 4410 return OPVCC(31, 598, 0, 0) | 2<<21 4411 4412 case ATLBIE: 4413 return OPVCC(31, 306, 0, 0) 4414 case ATLBIEL: 4415 return OPVCC(31, 274, 0, 0) 4416 case ATLBSYNC: 4417 return OPVCC(31, 566, 0, 0) 4418 case ASLBIA: 4419 return OPVCC(31, 498, 0, 0) 4420 case ASLBIE: 4421 return OPVCC(31, 434, 0, 0) 4422 case ASLBMFEE: 4423 return OPVCC(31, 915, 0, 0) 4424 case ASLBMFEV: 4425 return OPVCC(31, 851, 0, 0) 4426 case ASLBMTE: 4427 return OPVCC(31, 402, 0, 0) 4428 4429 case ATW: 4430 return OPVCC(31, 4, 0, 0) 4431 case ATD: 4432 return OPVCC(31, 68, 0, 0) 4433 4434 /* Vector (VMX/Altivec) instructions */ 4435 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4436 /* are enabled starting at POWER6 (ISA 2.05). */ 4437 case AVAND: 4438 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */ 4439 case AVANDC: 4440 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */ 4441 case AVNAND: 4442 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */ 4443 4444 case AVOR: 4445 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */ 4446 case AVORC: 4447 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */ 4448 case AVNOR: 4449 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */ 4450 case AVXOR: 4451 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */ 4452 case AVEQV: 4453 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */ 4454 4455 case AVADDUBM: 4456 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */ 4457 case AVADDUHM: 4458 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */ 4459 case AVADDUWM: 4460 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */ 4461 case AVADDUDM: 4462 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */ 4463 case AVADDUQM: 4464 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */ 4465 4466 case AVADDCUQ: 4467 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */ 4468 case AVADDCUW: 4469 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */ 4470 4471 case AVADDUBS: 4472 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */ 4473 case AVADDUHS: 4474 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */ 4475 case AVADDUWS: 4476 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */ 4477 4478 case AVADDSBS: 4479 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */ 4480 case AVADDSHS: 4481 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */ 4482 case AVADDSWS: 4483 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */ 4484 4485 case AVADDEUQM: 4486 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */ 4487 case AVADDECUQ: 4488 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */ 4489 4490 case AVMULESB: 4491 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */ 4492 case AVMULOSB: 4493 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */ 4494 case AVMULEUB: 4495 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */ 4496 case AVMULOUB: 4497 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */ 4498 case AVMULESH: 4499 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */ 4500 case AVMULOSH: 4501 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */ 4502 case AVMULEUH: 4503 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */ 4504 case AVMULOUH: 4505 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */ 4506 case AVMULESW: 4507 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */ 4508 case AVMULOSW: 4509 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */ 4510 case AVMULEUW: 4511 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */ 4512 case AVMULOUW: 4513 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */ 4514 case AVMULUWM: 4515 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */ 4516 4517 case AVPMSUMB: 4518 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */ 4519 case AVPMSUMH: 4520 
return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */ 4521 case AVPMSUMW: 4522 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */ 4523 case AVPMSUMD: 4524 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */ 4525 4526 case AVMSUMUDM: 4527 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */ 4528 4529 case AVSUBUBM: 4530 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */ 4531 case AVSUBUHM: 4532 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */ 4533 case AVSUBUWM: 4534 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */ 4535 case AVSUBUDM: 4536 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */ 4537 case AVSUBUQM: 4538 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */ 4539 4540 case AVSUBCUQ: 4541 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */ 4542 case AVSUBCUW: 4543 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */ 4544 4545 case AVSUBUBS: 4546 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */ 4547 case AVSUBUHS: 4548 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */ 4549 case AVSUBUWS: 4550 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */ 4551 4552 case AVSUBSBS: 4553 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */ 4554 case AVSUBSHS: 4555 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */ 4556 case AVSUBSWS: 4557 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */ 4558 4559 case AVSUBEUQM: 4560 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */ 4561 case AVSUBECUQ: 4562 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */ 4563 4564 case AVRLB: 4565 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */ 4566 case AVRLH: 4567 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */ 4568 case AVRLW: 4569 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */ 4570 case AVRLD: 4571 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */ 4572 4573 case AVMRGOW: 4574 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */ 4575 case AVMRGEW: 4576 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */ 4577 4578 case AVSLB: 4579 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */ 4580 case AVSLH: 4581 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */ 4582 case AVSLW: 4583 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */ 4584 case AVSL: 4585 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */ 4586 case AVSLO: 4587 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */ 4588 case AVSRB: 4589 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */ 4590 case AVSRH: 4591 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */ 4592 case AVSRW: 4593 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */ 4594 case AVSR: 4595 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */ 4596 case AVSRO: 4597 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */ 4598 case AVSLD: 4599 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */ 4600 case AVSRD: 4601 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */ 4602 4603 case AVSRAB: 4604 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */ 4605 case AVSRAH: 4606 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */ 4607 case AVSRAW: 4608 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */ 4609 case AVSRAD: 4610 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */ 4611 4612 case AVBPERMQ: 4613 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */ 4614 case AVBPERMD: 4615 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */ 4616 4617 case AVCLZB: 4618 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */ 4619 case AVCLZH: 4620 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */ 4621 case AVCLZW: 4622 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */ 4623 case AVCLZD: 4624 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */ 4625 4626 case AVCLZLSBB: 4627 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */ 4628 case AVCTZLSBB: 4629 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */ 4630 4631 case AVPOPCNTB: 4632 
return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */ 4633 case AVPOPCNTH: 4634 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */ 4635 case AVPOPCNTW: 4636 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */ 4637 case AVPOPCNTD: 4638 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */ 4639 4640 case AVCMPEQUB: 4641 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */ 4642 case AVCMPEQUBCC: 4643 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */ 4644 case AVCMPEQUH: 4645 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */ 4646 case AVCMPEQUHCC: 4647 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */ 4648 case AVCMPEQUW: 4649 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */ 4650 case AVCMPEQUWCC: 4651 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */ 4652 case AVCMPEQUD: 4653 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */ 4654 case AVCMPEQUDCC: 4655 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */ 4656 4657 case AVCMPGTUB: 4658 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */ 4659 case AVCMPGTUBCC: 4660 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */ 4661 case AVCMPGTUH: 4662 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */ 4663 case AVCMPGTUHCC: 4664 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */ 4665 case AVCMPGTUW: 4666 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */ 4667 case AVCMPGTUWCC: 4668 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */ 4669 case AVCMPGTUD: 4670 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */ 4671 case AVCMPGTUDCC: 4672 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */ 4673 case AVCMPGTSB: 4674 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */ 4675 case AVCMPGTSBCC: 4676 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */ 4677 case AVCMPGTSH: 4678 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */ 4679 case AVCMPGTSHCC: 4680 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */ 4681 case AVCMPGTSW: 4682 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */ 4683 case AVCMPGTSWCC: 4684 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */ 4685 case AVCMPGTSD: 4686 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */ 4687 case AVCMPGTSDCC: 4688 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */ 4689 4690 case AVCMPNEZB: 4691 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */ 4692 case AVCMPNEZBCC: 4693 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */ 4694 case AVCMPNEB: 4695 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */ 4696 case AVCMPNEBCC: 4697 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */ 4698 case AVCMPNEH: 4699 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */ 4700 case AVCMPNEHCC: 4701 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */ 4702 case AVCMPNEW: 4703 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */ 4704 case AVCMPNEWCC: 4705 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */ 4706 4707 case AVPERM: 4708 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */ 4709 case AVPERMXOR: 4710 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */ 4711 case AVPERMR: 4712 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */ 4713 4714 case AVSEL: 4715 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */ 4716 4717 case AVCIPHER: 4718 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */ 4719 case AVCIPHERLAST: 4720 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */ 4721 case AVNCIPHER: 4722 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */ 4723 case AVNCIPHERLAST: 4724 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */ 4725 case AVSBOX: 4726 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */ 4727 /* End of vector instructions */ 4728 4729 /* Vector scalar (VSX) instructions */ 4730 /* ISA 2.06 enables these for POWER7. 
*/ 4731 case AMFVSRD, AMFVRD, AMFFPRD: 4732 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */ 4733 case AMFVSRWZ: 4734 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */ 4735 case AMFVSRLD: 4736 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */ 4737 4738 case AMTVSRD, AMTFPRD, AMTVRD: 4739 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */ 4740 case AMTVSRWA: 4741 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */ 4742 case AMTVSRWZ: 4743 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */ 4744 case AMTVSRDD: 4745 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */ 4746 case AMTVSRWS: 4747 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */ 4748 4749 case AXXLAND: 4750 return OPVXX3(60, 130, 0) /* xxland - v2.06 */ 4751 case AXXLANDC: 4752 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */ 4753 case AXXLEQV: 4754 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */ 4755 case AXXLNAND: 4756 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */ 4757 4758 case AXXLORC: 4759 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */ 4760 case AXXLNOR: 4761 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */ 4762 case AXXLOR, AXXLORQ: 4763 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */ 4764 case AXXLXOR: 4765 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */ 4766 4767 case AXXSEL: 4768 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */ 4769 4770 case AXXMRGHW: 4771 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */ 4772 case AXXMRGLW: 4773 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */ 4774 4775 case AXXSPLTW: 4776 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */ 4777 4778 case AXXSPLTIB: 4779 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */ 4780 4781 case AXXPERM: 4782 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */ 4783 case AXXPERMDI: 4784 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */ 4785 4786 case AXXSLDWI: 4787 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */ 4788 4789 case AXXBRQ: 4790 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */ 4791 case AXXBRD: 4792 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */ 4793 case AXXBRW: 4794 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */ 4795 case AXXBRH: 4796 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */ 4797 4798 case AXSCVDPSP: 4799 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */ 4800 case AXSCVSPDP: 4801 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */ 4802 case AXSCVDPSPN: 4803 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */ 4804 case AXSCVSPDPN: 4805 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */ 4806 4807 case AXVCVDPSP: 4808 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */ 4809 case AXVCVSPDP: 4810 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */ 4811 4812 case AXSCVDPSXDS: 4813 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */ 4814 case AXSCVDPSXWS: 4815 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */ 4816 case AXSCVDPUXDS: 4817 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */ 4818 case AXSCVDPUXWS: 4819 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */ 4820 4821 case AXSCVSXDDP: 4822 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */ 4823 case AXSCVUXDDP: 4824 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */ 4825 case AXSCVSXDSP: 4826 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */ 4827 case AXSCVUXDSP: 4828 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */ 4829 4830 case AXVCVDPSXDS: 4831 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */ 4832 case AXVCVDPSXWS: 4833 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */ 4834 case AXVCVDPUXDS: 4835 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */ 4836 case AXVCVDPUXWS: 4837 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */ 4838 case AXVCVSPSXDS: 4839 return 
OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */ 4840 case AXVCVSPSXWS: 4841 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */ 4842 case AXVCVSPUXDS: 4843 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */ 4844 case AXVCVSPUXWS: 4845 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */ 4846 4847 case AXVCVSXDDP: 4848 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */ 4849 case AXVCVSXWDP: 4850 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */ 4851 case AXVCVUXDDP: 4852 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */ 4853 case AXVCVUXWDP: 4854 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */ 4855 case AXVCVSXDSP: 4856 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */ 4857 case AXVCVSXWSP: 4858 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */ 4859 case AXVCVUXDSP: 4860 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */ 4861 case AXVCVUXWSP: 4862 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */ 4863 /* End of VSX instructions */ 4864 4865 case AMADDHD: 4866 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */ 4867 case AMADDHDU: 4868 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */ 4869 case AMADDLD: 4870 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */ 4871 4872 case AXOR: 4873 return OPVCC(31, 316, 0, 0) 4874 case AXORCC: 4875 return OPVCC(31, 316, 0, 1) 4876 } 4877 4878 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a) 4879 return 0 4880 } 4881 4882 func (c *ctxt9) opirrr(a obj.As) uint32 { 4883 switch a { 4884 /* Vector (VMX/Altivec) instructions */ 4885 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4886 /* are enabled starting at POWER6 (ISA 2.05). */ 4887 case AVSLDOI: 4888 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */ 4889 } 4890 4891 c.ctxt.Diag("bad i/r/r/r opcode %v", a) 4892 return 0 4893 } 4894 4895 func (c *ctxt9) opiirr(a obj.As) uint32 { 4896 switch a { 4897 /* Vector (VMX/Altivec) instructions */ 4898 /* ISA 2.07 enables these for POWER8 and beyond. */ 4899 case AVSHASIGMAW: 4900 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */ 4901 case AVSHASIGMAD: 4902 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */ 4903 } 4904 4905 c.ctxt.Diag("bad i/i/r/r opcode %v", a) 4906 return 0 4907 } 4908 4909 func (c *ctxt9) opirr(a obj.As) uint32 { 4910 switch a { 4911 case AADD: 4912 return OPVCC(14, 0, 0, 0) 4913 case AADDC: 4914 return OPVCC(12, 0, 0, 0) 4915 case AADDCCC: 4916 return OPVCC(13, 0, 0, 0) 4917 case AADDIS: 4918 return OPVCC(15, 0, 0, 0) /* ADDIS */ 4919 4920 case AANDCC: 4921 return OPVCC(28, 0, 0, 0) 4922 case AANDISCC: 4923 return OPVCC(29, 0, 0, 0) /* ANDIS. 
*/ 4924 4925 case ABR: 4926 return OPVCC(18, 0, 0, 0) 4927 case ABL: 4928 return OPVCC(18, 0, 0, 0) | 1 4929 case obj.ADUFFZERO: 4930 return OPVCC(18, 0, 0, 0) | 1 4931 case obj.ADUFFCOPY: 4932 return OPVCC(18, 0, 0, 0) | 1 4933 case ABC: 4934 return OPVCC(16, 0, 0, 0) 4935 case ABCL: 4936 return OPVCC(16, 0, 0, 0) | 1 4937 4938 case ABEQ: 4939 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0) 4940 case ABGE: 4941 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0) 4942 case ABGT: 4943 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0) 4944 case ABLE: 4945 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0) 4946 case ABLT: 4947 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0) 4948 case ABNE: 4949 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0) 4950 case ABVC: 4951 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0) 4952 case ABVS: 4953 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0) 4954 case ABDZ: 4955 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0) 4956 case ABDNZ: 4957 return AOP_RRR(16<<26, BO_BCTR, 0, 0) 4958 4959 case ACMP: 4960 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */ 4961 case ACMPU: 4962 return OPVCC(10, 0, 0, 0) | 1<<21 4963 case ACMPW: 4964 return OPVCC(11, 0, 0, 0) /* L=0 */ 4965 case ACMPWU: 4966 return OPVCC(10, 0, 0, 0) 4967 case ACMPEQB: 4968 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ 4969 4970 case ALSW: 4971 return OPVCC(31, 597, 0, 0) 4972 4973 case ACOPY: 4974 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */ 4975 case APASTECC: 4976 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */ 4977 case ADARN: 4978 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */ 4979 4980 case AMULLW, AMULLD: 4981 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */ 4982 4983 case AOR: 4984 return OPVCC(24, 0, 0, 0) 4985 case AORIS: 4986 return OPVCC(25, 0, 0, 0) /* ORIS */ 4987 4988 case ARLWMI: 4989 return OPVCC(20, 0, 0, 0) /* rlwimi */ 4990 case ARLWMICC: 4991 return OPVCC(20, 0, 0, 1) 4992 case ARLDMI: 4993 return OPMD(30, 3, 0) /* rldimi */ 4994 case ARLDMICC: 4995 return OPMD(30, 3, 1) /* rldimi. */ 4996 case ARLDIMI: 4997 return OPMD(30, 3, 0) /* rldimi */ 4998 case ARLDIMICC: 4999 return OPMD(30, 3, 1) /* rldimi. */ 5000 case ARLWNM: 5001 return OPVCC(21, 0, 0, 0) /* rlwinm */ 5002 case ARLWNMCC: 5003 return OPVCC(21, 0, 0, 1) 5004 5005 case ARLDCL: 5006 return OPMD(30, 0, 0) /* rldicl */ 5007 case ARLDCLCC: 5008 return OPMD(30, 0, 1) /* rldicl. */ 5009 case ARLDCR: 5010 return OPMD(30, 1, 0) /* rldicr */ 5011 case ARLDCRCC: 5012 return OPMD(30, 1, 1) /* rldicr. */ 5013 case ARLDC: 5014 return OPMD(30, 2, 0) /* rldic */ 5015 case ARLDCCC: 5016 return OPMD(30, 2, 1) /* rldic. */ 5017 5018 case ASRAW: 5019 return OPVCC(31, 824, 0, 0) 5020 case ASRAWCC: 5021 return OPVCC(31, 824, 0, 1) 5022 case ASRAD: 5023 return OPVCC(31, (413 << 1), 0, 0) 5024 case ASRADCC: 5025 return OPVCC(31, (413 << 1), 0, 1) 5026 case AEXTSWSLI: 5027 return OPVCC(31, 445, 0, 0) 5028 case AEXTSWSLICC: 5029 return OPVCC(31, 445, 0, 1) 5030 5031 case ASTSW: 5032 return OPVCC(31, 725, 0, 0) 5033 5034 case ASUBC: 5035 return OPVCC(8, 0, 0, 0) 5036 5037 case ATW: 5038 return OPVCC(3, 0, 0, 0) 5039 case ATD: 5040 return OPVCC(2, 0, 0, 0) 5041 5042 /* Vector (VMX/Altivec) instructions */ 5043 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 5044 /* are enabled starting at POWER6 (ISA 2.05). 
*/ 5045 case AVSPLTB: 5046 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */ 5047 case AVSPLTH: 5048 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */ 5049 case AVSPLTW: 5050 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */ 5051 5052 case AVSPLTISB: 5053 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */ 5054 case AVSPLTISH: 5055 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */ 5056 case AVSPLTISW: 5057 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */ 5058 /* End of vector instructions */ 5059 5060 case AFTDIV: 5061 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */ 5062 case AFTSQRT: 5063 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */ 5064 5065 case AXOR: 5066 return OPVCC(26, 0, 0, 0) /* XORIL */ 5067 case AXORIS: 5068 return OPVCC(27, 0, 0, 0) /* XORIS */ 5069 } 5070 5071 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a) 5072 return 0 5073 } 5074 5075 /* 5076 * load o(a),d 5077 */ 5078 func (c *ctxt9) opload(a obj.As) uint32 { 5079 switch a { 5080 case AMOVD: 5081 return OPVCC(58, 0, 0, 0) /* ld */ 5082 case AMOVDU: 5083 return OPVCC(58, 0, 0, 1) /* ldu */ 5084 case AMOVWZ: 5085 return OPVCC(32, 0, 0, 0) /* lwz */ 5086 case AMOVWZU: 5087 return OPVCC(33, 0, 0, 0) /* lwzu */ 5088 case AMOVW: 5089 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */ 5090 case ALXV: 5091 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */ 5092 case ALXVL: 5093 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */ 5094 case ALXVLL: 5095 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */ 5096 case ALXVX: 5097 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */ 5098 5099 /* no AMOVWU */ 5100 case AMOVB, AMOVBZ: 5101 return OPVCC(34, 0, 0, 0) 5102 /* load */ 5103 5104 case AMOVBU, AMOVBZU: 5105 return OPVCC(35, 0, 0, 0) 5106 case AFMOVD: 5107 return OPVCC(50, 0, 0, 0) 5108 case AFMOVDU: 5109 return OPVCC(51, 0, 0, 0) 5110 case AFMOVS: 5111 return OPVCC(48, 0, 0, 0) 5112 case AFMOVSU: 5113 return OPVCC(49, 0, 0, 0) 5114 case AMOVH: 5115 return OPVCC(42, 0, 0, 0) 5116 case AMOVHU: 5117 return OPVCC(43, 0, 0, 0) 5118 case AMOVHZ: 5119 return OPVCC(40, 0, 0, 0) 5120 case AMOVHZU: 5121 return OPVCC(41, 0, 0, 0) 5122 case AMOVMW: 5123 return OPVCC(46, 0, 0, 0) /* lmw */ 5124 } 5125 5126 c.ctxt.Diag("bad load opcode %v", a) 5127 return 0 5128 } 5129 5130 /* 5131 * indexed load a(b),d 5132 */ 5133 func (c *ctxt9) oploadx(a obj.As) uint32 { 5134 switch a { 5135 case AMOVWZ: 5136 return OPVCC(31, 23, 0, 0) /* lwzx */ 5137 case AMOVWZU: 5138 return OPVCC(31, 55, 0, 0) /* lwzux */ 5139 case AMOVW: 5140 return OPVCC(31, 341, 0, 0) /* lwax */ 5141 case AMOVWU: 5142 return OPVCC(31, 373, 0, 0) /* lwaux */ 5143 5144 case AMOVB, AMOVBZ: 5145 return OPVCC(31, 87, 0, 0) /* lbzx */ 5146 5147 case AMOVBU, AMOVBZU: 5148 return OPVCC(31, 119, 0, 0) /* lbzux */ 5149 case AFMOVD: 5150 return OPVCC(31, 599, 0, 0) /* lfdx */ 5151 case AFMOVDU: 5152 return OPVCC(31, 631, 0, 0) /* lfdux */ 5153 case AFMOVS: 5154 return OPVCC(31, 535, 0, 0) /* lfsx */ 5155 case AFMOVSU: 5156 return OPVCC(31, 567, 0, 0) /* lfsux */ 5157 case AFMOVSX: 5158 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */ 5159 case AFMOVSZ: 5160 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */ 5161 case AMOVH: 5162 return OPVCC(31, 343, 0, 0) /* lhax */ 5163 case AMOVHU: 5164 return OPVCC(31, 375, 0, 0) /* lhaux */ 5165 case AMOVHBR: 5166 return OPVCC(31, 790, 0, 0) /* lhbrx */ 5167 case AMOVWBR: 5168 return OPVCC(31, 534, 0, 0) /* lwbrx */ 5169 case AMOVDBR: 5170 return OPVCC(31, 532, 0, 0) /* ldbrx */ 5171 case AMOVHZ: 5172 return OPVCC(31, 279, 0, 0) /* lhzx */ 5173 case AMOVHZU: 5174 return 
OPVCC(31, 311, 0, 0) /* lhzux */ 5175 case ALBAR: 5176 return OPVCC(31, 52, 0, 0) /* lbarx */ 5177 case ALHAR: 5178 return OPVCC(31, 116, 0, 0) /* lharx */ 5179 case ALWAR: 5180 return OPVCC(31, 20, 0, 0) /* lwarx */ 5181 case ALDAR: 5182 return OPVCC(31, 84, 0, 0) /* ldarx */ 5183 case ALSW: 5184 return OPVCC(31, 533, 0, 0) /* lswx */ 5185 case AMOVD: 5186 return OPVCC(31, 21, 0, 0) /* ldx */ 5187 case AMOVDU: 5188 return OPVCC(31, 53, 0, 0) /* ldux */ 5189 5190 /* Vector (VMX/Altivec) instructions */ 5191 case ALVEBX: 5192 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */ 5193 case ALVEHX: 5194 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */ 5195 case ALVEWX: 5196 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */ 5197 case ALVX: 5198 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */ 5199 case ALVXL: 5200 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */ 5201 case ALVSL: 5202 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */ 5203 case ALVSR: 5204 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */ 5205 /* End of vector instructions */ 5206 5207 /* Vector scalar (VSX) instructions */ 5208 case ALXVX: 5209 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */ 5210 case ALXVD2X: 5211 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */ 5212 case ALXVW4X: 5213 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */ 5214 case ALXVH8X: 5215 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */ 5216 case ALXVB16X: 5217 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */ 5218 case ALXVDSX: 5219 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */ 5220 case ALXSDX: 5221 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */ 5222 case ALXSIWAX: 5223 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */ 5224 case ALXSIWZX: 5225 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */ 5226 } 5227 5228 c.ctxt.Diag("bad loadx opcode %v", a) 5229 return 0 5230 } 5231 5232 /* 5233 * store s,o(d) 5234 */ 5235 func (c *ctxt9) opstore(a obj.As) uint32 { 5236 switch a { 5237 case AMOVB, AMOVBZ: 5238 return OPVCC(38, 0, 0, 0) /* stb */ 5239 5240 case AMOVBU, AMOVBZU: 5241 return OPVCC(39, 0, 0, 0) /* stbu */ 5242 case AFMOVD: 5243 return OPVCC(54, 0, 0, 0) /* stfd */ 5244 case AFMOVDU: 5245 return OPVCC(55, 0, 0, 0) /* stfdu */ 5246 case AFMOVS: 5247 return OPVCC(52, 0, 0, 0) /* stfs */ 5248 case AFMOVSU: 5249 return OPVCC(53, 0, 0, 0) /* stfsu */ 5250 5251 case AMOVHZ, AMOVH: 5252 return OPVCC(44, 0, 0, 0) /* sth */ 5253 5254 case AMOVHZU, AMOVHU: 5255 return OPVCC(45, 0, 0, 0) /* sthu */ 5256 case AMOVMW: 5257 return OPVCC(47, 0, 0, 0) /* stmw */ 5258 case ASTSW: 5259 return OPVCC(31, 725, 0, 0) /* stswi */ 5260 5261 case AMOVWZ, AMOVW: 5262 return OPVCC(36, 0, 0, 0) /* stw */ 5263 5264 case AMOVWZU, AMOVWU: 5265 return OPVCC(37, 0, 0, 0) /* stwu */ 5266 case AMOVD: 5267 return OPVCC(62, 0, 0, 0) /* std */ 5268 case AMOVDU: 5269 return OPVCC(62, 0, 0, 1) /* stdu */ 5270 case ASTXV: 5271 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */ 5272 case ASTXVL: 5273 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */ 5274 case ASTXVLL: 5275 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */ 5276 case ASTXVX: 5277 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */ 5278 5279 } 5280 5281 c.ctxt.Diag("unknown store opcode %v", a) 5282 return 0 5283 } 5284 5285 /* 5286 * indexed store s,a(b) 5287 */ 5288 func (c *ctxt9) opstorex(a obj.As) uint32 { 5289 switch a { 5290 case AMOVB, AMOVBZ: 5291 return OPVCC(31, 215, 0, 0) /* stbx */ 5292 5293 case AMOVBU, AMOVBZU: 5294 return OPVCC(31, 247, 0, 0) /* stbux */ 5295 case AFMOVD: 5296 return OPVCC(31, 727, 0, 0) /* stfdx */ 5297 case AFMOVDU: 5298 return OPVCC(31, 759, 0, 0) /* stfdux 
*/ 5299 case AFMOVS: 5300 return OPVCC(31, 663, 0, 0) /* stfsx */ 5301 case AFMOVSU: 5302 return OPVCC(31, 695, 0, 0) /* stfsux */ 5303 case AFMOVSX: 5304 return OPVCC(31, 983, 0, 0) /* stfiwx */ 5305 5306 case AMOVHZ, AMOVH: 5307 return OPVCC(31, 407, 0, 0) /* sthx */ 5308 case AMOVHBR: 5309 return OPVCC(31, 918, 0, 0) /* sthbrx */ 5310 5311 case AMOVHZU, AMOVHU: 5312 return OPVCC(31, 439, 0, 0) /* sthux */ 5313 5314 case AMOVWZ, AMOVW: 5315 return OPVCC(31, 151, 0, 0) /* stwx */ 5316 5317 case AMOVWZU, AMOVWU: 5318 return OPVCC(31, 183, 0, 0) /* stwux */ 5319 case ASTSW: 5320 return OPVCC(31, 661, 0, 0) /* stswx */ 5321 case AMOVWBR: 5322 return OPVCC(31, 662, 0, 0) /* stwbrx */ 5323 case AMOVDBR: 5324 return OPVCC(31, 660, 0, 0) /* stdbrx */ 5325 case ASTBCCC: 5326 return OPVCC(31, 694, 0, 1) /* stbcx. */ 5327 case ASTHCCC: 5328 return OPVCC(31, 726, 0, 1) /* sthcx. */ 5329 case ASTWCCC: 5330 return OPVCC(31, 150, 0, 1) /* stwcx. */ 5331 case ASTDCCC: 5332 return OPVCC(31, 214, 0, 1) /* stwdx. */ 5333 case AMOVD: 5334 return OPVCC(31, 149, 0, 0) /* stdx */ 5335 case AMOVDU: 5336 return OPVCC(31, 181, 0, 0) /* stdux */ 5337 5338 /* Vector (VMX/Altivec) instructions */ 5339 case ASTVEBX: 5340 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */ 5341 case ASTVEHX: 5342 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */ 5343 case ASTVEWX: 5344 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */ 5345 case ASTVX: 5346 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */ 5347 case ASTVXL: 5348 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */ 5349 /* End of vector instructions */ 5350 5351 /* Vector scalar (VSX) instructions */ 5352 case ASTXVX: 5353 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */ 5354 case ASTXVD2X: 5355 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */ 5356 case ASTXVW4X: 5357 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */ 5358 case ASTXVH8X: 5359 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */ 5360 case ASTXVB16X: 5361 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */ 5362 5363 case ASTXSDX: 5364 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */ 5365 5366 case ASTXSIWX: 5367 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */ 5368 5369 /* End of vector scalar instructions */ 5370 5371 } 5372 5373 c.ctxt.Diag("unknown storex opcode %v", a) 5374 return 0 5375 }
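As a reading aid for the tables above: the opcode helpers used throughout this section (OPVCC, AOP_RRR, and the other packers) are defined elsewhere in this file. The fragment below is a minimal, self-contained sketch of the field layout they appear to follow, rebuilding the encoding of add r3,r4,r5 from the OPVCC(31, 266, 0, 0) entry in oprrr; the demo* helpers are illustrative stand-ins, not the file's actual definitions.

package main

import "fmt"

// demoOPVCC mimics the packing convention that entries such as
// OPVCC(31, 266, 0, 0) (add, in oprrr above) rely on: the primary opcode in
// the top six bits, the extended opcode just above the Rc bit, plus the OE
// and Rc flags. Illustrative stand-in only, not the file's real helper.
func demoOPVCC(o, xo, oe, rc uint32) uint32 {
	return o<<26 | xo<<1 | oe<<10 | rc&1
}

// demoAOPRRR drops RT/RA/RB into the three 5-bit register fields, the same
// layout the asmout cases assume when they call AOP_RRR.
func demoAOPRRR(op, d, a, b uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
}

func main() {
	op := demoOPVCC(31, 266, 0, 0)  // base opcode word for add
	insn := demoAOPRRR(op, 3, 4, 5) // add r3,r4,r5
	fmt.Printf("%#x\n", insn)       // prints 0x7c642a14
}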