github.com/tidwall/go@v0.0.0-20170415222209-6694a6888b7d/src/cmd/internal/obj/ppc64/asm9.go

// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
//
//	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
//	Portions Copyright © 1997-1999 Vita Nuova Limited
//	Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
//	Portions Copyright © 2004,2006 Bruce Ellis
//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
//	Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
//	Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package ppc64

import (
	"cmd/internal/obj"
	"encoding/binary"
	"fmt"
	"log"
	"sort"
)

// ctxt9 holds state while assembling a single function.
// Each function gets a fresh ctxt9.
// This allows for multiple functions to be safely concurrently assembled.
type ctxt9 struct {
	ctxt       *obj.Link
	newprog    obj.ProgAlloc
	cursym     *obj.LSym
	autosize   int32
	instoffset int64
	pc         int64
}

// Instruction layout.

const (
	funcAlign = 8
)

const (
	r0iszero = 1
)

type Optab struct {
	as    obj.As // Opcode
	a1    uint8
	a2    uint8
	a3    uint8
	a4    uint8
	type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
	size  int8
	param int16
}

var optab = []Optab{
	{obj.ATEXT, C_LEXT, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
	{obj.ATEXT, C_LEXT, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
	{obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0, 0},
	{obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0, 0},
	/* move register */
	{AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
	{AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
	{AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
	{AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
	{AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
	{AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
	{AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
	{AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
	{AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
	{AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
	{AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
	{AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
	{AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
	{AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
	{AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
	{AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
	{AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
	{AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
	{AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
	{AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
	{AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
	{AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
	{AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
	{AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
	{AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
	{AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
	{AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
	{AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
	{AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
	{AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
	{AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
	{AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
	{AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
	{AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
	{AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
	{AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
	{AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
	{ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
	{ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
	{ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
	{ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
	{AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
	{AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
	{AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
	{AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
	{AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
	{AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
	{AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
	{AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
	{ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
	{ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
	{ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
	{ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
	{ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
	{ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
	{ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
	{ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
	{ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
	{ASLD, C_SCON, C_NONE,
C_NONE, C_REG, 25, 4, 0}, 138 {ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0}, 139 {ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0}, 140 {ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0}, 141 {ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, 142 {ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0}, 143 {ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0}, 144 {ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0}, 145 {ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, 146 {ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0}, 147 {ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0}, 148 {ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0}, 149 {ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0}, 150 {ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0}, 151 {ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0}, 152 {ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0}, 153 {ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0}, 154 {ARLDICL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0}, 155 {ARLDICL, C_SCON, C_REG, C_LCON, C_REG, 14, 4, 0}, 156 {ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0}, 157 {AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0}, 158 {AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 4, 0}, 159 {AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0}, 160 {AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0}, 161 {AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0}, 162 {AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 4, 0}, 163 {AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0}, 164 {AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 4, 0}, 165 166 /* store, short offset */ 167 {AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, 168 {AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, 169 {AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, 170 {AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, 171 {AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, 172 {AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, 173 {AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO}, 174 {AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, 175 {AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, 176 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, 177 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, 178 {AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, 179 {AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, 180 {AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, 181 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, 182 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, 183 {AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, 184 {AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, 185 {AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, 186 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, 187 {AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, 188 {AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, 189 {AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, 190 {AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, 191 192 /* load, short offset */ 193 {AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO}, 194 {AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO}, 195 {AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO}, 196 {AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO}, 197 {AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO}, 198 {AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO}, 199 {AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO}, 200 {AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB}, 201 {AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB}, 202 {AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB}, 
203 {AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB}, 204 {AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB}, 205 {AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP}, 206 {AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP}, 207 {AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP}, 208 {AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP}, 209 {AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP}, 210 {AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO}, 211 {AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO}, 212 {AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO}, 213 {AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO}, 214 {AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO}, 215 {AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO}, 216 {AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO}, 217 218 /* store, long offset */ 219 {AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, 220 {AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, 221 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, 222 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, 223 {AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, 224 {AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, 225 {AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, 226 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, 227 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, 228 {AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, 229 {AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, 230 {AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, 231 {AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, 232 {AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, 233 {AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, 234 {AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, 235 {AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, 236 {AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, 237 {AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, 238 {AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, 239 240 /* load, long offset */ 241 {AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB}, 242 {AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB}, 243 {AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB}, 244 {AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB}, 245 {AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB}, 246 {AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP}, 247 {AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP}, 248 {AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP}, 249 {AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP}, 250 {AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP}, 251 {AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO}, 252 {AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO}, 253 {AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO}, 254 {AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO}, 255 {AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO}, 256 {AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0}, 257 {AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0}, 258 {AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0}, 259 {AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0}, 260 {AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0}, 261 262 {AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 79, 4, 0}, 263 {AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 80, 8, 0}, 264 265 {AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 81, 8, 0}, 266 267 /* load constant */ 268 {AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, 269 {AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, 
REGSP}, 270 {AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB}, 271 {AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP}, 272 {AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, 273 {AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */ 274 {AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP}, 275 {AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB}, 276 {AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP}, 277 {AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, 278 {AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */ 279 {AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP}, 280 {AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB}, 281 {AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP}, 282 {AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, 283 284 /* load unsigned/long constants (TO DO: check) */ 285 {AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, 286 {AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0}, 287 {AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, 288 {AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0}, 289 {AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO}, 290 {AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0}, 291 {AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0}, 292 {AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0}, 293 {AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0}, 294 {AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0}, 295 {ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0}, 296 {ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0}, 297 {ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0}, 298 {ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0}, 299 {ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0}, 300 {ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, 301 {ABR, C_NONE, C_NONE, C_NONE, C_LBRAPIC, 11, 8, 0}, 302 {ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0}, 303 {ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0}, 304 {ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0}, 305 {ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0}, 306 {ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0}, 307 {ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0}, 308 {ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0}, 309 {ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0}, 310 {ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0}, 311 {ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0}, 312 {ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0}, 313 {AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB}, 314 {AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP}, 315 {AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO}, 316 {AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB}, 317 {AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP}, 318 {AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO}, 319 {AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0}, 320 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB}, 321 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP}, 322 {AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO}, 323 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB}, 324 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP}, 325 {AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO}, 326 {AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0}, 327 {AFMOVSX, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0}, 328 {AFMOVSX, C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0}, 329 {AFMOVSX, C_FREG, C_REG, C_NONE, C_ZOREG, 44, 4, 0}, 330 {AFMOVSX, C_FREG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0}, 331 {AFMOVSZ, C_ZOREG, C_REG, C_NONE, C_FREG, 45, 4, 0}, 332 {AFMOVSZ, 
C_ZOREG, C_NONE, C_NONE, C_FREG, 45, 4, 0}, 333 {ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0}, 334 {AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0}, 335 {ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0}, 336 {ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0}, 337 {AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0}, 338 {AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0}, 339 {AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0}, 340 {AISEL, C_LCON, C_REG, C_REG, C_REG, 84, 4, 0}, 341 {AISEL, C_ZCON, C_REG, C_REG, C_REG, 84, 4, 0}, 342 {ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0}, 343 {ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0}, 344 {AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0}, 345 {AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0}, 346 {AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0}, 347 {AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0}, 348 {AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0}, 349 {AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0}, 350 {AREMDU, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0}, 351 {AREMDU, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0}, 352 {AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0}, 353 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0}, 354 {AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0}, 355 {AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0}, 356 {AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0}, 357 {AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0}, /* mfmsr */ 358 {AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsrd */ 359 {AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */ 360 361 /* Other ISA 2.05+ instructions */ 362 {APOPCNTD, C_REG, C_NONE, C_NONE, C_REG, 93, 4, 0}, /* population count, x-form */ 363 {ACMPB, C_REG, C_REG, C_NONE, C_REG, 92, 4, 0}, /* compare byte, x-form */ 364 {AFTDIV, C_FREG, C_FREG, C_NONE, C_SCON, 92, 4, 0}, /* floating test for sw divide, x-form */ 365 {AFTSQRT, C_FREG, C_NONE, C_NONE, C_SCON, 93, 4, 0}, /* floating test for sw square root, x-form */ 366 367 /* Vector instructions */ 368 369 /* Vector load */ 370 {ALV, C_SOREG, C_NONE, C_NONE, C_VREG, 45, 4, 0}, /* vector load, x-form */ 371 372 /* Vector store */ 373 {ASTV, C_VREG, C_NONE, C_NONE, C_SOREG, 44, 4, 0}, /* vector store, x-form */ 374 375 /* Vector logical */ 376 {AVAND, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector and, vx-form */ 377 {AVOR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector or, vx-form */ 378 379 /* Vector add */ 380 {AVADDUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned modulo, vx-form */ 381 {AVADDCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add & write carry unsigned, vx-form */ 382 {AVADDUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add unsigned saturate, vx-form */ 383 {AVADDSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector add signed saturate, vx-form */ 384 {AVADDE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector add extended, va-form */ 385 386 /* Vector subtract */ 387 {AVSUBUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned modulo, vx-form */ 388 {AVSUBCU, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract & write carry unsigned, vx-form */ 389 {AVSUBUS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract unsigned saturate, vx-form */ 390 {AVSUBSS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector subtract signed saturate, vx-form */ 391 {AVSUBE, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector subtract extended, va-form */ 392 393 /* Vector multiply */ 394 {AVPMSUM, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 
0}, /* vector polynomial multiply & sum, vx-form */ 395 396 /* Vector rotate */ 397 {AVR, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector rotate, vx-form */ 398 399 /* Vector shift */ 400 {AVS, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift, vx-form */ 401 {AVSA, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector shift algebraic, vx-form */ 402 {AVSOI, C_ANDCON, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector shift by octet immediate, va-form */ 403 404 /* Vector count */ 405 {AVCLZ, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector count leading zeros, vx-form */ 406 {AVPOPCNT, C_VREG, C_NONE, C_NONE, C_VREG, 85, 4, 0}, /* vector population count, vx-form */ 407 408 /* Vector compare */ 409 {AVCMPEQ, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare equal, vc-form */ 410 {AVCMPGT, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector compare greater than, vc-form */ 411 412 /* Vector permute */ 413 {AVPERM, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector permute, va-form */ 414 415 /* Vector select */ 416 {AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 83, 4, 0}, /* vector select, va-form */ 417 418 /* Vector splat */ 419 {AVSPLT, C_SCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector splat, vx-form */ 420 {AVSPLT, C_ADDCON, C_VREG, C_NONE, C_VREG, 82, 4, 0}, 421 {AVSPLTI, C_SCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector splat immediate, vx-form */ 422 {AVSPLTI, C_ADDCON, C_NONE, C_NONE, C_VREG, 82, 4, 0}, 423 424 /* Vector AES */ 425 {AVCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES cipher, vx-form */ 426 {AVNCIPH, C_VREG, C_VREG, C_NONE, C_VREG, 82, 4, 0}, /* vector AES inverse cipher, vx-form */ 427 {AVSBOX, C_VREG, C_NONE, C_NONE, C_VREG, 82, 4, 0}, /* vector AES subbytes, vx-form */ 428 429 /* Vector SHA */ 430 {AVSHASIGMA, C_ANDCON, C_VREG, C_ANDCON, C_VREG, 82, 4, 0}, /* vector SHA sigma, vx-form */ 431 432 /* VSX vector load */ 433 {ALXV, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx vector load, xx1-form */ 434 435 /* VSX vector store */ 436 {ASTXV, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx vector store, xx1-form */ 437 438 /* VSX scalar load */ 439 {ALXS, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar load, xx1-form */ 440 441 /* VSX scalar store */ 442 {ASTXS, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar store, xx1-form */ 443 444 /* VSX scalar as integer load */ 445 {ALXSI, C_SOREG, C_NONE, C_NONE, C_VSREG, 87, 4, 0}, /* vsx scalar as integer load, xx1-form */ 446 447 /* VSX scalar store as integer */ 448 {ASTXSI, C_VSREG, C_NONE, C_NONE, C_SOREG, 86, 4, 0}, /* vsx scalar as integer store, xx1-form */ 449 450 /* VSX move from VSR */ 451 {AMFVSR, C_VSREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, /* vsx move from vsr, xx1-form */ 452 {AMFVSR, C_FREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, 453 {AMFVSR, C_VREG, C_NONE, C_NONE, C_REG, 88, 4, 0}, 454 455 /* VSX move to VSR */ 456 {AMTVSR, C_REG, C_NONE, C_NONE, C_VSREG, 88, 4, 0}, /* vsx move to vsr, xx1-form */ 457 {AMTVSR, C_REG, C_NONE, C_NONE, C_FREG, 88, 4, 0}, 458 {AMTVSR, C_REG, C_NONE, C_NONE, C_VREG, 88, 4, 0}, 459 460 /* VSX logical */ 461 {AXXLAND, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx and, xx3-form */ 462 {AXXLOR, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx or, xx3-form */ 463 464 /* VSX select */ 465 {AXXSEL, C_VSREG, C_VSREG, C_VSREG, C_VSREG, 91, 4, 0}, /* vsx select, xx4-form */ 466 467 /* VSX merge */ 468 {AXXMRG, C_VSREG, C_VSREG, C_NONE, C_VSREG, 90, 4, 0}, /* vsx merge, xx3-form */ 469 470 /* VSX splat */ 471 
{AXXSPLT, C_VSREG, C_NONE, C_SCON, C_VSREG, 89, 4, 0}, /* vsx splat, xx2-form */ 472 473 /* VSX permute */ 474 {AXXPERM, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx permute, xx3-form */ 475 476 /* VSX shift */ 477 {AXXSI, C_VSREG, C_VSREG, C_SCON, C_VSREG, 90, 4, 0}, /* vsx shift immediate, xx3-form */ 478 479 /* VSX scalar FP-FP conversion */ 480 {AXSCV, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-fp conversion, xx2-form */ 481 482 /* VSX vector FP-FP conversion */ 483 {AXVCV, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-fp conversion, xx2-form */ 484 485 /* VSX scalar FP-integer conversion */ 486 {AXSCVX, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar fp-integer conversion, xx2-form */ 487 488 /* VSX scalar integer-FP conversion */ 489 {AXSCVXP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx scalar integer-fp conversion, xx2-form */ 490 491 /* VSX vector FP-integer conversion */ 492 {AXVCVX, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector fp-integer conversion, xx2-form */ 493 494 /* VSX vector integer-FP conversion */ 495 {AXVCVXP, C_VSREG, C_NONE, C_NONE, C_VSREG, 89, 4, 0}, /* vsx vector integer-fp conversion, xx2-form */ 496 497 /* 64-bit special registers */ 498 {AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0}, 499 {AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0}, 500 {AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0}, 501 {AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0}, 502 {AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0}, 503 {AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0}, 504 {AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0}, 505 {AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0}, 506 507 /* 32-bit special registers (gloss over sign-extension or not?) */ 508 {AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0}, 509 {AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0}, 510 {AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0}, 511 {AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0}, 512 {AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0}, 513 {AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0}, 514 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0}, 515 {AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0}, 516 {AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0}, 517 {AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0}, 518 {AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0}, 519 {AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0}, 520 {AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0}, 521 {AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0}, 522 {AMOVFL, C_REG, C_NONE, C_LCON, C_CREG, 69, 4, 0}, 523 {AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0}, 524 {AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0}, 525 {AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0}, 526 {ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0}, 527 {ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0}, 528 {ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0}, 529 {ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0}, 530 {ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0}, 531 {ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0}, 532 {ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0}, 533 {ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0}, 534 {AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0}, 535 {AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0}, 536 {ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0}, 537 {ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0}, 538 {ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0}, 539 {ADCBF, C_ZOREG, C_REG, C_NONE, C_NONE, 43, 4, 0}, 540 {AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0}, 541 {AECIWX, 
	C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
	{AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
	{AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
	{AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
	{ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
	{ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
	{ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
	{ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
	{ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
	{ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
	{ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
	{ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
	{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
	{obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
	{obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
	{obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
	{obj.ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
	{obj.ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL

	{obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
}

var oprange [ALAST & obj.AMask][]Optab

var xcmp [C_NCLASS][C_NCLASS]bool

func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
	p := cursym.Text
	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
		return
	}

	if oprange[AANDN&obj.AMask] == nil {
		ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
	}

	c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}

	pc := int64(0)
	p.Pc = pc

	var m int
	var o *Optab
	for p = p.Link; p != nil; p = p.Link {
		p.Pc = pc
		o = c.oplook(p)
		m = int(o.size)
		if m == 0 {
			if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
				c.ctxt.Diag("zero-width instruction\n%v", p)
			}
			continue
		}

		pc += int64(m)
	}

	c.cursym.Size = pc

	/*
	 * if any procedure is large enough to
	 * generate a large SBRA branch, then
	 * generate extra passes putting branches
	 * around jmps to fix. this is rare.
	 */
	bflag := 1

	var otxt int64
	var q *obj.Prog
	for bflag != 0 {
		bflag = 0
		pc = 0
		for p = c.cursym.Text.Link; p != nil; p = p.Link {
			p.Pc = pc
			o = c.oplook(p)

			// very large conditional branches
			if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil {
				otxt = p.Pcond.Pc - pc
				if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
					q = c.newprog()
					q.Link = p.Link
					p.Link = q
					q.As = ABR
					q.To.Type = obj.TYPE_BRANCH
					q.Pcond = p.Pcond
					p.Pcond = q
					q = c.newprog()
					q.Link = p.Link
					p.Link = q
					q.As = ABR
					q.To.Type = obj.TYPE_BRANCH
					q.Pcond = q.Link.Link

					//addnop(p->link);
					//addnop(p);
					bflag = 1
				}
			}

			m = int(o.size)
			if m == 0 {
				if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
					c.ctxt.Diag("zero-width instruction\n%v", p)
				}
				continue
			}

			pc += int64(m)
		}

		c.cursym.Size = pc
	}

	pc += -pc & (funcAlign - 1)
	c.cursym.Size = pc

	/*
	 * lay out the code, emitting code and data relocations.
	 */

	c.cursym.Grow(c.cursym.Size)

	bp := c.cursym.P
	var i int32
	var out [6]uint32
	for p := c.cursym.Text.Link; p != nil; p = p.Link {
		c.pc = p.Pc
		o = c.oplook(p)
		if int(o.size) > 4*len(out) {
			log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
		}
		c.asmout(p, o, out[:])
		for i = 0; i < int32(o.size/4); i++ {
			c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
			bp = bp[4:]
		}
	}
}

func isint32(v int64) bool {
	return int64(int32(v)) == v
}

func isuint32(v uint64) bool {
	return uint64(uint32(v)) == v
}

func (c *ctxt9) aclass(a *obj.Addr) int {
	switch a.Type {
	case obj.TYPE_NONE:
		return C_NONE

	case obj.TYPE_REG:
		if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
			return C_REG
		}
		if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
			return C_FREG
		}
		if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
			return C_VREG
		}
		if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
			return C_VSREG
		}
		if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
			return C_CREG
		}
		if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
			switch a.Reg {
			case REG_LR:
				return C_LR

			case REG_XER:
				return C_XER

			case REG_CTR:
				return C_CTR
			}

			return C_SPR
		}

		if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
			return C_SPR
		}
		if a.Reg == REG_FPSCR {
			return C_FPSCR
		}
		if a.Reg == REG_MSR {
			return C_MSR
		}
		return C_GOK

	case obj.TYPE_MEM:
		switch a.Name {
		case obj.NAME_EXTERN,
			obj.NAME_STATIC:
			if a.Sym == nil {
				break
			}
			c.instoffset = a.Offset
			if a.Sym != nil { // use relocation
				if a.Sym.Type == obj.STLSBSS {
					if c.ctxt.Flag_shared {
						return C_TLS_IE
					} else {
						return C_TLS_LE
					}
				}
				return C_ADDR
			}
			return C_LEXT

		case obj.NAME_GOTREF:
			return C_GOTADDR

		case obj.NAME_AUTO:
			c.instoffset = int64(c.autosize) + a.Offset
			if c.instoffset >= -BIG && c.instoffset < BIG {
				return C_SAUTO
			}
			return C_LAUTO

		case obj.NAME_PARAM:
			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
			if c.instoffset >= -BIG && c.instoffset < BIG {
				return C_SAUTO
			}
			return C_LAUTO

		case obj.NAME_NONE:
			c.instoffset = a.Offset
			if c.instoffset == 0 {
				return C_ZOREG
			}
			if c.instoffset >= -BIG && c.instoffset < BIG {
				return C_SOREG
			}
			return C_LOREG
		}

		return C_GOK

	case obj.TYPE_TEXTSIZE:
		return C_TEXTSIZE

	case obj.TYPE_CONST,
		obj.TYPE_ADDR:
		switch a.Name {
		case obj.NAME_NONE:
			c.instoffset = a.Offset
			if a.Reg != 0 {
				if -BIG <= c.instoffset && c.instoffset <= BIG {
					return C_SACON
				}
				if isint32(c.instoffset) {
					return C_LACON
				}
				return C_DACON
			}

			goto consize

		case obj.NAME_EXTERN,
			obj.NAME_STATIC:
			s := a.Sym
			if s == nil {
				break
			}
			if s.Type == obj.SCONST {
				c.instoffset = a.Offset
				goto consize
			}

			c.instoffset = a.Offset

			/* not sure why this barfs */
			return C_LCON

		case obj.NAME_AUTO:
			c.instoffset = int64(c.autosize) + a.Offset
			if c.instoffset >= -BIG && c.instoffset < BIG {
				return C_SACON
			}
			return C_LACON

		case obj.NAME_PARAM:
			c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
			if c.instoffset >= -BIG && c.instoffset < BIG {
				return C_SACON
			}
			return C_LACON
		}

		return C_GOK

	consize:
		if c.instoffset >= 0 {
			if c.instoffset == 0 {
				return C_ZCON
			}
			if c.instoffset <= 0x7fff {
				return C_SCON
			}
			if c.instoffset <= 0xffff {
				return C_ANDCON
			}
			if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
				return C_UCON
			}
			if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
				return C_LCON
			}
			return C_DCON
		}

		if c.instoffset >= -0x8000 {
			return C_ADDCON
		}
		if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
			return C_UCON
		}
		if isint32(c.instoffset) {
			return C_LCON
		}
		return C_DCON

	case obj.TYPE_BRANCH:
		if a.Sym != nil && c.ctxt.Flag_dynlink {
			return C_LBRAPIC
		}
		return C_SBRA
	}

	return C_GOK
}

func prasm(p *obj.Prog) {
	fmt.Printf("%v\n", p)
}

func (c *ctxt9) oplook(p *obj.Prog) *Optab {
	a1 := int(p.Optab)
	if a1 != 0 {
		return &optab[a1-1]
	}
	a1 = int(p.From.Class)
	if a1 == 0 {
		a1 = c.aclass(&p.From) + 1
		p.From.Class = int8(a1)
	}

	a1--
	a3 := C_NONE + 1
	if p.From3 != nil {
		a3 = int(p.From3.Class)
		if a3 == 0 {
			a3 = c.aclass(p.From3) + 1
			p.From3.Class = int8(a3)
		}
	}

	a3--
	a4 := int(p.To.Class)
	if a4 == 0 {
		a4 = c.aclass(&p.To) + 1
		p.To.Class = int8(a4)
	}

	a4--
	a2 := C_NONE
	if p.Reg != 0 {
		if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
			a2 = C_REG
		} else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
			a2 = C_VREG
		} else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
			a2 = C_VSREG
		} else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
			a2 = C_FREG
		}
	}

	//print("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4);
	ops := oprange[p.As&obj.AMask]
	c1 := &xcmp[a1]
	c3 := &xcmp[a3]
	c4 := &xcmp[a4]
	for i := range ops {
		op := &ops[i]
		if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
			return op
		}
	}

	c.ctxt.Diag("illegal combination %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
	prasm(p)
	if ops == nil {
		ops = optab
	}
	return &ops[0]
}

func cmp(a int, b int) bool {
	if a == b {
		return true
	}
	switch a {
	case C_LCON:
		if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
			return true
		}

	case C_ADDCON:
		if b == C_ZCON || b == C_SCON {
			return true
		}

	case C_ANDCON:
		if b == C_ZCON || b == C_SCON {
			return true
		}

	case C_SPR:
		if b == C_LR || b == C_XER || b == C_CTR {
			return true
		}

	case C_UCON:
		if b == C_ZCON {
			return true
		}

	case C_SCON:
		if b == C_ZCON {
			return true
		}

	case C_LACON:
		if b == C_SACON {
			return true
		}

	case C_LBRA:
		if b == C_SBRA {
			return true
		}

	case C_LEXT:
		if b == C_SEXT {
			return true
		}

	case C_LAUTO:
		if b == C_SAUTO {
			return true
		}

	case C_REG:
		if b == C_ZCON {
			return r0iszero != 0 /*TypeKind(100016)*/
		}

	case C_LOREG:
		if b == C_ZOREG || b == C_SOREG {
			return true
		}

	case C_SOREG:
		if b == C_ZOREG {
			return true
		}

	case C_ANY:
		return true
	}

	return false
}

type ocmp
[]Optab 1025 1026 func (x ocmp) Len() int { 1027 return len(x) 1028 } 1029 1030 func (x ocmp) Swap(i, j int) { 1031 x[i], x[j] = x[j], x[i] 1032 } 1033 1034 func (x ocmp) Less(i, j int) bool { 1035 p1 := &x[i] 1036 p2 := &x[j] 1037 n := int(p1.as) - int(p2.as) 1038 if n != 0 { 1039 return n < 0 1040 } 1041 n = int(p1.a1) - int(p2.a1) 1042 if n != 0 { 1043 return n < 0 1044 } 1045 n = int(p1.a2) - int(p2.a2) 1046 if n != 0 { 1047 return n < 0 1048 } 1049 n = int(p1.a3) - int(p2.a3) 1050 if n != 0 { 1051 return n < 0 1052 } 1053 n = int(p1.a4) - int(p2.a4) 1054 if n != 0 { 1055 return n < 0 1056 } 1057 return false 1058 } 1059 func opset(a, b0 obj.As) { 1060 oprange[a&obj.AMask] = oprange[b0] 1061 } 1062 1063 func buildop(ctxt *obj.Link) { 1064 if oprange[AANDN&obj.AMask] != nil { 1065 // Already initialized; stop now. 1066 // This happens in the cmd/asm tests, 1067 // each of which re-initializes the arch. 1068 return 1069 } 1070 1071 var n int 1072 1073 for i := 0; i < C_NCLASS; i++ { 1074 for n = 0; n < C_NCLASS; n++ { 1075 if cmp(n, i) { 1076 xcmp[i][n] = true 1077 } 1078 } 1079 } 1080 for n = 0; optab[n].as != obj.AXXX; n++ { 1081 } 1082 sort.Sort(ocmp(optab[:n])) 1083 for i := 0; i < n; i++ { 1084 r := optab[i].as 1085 r0 := r & obj.AMask 1086 start := i 1087 for optab[i].as == r { 1088 i++ 1089 } 1090 oprange[r0] = optab[start:i] 1091 i-- 1092 1093 switch r { 1094 default: 1095 ctxt.Diag("unknown op in build: %v", r) 1096 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r) 1097 1098 case ADCBF: /* unary indexed: op (b+a); op (b) */ 1099 opset(ADCBI, r0) 1100 1101 opset(ADCBST, r0) 1102 opset(ADCBT, r0) 1103 opset(ADCBTST, r0) 1104 opset(ADCBZ, r0) 1105 opset(AICBI, r0) 1106 1107 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */ 1108 opset(ASTWCCC, r0) 1109 opset(ASTBCCC, r0) 1110 1111 opset(ASTDCCC, r0) 1112 1113 case AREM: /* macro */ 1114 opset(AREMCC, r0) 1115 1116 opset(AREMV, r0) 1117 opset(AREMVCC, r0) 1118 1119 case AREMU: 1120 opset(AREMU, r0) 1121 opset(AREMUCC, r0) 1122 opset(AREMUV, r0) 1123 opset(AREMUVCC, r0) 1124 1125 case AREMD: 1126 opset(AREMDCC, r0) 1127 opset(AREMDV, r0) 1128 opset(AREMDVCC, r0) 1129 1130 case AREMDU: 1131 opset(AREMDU, r0) 1132 opset(AREMDUCC, r0) 1133 opset(AREMDUV, r0) 1134 opset(AREMDUVCC, r0) 1135 1136 case ADIVW: /* op Rb[,Ra],Rd */ 1137 opset(AMULHW, r0) 1138 1139 opset(AMULHWCC, r0) 1140 opset(AMULHWU, r0) 1141 opset(AMULHWUCC, r0) 1142 opset(AMULLWCC, r0) 1143 opset(AMULLWVCC, r0) 1144 opset(AMULLWV, r0) 1145 opset(ADIVWCC, r0) 1146 opset(ADIVWV, r0) 1147 opset(ADIVWVCC, r0) 1148 opset(ADIVWU, r0) 1149 opset(ADIVWUCC, r0) 1150 opset(ADIVWUV, r0) 1151 opset(ADIVWUVCC, r0) 1152 opset(AADDCC, r0) 1153 opset(AADDCV, r0) 1154 opset(AADDCVCC, r0) 1155 opset(AADDV, r0) 1156 opset(AADDVCC, r0) 1157 opset(AADDE, r0) 1158 opset(AADDECC, r0) 1159 opset(AADDEV, r0) 1160 opset(AADDEVCC, r0) 1161 opset(ACRAND, r0) 1162 opset(ACRANDN, r0) 1163 opset(ACREQV, r0) 1164 opset(ACRNAND, r0) 1165 opset(ACRNOR, r0) 1166 opset(ACROR, r0) 1167 opset(ACRORN, r0) 1168 opset(ACRXOR, r0) 1169 opset(AMULHD, r0) 1170 opset(AMULHDCC, r0) 1171 opset(AMULHDU, r0) 1172 opset(AMULHDUCC, r0) 1173 opset(AMULLD, r0) 1174 opset(AMULLDCC, r0) 1175 opset(AMULLDVCC, r0) 1176 opset(AMULLDV, r0) 1177 opset(ADIVD, r0) 1178 opset(ADIVDCC, r0) 1179 opset(ADIVDE, r0) 1180 opset(ADIVDEU, r0) 1181 opset(ADIVDECC, r0) 1182 opset(ADIVDEUCC, r0) 1183 opset(ADIVDVCC, r0) 1184 opset(ADIVDV, r0) 1185 opset(ADIVDU, r0) 1186 opset(ADIVDUCC, r0) 1187 opset(ADIVDUVCC, r0) 
1188 opset(ADIVDUCC, r0) 1189 1190 case APOPCNTD: 1191 opset(APOPCNTW, r0) 1192 opset(APOPCNTB, r0) 1193 1194 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */ 1195 opset(AMOVH, r0) 1196 1197 opset(AMOVHZ, r0) 1198 1199 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */ 1200 opset(AMOVHU, r0) 1201 1202 opset(AMOVHZU, r0) 1203 opset(AMOVWU, r0) 1204 opset(AMOVWZU, r0) 1205 opset(AMOVDU, r0) 1206 opset(AMOVMW, r0) 1207 1208 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */ 1209 opset(ALVEBX, r0) 1210 opset(ALVEHX, r0) 1211 opset(ALVEWX, r0) 1212 opset(ALVX, r0) 1213 opset(ALVXL, r0) 1214 opset(ALVSL, r0) 1215 opset(ALVSR, r0) 1216 1217 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */ 1218 opset(ASTVEBX, r0) 1219 opset(ASTVEHX, r0) 1220 opset(ASTVEWX, r0) 1221 opset(ASTVX, r0) 1222 opset(ASTVXL, r0) 1223 1224 case AVAND: /* vand, vandc, vnand */ 1225 opset(AVAND, r0) 1226 opset(AVANDC, r0) 1227 opset(AVNAND, r0) 1228 1229 case AVOR: /* vor, vorc, vxor, vnor, veqv */ 1230 opset(AVOR, r0) 1231 opset(AVORC, r0) 1232 opset(AVXOR, r0) 1233 opset(AVNOR, r0) 1234 opset(AVEQV, r0) 1235 1236 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */ 1237 opset(AVADDUBM, r0) 1238 opset(AVADDUHM, r0) 1239 opset(AVADDUWM, r0) 1240 opset(AVADDUDM, r0) 1241 opset(AVADDUQM, r0) 1242 1243 case AVADDCU: /* vaddcuq, vaddcuw */ 1244 opset(AVADDCUQ, r0) 1245 opset(AVADDCUW, r0) 1246 1247 case AVADDUS: /* vaddubs, vadduhs, vadduws */ 1248 opset(AVADDUBS, r0) 1249 opset(AVADDUHS, r0) 1250 opset(AVADDUWS, r0) 1251 1252 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */ 1253 opset(AVADDSBS, r0) 1254 opset(AVADDSHS, r0) 1255 opset(AVADDSWS, r0) 1256 1257 case AVADDE: /* vaddeuqm, vaddecuq */ 1258 opset(AVADDEUQM, r0) 1259 opset(AVADDECUQ, r0) 1260 1261 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */ 1262 opset(AVSUBUBM, r0) 1263 opset(AVSUBUHM, r0) 1264 opset(AVSUBUWM, r0) 1265 opset(AVSUBUDM, r0) 1266 opset(AVSUBUQM, r0) 1267 1268 case AVSUBCU: /* vsubcuq, vsubcuw */ 1269 opset(AVSUBCUQ, r0) 1270 opset(AVSUBCUW, r0) 1271 1272 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */ 1273 opset(AVSUBUBS, r0) 1274 opset(AVSUBUHS, r0) 1275 opset(AVSUBUWS, r0) 1276 1277 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */ 1278 opset(AVSUBSBS, r0) 1279 opset(AVSUBSHS, r0) 1280 opset(AVSUBSWS, r0) 1281 1282 case AVSUBE: /* vsubeuqm, vsubecuq */ 1283 opset(AVSUBEUQM, r0) 1284 opset(AVSUBECUQ, r0) 1285 1286 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */ 1287 opset(AVPMSUMB, r0) 1288 opset(AVPMSUMH, r0) 1289 opset(AVPMSUMW, r0) 1290 opset(AVPMSUMD, r0) 1291 1292 case AVR: /* vrlb, vrlh, vrlw, vrld */ 1293 opset(AVRLB, r0) 1294 opset(AVRLH, r0) 1295 opset(AVRLW, r0) 1296 opset(AVRLD, r0) 1297 1298 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */ 1299 opset(AVSLB, r0) 1300 opset(AVSLH, r0) 1301 opset(AVSLW, r0) 1302 opset(AVSL, r0) 1303 opset(AVSLO, r0) 1304 opset(AVSRB, r0) 1305 opset(AVSRH, r0) 1306 opset(AVSRW, r0) 1307 opset(AVSR, r0) 1308 opset(AVSRO, r0) 1309 opset(AVSLD, r0) 1310 opset(AVSRD, r0) 1311 1312 case AVSA: /* vsrab, vsrah, vsraw, vsrad */ 1313 opset(AVSRAB, r0) 1314 opset(AVSRAH, r0) 1315 opset(AVSRAW, r0) 1316 opset(AVSRAD, r0) 1317 1318 case AVSOI: /* vsldoi */ 1319 opset(AVSLDOI, r0) 1320 1321 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */ 1322 opset(AVCLZB, r0) 1323 opset(AVCLZH, r0) 1324 opset(AVCLZW, r0) 1325 opset(AVCLZD, r0) 1326 1327 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */ 1328 
opset(AVPOPCNTB, r0) 1329 opset(AVPOPCNTH, r0) 1330 opset(AVPOPCNTW, r0) 1331 opset(AVPOPCNTD, r0) 1332 1333 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */ 1334 opset(AVCMPEQUB, r0) 1335 opset(AVCMPEQUBCC, r0) 1336 opset(AVCMPEQUH, r0) 1337 opset(AVCMPEQUHCC, r0) 1338 opset(AVCMPEQUW, r0) 1339 opset(AVCMPEQUWCC, r0) 1340 opset(AVCMPEQUD, r0) 1341 opset(AVCMPEQUDCC, r0) 1342 1343 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */ 1344 opset(AVCMPGTUB, r0) 1345 opset(AVCMPGTUBCC, r0) 1346 opset(AVCMPGTUH, r0) 1347 opset(AVCMPGTUHCC, r0) 1348 opset(AVCMPGTUW, r0) 1349 opset(AVCMPGTUWCC, r0) 1350 opset(AVCMPGTUD, r0) 1351 opset(AVCMPGTUDCC, r0) 1352 opset(AVCMPGTSB, r0) 1353 opset(AVCMPGTSBCC, r0) 1354 opset(AVCMPGTSH, r0) 1355 opset(AVCMPGTSHCC, r0) 1356 opset(AVCMPGTSW, r0) 1357 opset(AVCMPGTSWCC, r0) 1358 opset(AVCMPGTSD, r0) 1359 opset(AVCMPGTSDCC, r0) 1360 1361 case AVPERM: /* vperm */ 1362 opset(AVPERM, r0) 1363 1364 case AVSEL: /* vsel */ 1365 opset(AVSEL, r0) 1366 1367 case AVSPLT: /* vspltb, vsplth, vspltw */ 1368 opset(AVSPLTB, r0) 1369 opset(AVSPLTH, r0) 1370 opset(AVSPLTW, r0) 1371 1372 case AVSPLTI: /* vspltisb, vspltish, vspltisw */ 1373 opset(AVSPLTISB, r0) 1374 opset(AVSPLTISH, r0) 1375 opset(AVSPLTISW, r0) 1376 1377 case AVCIPH: /* vcipher, vcipherlast */ 1378 opset(AVCIPHER, r0) 1379 opset(AVCIPHERLAST, r0) 1380 1381 case AVNCIPH: /* vncipher, vncipherlast */ 1382 opset(AVNCIPHER, r0) 1383 opset(AVNCIPHERLAST, r0) 1384 1385 case AVSBOX: /* vsbox */ 1386 opset(AVSBOX, r0) 1387 1388 case AVSHASIGMA: /* vshasigmaw, vshasigmad */ 1389 opset(AVSHASIGMAW, r0) 1390 opset(AVSHASIGMAD, r0) 1391 1392 case ALXV: /* lxvd2x, lxvdsx, lxvw4x */ 1393 opset(ALXVD2X, r0) 1394 opset(ALXVDSX, r0) 1395 opset(ALXVW4X, r0) 1396 1397 case ASTXV: /* stxvd2x, stxvdsx, stxvw4x */ 1398 opset(ASTXVD2X, r0) 1399 opset(ASTXVW4X, r0) 1400 1401 case ALXS: /* lxsdx */ 1402 opset(ALXSDX, r0) 1403 1404 case ASTXS: /* stxsdx */ 1405 opset(ASTXSDX, r0) 1406 1407 case ALXSI: /* lxsiwax, lxsiwzx */ 1408 opset(ALXSIWAX, r0) 1409 opset(ALXSIWZX, r0) 1410 1411 case ASTXSI: /* stxsiwx */ 1412 opset(ASTXSIWX, r0) 1413 1414 case AMFVSR: /* mfvsrd, mfvsrwz (and extended mnemonics) */ 1415 opset(AMFVSRD, r0) 1416 opset(AMFFPRD, r0) 1417 opset(AMFVRD, r0) 1418 opset(AMFVSRWZ, r0) 1419 1420 case AMTVSR: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics) */ 1421 opset(AMTVSRD, r0) 1422 opset(AMTFPRD, r0) 1423 opset(AMTVRD, r0) 1424 opset(AMTVSRWA, r0) 1425 opset(AMTVSRWZ, r0) 1426 1427 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */ 1428 opset(AXXLANDQ, r0) 1429 opset(AXXLANDC, r0) 1430 opset(AXXLEQV, r0) 1431 opset(AXXLNAND, r0) 1432 1433 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */ 1434 opset(AXXLORC, r0) 1435 opset(AXXLNOR, r0) 1436 opset(AXXLORQ, r0) 1437 opset(AXXLXOR, r0) 1438 1439 case AXXSEL: /* xxsel */ 1440 opset(AXXSEL, r0) 1441 1442 case AXXMRG: /* xxmrghw, xxmrglw */ 1443 opset(AXXMRGHW, r0) 1444 opset(AXXMRGLW, r0) 1445 1446 case AXXSPLT: /* xxspltw */ 1447 opset(AXXSPLTW, r0) 1448 1449 case AXXPERM: /* xxpermdi */ 1450 opset(AXXPERMDI, r0) 1451 1452 case AXXSI: /* xxsldwi */ 1453 opset(AXXSLDWI, r0) 1454 1455 case AXSCV: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */ 1456 opset(AXSCVDPSP, r0) 1457 opset(AXSCVSPDP, r0) 1458 opset(AXSCVDPSPN, r0) 1459 opset(AXSCVSPDPN, r0) 1460 1461 case AXVCV: /* xvcvdpsp, xvcvspdp */ 1462 opset(AXVCVDPSP, r0) 1463 opset(AXVCVSPDP, r0) 1464 1465 case AXSCVX: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws 
*/ 1466 opset(AXSCVDPSXDS, r0) 1467 opset(AXSCVDPSXWS, r0) 1468 opset(AXSCVDPUXDS, r0) 1469 opset(AXSCVDPUXWS, r0) 1470 1471 case AXSCVXP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */ 1472 opset(AXSCVSXDDP, r0) 1473 opset(AXSCVUXDDP, r0) 1474 opset(AXSCVSXDSP, r0) 1475 opset(AXSCVUXDSP, r0) 1476 1477 case AXVCVX: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */ 1478 opset(AXVCVDPSXDS, r0) 1479 opset(AXVCVDPSXWS, r0) 1480 opset(AXVCVDPUXDS, r0) 1481 opset(AXVCVDPUXWS, r0) 1482 opset(AXVCVSPSXDS, r0) 1483 opset(AXVCVSPSXWS, r0) 1484 opset(AXVCVSPUXDS, r0) 1485 opset(AXVCVSPUXWS, r0) 1486 1487 case AXVCVXP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */ 1488 opset(AXVCVSXDDP, r0) 1489 opset(AXVCVSXWDP, r0) 1490 opset(AXVCVUXDDP, r0) 1491 opset(AXVCVUXWDP, r0) 1492 opset(AXVCVSXDSP, r0) 1493 opset(AXVCVSXWSP, r0) 1494 opset(AXVCVUXDSP, r0) 1495 opset(AXVCVUXWSP, r0) 1496 1497 case AAND: /* logical op Rb,Rs,Ra; no literal */ 1498 opset(AANDN, r0) 1499 1500 opset(AANDNCC, r0) 1501 opset(AEQV, r0) 1502 opset(AEQVCC, r0) 1503 opset(ANAND, r0) 1504 opset(ANANDCC, r0) 1505 opset(ANOR, r0) 1506 opset(ANORCC, r0) 1507 opset(AORCC, r0) 1508 opset(AORN, r0) 1509 opset(AORNCC, r0) 1510 opset(AXORCC, r0) 1511 1512 case AADDME: /* op Ra, Rd */ 1513 opset(AADDMECC, r0) 1514 1515 opset(AADDMEV, r0) 1516 opset(AADDMEVCC, r0) 1517 opset(AADDZE, r0) 1518 opset(AADDZECC, r0) 1519 opset(AADDZEV, r0) 1520 opset(AADDZEVCC, r0) 1521 opset(ASUBME, r0) 1522 opset(ASUBMECC, r0) 1523 opset(ASUBMEV, r0) 1524 opset(ASUBMEVCC, r0) 1525 opset(ASUBZE, r0) 1526 opset(ASUBZECC, r0) 1527 opset(ASUBZEV, r0) 1528 opset(ASUBZEVCC, r0) 1529 1530 case AADDC: 1531 opset(AADDCCC, r0) 1532 1533 case ABEQ: 1534 opset(ABGE, r0) 1535 opset(ABGT, r0) 1536 opset(ABLE, r0) 1537 opset(ABLT, r0) 1538 opset(ABNE, r0) 1539 opset(ABVC, r0) 1540 opset(ABVS, r0) 1541 1542 case ABR: 1543 opset(ABL, r0) 1544 1545 case ABC: 1546 opset(ABCL, r0) 1547 1548 case AEXTSB: /* op Rs, Ra */ 1549 opset(AEXTSBCC, r0) 1550 1551 opset(AEXTSH, r0) 1552 opset(AEXTSHCC, r0) 1553 opset(ACNTLZW, r0) 1554 opset(ACNTLZWCC, r0) 1555 opset(ACNTLZD, r0) 1556 opset(AEXTSW, r0) 1557 opset(AEXTSWCC, r0) 1558 opset(ACNTLZDCC, r0) 1559 1560 case AFABS: /* fop [s,]d */ 1561 opset(AFABSCC, r0) 1562 1563 opset(AFNABS, r0) 1564 opset(AFNABSCC, r0) 1565 opset(AFNEG, r0) 1566 opset(AFNEGCC, r0) 1567 opset(AFRSP, r0) 1568 opset(AFRSPCC, r0) 1569 opset(AFCTIW, r0) 1570 opset(AFCTIWCC, r0) 1571 opset(AFCTIWZ, r0) 1572 opset(AFCTIWZCC, r0) 1573 opset(AFCTID, r0) 1574 opset(AFCTIDCC, r0) 1575 opset(AFCTIDZ, r0) 1576 opset(AFCTIDZCC, r0) 1577 opset(AFCFID, r0) 1578 opset(AFCFIDCC, r0) 1579 opset(AFCFIDU, r0) 1580 opset(AFCFIDUCC, r0) 1581 opset(AFRES, r0) 1582 opset(AFRESCC, r0) 1583 opset(AFRIM, r0) 1584 opset(AFRIMCC, r0) 1585 opset(AFRIP, r0) 1586 opset(AFRIPCC, r0) 1587 opset(AFRIZ, r0) 1588 opset(AFRIZCC, r0) 1589 opset(AFRSQRTE, r0) 1590 opset(AFRSQRTECC, r0) 1591 opset(AFSQRT, r0) 1592 opset(AFSQRTCC, r0) 1593 opset(AFSQRTS, r0) 1594 opset(AFSQRTSCC, r0) 1595 1596 case AFADD: 1597 opset(AFADDS, r0) 1598 opset(AFADDCC, r0) 1599 opset(AFADDSCC, r0) 1600 opset(AFDIV, r0) 1601 opset(AFDIVS, r0) 1602 opset(AFDIVCC, r0) 1603 opset(AFDIVSCC, r0) 1604 opset(AFSUB, r0) 1605 opset(AFSUBS, r0) 1606 opset(AFSUBCC, r0) 1607 opset(AFSUBSCC, r0) 1608 1609 case AFMADD: 1610 opset(AFMADDCC, r0) 1611 opset(AFMADDS, r0) 1612 opset(AFMADDSCC, r0) 1613 opset(AFMSUB, r0) 1614 opset(AFMSUBCC, r0) 1615 
opset(AFMSUBS, r0) 1616 opset(AFMSUBSCC, r0) 1617 opset(AFNMADD, r0) 1618 opset(AFNMADDCC, r0) 1619 opset(AFNMADDS, r0) 1620 opset(AFNMADDSCC, r0) 1621 opset(AFNMSUB, r0) 1622 opset(AFNMSUBCC, r0) 1623 opset(AFNMSUBS, r0) 1624 opset(AFNMSUBSCC, r0) 1625 opset(AFSEL, r0) 1626 opset(AFSELCC, r0) 1627 1628 case AFMUL: 1629 opset(AFMULS, r0) 1630 opset(AFMULCC, r0) 1631 opset(AFMULSCC, r0) 1632 1633 case AFCMPO: 1634 opset(AFCMPU, r0) 1635 1636 case AISEL: 1637 opset(AISEL, r0) 1638 1639 case AMTFSB0: 1640 opset(AMTFSB0CC, r0) 1641 opset(AMTFSB1, r0) 1642 opset(AMTFSB1CC, r0) 1643 1644 case ANEG: /* op [Ra,] Rd */ 1645 opset(ANEGCC, r0) 1646 1647 opset(ANEGV, r0) 1648 opset(ANEGVCC, r0) 1649 1650 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,Ra; oris/xoris $uimm,Rs,Ra */ 1651 opset(AXOR, r0) 1652 1653 case ASLW: 1654 opset(ASLWCC, r0) 1655 opset(ASRW, r0) 1656 opset(ASRWCC, r0) 1657 1658 case ASLD: 1659 opset(ASLDCC, r0) 1660 opset(ASRD, r0) 1661 opset(ASRDCC, r0) 1662 1663 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ 1664 opset(ASRAWCC, r0) 1665 1666 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ 1667 opset(ASRADCC, r0) 1668 1669 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */ 1670 opset(ASUB, r0) 1671 1672 opset(ASUBCC, r0) 1673 opset(ASUBV, r0) 1674 opset(ASUBVCC, r0) 1675 opset(ASUBCCC, r0) 1676 opset(ASUBCV, r0) 1677 opset(ASUBCVCC, r0) 1678 opset(ASUBE, r0) 1679 opset(ASUBECC, r0) 1680 opset(ASUBEV, r0) 1681 opset(ASUBEVCC, r0) 1682 1683 case ASYNC: 1684 opset(AISYNC, r0) 1685 opset(ALWSYNC, r0) 1686 opset(APTESYNC, r0) 1687 opset(ATLBSYNC, r0) 1688 1689 case ARLWMI: 1690 opset(ARLWMICC, r0) 1691 opset(ARLWNM, r0) 1692 opset(ARLWNMCC, r0) 1693 1694 case ARLDMI: 1695 opset(ARLDMICC, r0) 1696 opset(ARLDIMI, r0) 1697 opset(ARLDIMICC, r0) 1698 1699 case ARLDC: 1700 opset(ARLDCCC, r0) 1701 1702 case ARLDCL: 1703 opset(ARLDCR, r0) 1704 opset(ARLDCLCC, r0) 1705 opset(ARLDCRCC, r0) 1706 1707 case ARLDICL: 1708 opset(ARLDICLCC, r0) 1709 opset(ARLDICR, r0) 1710 opset(ARLDICRCC, r0) 1711 1712 case AFMOVD: 1713 opset(AFMOVDCC, r0) 1714 opset(AFMOVDU, r0) 1715 opset(AFMOVS, r0) 1716 opset(AFMOVSU, r0) 1717 1718 case AECIWX: 1719 opset(ALBAR, r0) 1720 opset(ALWAR, r0) 1721 opset(ALDAR, r0) 1722 1723 case ASYSCALL: /* just the op; flow of control */ 1724 opset(ARFI, r0) 1725 1726 opset(ARFCI, r0) 1727 opset(ARFID, r0) 1728 opset(AHRFID, r0) 1729 1730 case AMOVHBR: 1731 opset(AMOVWBR, r0) 1732 opset(AMOVDBR, r0) 1733 1734 case ASLBMFEE: 1735 opset(ASLBMFEV, r0) 1736 1737 case ATW: 1738 opset(ATD, r0) 1739 1740 case ATLBIE: 1741 opset(ASLBIE, r0) 1742 opset(ATLBIEL, r0) 1743 1744 case AEIEIO: 1745 opset(ASLBIA, r0) 1746 1747 case ACMP: 1748 opset(ACMPW, r0) 1749 1750 case ACMPU: 1751 opset(ACMPWU, r0) 1752 1753 case ACMPB: 1754 opset(ACMPB, r0) 1755 1756 case AFTDIV: 1757 opset(AFTDIV, r0) 1758 1759 case AFTSQRT: 1760 opset(AFTSQRT, r0) 1761 1762 case AADD, 1763 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra; andis. 
$uimm,Rs,Ra */ 1764 AFMOVSX, 1765 AFMOVSZ, 1766 ALSW, 1767 AMOVW, 1768 /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */ 1769 AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */ 1770 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */ 1771 AMOVB, /* macro: move byte with sign extension */ 1772 AMOVBU, /* macro: move byte with sign extension & update */ 1773 AMOVFL, 1774 AMULLW, 1775 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */ 1776 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */ 1777 ASTSW, 1778 ASLBMTE, 1779 AWORD, 1780 ADWORD, 1781 obj.ANOP, 1782 obj.ATEXT, 1783 obj.AUNDEF, 1784 obj.AFUNCDATA, 1785 obj.APCDATA, 1786 obj.ADUFFZERO, 1787 obj.ADUFFCOPY: 1788 break 1789 } 1790 } 1791 } 1792 1793 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 { 1794 return o<<26 | xo<<1 | oe<<11 1795 } 1796 1797 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 { 1798 return o<<26 | xo<<2 | oe<<11 1799 } 1800 1801 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 { 1802 return o<<26 | xo<<3 | oe<<11 1803 } 1804 1805 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 { 1806 return o<<26 | xo<<4 | oe<<11 1807 } 1808 1809 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 1810 return o<<26 | xo | oe<<11 | rc&1 1811 } 1812 1813 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 1814 return o<<26 | xo | oe<<11 | (rc&1)<<10 1815 } 1816 1817 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { 1818 return o<<26 | xo<<1 | oe<<10 | rc&1 1819 } 1820 1821 func OPCC(o uint32, xo uint32, rc uint32) uint32 { 1822 return OPVCC(o, xo, 0, rc) 1823 } 1824 1825 func OP(o uint32, xo uint32) uint32 { 1826 return OPVCC(o, xo, 0, 0) 1827 } 1828 1829 /* the order is dest, a/s, b/imm for both arithmetic and logical operations */ 1830 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 { 1831 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 1832 } 1833 1834 /* VX-form 2-register operands, r/none/r */ 1835 func AOP_RR(op uint32, d uint32, a uint32) uint32 { 1836 return op | (d&31)<<21 | (a&31)<<11 1837 } 1838 1839 /* VA-form 4-register operands */ 1840 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 1841 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6 1842 } 1843 1844 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 { 1845 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF 1846 } 1847 1848 /* VX-form 2-register + UIM operands */ 1849 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 { 1850 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11 1851 } 1852 1853 /* VX-form 2-register + ST + SIX operands */ 1854 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 { 1855 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11 1856 } 1857 1858 /* VA-form 3-register + SHB operands */ 1859 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 { 1860 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6 1861 } 1862 1863 /* VX-form 1-register + SIM operands */ 1864 func AOP_IR(op uint32, d uint32, simm uint32) uint32 { 1865 return op | (d&31)<<21 | (simm&31)<<16 1866 } 1867 1868 /* XX1-form 3-register operands, 1 VSR operand */ 1869 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 { 1870 /* For the XX-form encodings, we need the VSX register number to be exactly */ 1871 /* between 0-63, so we can properly set the rightmost bits. 
*/ 1872 r := d - REG_VS0 1873 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5 1874 } 1875 1876 /* XX2-form 3-register operands, 2 VSR operands */ 1877 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 { 1878 xt := d - REG_VS0 1879 xb := b - REG_VS0 1880 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5 1881 } 1882 1883 /* XX3-form 3 VSR operands */ 1884 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 { 1885 xt := d - REG_VS0 1886 xa := a - REG_VS0 1887 xb := b - REG_VS0 1888 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 1889 } 1890 1891 /* XX3-form 3 VSR operands + immediate */ 1892 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 1893 xt := d - REG_VS0 1894 xa := a - REG_VS0 1895 xb := b - REG_VS0 1896 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 1897 } 1898 1899 /* XX4-form, 4 VSR operands */ 1900 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { 1901 xt := d - REG_VS0 1902 xa := a - REG_VS0 1903 xb := b - REG_VS0 1904 xc := c - REG_VS0 1905 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 1906 } 1907 1908 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 { 1909 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11 1910 } 1911 1912 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 { 1913 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF 1914 } 1915 1916 func OP_BR(op uint32, li uint32, aa uint32) uint32 { 1917 return op | li&0x03FFFFFC | aa<<1 1918 } 1919 1920 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 { 1921 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1 1922 } 1923 1924 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 { 1925 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 1926 } 1927 1928 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 { 1929 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1 1930 } 1931 1932 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 { 1933 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5 1934 } 1935 1936 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 { 1937 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6 1938 } 1939 1940 const ( 1941 /* each rhs is OPVCC(_, _, _, _) */ 1942 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0 1943 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0 1944 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0 1945 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0 1946 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0 1947 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0 1948 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0 1949 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0 1950 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0 1951 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0 1952 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0 1953 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0 1954 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0 1955 OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0 1956 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0 1957 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0 1958 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0 1959 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0 1960 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0 1961 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0 1962 OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0 1963 OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0 1964 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0 1965 
OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0 1966 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0 1967 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0 1968 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0 1969 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0 1970 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0 1971 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0 1972 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0 1973 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0 1974 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0 1975 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0 1976 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0 1977 ) 1978 1979 func oclass(a *obj.Addr) int { 1980 return int(a.Class) - 1 1981 } 1982 1983 const ( 1984 D_FORM = iota 1985 DS_FORM 1986 ) 1987 1988 // opform returns the form (D_FORM or DS_FORM) of an instruction. Used to decide on 1989 // which relocation to use with a load or store and only supports the needed 1990 // instructions. 1991 func (c *ctxt9) opform(insn uint32) int { 1992 switch insn { 1993 default: 1994 c.ctxt.Diag("bad insn in loadform: %x", insn) 1995 case OPVCC(58, 0, 0, 0), // ld 1996 OPVCC(58, 0, 0, 0) | 1<<1, // lwa 1997 OPVCC(62, 0, 0, 0): // std 1998 return DS_FORM 1999 case OP_ADDI, // add 2000 OPVCC(32, 0, 0, 0), // lwz 2001 OPVCC(42, 0, 0, 0), // lha 2002 OPVCC(40, 0, 0, 0), // lhz 2003 OPVCC(34, 0, 0, 0), // lbz 2004 OPVCC(50, 0, 0, 0), // lfd 2005 OPVCC(48, 0, 0, 0), // lfs 2006 OPVCC(36, 0, 0, 0), // stw 2007 OPVCC(44, 0, 0, 0), // sth 2008 OPVCC(38, 0, 0, 0), // stb 2009 OPVCC(54, 0, 0, 0), // stfd 2010 OPVCC(52, 0, 0, 0): // stfs 2011 return D_FORM 2012 } 2013 return 0 2014 } 2015 2016 // Encode instructions and create relocation for accessing s+d according to the 2017 // instruction op with source or destination (as appropriate) register reg. 2018 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) { 2019 var base uint32 2020 form := c.opform(op) 2021 if c.ctxt.Flag_shared { 2022 base = REG_R2 2023 } else { 2024 base = REG_R0 2025 } 2026 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) 2027 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) 2028 rel := obj.Addrel(c.cursym) 2029 rel.Off = int32(c.pc) 2030 rel.Siz = 8 2031 rel.Sym = s 2032 rel.Add = d 2033 if c.ctxt.Flag_shared { 2034 switch form { 2035 case D_FORM: 2036 rel.Type = obj.R_ADDRPOWER_TOCREL 2037 case DS_FORM: 2038 rel.Type = obj.R_ADDRPOWER_TOCREL_DS 2039 } 2040 2041 } else { 2042 switch form { 2043 case D_FORM: 2044 rel.Type = obj.R_ADDRPOWER 2045 case DS_FORM: 2046 rel.Type = obj.R_ADDRPOWER_DS 2047 } 2048 } 2049 return 2050 } 2051 2052 /* 2053 * 32-bit masks 2054 */ 2055 func getmask(m []byte, v uint32) bool { 2056 m[1] = 0 2057 m[0] = m[1] 2058 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */ 2059 if getmask(m, ^v) { 2060 i := int(m[0]) 2061 m[0] = m[1] + 1 2062 m[1] = byte(i - 1) 2063 return true 2064 } 2065 2066 return false 2067 } 2068 2069 for i := 0; i < 32; i++ { 2070 if v&(1<<uint(31-i)) != 0 { 2071 m[0] = byte(i) 2072 for { 2073 m[1] = byte(i) 2074 i++ 2075 if i >= 32 || v&(1<<uint(31-i)) == 0 { 2076 break 2077 } 2078 } 2079 2080 for ; i < 32; i++ { 2081 if v&(1<<uint(31-i)) != 0 { 2082 return false 2083 } 2084 } 2085 return true 2086 } 2087 } 2088 2089 return false 2090 } 2091 2092 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) { 2093 if !getmask(m, v) { 2094 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p) 2095 } 2096 } 2097 2098 /* 2099 * 64-bit masks (rldic etc) 2100 */ 2101 func getmask64(m []byte, v uint64) bool { 2102 m[1] = 0 2103 m[0] = m[1] 2104 for i := 0; i < 64; i++ { 2105 if v&(uint64(1)<<uint(63-i)) != 0 { 2106 m[0] = byte(i) 
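// i now indexes (from the most significant bit) the first set bit, which
// becomes the mask start MB; the loop below walks the run of consecutive
// ones to record the mask end ME, and the trailing scan rejects a second
// run, since these rotate-mask encodings only describe contiguous masks.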
2107 for { 2108 m[1] = byte(i) 2109 i++ 2110 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 { 2111 break 2112 } 2113 } 2114 2115 for ; i < 64; i++ { 2116 if v&(uint64(1)<<uint(63-i)) != 0 { 2117 return false 2118 } 2119 } 2120 return true 2121 } 2122 } 2123 2124 return false 2125 } 2126 2127 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) { 2128 if !getmask64(m, v) { 2129 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p) 2130 } 2131 } 2132 2133 func loadu32(r int, d int64) uint32 { 2134 v := int32(d >> 16) 2135 if isuint32(uint64(d)) { 2136 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v)) 2137 } 2138 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v)) 2139 } 2140 2141 func high16adjusted(d int32) uint16 { 2142 if d&0x8000 != 0 { 2143 return uint16((d >> 16) + 1) 2144 } 2145 return uint16(d >> 16) 2146 } 2147 2148 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) { 2149 o1 := uint32(0) 2150 o2 := uint32(0) 2151 o3 := uint32(0) 2152 o4 := uint32(0) 2153 o5 := uint32(0) 2154 2155 //print("%v => case %d\n", p, o->type); 2156 switch o.type_ { 2157 default: 2158 c.ctxt.Diag("unknown type %d", o.type_) 2159 prasm(p) 2160 2161 case 0: /* pseudo ops */ 2162 break 2163 2164 case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */ 2165 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST { 2166 v := c.regoff(&p.From) 2167 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 { 2168 //nerrors--; 2169 c.ctxt.Diag("literal operation on R0\n%v", p) 2170 } 2171 2172 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v)) 2173 break 2174 } 2175 2176 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg)) 2177 2178 case 2: /* int/cr/fp op Rb,[Ra],Rd */ 2179 r := int(p.Reg) 2180 2181 if r == 0 { 2182 r = int(p.To.Reg) 2183 } 2184 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2185 2186 case 3: /* mov $soreg/addcon/ucon, r ==> addis/addi $i,reg',r */ 2187 d := c.vregoff(&p.From) 2188 2189 v := int32(d) 2190 r := int(p.From.Reg) 2191 if r == 0 { 2192 r = int(o.param) 2193 } 2194 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) { 2195 c.ctxt.Diag("literal operation on R0\n%v", p) 2196 } 2197 a := OP_ADDI 2198 if o.a1 == C_UCON { 2199 if d&0xffff != 0 { 2200 log.Fatalf("invalid handling of %v", p) 2201 } 2202 v >>= 16 2203 if r == REGZERO && isuint32(uint64(d)) { 2204 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v)) 2205 break 2206 } 2207 2208 a = OP_ADDIS 2209 } else { 2210 if int64(int16(d)) != d { 2211 log.Fatalf("invalid handling of %v", p) 2212 } 2213 } 2214 2215 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v)) 2216 2217 case 4: /* add/mul $scon,[r1],r2 */ 2218 v := c.regoff(&p.From) 2219 2220 r := int(p.Reg) 2221 if r == 0 { 2222 r = int(p.To.Reg) 2223 } 2224 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 { 2225 c.ctxt.Diag("literal operation on R0\n%v", p) 2226 } 2227 if int32(int16(v)) != v { 2228 log.Fatalf("mishandled instruction %v", p) 2229 } 2230 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2231 2232 case 5: /* syscall */ 2233 o1 = c.oprrr(p.As) 2234 2235 case 6: /* logical op Rb,[Rs,]Ra; no literal */ 2236 r := int(p.Reg) 2237 2238 if r == 0 { 2239 r = int(p.To.Reg) 2240 } 2241 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2242 2243 case 7: /* mov r, soreg ==> stw o(r) */ 2244 r := int(p.To.Reg) 2245 2246 if r == 0 { 2247 r = int(o.param) 2248 } 2249 v := c.regoff(&p.To) 2250 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 { 2251 
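// Indexed (register+register) store: the X-form encoding has no
// displacement field, so a non-zero constant offset cannot be folded in
// and is rejected below.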
if v != 0 { 2252 c.ctxt.Diag("illegal indexed instruction\n%v", p) 2253 } 2254 if c.ctxt.Flag_shared && r == REG_R13 { 2255 rel := obj.Addrel(c.cursym) 2256 rel.Off = int32(c.pc) 2257 rel.Siz = 4 2258 // This (and the matching part in the load case 2259 // below) are the only places in the ppc64 toolchain 2260 // that knows the name of the tls variable. Possibly 2261 // we could add some assembly syntax so that the name 2262 // of the variable does not have to be assumed. 2263 rel.Sym = c.ctxt.Lookup("runtime.tls_g", 0) 2264 rel.Type = obj.R_POWER_TLS 2265 } 2266 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r)) 2267 } else { 2268 if int32(int16(v)) != v { 2269 log.Fatalf("mishandled instruction %v", p) 2270 } 2271 o1 = AOP_IRR(c.opstore(p.As), uint32(p.From.Reg), uint32(r), uint32(v)) 2272 } 2273 2274 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */ 2275 r := int(p.From.Reg) 2276 2277 if r == 0 { 2278 r = int(o.param) 2279 } 2280 v := c.regoff(&p.From) 2281 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 { 2282 if v != 0 { 2283 c.ctxt.Diag("illegal indexed instruction\n%v", p) 2284 } 2285 if c.ctxt.Flag_shared && r == REG_R13 { 2286 rel := obj.Addrel(c.cursym) 2287 rel.Off = int32(c.pc) 2288 rel.Siz = 4 2289 rel.Sym = c.ctxt.Lookup("runtime.tls_g", 0) 2290 rel.Type = obj.R_POWER_TLS 2291 } 2292 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r)) 2293 } else { 2294 if int32(int16(v)) != v { 2295 log.Fatalf("mishandled instruction %v", p) 2296 } 2297 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2298 } 2299 2300 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */ 2301 r := int(p.From.Reg) 2302 2303 if r == 0 { 2304 r = int(o.param) 2305 } 2306 v := c.regoff(&p.From) 2307 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 { 2308 if v != 0 { 2309 c.ctxt.Diag("illegal indexed instruction\n%v", p) 2310 } 2311 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r)) 2312 } else { 2313 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2314 } 2315 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 2316 2317 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */ 2318 r := int(p.Reg) 2319 2320 if r == 0 { 2321 r = int(p.To.Reg) 2322 } 2323 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r)) 2324 2325 case 11: /* br/bl lbra */ 2326 v := int32(0) 2327 2328 if p.Pcond != nil { 2329 v = int32(p.Pcond.Pc - p.Pc) 2330 if v&03 != 0 { 2331 c.ctxt.Diag("odd branch target address\n%v", p) 2332 v &^= 03 2333 } 2334 2335 if v < -(1<<25) || v >= 1<<24 { 2336 c.ctxt.Diag("branch too far\n%v", p) 2337 } 2338 } 2339 2340 o1 = OP_BR(c.opirr(p.As), uint32(v), 0) 2341 if p.To.Sym != nil { 2342 rel := obj.Addrel(c.cursym) 2343 rel.Off = int32(c.pc) 2344 rel.Siz = 4 2345 rel.Sym = p.To.Sym 2346 v += int32(p.To.Offset) 2347 if v&03 != 0 { 2348 c.ctxt.Diag("odd branch target address\n%v", p) 2349 v &^= 03 2350 } 2351 2352 rel.Add = int64(v) 2353 rel.Type = obj.R_CALLPOWER 2354 } 2355 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking 2356 2357 case 12: /* movb r,r (extsb); movw r,r (extsw) */ 2358 if p.To.Reg == REGZERO && p.From.Type == obj.TYPE_CONST { 2359 v := c.regoff(&p.From) 2360 if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 { 2361 c.ctxt.Diag("literal operation on R0\n%v", p) 2362 } 2363 2364 o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v)) 2365 break 2366 } 2367 2368 if p.As == AMOVW { 2369 o1 = LOP_RRR(OP_EXTSW, 
uint32(p.To.Reg), uint32(p.From.Reg), 0) 2370 } else { 2371 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2372 } 2373 2374 case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */ 2375 if p.As == AMOVBZ { 2376 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31) 2377 } else if p.As == AMOVH { 2378 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0) 2379 } else if p.As == AMOVHZ { 2380 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31) 2381 } else if p.As == AMOVWZ { 2382 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */ 2383 } else { 2384 c.ctxt.Diag("internal: bad mov[bhw]z\n%v", p) 2385 } 2386 2387 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */ 2388 r := int(p.Reg) 2389 2390 if r == 0 { 2391 r = int(p.To.Reg) 2392 } 2393 d := c.vregoff(p.From3) 2394 var a int 2395 switch p.As { 2396 2397 // These opcodes expect a mask operand that has to be converted into the 2398 // appropriate operand. The way these were defined, not all valid masks are possible. 2399 // Left here for compatibility in case they were used or generated. 2400 case ARLDCL, ARLDCLCC: 2401 var mask [2]uint8 2402 c.maskgen64(p, mask[:], uint64(d)) 2403 2404 a = int(mask[0]) /* MB */ 2405 if mask[1] != 63 { 2406 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p) 2407 } 2408 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2409 o1 |= (uint32(a) & 31) << 6 2410 if a&0x20 != 0 { 2411 o1 |= 1 << 5 /* mb[5] is top bit */ 2412 } 2413 2414 case ARLDCR, ARLDCRCC: 2415 var mask [2]uint8 2416 c.maskgen64(p, mask[:], uint64(d)) 2417 2418 a = int(mask[1]) /* ME */ 2419 if mask[0] != 0 { 2420 c.ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p) 2421 } 2422 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) 2423 o1 |= (uint32(a) & 31) << 6 2424 if a&0x20 != 0 { 2425 o1 |= 1 << 5 /* mb[5] is top bit */ 2426 } 2427 2428 // These opcodes use a shift count like the ppc64 asm, no mask conversion done 2429 case ARLDICR, ARLDICRCC: 2430 me := int(d) 2431 sh := c.regoff(&p.From) 2432 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me)) 2433 2434 case ARLDICL, ARLDICLCC: 2435 mb := int(d) 2436 sh := c.regoff(&p.From) 2437 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb)) 2438 2439 default: 2440 c.ctxt.Diag("unexpected op in rldc case\n%v", p) 2441 a = 0 2442 } 2443 2444 case 17, /* bc bo,bi,lbra (same for now) */ 2445 16: /* bc bo,bi,sbra */ 2446 a := 0 2447 2448 r := int(p.Reg) 2449 2450 if p.From.Type == obj.TYPE_CONST { 2451 a = int(c.regoff(&p.From)) 2452 } else if p.From.Type == obj.TYPE_REG { 2453 if r != 0 { 2454 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r) 2455 } 2456 // BI values for the CR 2457 switch p.From.Reg { 2458 case REG_CR0: 2459 r = BI_CR0 2460 case REG_CR1: 2461 r = BI_CR1 2462 case REG_CR2: 2463 r = BI_CR2 2464 case REG_CR3: 2465 r = BI_CR3 2466 case REG_CR4: 2467 r = BI_CR4 2468 case REG_CR5: 2469 r = BI_CR5 2470 case REG_CR6: 2471 r = BI_CR6 2472 case REG_CR7: 2473 r = BI_CR7 2474 default: 2475 c.ctxt.Diag("unrecognized register: expecting CR\n") 2476 } 2477 } 2478 v := int32(0) 2479 if p.Pcond != nil { 2480 v = int32(p.Pcond.Pc - p.Pc) 2481 } 2482 if v&03 != 0 { 2483 c.ctxt.Diag("odd branch target address\n%v", p) 2484 v &^= 03 2485 } 2486 2487 if v < -(1<<16) || v >= 1<<15 { 2488 
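// The BD field of a conditional branch is a signed, word-aligned 16-bit
// displacement, so the reachable range is only on the order of ±32KB.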
c.ctxt.Diag("branch too far\n%v", p) 2489 } 2490 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0) 2491 2492 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */ 2493 var v int32 2494 if p.As == ABC || p.As == ABCL { 2495 v = c.regoff(&p.To) & 31 2496 } else { 2497 v = 20 /* unconditional */ 2498 } 2499 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11 2500 o2 = OPVCC(19, 16, 0, 0) 2501 if p.As == ABL || p.As == ABCL { 2502 o2 |= 1 2503 } 2504 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index)) 2505 2506 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */ 2507 var v int32 2508 if p.As == ABC || p.As == ABCL { 2509 v = c.regoff(&p.From) & 31 2510 } else { 2511 v = 20 /* unconditional */ 2512 } 2513 r := int(p.Reg) 2514 if r == 0 { 2515 r = 0 2516 } 2517 switch oclass(&p.To) { 2518 case C_CTR: 2519 o1 = OPVCC(19, 528, 0, 0) 2520 2521 case C_LR: 2522 o1 = OPVCC(19, 16, 0, 0) 2523 2524 default: 2525 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p) 2526 v = 0 2527 } 2528 2529 if p.As == ABL || p.As == ABCL { 2530 o1 |= 1 2531 } 2532 o1 = OP_BCR(o1, uint32(v), uint32(r)) 2533 2534 case 19: /* mov $lcon,r ==> cau+or */ 2535 d := c.vregoff(&p.From) 2536 2537 if p.From.Sym == nil { 2538 o1 = loadu32(int(p.To.Reg), d) 2539 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d))) 2540 } else { 2541 o1, o2 = c.symbolAccess(p.From.Sym, d, p.To.Reg, OP_ADDI) 2542 } 2543 2544 //if(dlm) reloc(&p->from, p->pc, 0); 2545 2546 case 20: /* add $ucon,,r */ 2547 v := c.regoff(&p.From) 2548 2549 r := int(p.Reg) 2550 if r == 0 { 2551 r = int(p.To.Reg) 2552 } 2553 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) { 2554 c.ctxt.Diag("literal operation on R0\n%v", p) 2555 } 2556 o1 = AOP_IRR(c.opirr(-p.As), uint32(p.To.Reg), uint32(r), uint32(v)>>16) 2557 2558 case 22: /* add $lcon,r1,r2 ==> cau+or+add */ /* could do add/sub more efficiently */ 2559 if p.To.Reg == REGTMP || p.Reg == REGTMP { 2560 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2561 } 2562 d := c.vregoff(&p.From) 2563 o1 = loadu32(REGTMP, d) 2564 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d))) 2565 r := int(p.Reg) 2566 if r == 0 { 2567 r = int(p.To.Reg) 2568 } 2569 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2570 if p.From.Sym != nil { 2571 c.ctxt.Diag("%v is not supported", p) 2572 } 2573 2574 //if(dlm) reloc(&p->from, p->pc, 0); 2575 2576 case 23: /* and $lcon,r1,r2 ==> cau+or+and */ /* masks could be done using rlnm etc. */ 2577 if p.To.Reg == REGTMP || p.Reg == REGTMP { 2578 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2579 } 2580 d := c.vregoff(&p.From) 2581 o1 = loadu32(REGTMP, d) 2582 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d))) 2583 r := int(p.Reg) 2584 if r == 0 { 2585 r = int(p.To.Reg) 2586 } 2587 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) 2588 if p.From.Sym != nil { 2589 c.ctxt.Diag("%v is not supported", p) 2590 } 2591 2592 //if(dlm) reloc(&p->from, p->pc, 0); 2593 2594 /*24*/ 2595 case 25: 2596 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] 
-> rldicl */ 2597 v := c.regoff(&p.From) 2598 2599 if v < 0 { 2600 v = 0 2601 } else if v > 63 { 2602 v = 63 2603 } 2604 r := int(p.Reg) 2605 if r == 0 { 2606 r = int(p.To.Reg) 2607 } 2608 var a int 2609 switch p.As { 2610 case ASLD, ASLDCC: 2611 a = int(63 - v) 2612 o1 = OP_RLDICR 2613 2614 case ASRD, ASRDCC: 2615 a = int(v) 2616 v = 64 - v 2617 o1 = OP_RLDICL 2618 2619 default: 2620 c.ctxt.Diag("unexpected op in sldi case\n%v", p) 2621 a = 0 2622 o1 = 0 2623 } 2624 2625 o1 = AOP_RRR(o1, uint32(r), uint32(p.To.Reg), (uint32(v) & 0x1F)) 2626 o1 |= (uint32(a) & 31) << 6 2627 if v&0x20 != 0 { 2628 o1 |= 1 << 1 2629 } 2630 if a&0x20 != 0 { 2631 o1 |= 1 << 5 /* mb[5] is top bit */ 2632 } 2633 if p.As == ASLDCC || p.As == ASRDCC { 2634 o1 |= 1 /* Rc */ 2635 } 2636 2637 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */ 2638 if p.To.Reg == REGTMP { 2639 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2640 } 2641 v := c.regoff(&p.From) 2642 r := int(p.From.Reg) 2643 if r == 0 { 2644 r = int(o.param) 2645 } 2646 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) 2647 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v)) 2648 2649 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */ 2650 v := c.regoff(p.From3) 2651 2652 r := int(p.From.Reg) 2653 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2654 2655 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */ 2656 if p.To.Reg == REGTMP || p.From.Reg == REGTMP { 2657 c.ctxt.Diag("can't synthesize large constant\n%v", p) 2658 } 2659 v := c.regoff(p.From3) 2660 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16) 2661 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v)) 2662 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP) 2663 if p.From.Sym != nil { 2664 c.ctxt.Diag("%v is not supported", p) 2665 } 2666 2667 //if(dlm) reloc(&p->from3, p->pc, 0); 2668 2669 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */ 2670 v := c.regoff(&p.From) 2671 2672 d := c.vregoff(p.From3) 2673 var mask [2]uint8 2674 c.maskgen64(p, mask[:], uint64(d)) 2675 var a int 2676 switch p.As { 2677 case ARLDC, ARLDCCC: 2678 a = int(mask[0]) /* MB */ 2679 if int32(mask[1]) != (63 - v) { 2680 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p) 2681 } 2682 2683 case ARLDCL, ARLDCLCC: 2684 a = int(mask[0]) /* MB */ 2685 if mask[1] != 63 { 2686 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p) 2687 } 2688 2689 case ARLDCR, ARLDCRCC: 2690 a = int(mask[1]) /* ME */ 2691 if mask[0] != 0 { 2692 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p) 2693 } 2694 2695 default: 2696 c.ctxt.Diag("unexpected op in rldic case\n%v", p) 2697 a = 0 2698 } 2699 2700 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F)) 2701 o1 |= (uint32(a) & 31) << 6 2702 if v&0x20 != 0 { 2703 o1 |= 1 << 1 2704 } 2705 if a&0x20 != 0 { 2706 o1 |= 1 << 5 /* mb[5] is top bit */ 2707 } 2708 2709 case 30: /* rldimi $sh,s,$mask,a */ 2710 v := c.regoff(&p.From) 2711 2712 d := c.vregoff(p.From3) 2713 2714 // Original opcodes had mask operands which had to be converted to a shift count as expected by 2715 // the ppc64 asm. 
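// Rough example (hypothetical operands): RLDMI with shift 8 and mask
// 0x00FFFFFFFFFFFF00 has MB=8, ME=55; ME must equal 63-shift, so only MB
// is encoded. RLDIMI/RLDIMICC take the ppc64-native operand directly,
// with no mask conversion.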
2716 switch p.As { 2717 case ARLDMI, ARLDMICC: 2718 var mask [2]uint8 2719 c.maskgen64(p, mask[:], uint64(d)) 2720 if int32(mask[1]) != (63 - v) { 2721 c.ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p) 2722 } 2723 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F)) 2724 o1 |= (uint32(mask[0]) & 31) << 6 2725 if v&0x20 != 0 { 2726 o1 |= 1 << 1 2727 } 2728 if mask[0]&0x20 != 0 { 2729 o1 |= 1 << 5 /* mb[5] is top bit */ 2730 } 2731 2732 // Opcodes with shift count operands. 2733 case ARLDIMI, ARLDIMICC: 2734 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F)) 2735 o1 |= (uint32(d) & 31) << 6 2736 if d&0x20 != 0 { 2737 o1 |= 1 << 5 2738 } 2739 if v&0x20 != 0 { 2740 o1 |= 1 << 1 2741 } 2742 } 2743 2744 case 31: /* dword */ 2745 d := c.vregoff(&p.From) 2746 2747 if c.ctxt.Arch.ByteOrder == binary.BigEndian { 2748 o1 = uint32(d >> 32) 2749 o2 = uint32(d) 2750 } else { 2751 o1 = uint32(d) 2752 o2 = uint32(d >> 32) 2753 } 2754 2755 if p.From.Sym != nil { 2756 rel := obj.Addrel(c.cursym) 2757 rel.Off = int32(c.pc) 2758 rel.Siz = 8 2759 rel.Sym = p.From.Sym 2760 rel.Add = p.From.Offset 2761 rel.Type = obj.R_ADDR 2762 o2 = 0 2763 o1 = o2 2764 } 2765 2766 case 32: /* fmul frc,fra,frd */ 2767 r := int(p.Reg) 2768 2769 if r == 0 { 2770 r = int(p.To.Reg) 2771 } 2772 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6 2773 2774 case 33: /* fabs [frb,]frd; fmr. frb,frd */ 2775 r := int(p.From.Reg) 2776 2777 if oclass(&p.From) == C_NONE { 2778 r = int(p.To.Reg) 2779 } 2780 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r)) 2781 2782 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */ 2783 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.From3.Reg)&31)<<6 2784 2785 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */ 2786 v := c.regoff(&p.To) 2787 2788 r := int(p.To.Reg) 2789 if r == 0 { 2790 r = int(o.param) 2791 } 2792 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) 2793 o2 = AOP_IRR(c.opstore(p.As), uint32(p.From.Reg), REGTMP, uint32(v)) 2794 2795 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */ 2796 v := c.regoff(&p.From) 2797 2798 r := int(p.From.Reg) 2799 if r == 0 { 2800 r = int(o.param) 2801 } 2802 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) 2803 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v)) 2804 2805 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */ 2806 v := c.regoff(&p.From) 2807 2808 r := int(p.From.Reg) 2809 if r == 0 { 2810 r = int(o.param) 2811 } 2812 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) 2813 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v)) 2814 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 2815 2816 case 40: /* word */ 2817 o1 = uint32(c.regoff(&p.From)) 2818 2819 case 41: /* stswi */ 2820 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.From3))&0x7F)<<11 2821 2822 case 42: /* lswi */ 2823 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.From3))&0x7F)<<11 2824 2825 case 43: /* unary indexed source: dcbf (b); dcbf (a+b) */ 2826 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg)) 2827 2828 case 44: /* indexed store */ 2829 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) 2830 2831 case 45: /* indexed load */ 
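// Sketch: an indexed load such as MOVD (R4)(R5*1), R3 maps to an X-form
// load (ldx and friends), where the effective address is the sum of the
// base and index registers.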
2832 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 2833 2834 case 46: /* plain op */ 2835 o1 = c.oprrr(p.As) 2836 2837 case 47: /* op Ra, Rd; also op [Ra,] Rd */ 2838 r := int(p.From.Reg) 2839 2840 if r == 0 { 2841 r = int(p.To.Reg) 2842 } 2843 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) 2844 2845 case 48: /* op Rs, Ra */ 2846 r := int(p.From.Reg) 2847 2848 if r == 0 { 2849 r = int(p.To.Reg) 2850 } 2851 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) 2852 2853 case 49: /* op Rb; op $n, Rb */ 2854 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */ 2855 v := c.regoff(&p.From) & 1 2856 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21 2857 } else { 2858 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg)) 2859 } 2860 2861 case 50: /* rem[u] r1[,r2],r3 */ 2862 r := int(p.Reg) 2863 2864 if r == 0 { 2865 r = int(p.To.Reg) 2866 } 2867 v := c.oprrr(p.As) 2868 t := v & (1<<10 | 1) /* OE|Rc */ 2869 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) 2870 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg)) 2871 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) 2872 if p.As == AREMU { 2873 o4 = o3 2874 2875 /* Clear top 32 bits */ 2876 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5 2877 } 2878 2879 case 51: /* remd[u] r1[,r2],r3 */ 2880 r := int(p.Reg) 2881 2882 if r == 0 { 2883 r = int(p.To.Reg) 2884 } 2885 v := c.oprrr(p.As) 2886 t := v & (1<<10 | 1) /* OE|Rc */ 2887 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) 2888 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg)) 2889 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) 2890 2891 case 52: /* mtfsbNx cr(n) */ 2892 v := c.regoff(&p.From) & 31 2893 2894 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0) 2895 2896 case 53: /* mffsX ,fr1 */ 2897 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0) 2898 2899 case 54: /* mov msr,r1; mov r1, msr*/ 2900 if oclass(&p.From) == C_REG { 2901 if p.As == AMOVD { 2902 o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0) 2903 } else { 2904 o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0) 2905 } 2906 } else { 2907 o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0) 2908 } 2909 2910 case 55: /* op Rb, Rd */ 2911 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg)) 2912 2913 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */ 2914 v := c.regoff(&p.From) 2915 2916 r := int(p.Reg) 2917 if r == 0 { 2918 r = int(p.To.Reg) 2919 } 2920 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31) 2921 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) { 2922 o1 |= 1 << 1 /* mb[5] */ 2923 } 2924 2925 case 57: /* slw $sh,[s,]a -> rlwinm ... */ 2926 v := c.regoff(&p.From) 2927 2928 r := int(p.Reg) 2929 if r == 0 { 2930 r = int(p.To.Reg) 2931 } 2932 2933 /* 2934 * Let user (gs) shoot himself in the foot. 2935 * qc has already complained. 
2936 * 2937 if(v < 0 || v > 31) 2938 ctxt->diag("illegal shift %ld\n%v", v, p); 2939 */ 2940 if v < 0 { 2941 v = 0 2942 } else if v > 32 { 2943 v = 32 2944 } 2945 var mask [2]uint8 2946 if p.As == ASRW || p.As == ASRWCC { /* shift right */ 2947 mask[0] = uint8(v) 2948 mask[1] = 31 2949 v = 32 - v 2950 } else { 2951 mask[0] = 0 2952 mask[1] = uint8(31 - v) 2953 } 2954 2955 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1])) 2956 if p.As == ASLWCC || p.As == ASRWCC { 2957 o1 |= 1 /* Rc */ 2958 } 2959 2960 case 58: /* logical $andcon,[s],a */ 2961 v := c.regoff(&p.From) 2962 2963 r := int(p.Reg) 2964 if r == 0 { 2965 r = int(p.To.Reg) 2966 } 2967 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) 2968 2969 case 59: /* or/and $ucon,,r */ 2970 v := c.regoff(&p.From) 2971 2972 r := int(p.Reg) 2973 if r == 0 { 2974 r = int(p.To.Reg) 2975 } 2976 o1 = LOP_IRR(c.opirr(-p.As), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis */ 2977 2978 case 60: /* tw to,a,b */ 2979 r := int(c.regoff(&p.From) & 31) 2980 2981 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg)) 2982 2983 case 61: /* tw to,a,$simm */ 2984 r := int(c.regoff(&p.From) & 31) 2985 2986 v := c.regoff(&p.To) 2987 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v)) 2988 2989 case 62: /* rlwmi $sh,s,$mask,a */ 2990 v := c.regoff(&p.From) 2991 2992 var mask [2]uint8 2993 c.maskgen(p, mask[:], uint32(c.regoff(p.From3))) 2994 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v)) 2995 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 2996 2997 case 63: /* rlwmi b,s,$mask,a */ 2998 var mask [2]uint8 2999 c.maskgen(p, mask[:], uint32(c.regoff(p.From3))) 3000 3001 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg)) 3002 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 3003 3004 case 64: /* mtfsf fr[, $m] {,fpcsr} */ 3005 var v int32 3006 if p.From3Type() != obj.TYPE_NONE { 3007 v = c.regoff(p.From3) & 255 3008 } else { 3009 v = 255 3010 } 3011 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11 3012 3013 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */ 3014 if p.To.Reg == 0 { 3015 c.ctxt.Diag("must specify FPSCR(n)\n%v", p) 3016 } 3017 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12 3018 3019 case 66: /* mov spr,r1; mov r1,spr, also dcr */ 3020 var r int 3021 var v int32 3022 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 { 3023 r = int(p.From.Reg) 3024 v = int32(p.To.Reg) 3025 if REG_DCR0 <= v && v <= REG_DCR0+1023 { 3026 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */ 3027 } else { 3028 o1 = OPVCC(31, 467, 0, 0) /* mtspr */ 3029 } 3030 } else { 3031 r = int(p.To.Reg) 3032 v = int32(p.From.Reg) 3033 if REG_DCR0 <= v && v <= REG_DCR0+1023 { 3034 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */ 3035 } else { 3036 o1 = OPVCC(31, 339, 0, 0) /* mfspr */ 3037 } 3038 } 3039 3040 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 3041 3042 case 67: /* mcrf crfD,crfS */ 3043 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg { 3044 c.ctxt.Diag("illegal CR field number\n%v", p) 3045 } 3046 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0) 3047 3048 case 68: /* mfcr rD; mfocrf CRM,rD */ 3049 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 { 3050 v := int32(1 << 
uint(7-(p.To.Reg&7))) /* CR(n) */ 3051 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */ 3052 } else { 3053 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */ 3054 } 3055 3056 case 69: /* mtcrf CRM,rS */ 3057 var v int32 3058 if p.From3Type() != obj.TYPE_NONE { 3059 if p.To.Reg != 0 { 3060 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p) 3061 } 3062 v = c.regoff(p.From3) & 0xff 3063 } else { 3064 if p.To.Reg == 0 { 3065 v = 0xff /* CR */ 3066 } else { 3067 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */ 3068 } 3069 } 3070 3071 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12 3072 3073 case 70: /* [f]cmp r,r,cr*/ 3074 var r int 3075 if p.Reg == 0 { 3076 r = 0 3077 } else { 3078 r = (int(p.Reg) & 7) << 2 3079 } 3080 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg)) 3081 3082 case 71: /* cmp[l] r,i,cr*/ 3083 var r int 3084 if p.Reg == 0 { 3085 r = 0 3086 } else { 3087 r = (int(p.Reg) & 7) << 2 3088 } 3089 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff 3090 3091 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */ 3092 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg)) 3093 3094 case 73: /* mcrfs crfD,crfS */ 3095 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg { 3096 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p) 3097 } 3098 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0) 3099 3100 case 77: /* syscall $scon, syscall Rx */ 3101 if p.From.Type == obj.TYPE_CONST { 3102 if p.From.Offset > BIG || p.From.Offset < -BIG { 3103 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p) 3104 } 3105 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset)) 3106 } else if p.From.Type == obj.TYPE_REG { 3107 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg)) 3108 } else { 3109 c.ctxt.Diag("illegal syscall: %v", p) 3110 o1 = 0x7fe00008 // trap always 3111 } 3112 3113 o2 = c.oprrr(p.As) 3114 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0 3115 3116 case 78: /* undef */ 3117 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed 3118 always to be an illegal instruction." 
*/ 3119 3120 /* relocation operations */ 3121 case 74: 3122 v := c.vregoff(&p.To) 3123 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, c.opstore(p.As)) 3124 3125 //if(dlm) reloc(&p->to, p->pc, 1); 3126 3127 case 75: 3128 v := c.vregoff(&p.From) 3129 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, c.opload(p.As)) 3130 3131 //if(dlm) reloc(&p->from, p->pc, 1); 3132 3133 case 76: 3134 v := c.vregoff(&p.From) 3135 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, c.opload(p.As)) 3136 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) 3137 3138 //if(dlm) reloc(&p->from, p->pc, 1); 3139 3140 case 79: 3141 if p.From.Offset != 0 { 3142 c.ctxt.Diag("invalid offset against tls var %v", p) 3143 } 3144 o1 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGZERO, 0) 3145 rel := obj.Addrel(c.cursym) 3146 rel.Off = int32(c.pc) 3147 rel.Siz = 4 3148 rel.Sym = p.From.Sym 3149 rel.Type = obj.R_POWER_TLS_LE 3150 3151 case 80: 3152 if p.From.Offset != 0 { 3153 c.ctxt.Diag("invalid offset against tls var %v", p) 3154 } 3155 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) 3156 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) 3157 rel := obj.Addrel(c.cursym) 3158 rel.Off = int32(c.pc) 3159 rel.Siz = 8 3160 rel.Sym = p.From.Sym 3161 rel.Type = obj.R_POWER_TLS_IE 3162 3163 case 81: 3164 v := c.vregoff(&p.To) 3165 if v != 0 { 3166 c.ctxt.Diag("invalid offset against GOT slot %v", p) 3167 } 3168 3169 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) 3170 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) 3171 rel := obj.Addrel(c.cursym) 3172 rel.Off = int32(c.pc) 3173 rel.Siz = 8 3174 rel.Sym = p.From.Sym 3175 rel.Type = obj.R_ADDRPOWER_GOT 3176 case 82: /* vector instructions, VX-form and VC-form */ 3177 if p.From.Type == obj.TYPE_REG { 3178 /* reg reg none OR reg reg reg */ 3179 /* 3-register operand order: VRA, VRB, VRT */ 3180 /* 2-register operand order: VRA, VRT */ 3181 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3182 } else if p.From3Type() == obj.TYPE_CONST { 3183 /* imm imm reg reg */ 3184 /* operand order: SIX, VRA, ST, VRT */ 3185 six := int(c.regoff(&p.From)) 3186 st := int(c.regoff(p.From3)) 3187 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six)) 3188 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 { 3189 /* imm reg reg */ 3190 /* operand order: UIM, VRB, VRT */ 3191 uim := int(c.regoff(&p.From)) 3192 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim)) 3193 } else { 3194 /* imm reg */ 3195 /* operand order: SIM, VRT */ 3196 sim := int(c.regoff(&p.From)) 3197 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim)) 3198 } 3199 3200 case 83: /* vector instructions, VA-form */ 3201 if p.From.Type == obj.TYPE_REG { 3202 /* reg reg reg reg */ 3203 /* 4-register operand order: VRA, VRB, VRC, VRT */ 3204 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.From3.Reg)) 3205 } else if p.From.Type == obj.TYPE_CONST { 3206 /* imm reg reg reg */ 3207 /* operand order: SHB, VRA, VRB, VRT */ 3208 shb := int(c.regoff(&p.From)) 3209 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From3.Reg), uint32(shb)) 3210 } 3211 3212 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc 3213 bc := c.vregoff(&p.From) 3214 3215 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg 3216 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.From3.Reg), uint32(bc)) 3217 3218 case 85: /* vector instructions, VX-form */ 3219 /* reg 
none reg */ 3220 /* 2-register operand order: VRB, VRT */ 3221 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg)) 3222 3223 case 86: /* VSX indexed store, XX1-form */ 3224 /* reg reg reg */ 3225 /* 3-register operand order: XT, (RB)(RA*1) */ 3226 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) 3227 3228 case 87: /* VSX indexed load, XX1-form */ 3229 /* reg reg reg */ 3230 /* 3-register operand order: (RB)(RA*1), XT */ 3231 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) 3232 3233 case 88: /* VSX instructions, XX1-form */ 3234 /* reg reg none OR reg reg reg */ 3235 /* 3-register operand order: RA, RB, XT */ 3236 /* 2-register operand order: XS, RA or RA, XT */ 3237 xt := int32(p.To.Reg) 3238 xs := int32(p.From.Reg) 3239 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */ 3240 if REG_V0 <= xt && xt <= REG_V31 { 3241 /* Convert V0-V31 to VS32-VS63 */ 3242 xt = xt + 64 3243 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3244 } else if REG_F0 <= xt && xt <= REG_F31 { 3245 /* Convert F0-F31 to VS0-VS31 */ 3246 xt = xt + 64 3247 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3248 } else if REG_VS0 <= xt && xt <= REG_VS63 { 3249 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3250 } else if REG_V0 <= xs && xs <= REG_V31 { 3251 /* Likewise for XS */ 3252 xs = xs + 64 3253 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3254 } else if REG_F0 <= xs && xs <= REG_F31 { 3255 xs = xs + 64 3256 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3257 } else if REG_VS0 <= xs && xs <= REG_VS63 { 3258 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3259 } 3260 3261 case 89: /* VSX instructions, XX2-form */ 3262 /* reg none reg OR reg imm reg */ 3263 /* 2-register operand order: XB, XT or XB, UIM, XT*/ 3264 uim := int(c.regoff(p.From3)) 3265 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg)) 3266 3267 case 90: /* VSX instructions, XX3-form */ 3268 if p.From3Type() == obj.TYPE_NONE { 3269 /* reg reg reg */ 3270 /* 3-register operand order: XA, XB, XT */ 3271 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) 3272 } else if p.From3Type() == obj.TYPE_CONST { 3273 /* reg reg reg imm */ 3274 /* operand order: XA, XB, DM, XT */ 3275 dm := int(c.regoff(p.From3)) 3276 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm)) 3277 } 3278 3279 case 91: /* VSX instructions, XX4-form */ 3280 /* reg reg reg reg */ 3281 /* 3-register operand order: XA, XB, XC, XT */ 3282 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.From3.Reg)) 3283 3284 case 92: /* X-form instructions, 3-operands */ 3285 if p.To.Type == obj.TYPE_CONST { 3286 /* imm reg reg */ 3287 /* operand order: FRA, FRB, BF */ 3288 bf := int(c.regoff(&p.To)) << 2 3289 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) 3290 } else if p.To.Type == obj.TYPE_REG { 3291 /* reg reg reg */ 3292 /* operand order: RS, RB, RA */ 3293 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3294 } 3295 3296 case 93: /* X-form instructions, 2-operands */ 3297 if p.To.Type == obj.TYPE_CONST { 3298 /* imm reg */ 3299 /* operand order: FRB, BF */ 3300 bf := 
int(c.regoff(&p.To)) << 2 3301 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg)) 3302 } else if p.Reg == 0 { 3303 /* popcnt* r,r, X-form */ 3304 /* operand order: RS, RA */ 3305 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) 3306 } 3307 3308 } 3309 3310 out[0] = o1 3311 out[1] = o2 3312 out[2] = o3 3313 out[3] = o4 3314 out[4] = o5 3315 return 3316 } 3317 3318 func (c *ctxt9) vregoff(a *obj.Addr) int64 { 3319 c.instoffset = 0 3320 if a != nil { 3321 c.aclass(a) 3322 } 3323 return c.instoffset 3324 } 3325 3326 func (c *ctxt9) regoff(a *obj.Addr) int32 { 3327 return int32(c.vregoff(a)) 3328 } 3329 3330 func (c *ctxt9) oprrr(a obj.As) uint32 { 3331 switch a { 3332 case AADD: 3333 return OPVCC(31, 266, 0, 0) 3334 case AADDCC: 3335 return OPVCC(31, 266, 0, 1) 3336 case AADDV: 3337 return OPVCC(31, 266, 1, 0) 3338 case AADDVCC: 3339 return OPVCC(31, 266, 1, 1) 3340 case AADDC: 3341 return OPVCC(31, 10, 0, 0) 3342 case AADDCCC: 3343 return OPVCC(31, 10, 0, 1) 3344 case AADDCV: 3345 return OPVCC(31, 10, 1, 0) 3346 case AADDCVCC: 3347 return OPVCC(31, 10, 1, 1) 3348 case AADDE: 3349 return OPVCC(31, 138, 0, 0) 3350 case AADDECC: 3351 return OPVCC(31, 138, 0, 1) 3352 case AADDEV: 3353 return OPVCC(31, 138, 1, 0) 3354 case AADDEVCC: 3355 return OPVCC(31, 138, 1, 1) 3356 case AADDME: 3357 return OPVCC(31, 234, 0, 0) 3358 case AADDMECC: 3359 return OPVCC(31, 234, 0, 1) 3360 case AADDMEV: 3361 return OPVCC(31, 234, 1, 0) 3362 case AADDMEVCC: 3363 return OPVCC(31, 234, 1, 1) 3364 case AADDZE: 3365 return OPVCC(31, 202, 0, 0) 3366 case AADDZECC: 3367 return OPVCC(31, 202, 0, 1) 3368 case AADDZEV: 3369 return OPVCC(31, 202, 1, 0) 3370 case AADDZEVCC: 3371 return OPVCC(31, 202, 1, 1) 3372 3373 case AAND: 3374 return OPVCC(31, 28, 0, 0) 3375 case AANDCC: 3376 return OPVCC(31, 28, 0, 1) 3377 case AANDN: 3378 return OPVCC(31, 60, 0, 0) 3379 case AANDNCC: 3380 return OPVCC(31, 60, 0, 1) 3381 3382 case ACMP: 3383 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */ 3384 case ACMPU: 3385 return OPVCC(31, 32, 0, 0) | 1<<21 3386 case ACMPW: 3387 return OPVCC(31, 0, 0, 0) /* L=0 */ 3388 case ACMPWU: 3389 return OPVCC(31, 32, 0, 0) 3390 case ACMPB: 3391 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */ 3392 3393 case ACNTLZW: 3394 return OPVCC(31, 26, 0, 0) 3395 case ACNTLZWCC: 3396 return OPVCC(31, 26, 0, 1) 3397 case ACNTLZD: 3398 return OPVCC(31, 58, 0, 0) 3399 case ACNTLZDCC: 3400 return OPVCC(31, 58, 0, 1) 3401 3402 case ACRAND: 3403 return OPVCC(19, 257, 0, 0) 3404 case ACRANDN: 3405 return OPVCC(19, 129, 0, 0) 3406 case ACREQV: 3407 return OPVCC(19, 289, 0, 0) 3408 case ACRNAND: 3409 return OPVCC(19, 225, 0, 0) 3410 case ACRNOR: 3411 return OPVCC(19, 33, 0, 0) 3412 case ACROR: 3413 return OPVCC(19, 449, 0, 0) 3414 case ACRORN: 3415 return OPVCC(19, 417, 0, 0) 3416 case ACRXOR: 3417 return OPVCC(19, 193, 0, 0) 3418 3419 case ADCBF: 3420 return OPVCC(31, 86, 0, 0) 3421 case ADCBI: 3422 return OPVCC(31, 470, 0, 0) 3423 case ADCBST: 3424 return OPVCC(31, 54, 0, 0) 3425 case ADCBT: 3426 return OPVCC(31, 278, 0, 0) 3427 case ADCBTST: 3428 return OPVCC(31, 246, 0, 0) 3429 case ADCBZ: 3430 return OPVCC(31, 1014, 0, 0) 3431 3432 case AREM, ADIVW: 3433 return OPVCC(31, 491, 0, 0) 3434 3435 case AREMCC, ADIVWCC: 3436 return OPVCC(31, 491, 0, 1) 3437 3438 case AREMV, ADIVWV: 3439 return OPVCC(31, 491, 1, 0) 3440 3441 case AREMVCC, ADIVWVCC: 3442 return OPVCC(31, 491, 1, 1) 3443 3444 case AREMU, ADIVWU: 3445 return OPVCC(31, 459, 0, 0) 3446 3447 case AREMUCC, ADIVWUCC: 3448 return OPVCC(31, 
459, 0, 1) 3449 3450 case AREMUV, ADIVWUV: 3451 return OPVCC(31, 459, 1, 0) 3452 3453 case AREMUVCC, ADIVWUVCC: 3454 return OPVCC(31, 459, 1, 1) 3455 3456 case AREMD, ADIVD: 3457 return OPVCC(31, 489, 0, 0) 3458 3459 case AREMDCC, ADIVDCC: 3460 return OPVCC(31, 489, 0, 1) 3461 3462 case ADIVDE: 3463 return OPVCC(31, 425, 0, 0) 3464 3465 case ADIVDECC: 3466 return OPVCC(31, 425, 0, 1) 3467 3468 case ADIVDEU: 3469 return OPVCC(31, 393, 0, 0) 3470 3471 case ADIVDEUCC: 3472 return OPVCC(31, 393, 0, 1) 3473 3474 case AREMDV, ADIVDV: 3475 return OPVCC(31, 489, 1, 0) 3476 3477 case AREMDVCC, ADIVDVCC: 3478 return OPVCC(31, 489, 1, 1) 3479 3480 case AREMDU, ADIVDU: 3481 return OPVCC(31, 457, 0, 0) 3482 3483 case AREMDUCC, ADIVDUCC: 3484 return OPVCC(31, 457, 0, 1) 3485 3486 case AREMDUV, ADIVDUV: 3487 return OPVCC(31, 457, 1, 0) 3488 3489 case AREMDUVCC, ADIVDUVCC: 3490 return OPVCC(31, 457, 1, 1) 3491 3492 case AEIEIO: 3493 return OPVCC(31, 854, 0, 0) 3494 3495 case AEQV: 3496 return OPVCC(31, 284, 0, 0) 3497 case AEQVCC: 3498 return OPVCC(31, 284, 0, 1) 3499 3500 case AEXTSB: 3501 return OPVCC(31, 954, 0, 0) 3502 case AEXTSBCC: 3503 return OPVCC(31, 954, 0, 1) 3504 case AEXTSH: 3505 return OPVCC(31, 922, 0, 0) 3506 case AEXTSHCC: 3507 return OPVCC(31, 922, 0, 1) 3508 case AEXTSW: 3509 return OPVCC(31, 986, 0, 0) 3510 case AEXTSWCC: 3511 return OPVCC(31, 986, 0, 1) 3512 3513 case AFABS: 3514 return OPVCC(63, 264, 0, 0) 3515 case AFABSCC: 3516 return OPVCC(63, 264, 0, 1) 3517 case AFADD: 3518 return OPVCC(63, 21, 0, 0) 3519 case AFADDCC: 3520 return OPVCC(63, 21, 0, 1) 3521 case AFADDS: 3522 return OPVCC(59, 21, 0, 0) 3523 case AFADDSCC: 3524 return OPVCC(59, 21, 0, 1) 3525 case AFCMPO: 3526 return OPVCC(63, 32, 0, 0) 3527 case AFCMPU: 3528 return OPVCC(63, 0, 0, 0) 3529 case AFCFID: 3530 return OPVCC(63, 846, 0, 0) 3531 case AFCFIDCC: 3532 return OPVCC(63, 846, 0, 1) 3533 case AFCFIDU: 3534 return OPVCC(63, 974, 0, 0) 3535 case AFCFIDUCC: 3536 return OPVCC(63, 974, 0, 1) 3537 case AFCTIW: 3538 return OPVCC(63, 14, 0, 0) 3539 case AFCTIWCC: 3540 return OPVCC(63, 14, 0, 1) 3541 case AFCTIWZ: 3542 return OPVCC(63, 15, 0, 0) 3543 case AFCTIWZCC: 3544 return OPVCC(63, 15, 0, 1) 3545 case AFCTID: 3546 return OPVCC(63, 814, 0, 0) 3547 case AFCTIDCC: 3548 return OPVCC(63, 814, 0, 1) 3549 case AFCTIDZ: 3550 return OPVCC(63, 815, 0, 0) 3551 case AFCTIDZCC: 3552 return OPVCC(63, 815, 0, 1) 3553 case AFDIV: 3554 return OPVCC(63, 18, 0, 0) 3555 case AFDIVCC: 3556 return OPVCC(63, 18, 0, 1) 3557 case AFDIVS: 3558 return OPVCC(59, 18, 0, 0) 3559 case AFDIVSCC: 3560 return OPVCC(59, 18, 0, 1) 3561 case AFMADD: 3562 return OPVCC(63, 29, 0, 0) 3563 case AFMADDCC: 3564 return OPVCC(63, 29, 0, 1) 3565 case AFMADDS: 3566 return OPVCC(59, 29, 0, 0) 3567 case AFMADDSCC: 3568 return OPVCC(59, 29, 0, 1) 3569 3570 case AFMOVS, AFMOVD: 3571 return OPVCC(63, 72, 0, 0) /* load */ 3572 case AFMOVDCC: 3573 return OPVCC(63, 72, 0, 1) 3574 case AFMSUB: 3575 return OPVCC(63, 28, 0, 0) 3576 case AFMSUBCC: 3577 return OPVCC(63, 28, 0, 1) 3578 case AFMSUBS: 3579 return OPVCC(59, 28, 0, 0) 3580 case AFMSUBSCC: 3581 return OPVCC(59, 28, 0, 1) 3582 case AFMUL: 3583 return OPVCC(63, 25, 0, 0) 3584 case AFMULCC: 3585 return OPVCC(63, 25, 0, 1) 3586 case AFMULS: 3587 return OPVCC(59, 25, 0, 0) 3588 case AFMULSCC: 3589 return OPVCC(59, 25, 0, 1) 3590 case AFNABS: 3591 return OPVCC(63, 136, 0, 0) 3592 case AFNABSCC: 3593 return OPVCC(63, 136, 0, 1) 3594 case AFNEG: 3595 return OPVCC(63, 40, 0, 0) 3596 case AFNEGCC: 3597 return OPVCC(63, 40, 
0, 1) 3598 case AFNMADD: 3599 return OPVCC(63, 31, 0, 0) 3600 case AFNMADDCC: 3601 return OPVCC(63, 31, 0, 1) 3602 case AFNMADDS: 3603 return OPVCC(59, 31, 0, 0) 3604 case AFNMADDSCC: 3605 return OPVCC(59, 31, 0, 1) 3606 case AFNMSUB: 3607 return OPVCC(63, 30, 0, 0) 3608 case AFNMSUBCC: 3609 return OPVCC(63, 30, 0, 1) 3610 case AFNMSUBS: 3611 return OPVCC(59, 30, 0, 0) 3612 case AFNMSUBSCC: 3613 return OPVCC(59, 30, 0, 1) 3614 case AFRES: 3615 return OPVCC(59, 24, 0, 0) 3616 case AFRESCC: 3617 return OPVCC(59, 24, 0, 1) 3618 case AFRIM: 3619 return OPVCC(63, 488, 0, 0) 3620 case AFRIMCC: 3621 return OPVCC(63, 488, 0, 1) 3622 case AFRIP: 3623 return OPVCC(63, 456, 0, 0) 3624 case AFRIPCC: 3625 return OPVCC(63, 456, 0, 1) 3626 case AFRIZ: 3627 return OPVCC(63, 424, 0, 0) 3628 case AFRIZCC: 3629 return OPVCC(63, 424, 0, 1) 3630 case AFRSP: 3631 return OPVCC(63, 12, 0, 0) 3632 case AFRSPCC: 3633 return OPVCC(63, 12, 0, 1) 3634 case AFRSQRTE: 3635 return OPVCC(63, 26, 0, 0) 3636 case AFRSQRTECC: 3637 return OPVCC(63, 26, 0, 1) 3638 case AFSEL: 3639 return OPVCC(63, 23, 0, 0) 3640 case AFSELCC: 3641 return OPVCC(63, 23, 0, 1) 3642 case AFSQRT: 3643 return OPVCC(63, 22, 0, 0) 3644 case AFSQRTCC: 3645 return OPVCC(63, 22, 0, 1) 3646 case AFSQRTS: 3647 return OPVCC(59, 22, 0, 0) 3648 case AFSQRTSCC: 3649 return OPVCC(59, 22, 0, 1) 3650 case AFSUB: 3651 return OPVCC(63, 20, 0, 0) 3652 case AFSUBCC: 3653 return OPVCC(63, 20, 0, 1) 3654 case AFSUBS: 3655 return OPVCC(59, 20, 0, 0) 3656 case AFSUBSCC: 3657 return OPVCC(59, 20, 0, 1) 3658 3659 case AICBI: 3660 return OPVCC(31, 982, 0, 0) 3661 case AISYNC: 3662 return OPVCC(19, 150, 0, 0) 3663 3664 case AMTFSB0: 3665 return OPVCC(63, 70, 0, 0) 3666 case AMTFSB0CC: 3667 return OPVCC(63, 70, 0, 1) 3668 case AMTFSB1: 3669 return OPVCC(63, 38, 0, 0) 3670 case AMTFSB1CC: 3671 return OPVCC(63, 38, 0, 1) 3672 3673 case AMULHW: 3674 return OPVCC(31, 75, 0, 0) 3675 case AMULHWCC: 3676 return OPVCC(31, 75, 0, 1) 3677 case AMULHWU: 3678 return OPVCC(31, 11, 0, 0) 3679 case AMULHWUCC: 3680 return OPVCC(31, 11, 0, 1) 3681 case AMULLW: 3682 return OPVCC(31, 235, 0, 0) 3683 case AMULLWCC: 3684 return OPVCC(31, 235, 0, 1) 3685 case AMULLWV: 3686 return OPVCC(31, 235, 1, 0) 3687 case AMULLWVCC: 3688 return OPVCC(31, 235, 1, 1) 3689 3690 case AMULHD: 3691 return OPVCC(31, 73, 0, 0) 3692 case AMULHDCC: 3693 return OPVCC(31, 73, 0, 1) 3694 case AMULHDU: 3695 return OPVCC(31, 9, 0, 0) 3696 case AMULHDUCC: 3697 return OPVCC(31, 9, 0, 1) 3698 case AMULLD: 3699 return OPVCC(31, 233, 0, 0) 3700 case AMULLDCC: 3701 return OPVCC(31, 233, 0, 1) 3702 case AMULLDV: 3703 return OPVCC(31, 233, 1, 0) 3704 case AMULLDVCC: 3705 return OPVCC(31, 233, 1, 1) 3706 3707 case ANAND: 3708 return OPVCC(31, 476, 0, 0) 3709 case ANANDCC: 3710 return OPVCC(31, 476, 0, 1) 3711 case ANEG: 3712 return OPVCC(31, 104, 0, 0) 3713 case ANEGCC: 3714 return OPVCC(31, 104, 0, 1) 3715 case ANEGV: 3716 return OPVCC(31, 104, 1, 0) 3717 case ANEGVCC: 3718 return OPVCC(31, 104, 1, 1) 3719 case ANOR: 3720 return OPVCC(31, 124, 0, 0) 3721 case ANORCC: 3722 return OPVCC(31, 124, 0, 1) 3723 case AOR: 3724 return OPVCC(31, 444, 0, 0) 3725 case AORCC: 3726 return OPVCC(31, 444, 0, 1) 3727 case AORN: 3728 return OPVCC(31, 412, 0, 0) 3729 case AORNCC: 3730 return OPVCC(31, 412, 0, 1) 3731 3732 case APOPCNTD: 3733 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */ 3734 case APOPCNTW: 3735 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */ 3736 case APOPCNTB: 3737 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */ 3738 3739 
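// Return-from-interrupt family: privileged flow-of-control ops, grouped
// under ASYSCALL in buildop above.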
case ARFI: 3740 return OPVCC(19, 50, 0, 0) 3741 case ARFCI: 3742 return OPVCC(19, 51, 0, 0) 3743 case ARFID: 3744 return OPVCC(19, 18, 0, 0) 3745 case AHRFID: 3746 return OPVCC(19, 274, 0, 0) 3747 3748 case ARLWMI: 3749 return OPVCC(20, 0, 0, 0) 3750 case ARLWMICC: 3751 return OPVCC(20, 0, 0, 1) 3752 case ARLWNM: 3753 return OPVCC(23, 0, 0, 0) 3754 case ARLWNMCC: 3755 return OPVCC(23, 0, 0, 1) 3756 3757 case ARLDCL: 3758 return OPVCC(30, 8, 0, 0) 3759 case ARLDCR: 3760 return OPVCC(30, 9, 0, 0) 3761 3762 case ARLDICL: 3763 return OPVCC(30, 0, 0, 0) 3764 case ARLDICLCC: 3765 return OPVCC(30, 0, 0, 1) 3766 case ARLDICR: 3767 return OPVCC(30, 0, 0, 0) | 2<<1 // rldicr 3768 case ARLDICRCC: 3769 return OPVCC(30, 0, 0, 1) | 2<<1 // rldicr. 3770 3771 case ASYSCALL: 3772 return OPVCC(17, 1, 0, 0) 3773 3774 case ASLW: 3775 return OPVCC(31, 24, 0, 0) 3776 case ASLWCC: 3777 return OPVCC(31, 24, 0, 1) 3778 case ASLD: 3779 return OPVCC(31, 27, 0, 0) 3780 case ASLDCC: 3781 return OPVCC(31, 27, 0, 1) 3782 3783 case ASRAW: 3784 return OPVCC(31, 792, 0, 0) 3785 case ASRAWCC: 3786 return OPVCC(31, 792, 0, 1) 3787 case ASRAD: 3788 return OPVCC(31, 794, 0, 0) 3789 case ASRADCC: 3790 return OPVCC(31, 794, 0, 1) 3791 3792 case ASRW: 3793 return OPVCC(31, 536, 0, 0) 3794 case ASRWCC: 3795 return OPVCC(31, 536, 0, 1) 3796 case ASRD: 3797 return OPVCC(31, 539, 0, 0) 3798 case ASRDCC: 3799 return OPVCC(31, 539, 0, 1) 3800 3801 case ASUB: 3802 return OPVCC(31, 40, 0, 0) 3803 case ASUBCC: 3804 return OPVCC(31, 40, 0, 1) 3805 case ASUBV: 3806 return OPVCC(31, 40, 1, 0) 3807 case ASUBVCC: 3808 return OPVCC(31, 40, 1, 1) 3809 case ASUBC: 3810 return OPVCC(31, 8, 0, 0) 3811 case ASUBCCC: 3812 return OPVCC(31, 8, 0, 1) 3813 case ASUBCV: 3814 return OPVCC(31, 8, 1, 0) 3815 case ASUBCVCC: 3816 return OPVCC(31, 8, 1, 1) 3817 case ASUBE: 3818 return OPVCC(31, 136, 0, 0) 3819 case ASUBECC: 3820 return OPVCC(31, 136, 0, 1) 3821 case ASUBEV: 3822 return OPVCC(31, 136, 1, 0) 3823 case ASUBEVCC: 3824 return OPVCC(31, 136, 1, 1) 3825 case ASUBME: 3826 return OPVCC(31, 232, 0, 0) 3827 case ASUBMECC: 3828 return OPVCC(31, 232, 0, 1) 3829 case ASUBMEV: 3830 return OPVCC(31, 232, 1, 0) 3831 case ASUBMEVCC: 3832 return OPVCC(31, 232, 1, 1) 3833 case ASUBZE: 3834 return OPVCC(31, 200, 0, 0) 3835 case ASUBZECC: 3836 return OPVCC(31, 200, 0, 1) 3837 case ASUBZEV: 3838 return OPVCC(31, 200, 1, 0) 3839 case ASUBZEVCC: 3840 return OPVCC(31, 200, 1, 1) 3841 3842 case ASYNC: 3843 return OPVCC(31, 598, 0, 0) 3844 case ALWSYNC: 3845 return OPVCC(31, 598, 0, 0) | 1<<21 3846 3847 case APTESYNC: 3848 return OPVCC(31, 598, 0, 0) | 2<<21 3849 3850 case ATLBIE: 3851 return OPVCC(31, 306, 0, 0) 3852 case ATLBIEL: 3853 return OPVCC(31, 274, 0, 0) 3854 case ATLBSYNC: 3855 return OPVCC(31, 566, 0, 0) 3856 case ASLBIA: 3857 return OPVCC(31, 498, 0, 0) 3858 case ASLBIE: 3859 return OPVCC(31, 434, 0, 0) 3860 case ASLBMFEE: 3861 return OPVCC(31, 915, 0, 0) 3862 case ASLBMFEV: 3863 return OPVCC(31, 851, 0, 0) 3864 case ASLBMTE: 3865 return OPVCC(31, 402, 0, 0) 3866 3867 case ATW: 3868 return OPVCC(31, 4, 0, 0) 3869 case ATD: 3870 return OPVCC(31, 68, 0, 0) 3871 3872 /* Vector (VMX/Altivec) instructions */ 3873 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 3874 /* are enabled starting at POWER6 (ISA 2.05). 
*/ 3875 case AVAND: 3876 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */ 3877 case AVANDC: 3878 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */ 3879 case AVNAND: 3880 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */ 3881 3882 case AVOR: 3883 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */ 3884 case AVORC: 3885 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */ 3886 case AVNOR: 3887 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */ 3888 case AVXOR: 3889 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */ 3890 case AVEQV: 3891 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */ 3892 3893 case AVADDUBM: 3894 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */ 3895 case AVADDUHM: 3896 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */ 3897 case AVADDUWM: 3898 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */ 3899 case AVADDUDM: 3900 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */ 3901 case AVADDUQM: 3902 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */ 3903 3904 case AVADDCUQ: 3905 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */ 3906 case AVADDCUW: 3907 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */ 3908 3909 case AVADDUBS: 3910 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */ 3911 case AVADDUHS: 3912 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */ 3913 case AVADDUWS: 3914 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */ 3915 3916 case AVADDSBS: 3917 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */ 3918 case AVADDSHS: 3919 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */ 3920 case AVADDSWS: 3921 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */ 3922 3923 case AVADDEUQM: 3924 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */ 3925 case AVADDECUQ: 3926 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */ 3927 3928 case AVPMSUMB: 3929 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */ 3930 case AVPMSUMH: 3931 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */ 3932 case AVPMSUMW: 3933 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */ 3934 case AVPMSUMD: 3935 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */ 3936 3937 case AVSUBUBM: 3938 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */ 3939 case AVSUBUHM: 3940 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */ 3941 case AVSUBUWM: 3942 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */ 3943 case AVSUBUDM: 3944 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */ 3945 case AVSUBUQM: 3946 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */ 3947 3948 case AVSUBCUQ: 3949 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */ 3950 case AVSUBCUW: 3951 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */ 3952 3953 case AVSUBUBS: 3954 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */ 3955 case AVSUBUHS: 3956 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */ 3957 case AVSUBUWS: 3958 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */ 3959 3960 case AVSUBSBS: 3961 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */ 3962 case AVSUBSHS: 3963 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */ 3964 case AVSUBSWS: 3965 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */ 3966 3967 case AVSUBEUQM: 3968 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */ 3969 case AVSUBECUQ: 3970 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */ 3971 3972 case AVRLB: 3973 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */ 3974 case AVRLH: 3975 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */ 3976 case AVRLW: 3977 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */ 3978 case AVRLD: 3979 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */ 3980 3981 case AVSLB: 3982 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */ 3983 case AVSLH: 3984 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */ 3985 case AVSLW: 3986 return OPVX(4,
388, 0, 0) /* vslw - v2.03 */ 3987 case AVSL: 3988 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */ 3989 case AVSLO: 3990 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */ 3991 case AVSRB: 3992 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */ 3993 case AVSRH: 3994 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */ 3995 case AVSRW: 3996 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */ 3997 case AVSR: 3998 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */ 3999 case AVSRO: 4000 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */ 4001 case AVSLD: 4002 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */ 4003 case AVSRD: 4004 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */ 4005 4006 case AVSRAB: 4007 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */ 4008 case AVSRAH: 4009 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */ 4010 case AVSRAW: 4011 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */ 4012 case AVSRAD: 4013 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */ 4014 4015 case AVCLZB: 4016 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */ 4017 case AVCLZH: 4018 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */ 4019 case AVCLZW: 4020 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */ 4021 case AVCLZD: 4022 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */ 4023 4024 case AVPOPCNTB: 4025 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */ 4026 case AVPOPCNTH: 4027 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */ 4028 case AVPOPCNTW: 4029 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */ 4030 case AVPOPCNTD: 4031 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */ 4032 4033 case AVCMPEQUB: 4034 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */ 4035 case AVCMPEQUBCC: 4036 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */ 4037 case AVCMPEQUH: 4038 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */ 4039 case AVCMPEQUHCC: 4040 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */ 4041 case AVCMPEQUW: 4042 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */ 4043 case AVCMPEQUWCC: 4044 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */ 4045 case AVCMPEQUD: 4046 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */ 4047 case AVCMPEQUDCC: 4048 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */ 4049 4050 case AVCMPGTUB: 4051 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */ 4052 case AVCMPGTUBCC: 4053 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */ 4054 case AVCMPGTUH: 4055 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */ 4056 case AVCMPGTUHCC: 4057 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */ 4058 case AVCMPGTUW: 4059 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */ 4060 case AVCMPGTUWCC: 4061 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */ 4062 case AVCMPGTUD: 4063 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */ 4064 case AVCMPGTUDCC: 4065 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */ 4066 case AVCMPGTSB: 4067 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */ 4068 case AVCMPGTSBCC: 4069 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */ 4070 case AVCMPGTSH: 4071 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */ 4072 case AVCMPGTSHCC: 4073 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */ 4074 case AVCMPGTSW: 4075 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */ 4076 case AVCMPGTSWCC: 4077 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */ 4078 case AVCMPGTSD: 4079 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */ 4080 case AVCMPGTSDCC: 4081 return OPVC(4, 967, 0, 1) /* vcmpgtsd.
- v2.07 */ 4082 4083 case AVPERM: 4084 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */ 4085 4086 case AVSEL: 4087 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */ 4088 4089 case AVCIPHER: 4090 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */ 4091 case AVCIPHERLAST: 4092 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */ 4093 case AVNCIPHER: 4094 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */ 4095 case AVNCIPHERLAST: 4096 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */ 4097 case AVSBOX: 4098 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */ 4099 /* End of vector instructions */ 4100 4101 /* Vector scalar (VSX) instructions */ 4102 /* ISA 2.06 enables these for POWER7. */ 4103 case AMFVSRD, AMFVRD, AMFFPRD: 4104 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */ 4105 case AMFVSRWZ: 4106 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */ 4107 4108 case AMTVSRD, AMTFPRD, AMTVRD: 4109 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */ 4110 case AMTVSRWA: 4111 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */ 4112 case AMTVSRWZ: 4113 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */ 4114 4115 case AXXLANDQ: 4116 return OPVXX3(60, 130, 0) /* xxland - v2.06 */ 4117 case AXXLANDC: 4118 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */ 4119 case AXXLEQV: 4120 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */ 4121 case AXXLNAND: 4122 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */ 4123 4124 case AXXLORC: 4125 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */ 4126 case AXXLNOR: 4127 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */ 4128 case AXXLORQ: 4129 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */ 4130 case AXXLXOR: 4131 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */ 4132 4133 case AXXSEL: 4134 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */ 4135 4136 case AXXMRGHW: 4137 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */ 4138 case AXXMRGLW: 4139 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */ 4140 4141 case AXXSPLTW: 4142 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */ 4143 4144 case AXXPERMDI: 4145 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */ 4146 4147 case AXXSLDWI: 4148 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */ 4149 4150 case AXSCVDPSP: 4151 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */ 4152 case AXSCVSPDP: 4153 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */ 4154 case AXSCVDPSPN: 4155 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */ 4156 case AXSCVSPDPN: 4157 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */ 4158 4159 case AXVCVDPSP: 4160 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */ 4161 case AXVCVSPDP: 4162 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */ 4163 4164 case AXSCVDPSXDS: 4165 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */ 4166 case AXSCVDPSXWS: 4167 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */ 4168 case AXSCVDPUXDS: 4169 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */ 4170 case AXSCVDPUXWS: 4171 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */ 4172 4173 case AXSCVSXDDP: 4174 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */ 4175 case AXSCVUXDDP: 4176 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */ 4177 case AXSCVSXDSP: 4178 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */ 4179 case AXSCVUXDSP: 4180 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */ 4181 4182 case AXVCVDPSXDS: 4183 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */ 4184 case AXVCVDPSXWS: 4185 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */ 4186 case AXVCVDPUXDS: 4187 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */ 4188 case AXVCVDPUXWS: 4189 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */ 4190 case AXVCVSPSXDS: 
4191 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */ 4192 case AXVCVSPSXWS: 4193 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */ 4194 case AXVCVSPUXDS: 4195 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */ 4196 case AXVCVSPUXWS: 4197 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */ 4198 4199 case AXVCVSXDDP: 4200 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */ 4201 case AXVCVSXWDP: 4202 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */ 4203 case AXVCVUXDDP: 4204 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */ 4205 case AXVCVUXWDP: 4206 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */ 4207 case AXVCVSXDSP: 4208 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */ 4209 case AXVCVSXWSP: 4210 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */ 4211 case AXVCVUXDSP: 4212 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */ 4213 case AXVCVUXWSP: 4214 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */ 4215 /* End of VSX instructions */ 4216 4217 case AXOR: 4218 return OPVCC(31, 316, 0, 0) 4219 case AXORCC: 4220 return OPVCC(31, 316, 0, 1) 4221 } 4222 4223 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a) 4224 return 0 4225 } 4226 4227 func (c *ctxt9) opirrr(a obj.As) uint32 { 4228 switch a { 4229 /* Vector (VMX/Altivec) instructions */ 4230 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4231 /* are enabled starting at POWER6 (ISA 2.05). */ 4232 case AVSLDOI: 4233 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */ 4234 } 4235 4236 c.ctxt.Diag("bad i/r/r/r opcode %v", a) 4237 return 0 4238 } 4239 4240 func (c *ctxt9) opiirr(a obj.As) uint32 { 4241 switch a { 4242 /* Vector (VMX/Altivec) instructions */ 4243 /* ISA 2.07 enables these for POWER8 and beyond. */ 4244 case AVSHASIGMAW: 4245 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */ 4246 case AVSHASIGMAD: 4247 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */ 4248 } 4249 4250 c.ctxt.Diag("bad i/i/r/r opcode %v", a) 4251 return 0 4252 } 4253 4254 func (c *ctxt9) opirr(a obj.As) uint32 { 4255 switch a { 4256 case AADD: 4257 return OPVCC(14, 0, 0, 0) 4258 case AADDC: 4259 return OPVCC(12, 0, 0, 0) 4260 case AADDCCC: 4261 return OPVCC(13, 0, 0, 0) 4262 case -AADD: 4263 return OPVCC(15, 0, 0, 0) /* ADDIS/CAU */ 4264 4265 case AANDCC: 4266 return OPVCC(28, 0, 0, 0) 4267 case -AANDCC: 4268 return OPVCC(29, 0, 0, 0) /* ANDIS./ANDIU. 
*/ 4269 4270 case ABR: 4271 return OPVCC(18, 0, 0, 0) 4272 case ABL: 4273 return OPVCC(18, 0, 0, 0) | 1 4274 case obj.ADUFFZERO: 4275 return OPVCC(18, 0, 0, 0) | 1 4276 case obj.ADUFFCOPY: 4277 return OPVCC(18, 0, 0, 0) | 1 4278 case ABC: 4279 return OPVCC(16, 0, 0, 0) 4280 case ABCL: 4281 return OPVCC(16, 0, 0, 0) | 1 4282 4283 case ABEQ: 4284 return AOP_RRR(16<<26, 12, 2, 0) 4285 case ABGE: 4286 return AOP_RRR(16<<26, 4, 0, 0) 4287 case ABGT: 4288 return AOP_RRR(16<<26, 12, 1, 0) 4289 case ABLE: 4290 return AOP_RRR(16<<26, 4, 1, 0) 4291 case ABLT: 4292 return AOP_RRR(16<<26, 12, 0, 0) 4293 case ABNE: 4294 return AOP_RRR(16<<26, 4, 2, 0) 4295 case ABVC: 4296 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear 4297 case ABVS: 4298 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set 4299 4300 case ACMP: 4301 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */ 4302 case ACMPU: 4303 return OPVCC(10, 0, 0, 0) | 1<<21 4304 case ACMPW: 4305 return OPVCC(11, 0, 0, 0) /* L=0 */ 4306 case ACMPWU: 4307 return OPVCC(10, 0, 0, 0) 4308 case ALSW: 4309 return OPVCC(31, 597, 0, 0) 4310 4311 case AMULLW: 4312 return OPVCC(7, 0, 0, 0) 4313 4314 case AOR: 4315 return OPVCC(24, 0, 0, 0) 4316 case -AOR: 4317 return OPVCC(25, 0, 0, 0) /* ORIS/ORIU */ 4318 4319 case ARLWMI: 4320 return OPVCC(20, 0, 0, 0) /* rlwimi */ 4321 case ARLWMICC: 4322 return OPVCC(20, 0, 0, 1) 4323 case ARLDMI: 4324 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */ 4325 case ARLDMICC: 4326 return OPVCC(30, 0, 0, 1) | 3<<2 4327 case ARLDIMI: 4328 return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */ 4329 case ARLDIMICC: 4330 return OPVCC(30, 0, 0, 1) | 3<<2 4331 case ARLWNM: 4332 return OPVCC(21, 0, 0, 0) /* rlwinm */ 4333 case ARLWNMCC: 4334 return OPVCC(21, 0, 0, 1) 4335 4336 case ARLDCL: 4337 return OPVCC(30, 0, 0, 0) /* rldicl */ 4338 case ARLDCLCC: 4339 return OPVCC(30, 0, 0, 1) 4340 case ARLDCR: 4341 return OPVCC(30, 1, 0, 0) /* rldicr */ 4342 case ARLDCRCC: 4343 return OPVCC(30, 1, 0, 1) 4344 case ARLDC: 4345 return OPVCC(30, 0, 0, 0) | 2<<2 4346 case ARLDCCC: 4347 return OPVCC(30, 0, 0, 1) | 2<<2 4348 4349 case ASRAW: 4350 return OPVCC(31, 824, 0, 0) 4351 case ASRAWCC: 4352 return OPVCC(31, 824, 0, 1) 4353 case ASRAD: 4354 return OPVCC(31, (413 << 1), 0, 0) 4355 case ASRADCC: 4356 return OPVCC(31, (413 << 1), 0, 1) 4357 4358 case ASTSW: 4359 return OPVCC(31, 725, 0, 0) 4360 4361 case ASUBC: 4362 return OPVCC(8, 0, 0, 0) 4363 4364 case ATW: 4365 return OPVCC(3, 0, 0, 0) 4366 case ATD: 4367 return OPVCC(2, 0, 0, 0) 4368 4369 /* Vector (VMX/Altivec) instructions */ 4370 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4371 /* are enabled starting at POWER6 (ISA 2.05). 
*/ 4372 case AVSPLTB: 4373 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */ 4374 case AVSPLTH: 4375 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */ 4376 case AVSPLTW: 4377 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */ 4378 4379 case AVSPLTISB: 4380 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */ 4381 case AVSPLTISH: 4382 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */ 4383 case AVSPLTISW: 4384 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */ 4385 /* End of vector instructions */ 4386 4387 case AFTDIV: 4388 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */ 4389 case AFTSQRT: 4390 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */ 4391 4392 case AXOR: 4393 return OPVCC(26, 0, 0, 0) /* XORIL */ 4394 case -AXOR: 4395 return OPVCC(27, 0, 0, 0) /* XORIU */ 4396 } 4397 4398 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a) 4399 return 0 4400 } 4401 4402 /* 4403 * load o(a),d 4404 */ 4405 func (c *ctxt9) opload(a obj.As) uint32 { 4406 switch a { 4407 case AMOVD: 4408 return OPVCC(58, 0, 0, 0) /* ld */ 4409 case AMOVDU: 4410 return OPVCC(58, 0, 0, 1) /* ldu */ 4411 case AMOVWZ: 4412 return OPVCC(32, 0, 0, 0) /* lwz */ 4413 case AMOVWZU: 4414 return OPVCC(33, 0, 0, 0) /* lwzu */ 4415 case AMOVW: 4416 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */ 4417 4418 /* no AMOVWU */ 4419 case AMOVB, AMOVBZ: 4420 return OPVCC(34, 0, 0, 0) 4421 /* load */ 4422 4423 case AMOVBU, AMOVBZU: 4424 return OPVCC(35, 0, 0, 0) 4425 case AFMOVD: 4426 return OPVCC(50, 0, 0, 0) 4427 case AFMOVDU: 4428 return OPVCC(51, 0, 0, 0) 4429 case AFMOVS: 4430 return OPVCC(48, 0, 0, 0) 4431 case AFMOVSU: 4432 return OPVCC(49, 0, 0, 0) 4433 case AMOVH: 4434 return OPVCC(42, 0, 0, 0) 4435 case AMOVHU: 4436 return OPVCC(43, 0, 0, 0) 4437 case AMOVHZ: 4438 return OPVCC(40, 0, 0, 0) 4439 case AMOVHZU: 4440 return OPVCC(41, 0, 0, 0) 4441 case AMOVMW: 4442 return OPVCC(46, 0, 0, 0) /* lmw */ 4443 } 4444 4445 c.ctxt.Diag("bad load opcode %v", a) 4446 return 0 4447 } 4448 4449 /* 4450 * indexed load a(b),d 4451 */ 4452 func (c *ctxt9) oploadx(a obj.As) uint32 { 4453 switch a { 4454 case AMOVWZ: 4455 return OPVCC(31, 23, 0, 0) /* lwzx */ 4456 case AMOVWZU: 4457 return OPVCC(31, 55, 0, 0) /* lwzux */ 4458 case AMOVW: 4459 return OPVCC(31, 341, 0, 0) /* lwax */ 4460 case AMOVWU: 4461 return OPVCC(31, 373, 0, 0) /* lwaux */ 4462 4463 case AMOVB, AMOVBZ: 4464 return OPVCC(31, 87, 0, 0) /* lbzx */ 4465 4466 case AMOVBU, AMOVBZU: 4467 return OPVCC(31, 119, 0, 0) /* lbzux */ 4468 case AFMOVD: 4469 return OPVCC(31, 599, 0, 0) /* lfdx */ 4470 case AFMOVDU: 4471 return OPVCC(31, 631, 0, 0) /* lfdux */ 4472 case AFMOVS: 4473 return OPVCC(31, 535, 0, 0) /* lfsx */ 4474 case AFMOVSU: 4475 return OPVCC(31, 567, 0, 0) /* lfsux */ 4476 case AFMOVSX: 4477 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */ 4478 case AFMOVSZ: 4479 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */ 4480 case AMOVH: 4481 return OPVCC(31, 343, 0, 0) /* lhax */ 4482 case AMOVHU: 4483 return OPVCC(31, 375, 0, 0) /* lhaux */ 4484 case AMOVHBR: 4485 return OPVCC(31, 790, 0, 0) /* lhbrx */ 4486 case AMOVWBR: 4487 return OPVCC(31, 534, 0, 0) /* lwbrx */ 4488 case AMOVDBR: 4489 return OPVCC(31, 532, 0, 0) /* ldbrx */ 4490 case AMOVHZ: 4491 return OPVCC(31, 279, 0, 0) /* lhzx */ 4492 case AMOVHZU: 4493 return OPVCC(31, 311, 0, 0) /* lhzux */ 4494 case AECIWX: 4495 return OPVCC(31, 310, 0, 0) /* eciwx */ 4496 case ALBAR: 4497 return OPVCC(31, 52, 0, 0) /* lbarx */ 4498 case ALWAR: 4499 return OPVCC(31, 20, 0, 0) /* lwarx */ 4500 case ALDAR: 4501 return OPVCC(31, 84, 0, 0) 4502 case ALSW: 
4503 return OPVCC(31, 533, 0, 0) /* lswx */ 4504 case AMOVD: 4505 return OPVCC(31, 21, 0, 0) /* ldx */ 4506 case AMOVDU: 4507 return OPVCC(31, 53, 0, 0) /* ldux */ 4508 4509 /* Vector (VMX/Altivec) instructions */ 4510 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4511 /* are enabled starting at POWER6 (ISA 2.05). */ 4512 case ALVEBX: 4513 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */ 4514 case ALVEHX: 4515 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */ 4516 case ALVEWX: 4517 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */ 4518 case ALVX: 4519 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */ 4520 case ALVXL: 4521 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */ 4522 case ALVSL: 4523 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */ 4524 case ALVSR: 4525 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */ 4526 /* End of vector instructions */ 4527 4528 /* Vector scalar (VSX) instructions */ 4529 /* ISA 2.06 enables these for POWER7. */ 4530 case ALXVD2X: 4531 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */ 4532 case ALXVDSX: 4533 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */ 4534 case ALXVW4X: 4535 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */ 4536 4537 case ALXSDX: 4538 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */ 4539 4540 case ALXSIWAX: 4541 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */ 4542 case ALXSIWZX: 4543 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */ 4544 /* End of vector scalar instructions */ 4545 4546 } 4547 4548 c.ctxt.Diag("bad loadx opcode %v", a) 4549 return 0 4550 } 4551 4552 /* 4553 * store s,o(d) 4554 */ 4555 func (c *ctxt9) opstore(a obj.As) uint32 { 4556 switch a { 4557 case AMOVB, AMOVBZ: 4558 return OPVCC(38, 0, 0, 0) /* stb */ 4559 4560 case AMOVBU, AMOVBZU: 4561 return OPVCC(39, 0, 0, 0) /* stbu */ 4562 case AFMOVD: 4563 return OPVCC(54, 0, 0, 0) /* stfd */ 4564 case AFMOVDU: 4565 return OPVCC(55, 0, 0, 0) /* stfdu */ 4566 case AFMOVS: 4567 return OPVCC(52, 0, 0, 0) /* stfs */ 4568 case AFMOVSU: 4569 return OPVCC(53, 0, 0, 0) /* stfsu */ 4570 4571 case AMOVHZ, AMOVH: 4572 return OPVCC(44, 0, 0, 0) /* sth */ 4573 4574 case AMOVHZU, AMOVHU: 4575 return OPVCC(45, 0, 0, 0) /* sthu */ 4576 case AMOVMW: 4577 return OPVCC(47, 0, 0, 0) /* stmw */ 4578 case ASTSW: 4579 return OPVCC(31, 725, 0, 0) /* stswi */ 4580 4581 case AMOVWZ, AMOVW: 4582 return OPVCC(36, 0, 0, 0) /* stw */ 4583 4584 case AMOVWZU, AMOVWU: 4585 return OPVCC(37, 0, 0, 0) /* stwu */ 4586 case AMOVD: 4587 return OPVCC(62, 0, 0, 0) /* std */ 4588 case AMOVDU: 4589 return OPVCC(62, 0, 0, 1) /* stdu */ 4590 } 4591 4592 c.ctxt.Diag("unknown store opcode %v", a) 4593 return 0 4594 } 4595 4596 /* 4597 * indexed store s,a(b) 4598 */ 4599 func (c *ctxt9) opstorex(a obj.As) uint32 { 4600 switch a { 4601 case AMOVB, AMOVBZ: 4602 return OPVCC(31, 215, 0, 0) /* stbx */ 4603 4604 case AMOVBU, AMOVBZU: 4605 return OPVCC(31, 247, 0, 0) /* stbux */ 4606 case AFMOVD: 4607 return OPVCC(31, 727, 0, 0) /* stfdx */ 4608 case AFMOVDU: 4609 return OPVCC(31, 759, 0, 0) /* stfdux */ 4610 case AFMOVS: 4611 return OPVCC(31, 663, 0, 0) /* stfsx */ 4612 case AFMOVSU: 4613 return OPVCC(31, 695, 0, 0) /* stfsux */ 4614 case AFMOVSX: 4615 return OPVCC(31, 983, 0, 0) /* stfiwx */ 4616 4617 case AMOVHZ, AMOVH: 4618 return OPVCC(31, 407, 0, 0) /* sthx */ 4619 case AMOVHBR: 4620 return OPVCC(31, 918, 0, 0) /* sthbrx */ 4621 4622 case AMOVHZU, AMOVHU: 4623 return OPVCC(31, 439, 0, 0) /* sthux */ 4624 4625 case AMOVWZ, AMOVW: 4626 return OPVCC(31, 151, 0, 0) /* stwx */ 4627 4628 case AMOVWZU, AMOVWU: 4629 return OPVCC(31, 183, 0, 0) 
/* stwux */ 4630 case ASTSW: 4631 return OPVCC(31, 661, 0, 0) /* stswx */ 4632 case AMOVWBR: 4633 return OPVCC(31, 662, 0, 0) /* stwbrx */ 4634 case ASTBCCC: 4635 return OPVCC(31, 694, 0, 1) /* stbcx. */ 4636 case ASTWCCC: 4637 return OPVCC(31, 150, 0, 1) /* stwcx. */ 4638 case ASTDCCC: 4639 return OPVCC(31, 214, 0, 1) /* stdcx. */ 4640 case AECOWX: 4641 return OPVCC(31, 438, 0, 0) /* ecowx */ 4642 case AMOVD: 4643 return OPVCC(31, 149, 0, 0) /* stdx */ 4644 case AMOVDU: 4645 return OPVCC(31, 181, 0, 0) /* stdux */ 4646 4647 /* Vector (VMX/Altivec) instructions */ 4648 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ 4649 /* are enabled starting at POWER6 (ISA 2.05). */ 4650 case ASTVEBX: 4651 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */ 4652 case ASTVEHX: 4653 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */ 4654 case ASTVEWX: 4655 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */ 4656 case ASTVX: 4657 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */ 4658 case ASTVXL: 4659 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */ 4660 /* End of vector instructions */ 4661 4662 /* Vector scalar (VSX) instructions */ 4663 /* ISA 2.06 enables these for POWER7. */ 4664 case ASTXVD2X: 4665 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */ 4666 case ASTXVW4X: 4667 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */ 4668 4669 case ASTXSDX: 4670 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */ 4671 4672 case ASTXSIWX: 4673 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */ 4674 /* End of vector scalar instructions */ 4675 4676 } 4677 4678 c.ctxt.Diag("unknown storex opcode %v", a) 4679 return 0 4680 }