// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import "strings"

// Notes:
//  - Less-than-64-bit integer types live in the low portion of registers.
//    For now, the upper portion is junk; sign/zero-extension might be optimized in the future, but not yet.
//  - Boolean types are zero or 1; stored in a byte, but loaded with AMOVBZ so the upper bytes of a register are zero.
//  - *const instructions may use a constant larger than the instruction can encode.
//    In this case the assembler expands to multiple instructions and uses tmp
//    register (R31).

var regNamesPPC64 = []string{
	"R0", // REGZERO, not used, but simplifies counting in regalloc
	"SP", // REGSP
	"SB", // REGSB
	"R3",
	"R4",
	"R5",
	"R6",
	"R7",
	"R8",
	"R9",
	"R10",
	"R11", // REGCTXT for closures
	"R12",
	"R13", // REGTLS
	"R14",
	"R15",
	"R16",
	"R17",
	"R18",
	"R19",
	"R20",
	"R21",
	"R22",
	"R23",
	"R24",
	"R25",
	"R26",
	"R27",
	"R28",
	"R29",
	"g",   // REGG. Using name "g" and setting Config.hasGReg makes it "just happen".
	"R31", // REGTMP

	"F0",
	"F1",
	"F2",
	"F3",
	"F4",
	"F5",
	"F6",
	"F7",
	"F8",
	"F9",
	"F10",
	"F11",
	"F12",
	"F13",
	"F14",
	"F15",
	"F16",
	"F17",
	"F18",
	"F19",
	"F20",
	"F21",
	"F22",
	"F23",
	"F24",
	"F25",
	"F26",
	"F27",
	"F28",
	"F29",
	"F30",
	"F31",

	// "CR0",
	// "CR1",
	// "CR2",
	// "CR3",
	// "CR4",
	// "CR5",
	// "CR6",
	// "CR7",

	// "CR",
	// "XER",
	// "LR",
	// "CTR",
}

func init() {
	// Make map from reg names to reg integers.
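	// Each register's index in regNamesPPC64 is also its bit position in the
	// 64-bit regMask built by buildReg below, which is why the number of
	// names is capped at 64 (for example, "R3" has index 3, so
	// buildReg("R3") == 1<<3).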
	if len(regNamesPPC64) > 64 {
		panic("too many registers")
	}
	num := map[string]int{}
	for i, name := range regNamesPPC64 {
		num[name] = i
	}
	buildReg := func(s string) regMask {
		m := regMask(0)
		for _, r := range strings.Split(s, " ") {
			if n, ok := num[r]; ok {
				m |= regMask(1) << uint(n)
				continue
			}
			panic("register " + r + " not found")
		}
		return m
	}

	var (
		gp = buildReg("R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29")
		fp = buildReg("F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26")
		sp = buildReg("SP")
		sb = buildReg("SB")
		gr = buildReg("g")
		// cr  = buildReg("CR")
		// ctr = buildReg("CTR")
		// lr  = buildReg("LR")
		tmp     = buildReg("R31")
		ctxt    = buildReg("R11")
		callptr = buildReg("R12")
		// tls = buildReg("R13")
		gp01        = regInfo{inputs: nil, outputs: []regMask{gp}}
		gp11        = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
		gp21        = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
		gp1cr       = regInfo{inputs: []regMask{gp | sp | sb}}
		gp2cr       = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
		crgp        = regInfo{inputs: nil, outputs: []regMask{gp}}
		gpload      = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
		gpstore     = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
		gpstorezero = regInfo{inputs: []regMask{gp | sp | sb}} // ppc64.REGZERO is reserved zero value
		gpxchg      = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
		gpcas       = regInfo{inputs: []regMask{gp | sp | sb, gp, gp}, outputs: []regMask{gp}}
		fp01        = regInfo{inputs: nil, outputs: []regMask{fp}}
		fp11        = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
		fpgp        = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
		gpfp        = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
		fp21        = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
		fp31        = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
		fp2cr       = regInfo{inputs: []regMask{fp, fp}}
		fpload      = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{fp}}
		fpstore     = regInfo{inputs: []regMask{gp | sp | sb, fp}}
		callerSave  = regMask(gp | fp | gr)
	)
	ops := []opData{
		{name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true},     // arg0 + arg1
		{name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"},     // arg0 + auxInt
		{name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true},   // arg0+arg1
		{name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1
		{name: "SUB", argLength: 2, reg: gp21, asm: "SUB"},                        // arg0-arg1
		{name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"},                      // arg0-arg1
		{name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"},                    // arg0-arg1

		{name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit)
		{name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit)

		{name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", commutative: true},   // (arg0 * arg1) >> 64, signed
		{name: "MULHW", argLength: 2, reg: gp21, asm: "MULHW", commutative: true},   // (arg0 * arg1) >> 32, signed
		{name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", commutative: true}, // (arg0 * arg1) >> 64, unsigned
		{name: "MULHWU", argLength: 2, reg: gp21, asm: "MULHWU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
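		// The MULH* ops above yield only the high half of the double-width
		// product; the generic hmul operations are presumably lowered to them
		// by the rewrite rules (not part of this file).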

		{name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true},   // arg0*arg1
		{name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1

		{name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD"},   // arg0*arg1 + arg2
		{name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // arg0*arg1 + arg2
		{name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB"},   // arg0*arg1 - arg2
		{name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // arg0*arg1 - arg2

		{name: "SRAD", argLength: 2, reg: gp21, asm: "SRAD"}, // arg0 >>a arg1, 64 bits (all sign if arg1 & 64 != 0)
		{name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // arg0 >>a arg1, 32 bits (all sign if arg1 & 32 != 0)
		{name: "SRD", argLength: 2, reg: gp21, asm: "SRD"},   // arg0 >> arg1, 64 bits (0 if arg1 & 64 != 0)
		{name: "SRW", argLength: 2, reg: gp21, asm: "SRW"},   // arg0 >> arg1, 32 bits (0 if arg1 & 32 != 0)
		{name: "SLD", argLength: 2, reg: gp21, asm: "SLD"},   // arg0 << arg1, 64 bits (0 if arg1 & 64 != 0)
		{name: "SLW", argLength: 2, reg: gp21, asm: "SLW"},   // arg0 << arg1, 32 bits (0 if arg1 & 32 != 0)

		{name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"},   // arg0 rotate left by arg1 mod 64
		{name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32

		{name: "ADDconstForCarry", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, aux: "Int16", asm: "ADDC", typ: "Flags"}, // _, carry := arg0 + aux
		{name: "MaskIfNotCarry", argLength: 1, reg: crgp, asm: "ADDME", typ: "Int64"},                                                                  // carry - 1 (if carry then 0 else -1)

		{name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // arg0 >>a aux, 64 bits
		{name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int64"}, // arg0 >>a aux, 32 bits
		{name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"},   // arg0 >> aux, 64 bits
		{name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int64"},   // arg0 >> aux, 32 bits
		{name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"},   // arg0 << aux, 64 bits
		{name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int64"},   // arg0 << aux, 32 bits

		{name: "ROTLconst", argLength: 1, reg: gp11, asm: "ROTL", aux: "Int64"},   // arg0 rotate left by auxInt bits
		{name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits

		{name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD", clobberFlags: true}, // count leading zeros
		{name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW", clobberFlags: true}, // count leading zeros (32 bit)

		{name: "POPCNTD", argLength: 1, reg: gp11, asm: "POPCNTD"}, // number of set bits in arg0
		{name: "POPCNTW", argLength: 1, reg: gp11, asm: "POPCNTW"}, // number of set bits in each word of arg0 placed in corresponding word
		{name: "POPCNTB", argLength: 1, reg: gp11, asm: "POPCNTB"}, // number of set bits in each byte of arg0 placed in corresponding byte

		{name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV"},   // arg0/arg1
		{name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0/arg1

		{name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", typ: "Int64"},   // arg0/arg1 (signed 64-bit)
		{name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"},   // arg0/arg1 (signed 32-bit)
		{name: "DIVDU", argLength: 2, reg: gp21, asm: "DIVDU", typ: "Int64"}, // arg0/arg1 (unsigned 64-bit)
		{name: "DIVWU", argLength: 2, reg: gp21, asm: "DIVWU", typ: "Int32"}, // arg0/arg1 (unsigned 32-bit)

		// MOD is implemented as rem := arg0 - (arg0/arg1) * arg1
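		// For example, a signed 64-bit x % y is expected to be rewritten
		// along the lines of (SUB x (MULLD (DIVD x y) y)) using the ops
		// above; there is no dedicated remainder op here.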

		// Conversions are all float-to-float register operations. "Integer" refers to encoding in the FP register.
		{name: "FCTIDZ", argLength: 1, reg: fp11, asm: "FCTIDZ", typ: "Float64"}, // convert float to 64-bit int round towards zero
		{name: "FCTIWZ", argLength: 1, reg: fp11, asm: "FCTIWZ", typ: "Float64"}, // convert float to 32-bit int round towards zero
		{name: "FCFID", argLength: 1, reg: fp11, asm: "FCFID", typ: "Float64"},   // convert 64-bit integer to float
		{name: "FCFIDS", argLength: 1, reg: fp11, asm: "FCFIDS", typ: "Float32"}, // convert 32-bit integer to float
		{name: "FRSP", argLength: 1, reg: fp11, asm: "FRSP", typ: "Float64"},     // round float to 32-bit value

		// Movement between float and integer registers with no change in bits; accomplished with stores+loads on PPC.
		// Because the 32-bit load-literal-bits instructions have impoverished addressability, always widen the
		// data and use FMOVDload and FMOVDstore instead (this also dodges endianness issues).
		// There are optimizations that should apply -- (Xi2f64 (MOVWload (not-ADD-ptr+offset))) could use
		// the word-load instructions, and (Xi2f64 (MOVDload ptr)) can be (FMOVDload ptr).

		{name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"},   // move 64 bits of F register into G register
		{name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register

		{name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true},               // arg0&arg1
		{name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"},                                // arg0&^arg1
		{name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true},                 // arg0|arg1
		{name: "ORN", argLength: 2, reg: gp21, asm: "ORN"},                                  // arg0|^arg1
		{name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true},               // ^(arg0|arg1)
		{name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1
		{name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1
		{name: "NEG", argLength: 1, reg: gp11, asm: "NEG"},                                  // -arg0 (integer)
		{name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"},                                // -arg0 (floating point)
		{name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"},                              // sqrt(arg0) (floating point)
		{name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"},                            // sqrt(arg0) (floating point, single precision)
		{name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"},                              // floor(arg0), float64
		{name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"},                               // ceil(arg0), float64
		{name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"},                              // trunc(arg0), float64

		{name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"},                                                                                     // arg0|aux
		{name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"},                                                                                   // arg0^aux
		{name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true}, // arg0&aux // and-immediate sets CC on PPC, always.
		{name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}}, asm: "ANDCC", aux: "Int64", typ: "Flags"},                             // arg0&aux == 0 // and-immediate sets CC on PPC, always.
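		// Both ops above assemble to ANDCC: ANDconst keeps the AND result and
		// merely records that CC is clobbered, while ANDCCconst is used when
		// only the flags result is wanted.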

		{name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"},   // sign extend int8 to int64
		{name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64
		{name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH", typ: "Int64"},   // sign extend int16 to int64
		{name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ", typ: "Int64"}, // zero extend uint16 to uint64
		{name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW", typ: "Int64"},   // sign extend int32 to int64
		{name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ", typ: "Int64"}, // zero extend uint32 to uint64
		{name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"},  // zero extend uint8 to uint64
		{name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"},    // sign extend int16 to int64
		{name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // zero extend uint16 to uint64
		{name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"},    // sign extend int32 to int64
		{name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // zero extend uint32 to uint64
		{name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"},    // load 8 bytes from arg0+auxInt+aux. arg1=mem

		{name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load float64 from arg0+auxInt+aux. arg1=mem
		{name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load float32 from arg0+auxInt+aux. arg1=mem
		{name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},    // store byte of arg1 to arg0+auxInt+aux. arg2=mem
		{name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},    // store 2 bytes of arg1 to ...
		{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},    // store 4 bytes of arg1 to ...
		{name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},    // store 8 bytes of arg1 to ...
		{name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // store float64 arg1 to ...
		{name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"},  // store float32 arg1 to ...

		{name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero byte to arg0+aux. arg1=mem
		{name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 2 bytes to ...
		{name: "MOVWstorezero", argLength: 2, reg: gpstorezero, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 4 bytes to ...
		{name: "MOVDstorezero", argLength: 2, reg: gpstorezero, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 8 bytes to ...
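		// The *storezero ops above take no value argument: they store the
		// hardwired zero register (R0/REGZERO, see gpstorezero) to
		// arg0+auxInt+aux.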

		{name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{sp | sb}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB

		{name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "Int64", rematerializeable: true},
		{name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", rematerializeable: true},
		{name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float32", asm: "FMOVS", rematerializeable: true},
		{name: "FCMPU", argLength: 2, reg: fp2cr, asm: "FCMPU", typ: "Flags"},

		{name: "CMP", argLength: 2, reg: gp2cr, asm: "CMP", typ: "Flags"},     // arg0 compare to arg1
		{name: "CMPU", argLength: 2, reg: gp2cr, asm: "CMPU", typ: "Flags"},   // arg0 compare to arg1
		{name: "CMPW", argLength: 2, reg: gp2cr, asm: "CMPW", typ: "Flags"},   // arg0 compare to arg1
		{name: "CMPWU", argLength: 2, reg: gp2cr, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1
		{name: "CMPconst", argLength: 1, reg: gp1cr, asm: "CMP", aux: "Int64", typ: "Flags"},
		{name: "CMPUconst", argLength: 1, reg: gp1cr, asm: "CMPU", aux: "Int64", typ: "Flags"},
		{name: "CMPWconst", argLength: 1, reg: gp1cr, asm: "CMPW", aux: "Int32", typ: "Flags"},
		{name: "CMPWUconst", argLength: 1, reg: gp1cr, asm: "CMPWU", aux: "Int32", typ: "Flags"},

		// pseudo-ops
		{name: "Equal", argLength: 1, reg: crgp},         // bool; true if flags encode x==y, false otherwise.
		{name: "NotEqual", argLength: 1, reg: crgp},      // bool; true if flags encode x!=y, false otherwise.
		{name: "LessThan", argLength: 1, reg: crgp},      // bool; true if flags encode x<y, false otherwise.
		{name: "FLessThan", argLength: 1, reg: crgp},     // bool; true if flags encode x<y, false otherwise.
		{name: "LessEqual", argLength: 1, reg: crgp},     // bool; true if flags encode x<=y, false otherwise.
		{name: "FLessEqual", argLength: 1, reg: crgp},    // bool; true if flags encode x<=y, false otherwise; PPC <= === !> which is wrong for NaN
		{name: "GreaterThan", argLength: 1, reg: crgp},   // bool; true if flags encode x>y, false otherwise.
		{name: "FGreaterThan", argLength: 1, reg: crgp},  // bool; true if flags encode x>y, false otherwise.
		{name: "GreaterEqual", argLength: 1, reg: crgp},  // bool; true if flags encode x>=y, false otherwise.
		{name: "FGreaterEqual", argLength: 1, reg: crgp}, // bool; true if flags encode x>=y, false otherwise; PPC >= === !< which is wrong for NaN

		// Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
		// and sorts it to the very beginning of the block to prevent other
		// use of the closure pointer.
		{name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}},

		// LoweredGetCallerSP returns the SP of the caller of the current function.
		{name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},

		// arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
		{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},

		// Round ops to block fused-multiply-add extraction.
		{name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true},
		{name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true},
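		// In the two round ops above, resultInArg0 keeps the output in the
		// same register as the input, so they act purely as barriers against
		// the fused-multiply-add extraction mentioned above.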

		// Convert pointer to integer, takes a memory operand for ordering.
		{name: "MOVDconvert", argLength: 2, reg: gp11, asm: "MOVD"},

		{name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true, symEffect: "None"},                   // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
		{name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
		{name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true},            // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem

		// large or unaligned zeroing
		// arg0 = address of memory to zero (in R3, changed as side effect)
		// returns mem
		//
		// a loop is generated when there is more than one iteration
		// needed to clear 4 doublewords
		//
		//	MOVD	$len/32,R31
		//	MOVD	R31,CTR
		//	loop:
		//	MOVD	R0,(R3)
		//	MOVD	R0,8(R3)
		//	MOVD	R0,16(R3)
		//	MOVD	R0,24(R3)
		//	ADD	R3,32
		//	BC	loop

		// remaining doubleword clears generated as needed
		//	MOVD	R0,(R3)
		//	MOVD	R0,8(R3)
		//	MOVD	R0,16(R3)
		//	MOVD	R0,24(R3)

		// one or more of these to clear remainder < 8 bytes
		//	MOVW	R0,n1(R3)
		//	MOVH	R0,n2(R3)
		//	MOVB	R0,n3(R3)
		{
			name:      "LoweredZero",
			aux:       "Int64",
			argLength: 2,
			reg: regInfo{
				inputs:   []regMask{buildReg("R3")},
				clobbers: buildReg("R3"),
			},
			clobberFlags:   true,
			typ:            "Mem",
			faultOnNilArg0: true,
		},
		// Loop code:
		//	MOVD	len/32,REG_TMP	only for loop
		//	MOVD	REG_TMP,CTR	only for loop
		//	loop:
		//	MOVD	(R4),R7
		//	MOVD	8(R4),R8
		//	MOVD	16(R4),R9
		//	MOVD	24(R4),R10
		//	ADD	R4,$32	only with loop
		//	MOVD	R7,(R3)
		//	MOVD	R8,8(R3)
		//	MOVD	R9,16(R3)
		//	MOVD	R10,24(R3)
		//	ADD	R3,$32	only with loop
		//	BC	16,0,loop	only with loop
		// Bytes not moved by this loop are moved
		// with a combination of the following instructions,
		// starting with the largest sizes and generating as
		// many as needed, using the appropriate offset value.
		//	MOVD	n(R4),R7
		//	MOVD	R7,n(R3)
		//	MOVW	n1(R4),R7
		//	MOVW	R7,n1(R3)
		//	MOVH	n2(R4),R7
		//	MOVH	R7,n2(R3)
		//	MOVB	n3(R4),R7
		//	MOVB	R7,n3(R3)
		{
			name:      "LoweredMove",
			aux:       "Int64",
			argLength: 3,
			reg: regInfo{
				inputs:   []regMask{buildReg("R3"), buildReg("R4")},
				clobbers: buildReg("R3 R4 R7 R8 R9 R10"),
			},
			clobberFlags:   true,
			typ:            "Mem",
			faultOnNilArg0: true,
			faultOnNilArg1: true,
		},

		{name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", faultOnNilArg0: true, hasSideEffects: true},
		{name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", faultOnNilArg0: true, hasSideEffects: true},

		{name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", clobberFlags: true, faultOnNilArg0: true},
		{name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", clobberFlags: true, faultOnNilArg0: true},
		{name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", clobberFlags: true, faultOnNilArg0: true},

		// atomic add32, 64
		//	SYNC
		//	LDAR	(Rarg0), Rout
		//	ADD	Rarg1, Rout
		//	STDCCC	Rout, (Rarg0)
		//	BNE	-3(PC)
		//	ISYNC
		// return new sum
		{name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
		{name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},

		// atomic exchange32, 64
		//	SYNC
		//	LDAR	(Rarg0), Rout
		//	STDCCC	Rarg1, (Rarg0)
		//	BNE	-2(PC)
		//	ISYNC
		// return old val
		{name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
		{name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},

		// atomic compare and swap.
		// arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
		// if *arg0 == arg1 {
		//	*arg0 = arg2
		//	return (true, memory)
		// } else {
		//	return (false, memory)
		// }
		//	SYNC
		//	LDAR	(Rarg0), Rtmp
		//	CMP	Rarg1, Rtmp
		//	BNE	3(PC)
		//	STDCCC	Rarg2, (Rarg0)
		//	BNE	-4(PC)
		//	CBNZ	Rtmp, -4(PC)
		//	CSET	EQ, Rout
		{name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
		{name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},

		// atomic 8 and/or.
		// *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero.
		//	LBAR	(Rarg0), Rtmp
		//	AND/OR	Rarg1, Rtmp
		//	STBCCC	Rtmp, (Rarg0), Rtmp
		//	BNE	Rtmp, -3(PC)
		{name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
		{name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},

		// (InvertFlags (CMP a b)) == (CMP b a)
		// So if we want (LessThan (CMP a b)) but we can't do that because a is a constant,
		// then we do (LessThan (InvertFlags (CMP b a))) instead.
		// Rewrites will convert this to (GreaterThan (CMP b a)).
		// InvertFlags is a pseudo-op which can't appear in assembly output.
		{name: "InvertFlags", argLength: 1}, // reverse direction of arg0

		// Constant flag values. For any comparison, there are 3 possible
		// outcomes: either the three from the signed total order (<,==,>)
		// or the three from the unsigned total order, depending on which
		// comparison operation was used (CMP or CMPU -- PPC is different from
		// the other architectures, which have a single comparison producing
		// both signed and unsigned comparison results.)

		// These ops are for temporary use by rewrite rules. They
		// cannot appear in the generated assembly.
		{name: "FlagEQ"}, // equal
		{name: "FlagLT"}, // signed < or unsigned <
		{name: "FlagGT"}, // signed > or unsigned >
	}

	blocks := []blockData{
		{name: "EQ"},
		{name: "NE"},
		{name: "LT"},
		{name: "LE"},
		{name: "GT"},
		{name: "GE"},
		{name: "FLT"},
		{name: "FLE"},
		{name: "FGT"},
		{name: "FGE"},
	}

	archs = append(archs, arch{
		name:            "PPC64",
		pkg:             "cmd/internal/obj/ppc64",
		genfile:         "../../ppc64/ssa.go",
		ops:             ops,
		blocks:          blocks,
		regnames:        regNamesPPC64,
		gpregmask:       gp,
		fpregmask:       fp,
		framepointerreg: int8(num["SP"]),
		linkreg:         -1, // not used
	})
}