github.com/rakyll/go@v0.0.0-20170216000551-64c02460d703/src/cmd/compile/internal/ssa/gen/AMD64Ops.go (about) 1 // Copyright 2015 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // +build ignore 6 7 package main 8 9 import "strings" 10 11 // Notes: 12 // - Integer types live in the low portion of registers. Upper portions are junk. 13 // - Boolean types use the low-order byte of a register. 0=false, 1=true. 14 // Upper bytes are junk. 15 // - Floating-point types live in the low natural slot of an sse2 register. 16 // Unused portions are junk. 17 // - We do not use AH,BH,CH,DH registers. 18 // - When doing sub-register operations, we try to write the whole 19 // destination register to avoid a partial-register write. 20 // - Unused portions of AuxInt (or the Val portion of ValAndOff) are 21 // filled by sign-extending the used portion. Users of AuxInt which interpret 22 // AuxInt as unsigned (e.g. shifts) must be careful. 23 24 // Suffixes encode the bit width of various instructions. 25 // Q (quad word) = 64 bit 26 // L (long word) = 32 bit 27 // W (word) = 16 bit 28 // B (byte) = 8 bit 29 30 // copied from ../../amd64/reg.go 31 var regNamesAMD64 = []string{ 32 "AX", 33 "CX", 34 "DX", 35 "BX", 36 "SP", 37 "BP", 38 "SI", 39 "DI", 40 "R8", 41 "R9", 42 "R10", 43 "R11", 44 "R12", 45 "R13", 46 "R14", 47 "R15", 48 "X0", 49 "X1", 50 "X2", 51 "X3", 52 "X4", 53 "X5", 54 "X6", 55 "X7", 56 "X8", 57 "X9", 58 "X10", 59 "X11", 60 "X12", 61 "X13", 62 "X14", 63 "X15", 64 65 // pseudo-registers 66 "SB", 67 } 68 69 func init() { 70 // Make map from reg names to reg integers. 71 if len(regNamesAMD64) > 64 { 72 panic("too many registers") 73 } 74 num := map[string]int{} 75 for i, name := range regNamesAMD64 { 76 num[name] = i 77 } 78 buildReg := func(s string) regMask { 79 m := regMask(0) 80 for _, r := range strings.Split(s, " ") { 81 if n, ok := num[r]; ok { 82 m |= regMask(1) << uint(n) 83 continue 84 } 85 panic("register " + r + " not found") 86 } 87 return m 88 } 89 90 // Common individual register masks 91 var ( 92 ax = buildReg("AX") 93 cx = buildReg("CX") 94 dx = buildReg("DX") 95 gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R14 R15") 96 fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15") 97 gpsp = gp | buildReg("SP") 98 gpspsb = gpsp | buildReg("SB") 99 callerSave = gp | fp 100 ) 101 // Common slices of register masks 102 var ( 103 gponly = []regMask{gp} 104 fponly = []regMask{fp} 105 ) 106 107 // Common regInfo 108 var ( 109 gp01 = regInfo{inputs: nil, outputs: gponly} 110 gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly} 111 gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly} 112 gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly} 113 gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly} 114 gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly} 115 gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly} 116 gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}} 117 gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}} 118 gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax} 119 120 gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}} 121 gp1flags = regInfo{inputs: []regMask{gpsp}} 122 flagsgp = regInfo{inputs: nil, outputs: gponly} 123 124 gp11flags = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}} 125 126 readflags = regInfo{inputs: nil, outputs: gponly} 127 flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}} 128 129 gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly} 130 gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly} 131 132 gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} 133 gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}} 134 gpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}} 135 gpstoreconstidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}} 136 gpstorexchg = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp}} 137 cmpxchg = regInfo{inputs: []regMask{gp, ax, gp, 0}, outputs: []regMask{gp, 0}, clobbers: ax} 138 139 fp01 = regInfo{inputs: nil, outputs: fponly} 140 fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly} 141 fpgp = regInfo{inputs: fponly, outputs: gponly} 142 gpfp = regInfo{inputs: gponly, outputs: fponly} 143 fp11 = regInfo{inputs: fponly, outputs: fponly} 144 fp2flags = regInfo{inputs: []regMask{fp, fp}} 145 146 fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly} 147 fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly} 148 149 fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}} 150 fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}} 151 ) 152 153 var AMD64ops = []opData{ 154 // fp ops 155 {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true}, // fp32 add 156 {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, // fp64 add 157 {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true}, // fp32 sub 158 {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true}, // fp64 sub 159 {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true}, // fp32 mul 160 {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, // fp64 mul 161 {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true}, // fp32 div 162 {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div 163 164 {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true}, // fp32 load 165 {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true}, // fp64 load 166 {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, // fp32 constant 167 {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, // fp64 constant 168 {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff"}, // fp32 load indexed by i 169 {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff"}, // fp32 load indexed by 4*i 170 {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff"}, // fp64 load indexed by i 171 {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff"}, // fp64 load indexed by 8*i 172 173 {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true}, // fp32 store 174 {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true}, // fp64 store 175 {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff"}, // fp32 indexed by i store 176 {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff"}, // fp32 indexed by 4i store 177 {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff"}, // fp64 indexed by i store 178 {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff"}, // fp64 indexed by 8i store 179 180 // binary ops 181 {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1 182 {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1 183 {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int64", typ: "UInt64", clobberFlags: true}, // arg0 + auxint 184 {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint 185 186 {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, // arg0 - arg1 187 {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1 188 {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 - auxint 189 {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint 190 191 {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 192 {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 193 {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint 194 {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint 195 196 {name: "HMULQ", argLength: 2, reg: gp21hmul, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width 197 {name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width 198 {name: "HMULW", argLength: 2, reg: gp21hmul, asm: "IMULW", clobberFlags: true}, // (arg0 * arg1) >> width 199 {name: "HMULB", argLength: 2, reg: gp21hmul, asm: "IMULB", clobberFlags: true}, // (arg0 * arg1) >> width 200 {name: "HMULQU", argLength: 2, reg: gp21hmul, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width 201 {name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width 202 {name: "HMULWU", argLength: 2, reg: gp21hmul, asm: "MULW", clobberFlags: true}, // (arg0 * arg1) >> width 203 {name: "HMULBU", argLength: 2, reg: gp21hmul, asm: "MULB", clobberFlags: true}, // (arg0 * arg1) >> width 204 205 {name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 64 result bits 206 207 {name: "DIVQ", argLength: 2, reg: gp11div, typ: "(Int64,Int64)", asm: "IDIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] 208 {name: "DIVL", argLength: 2, reg: gp11div, typ: "(Int32,Int32)", asm: "IDIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] 209 {name: "DIVW", argLength: 2, reg: gp11div, typ: "(Int16,Int16)", asm: "IDIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] 210 {name: "DIVQU", argLength: 2, reg: gp11div, typ: "(UInt64,UInt64)", asm: "DIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] 211 {name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] 212 {name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1] 213 214 {name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo) 215 {name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r) 216 217 {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 218 {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 219 {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint 220 {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint 221 222 {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1 223 {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1 224 {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint 225 {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint 226 227 {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1 228 {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1 229 {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint 230 {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint 231 232 {name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1 233 {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1 234 {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1 235 {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1 236 {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint 237 {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint 238 {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint 239 {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint 240 241 {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32 242 {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64 243 244 {name: "TESTQ", argLength: 2, reg: gp2flags, asm: "TESTQ", typ: "Flags"}, // (arg0 & arg1) compare to 0 245 {name: "TESTL", argLength: 2, reg: gp2flags, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0 246 {name: "TESTW", argLength: 2, reg: gp2flags, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0 247 {name: "TESTB", argLength: 2, reg: gp2flags, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0 248 {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int64"}, // (arg0 & auxint) compare to 0 249 {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0 250 {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0 251 {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0 252 253 {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 64 254 {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32 255 {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-63 256 {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31 257 // Note: x86 is weird, the 16 and 8 byte shifts still use all 5 bits of shift amount! 258 259 {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 64 260 {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 261 {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 262 {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32 263 {name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-63 264 {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31 265 {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-15 266 {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-7 267 268 {name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64 269 {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 270 {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 271 {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32 272 {name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63 273 {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31 274 {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15 275 {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-7 276 277 {name: "ROLQconst", argLength: 1, reg: gp11, asm: "ROLQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-63 278 {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31 279 {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15 280 {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7 281 282 // unary ops 283 {name: "NEGQ", argLength: 1, reg: gp11, asm: "NEGQ", resultInArg0: true, clobberFlags: true}, // -arg0 284 {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0 285 286 {name: "NOTQ", argLength: 1, reg: gp11, asm: "NOTQ", resultInArg0: true, clobberFlags: true}, // ^arg0 287 {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true, clobberFlags: true}, // ^arg0 288 289 // BSF{L,Q} returns a tuple [result, flags] 290 // result is undefined if the input is zero. 291 // flags are set to "equal" if the input is zero, "not equal" otherwise. 292 {name: "BSFQ", argLength: 1, reg: gp11flags, asm: "BSFQ", typ: "(UInt64,Flags)"}, // # of low-order zeroes in 64-bit arg 293 {name: "BSFL", argLength: 1, reg: gp11flags, asm: "BSFL", typ: "(UInt32,Flags)"}, // # of low-order zeroes in 32-bit arg 294 295 // Note ASM for ops moves whole register 296 // 297 {name: "CMOVQEQ", argLength: 3, reg: gp21, asm: "CMOVQEQ", resultInArg0: true}, // if arg2 encodes "equal" return arg1 else arg0 298 {name: "CMOVLEQ", argLength: 3, reg: gp21, asm: "CMOVLEQ", resultInArg0: true}, // if arg2 encodes "equal" return arg1 else arg0 299 300 {name: "BSWAPQ", argLength: 1, reg: gp11, asm: "BSWAPQ", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes 301 {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes 302 303 {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0) 304 305 {name: "SBBQcarrymask", argLength: 1, reg: flagsgp, asm: "SBBQ"}, // (int64)(-1) if carry is set, 0 if carry is clear. 306 {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear. 307 // Note: SBBW and SBBB are subsumed by SBBL 308 309 {name: "SETEQ", argLength: 1, reg: readflags, asm: "SETEQ"}, // extract == condition from arg0 310 {name: "SETNE", argLength: 1, reg: readflags, asm: "SETNE"}, // extract != condition from arg0 311 {name: "SETL", argLength: 1, reg: readflags, asm: "SETLT"}, // extract signed < condition from arg0 312 {name: "SETLE", argLength: 1, reg: readflags, asm: "SETLE"}, // extract signed <= condition from arg0 313 {name: "SETG", argLength: 1, reg: readflags, asm: "SETGT"}, // extract signed > condition from arg0 314 {name: "SETGE", argLength: 1, reg: readflags, asm: "SETGE"}, // extract signed >= condition from arg0 315 {name: "SETB", argLength: 1, reg: readflags, asm: "SETCS"}, // extract unsigned < condition from arg0 316 {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0 317 {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0 318 {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0 319 // Need different opcodes for floating point conditions because 320 // any comparison involving a NaN is always FALSE and thus 321 // the patterns for inverting conditions cannot be used. 322 {name: "SETEQF", argLength: 1, reg: flagsgpax, asm: "SETEQ", clobberFlags: true}, // extract == condition from arg0 323 {name: "SETNEF", argLength: 1, reg: flagsgpax, asm: "SETNE", clobberFlags: true}, // extract != condition from arg0 324 {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (No Nan present) condition from arg0 325 {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (Nan present) condition from arg0 326 327 {name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0 328 {name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0 329 330 {name: "MOVBQSX", argLength: 1, reg: gp11, asm: "MOVBQSX"}, // sign extend arg0 from int8 to int64 331 {name: "MOVBQZX", argLength: 1, reg: gp11, asm: "MOVBLZX"}, // zero extend arg0 from int8 to int64 332 {name: "MOVWQSX", argLength: 1, reg: gp11, asm: "MOVWQSX"}, // sign extend arg0 from int16 to int64 333 {name: "MOVWQZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int64 334 {name: "MOVLQSX", argLength: 1, reg: gp11, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64 335 {name: "MOVLQZX", argLength: 1, reg: gp11, asm: "MOVL"}, // zero extend arg0 from int32 to int64 336 337 {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint 338 {name: "MOVQconst", reg: gp01, asm: "MOVQ", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint 339 340 {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32 341 {name: "CVTTSD2SQ", argLength: 1, reg: fpgp, asm: "CVTTSD2SQ"}, // convert float64 to int64 342 {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL"}, // convert float32 to int32 343 {name: "CVTTSS2SQ", argLength: 1, reg: fpgp, asm: "CVTTSS2SQ"}, // convert float32 to int64 344 {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS"}, // convert int32 to float32 345 {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD"}, // convert int32 to float64 346 {name: "CVTSQ2SS", argLength: 1, reg: gpfp, asm: "CVTSQ2SS"}, // convert int64 to float32 347 {name: "CVTSQ2SD", argLength: 1, reg: gpfp, asm: "CVTSQ2SD"}, // convert int64 to float64 348 {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32 349 {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64 350 351 {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation. 352 353 {name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true}, // arg0 + auxint + offset encoded in aux 354 {name: "LEAQ1", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + arg1 + auxint + aux 355 {name: "LEAQ2", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 2*arg1 + auxint + aux 356 {name: "LEAQ4", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 4*arg1 + auxint + aux 357 {name: "LEAQ8", argLength: 2, reg: gp21sb, aux: "SymOff"}, // arg0 + 8*arg1 + auxint + aux 358 // Note: LEAQ{1,2,4,8} must not have OpSB as either argument. 359 360 {name: "LEAL", argLength: 1, reg: gp11sb, asm: "LEAL", aux: "SymOff", rematerializeable: true}, // arg0 + auxint + offset encoded in aux 361 362 // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address 363 {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend. 364 {name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff", faultOnNilArg0: true}, // ditto, sign extend to int64 365 {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend. 366 {name: "MOVWQSXload", argLength: 2, reg: gpload, asm: "MOVWQSX", aux: "SymOff", faultOnNilArg0: true}, // ditto, sign extend to int64 367 {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend. 368 {name: "MOVLQSXload", argLength: 2, reg: gpload, asm: "MOVLQSX", aux: "SymOff", faultOnNilArg0: true}, // ditto, sign extend to int64 369 {name: "MOVQload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true}, // load 8 bytes from arg0+auxint+aux. arg1=mem 370 {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store byte in arg1 to arg0+auxint+aux. arg2=mem 371 {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem 372 {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem 373 {name: "MOVQstore", argLength: 3, reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem 374 {name: "MOVOload", argLength: 2, reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128", faultOnNilArg0: true}, // load 16 bytes from arg0+auxint+aux. arg1=mem 375 {name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem 376 377 // indexed loads/stores 378 {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, asm: "MOVBLZX", aux: "SymOff"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem 379 {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem 380 {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem 381 {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem 382 {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem 383 {name: "MOVQloadidx1", argLength: 3, reg: gploadidx, asm: "MOVQ", aux: "SymOff"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem 384 {name: "MOVQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVQ", aux: "SymOff"}, // load 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem 385 // TODO: sign-extending indexed loads 386 {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVB", aux: "SymOff"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem 387 {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem 388 {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem 389 {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem 390 {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem 391 {name: "MOVQstoreidx1", argLength: 4, reg: gpstoreidx, asm: "MOVQ", aux: "SymOff"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem 392 {name: "MOVQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVQ", aux: "SymOff"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem 393 // TODO: add size-mismatched indexed loads, like MOVBstoreidx4. 394 395 // For storeconst ops, the AuxInt field encodes both 396 // the value to store and an address offset of the store. 397 // Cast AuxInt to a ValAndOff to extract Val and Off fields. 398 {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem 399 {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true}, // store low 2 bytes of ... 400 {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true}, // store low 4 bytes of ... 401 {name: "MOVQstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVQ", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true}, // store 8 bytes of ... 402 403 {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVB", aux: "SymValAndOff", typ: "Mem"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+1*arg1+ValAndOff(AuxInt).Off()+aux. arg2=mem 404 {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem"}, // store low 2 bytes of ... arg1 ... 405 {name: "MOVWstoreconstidx2", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem"}, // store low 2 bytes of ... 2*arg1 ... 406 {name: "MOVLstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem"}, // store low 4 bytes of ... arg1 ... 407 {name: "MOVLstoreconstidx4", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem"}, // store low 4 bytes of ... 4*arg1 ... 408 {name: "MOVQstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVQ", aux: "SymValAndOff", typ: "Mem"}, // store 8 bytes of ... arg1 ... 409 {name: "MOVQstoreconstidx8", argLength: 3, reg: gpstoreconstidx, asm: "MOVQ", aux: "SymValAndOff", typ: "Mem"}, // store 8 bytes of ... 8*arg1 ... 410 411 // arg0 = pointer to start of memory to zero 412 // arg1 = value to store (will always be zero) 413 // arg2 = mem 414 // auxint = # of bytes to zero 415 // returns mem 416 { 417 name: "DUFFZERO", 418 aux: "Int64", 419 argLength: 3, 420 reg: regInfo{ 421 inputs: []regMask{buildReg("DI"), buildReg("X0")}, 422 clobbers: buildReg("DI"), 423 }, 424 clobberFlags: true, 425 faultOnNilArg0: true, 426 }, 427 {name: "MOVOconst", reg: regInfo{nil, 0, []regMask{fp}}, typ: "Int128", aux: "Int128", rematerializeable: true}, 428 429 // arg0 = address of memory to zero 430 // arg1 = # of 8-byte words to zero 431 // arg2 = value to store (will always be zero) 432 // arg3 = mem 433 // returns mem 434 { 435 name: "REPSTOSQ", 436 argLength: 4, 437 reg: regInfo{ 438 inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")}, 439 clobbers: buildReg("DI CX"), 440 }, 441 faultOnNilArg0: true, 442 }, 443 444 {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "SymOff", clobberFlags: true, call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem 445 {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem 446 {name: "CALLdefer", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call deferproc. arg0=mem, auxint=argsize, returns mem 447 {name: "CALLgo", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call newproc. arg0=mem, auxint=argsize, returns mem 448 {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "Int64", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem 449 450 // arg0 = destination pointer 451 // arg1 = source pointer 452 // arg2 = mem 453 // auxint = offset from duffcopy symbol to call 454 // returns memory 455 { 456 name: "DUFFCOPY", 457 aux: "Int64", 458 argLength: 3, 459 reg: regInfo{ 460 inputs: []regMask{buildReg("DI"), buildReg("SI")}, 461 clobbers: buildReg("DI SI X0"), // uses X0 as a temporary 462 }, 463 clobberFlags: true, 464 faultOnNilArg0: true, 465 faultOnNilArg1: true, 466 }, 467 468 // arg0 = destination pointer 469 // arg1 = source pointer 470 // arg2 = # of 8-byte words to copy 471 // arg3 = mem 472 // returns memory 473 { 474 name: "REPMOVSQ", 475 argLength: 4, 476 reg: regInfo{ 477 inputs: []regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")}, 478 clobbers: buildReg("DI SI CX"), 479 }, 480 faultOnNilArg0: true, 481 faultOnNilArg1: true, 482 }, 483 484 // (InvertFlags (CMPQ a b)) == (CMPQ b a) 485 // So if we want (SETL (CMPQ a b)) but we can't do that because a is a constant, 486 // then we do (SETL (InvertFlags (CMPQ b a))) instead. 487 // Rewrites will convert this to (SETG (CMPQ b a)). 488 // InvertFlags is a pseudo-op which can't appear in assembly output. 489 {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 490 491 // Pseudo-ops 492 {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem 493 // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, 494 // and sorts it to the very beginning of the block to prevent other 495 // use of DX (the closure pointer) 496 {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}}, 497 //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. 498 {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, 499 500 // MOVQconvert converts between pointers and integers. 501 // We have a special op for this so as to not confuse GC 502 // (particularly stack maps). It takes a memory arg so it 503 // gets correctly ordered with respect to GC safepoints. 504 // arg0=ptr/int arg1=mem, output=int/ptr 505 {name: "MOVQconvert", argLength: 2, reg: gp11, asm: "MOVQ"}, 506 {name: "MOVLconvert", argLength: 2, reg: gp11, asm: "MOVL"}, // amd64p32 equivalent 507 508 // Constant flag values. For any comparison, there are 5 possible 509 // outcomes: the three from the signed total order (<,==,>) and the 510 // three from the unsigned total order. The == cases overlap. 511 // Note: there's a sixth "unordered" outcome for floating-point 512 // comparisons, but we don't use such a beast yet. 513 // These ops are for temporary use by rewrite rules. They 514 // cannot appear in the generated assembly. 515 {name: "FlagEQ"}, // equal 516 {name: "FlagLT_ULT"}, // signed < and unsigned < 517 {name: "FlagLT_UGT"}, // signed < and unsigned > 518 {name: "FlagGT_UGT"}, // signed > and unsigned < 519 {name: "FlagGT_ULT"}, // signed > and unsigned > 520 521 // Atomic loads. These are just normal loads but return <value,memory> tuples 522 // so they can be properly ordered with other loads. 523 // load from arg0+auxint+aux. arg1=mem. 524 {name: "MOVLatomicload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", faultOnNilArg0: true}, 525 {name: "MOVQatomicload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", faultOnNilArg0: true}, 526 527 // Atomic stores and exchanges. Stores use XCHG to get the right memory ordering semantics. 528 // store arg0 to arg1+auxint+aux, arg2=mem. 529 // These ops return a tuple of <old contents of *(arg1+auxint+aux), memory>. 530 // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)! 531 {name: "XCHGL", argLength: 3, reg: gpstorexchg, asm: "XCHGL", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true}, 532 {name: "XCHGQ", argLength: 3, reg: gpstorexchg, asm: "XCHGQ", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true}, 533 534 // Atomic adds. 535 // *(arg1+auxint+aux) += arg0. arg2=mem. 536 // Returns a tuple of <old contents of *(arg1+auxint+aux), memory>. 537 // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)! 538 {name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true}, 539 {name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true}, 540 {name: "AddTupleFirst32", argLength: 2}, // arg0=tuple <x,y>. Returns <x+arg1,y>. 541 {name: "AddTupleFirst64", argLength: 2}, // arg0=tuple <x,y>. Returns <x+arg1,y>. 542 543 // Compare and swap. 544 // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. 545 // if *(arg0+auxint+aux) == arg1 { 546 // *(arg0+auxint+aux) = arg2 547 // return (true, memory) 548 // } else { 549 // return (false, memory) 550 // } 551 // Note that these instructions also return the old value in AX, but we ignore it. 552 // TODO: have these return flags instead of bool. The current system generates: 553 // CMPXCHGQ ... 554 // SETEQ AX 555 // CMPB AX, $0 556 // JNE ... 557 // instead of just 558 // CMPXCHGQ ... 559 // JEQ ... 560 // but we can't do that because memory-using ops can't generate flags yet 561 // (flagalloc wants to move flag-generating instructions around). 562 {name: "CMPXCHGLlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true}, 563 {name: "CMPXCHGQlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGQ", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true}, 564 565 // Atomic memory updates. 566 {name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true}, // *(arg0+auxint+aux) &= arg1 567 {name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true}, // *(arg0+auxint+aux) |= arg1 568 } 569 570 var AMD64blocks = []blockData{ 571 {name: "EQ"}, 572 {name: "NE"}, 573 {name: "LT"}, 574 {name: "LE"}, 575 {name: "GT"}, 576 {name: "GE"}, 577 {name: "ULT"}, 578 {name: "ULE"}, 579 {name: "UGT"}, 580 {name: "UGE"}, 581 {name: "EQF"}, 582 {name: "NEF"}, 583 {name: "ORD"}, // FP, ordered comparison (parity zero) 584 {name: "NAN"}, // FP, unordered comparison (parity one) 585 } 586 587 archs = append(archs, arch{ 588 name: "AMD64", 589 pkg: "cmd/internal/obj/x86", 590 genfile: "../../amd64/ssa.go", 591 ops: AMD64ops, 592 blocks: AMD64blocks, 593 regnames: regNamesAMD64, 594 gpregmask: gp, 595 fpregmask: fp, 596 framepointerreg: int8(num["BP"]), 597 linkreg: -1, // not used 598 }) 599 }