github.com/Rookout/GoSDK@v0.1.48/pkg/services/instrumentation/hooker/regbackup/regbackup_amd64.go

package regbackup

import (
	"fmt"
	"unsafe"

	"github.com/Rookout/GoSDK/pkg/rookoutErrors"
	"github.com/Rookout/GoSDK/pkg/services/assembler"
	"github.com/Rookout/GoSDK/pkg/services/assembler/common"
	"golang.org/x/arch/x86/x86asm"
)

// XMM holds the 128-bit contents of a single XMM register as two 64-bit halves.
type XMM struct {
	A uintptr
	B uintptr
}

// Backup is one register-backup slot. Lock doubles as the slot's ownership flag
// (zero means free); the remaining fields hold the saved general-purpose and XMM
// registers.
type Backup struct {
	Lock  uintptr
	RDI   uintptr
	RAX   uintptr
	RBX   uintptr
	RCX   uintptr
	RDX   uintptr
	RSI   uintptr
	R8    uintptr
	R9    uintptr
	R10   uintptr
	R11   uintptr
	XMM0  XMM
	XMM1  XMM
	XMM2  XMM
	XMM3  XMM
	XMM4  XMM
	XMM5  XMM
	XMM6  XMM
	XMM7  XMM
	XMM8  XMM
	XMM9  XMM
	XMM10 XMM
	XMM11 XMM
	XMM12 XMM
	XMM13 XMM
	XMM14 XMM
}

// smallRegToOffsetInBackup maps each saved 64-bit general-purpose register to its
// offset inside Backup.
var smallRegToOffsetInBackup = map[assembler.Reg]uintptr{
	x86asm.RDI: unsafe.Offsetof(Backup{}.RDI),
	x86asm.RAX: unsafe.Offsetof(Backup{}.RAX),
	x86asm.RBX: unsafe.Offsetof(Backup{}.RBX),
	x86asm.RCX: unsafe.Offsetof(Backup{}.RCX),
	x86asm.RDX: unsafe.Offsetof(Backup{}.RDX),
	x86asm.RSI: unsafe.Offsetof(Backup{}.RSI),
	x86asm.R8:  unsafe.Offsetof(Backup{}.R8),
	x86asm.R9:  unsafe.Offsetof(Backup{}.R9),
	x86asm.R10: unsafe.Offsetof(Backup{}.R10),
	x86asm.R11: unsafe.Offsetof(Backup{}.R11),
}

// bigRegToOffsetInBackup maps each saved XMM register to its offset inside Backup.
var bigRegToOffsetInBackup = map[assembler.Reg]uintptr{
	x86asm.X0:  unsafe.Offsetof(Backup{}.XMM0),
	x86asm.X1:  unsafe.Offsetof(Backup{}.XMM1),
	x86asm.X2:  unsafe.Offsetof(Backup{}.XMM2),
	x86asm.X3:  unsafe.Offsetof(Backup{}.XMM3),
	x86asm.X4:  unsafe.Offsetof(Backup{}.XMM4),
	x86asm.X5:  unsafe.Offsetof(Backup{}.XMM5),
	x86asm.X6:  unsafe.Offsetof(Backup{}.XMM6),
	x86asm.X7:  unsafe.Offsetof(Backup{}.XMM7),
	x86asm.X8:  unsafe.Offsetof(Backup{}.XMM8),
	x86asm.X9:  unsafe.Offsetof(Backup{}.XMM9),
	x86asm.X10: unsafe.Offsetof(Backup{}.XMM10),
	x86asm.X11: unsafe.Offsetof(Backup{}.XMM11),
	x86asm.X12: unsafe.Offsetof(Backup{}.XMM12),
	x86asm.X13: unsafe.Offsetof(Backup{}.XMM13),
	x86asm.X14: unsafe.Offsetof(Backup{}.XMM14),
}

// Stack slots reserved by generateBackupSlot, addressed relative to RSP after the
// SUB: the saved backup-slot address (R12) at RSP+0x0, and the goroutine's previous
// stack bounds at RSP+0x8 and RSP+0x10.
var prevStackHi = assembler.Mem{Base: x86asm.RSP, Disp: 0x10}
var prevStackLo = assembler.Mem{Base: x86asm.RSP, Disp: 0x8}
var r12Backup = assembler.Mem{Base: x86asm.RSP, Disp: 0x0}

// backupSlotAddrReg holds the address of the claimed Backup slot while the
// generated code runs.
const backupSlotAddrReg = x86asm.R12

// Generator emits the register backup and restore code placed around an
// instrumentation hook.
type Generator struct {
	stackUsage   int
	backupBuffer []Backup
	onFailLabel  string
	regsToUpdate []assembler.Reg
}

// NewGenerator creates a Generator that backs registers up into backupBuffer,
// jumps to onFailLabel when no free slot is available, and rebases regsToUpdate
// onto the goroutine's current stack during restore.
func NewGenerator(backupBuffer []Backup, onFailLabel string, regsToUpdate []assembler.Reg) *Generator {
	stackUsage := 0x8
	if len(regsToUpdate) > 0 {
		stackUsage += 0x10
	}

	return &Generator{
		backupBuffer: backupBuffer,
		stackUsage:   stackUsage,
		onFailLabel:  onFailLabel,
		regsToUpdate: regsToUpdate,
	}
}

// generateFindFreeBackupSlot emits a loop that scans the backup buffer for a free
// slot, claiming it by exchanging a non-zero index into its Lock field. On success
// it jumps to the "backupRegs" label with the slot address in R12; if every slot is
// taken it jumps to the generator's fail label.
func (g *Generator) generateFindFreeBackupSlot(b *assembler.Builder) rookoutErrors.RookoutError {
	backupBufferAddr := uintptr(unsafe.Pointer(&g.backupBuffer[0]))
	backupSlotIndexReg := x86asm.R13

	return b.AddInstructions(
		// R12 = &backupBuffer[0], R13 = 1-based slot index.
		b.Inst(assembler.AMOVQ, backupSlotAddrReg, x86asm.Imm(backupBufferAddr)),
		b.Inst(assembler.AMOVQ, backupSlotIndexReg, x86asm.Imm(1)),
		b.Label("findFreeBackupLoop"),
		b.Cmp(backupSlotIndexReg, x86asm.Imm(len(g.backupBuffer))),
		// Out of slots: bail out to the fail label.
		b.BranchToLabel(assembler.AJGT, g.onFailLabel),
		// Atomically swap the index into the slot's Lock field.
		b.Inst(assembler.AXCHGQ, backupSlotIndexReg, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(unsafe.Offsetof(Backup{}.Lock))}),
		// A zero result means the slot was free and is now ours.
		b.Cmp(backupSlotIndexReg, x86asm.Imm(0)),
		b.BranchToLabel(assembler.AJEQ, "backupRegs"),
		// Otherwise advance to the next slot and keep looking.
		b.Inst(assembler.AADDQ, backupSlotAddrReg, x86asm.Imm(unsafe.Sizeof(Backup{}))),
		b.Inst(assembler.AADDQ, backupSlotIndexReg, x86asm.Imm(1)),
		b.BranchToLabel(assembler.AJMP, "findFreeBackupLoop"),
	)
}

// generateBackup emits the "backupRegs" block, storing every tracked
// general-purpose register (MOVQ) and XMM register (MOVUPS) into the claimed slot.
func (g *Generator) generateBackup(b *assembler.Builder) rookoutErrors.RookoutError {
	err := b.AddInstructions(
		b.Label("backupRegs"),
	)
	if err != nil {
		return err
	}

	for arg, offset := range smallRegToOffsetInBackup {
		err := b.AddInstructions(
			b.Inst(assembler.AMOVQ, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(offset)}, arg),
		)
		if err != nil {
			return err
		}
	}
	for arg, offset := range bigRegToOffsetInBackup {
		err := b.AddInstructions(
			b.Inst(assembler.AMOVUPS, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(offset)}, arg),
		)
		if err != nil {
			return err
		}
	}

	return nil
}

// generateBackupStackAddrs records the goroutine's stack bounds (stack.lo and
// stack.hi, read from the g struct) into the reserved stack slots so pointers into
// this stack can be rebased during restore.
func (g *Generator) generateBackupStackAddrs(b *assembler.Builder) rookoutErrors.RookoutError {
	return b.AddInstructions(
		common.MovGToR12(b),
		b.Inst(assembler.AMOVQ, x86asm.R13, assembler.Mem{Base: x86asm.R12, Disp: common.StackLoOffset}),
		b.Inst(assembler.AMOVQ, prevStackLo, x86asm.R13),
		b.Inst(assembler.AMOVQ, x86asm.R13, assembler.Mem{Base: x86asm.R12, Disp: common.StackHiOffset}),
		b.Inst(assembler.AMOVQ, prevStackHi, x86asm.R13),
	)
}

// GenerateRegBackup emits the full backup sequence: claim a slot, save the
// registers into it, reserve stack space and save the slot address there, and
// (when registers will need rebasing) remember the current stack bounds.
func (g *Generator) GenerateRegBackup(b *assembler.Builder) rookoutErrors.RookoutError {
	err := g.generateFindFreeBackupSlot(b)
	if err != nil {
		return err
	}
	err = g.generateBackup(b)
	if err != nil {
		return err
	}

	err = g.generateBackupSlot(b)
	if err != nil {
		return err
	}
	if len(g.regsToUpdate) > 0 {
		err = g.generateBackupStackAddrs(b)
		if err != nil {
			return err
		}
	}

	return nil
}

// generateRestore reloads every tracked general-purpose and XMM register from the
// backup slot addressed by R12.
func (g *Generator) generateRestore(b *assembler.Builder) rookoutErrors.RookoutError {
	for arg, offset := range smallRegToOffsetInBackup {
		err := b.AddInstructions(
			b.Inst(assembler.AMOVQ, arg, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(offset)}),
		)
		if err != nil {
			return err
		}
	}
	for arg, offset := range bigRegToOffsetInBackup {
		err := b.AddInstructions(
			b.Inst(assembler.AMOVUPS, arg, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(offset)}),
		)
		if err != nil {
			return err
		}
	}

	return nil
}

// generateReleaseBackupSlot frees the slot by exchanging zero back into its Lock
// field.
func (g *Generator) generateReleaseBackupSlot(b *assembler.Builder) rookoutErrors.RookoutError {
	return b.AddInstructions(
		b.Inst(assembler.AXORQ, x86asm.R13, x86asm.R13),
		b.Inst(assembler.AXCHGQ, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(unsafe.Offsetof(Backup{}.Lock))}, x86asm.R13),
	)
}

// GenerateRegRestore emits the full restore sequence: reload the slot address,
// restore the registers, release the slot, rebase any stack pointers held in
// regsToUpdate, and pop the stack space reserved by GenerateRegBackup.
func (g *Generator) GenerateRegRestore(b *assembler.Builder) rookoutErrors.RookoutError {
	err := g.generateRestoreSlot(b)
	if err != nil {
		return err
	}
	err = g.generateRestore(b)
	if err != nil {
		return err
	}
	err = g.generateReleaseBackupSlot(b)
	if err != nil {
		return err
	}
	err = g.generateRegUpdate(b)
	if err != nil {
		return err
	}
	err = b.AddInstructions(
		b.Inst(assembler.AADDQ, x86asm.RSP, assembler.Imm(g.stackUsage)),
	)
	if err != nil {
		return err
	}

	return nil
}

// generateBackupSlot reserves the stack space used by the generated code and saves
// the backup slot address (R12) into it, so the hook body may clobber R12.
func (g *Generator) generateBackupSlot(b *assembler.Builder) rookoutErrors.RookoutError {
	return b.AddInstructions(
		b.Inst(assembler.ASUBQ, x86asm.RSP, assembler.Imm(g.stackUsage)),
		b.Inst(assembler.AMOVQ, r12Backup, x86asm.R12),
	)
}

// generateRestoreSlot reloads the backup slot address from the stack into R12.
func (g *Generator) generateRestoreSlot(b *assembler.Builder) rookoutErrors.RookoutError {
	return b.AddInstructions(
		b.Inst(assembler.AMOVQ, x86asm.R12, r12Backup),
	)
}

// generateRegUpdate rebases each register in regsToUpdate that points into the
// previously recorded stack range [prevStackLo, prevStackHi] so it points at the
// equivalent location on the goroutine's current stack; registers outside that
// range are left untouched.
func (g *Generator) generateRegUpdate(b *assembler.Builder) rookoutErrors.RookoutError {
	for _, reg := range g.regsToUpdate {
		startLabel := fmt.Sprintf("%sBackupStart", reg.String())
		endLabel := fmt.Sprintf("%sBackupEnd", reg.String())
		err := b.AddInstructions(
			b.Label(startLabel),
			b.Cmp(reg, prevStackLo),
			b.BranchToLabel(assembler.AJLT, endLabel),
			b.Cmp(reg, prevStackHi),
			b.BranchToLabel(assembler.AJGT, endLabel),
			// reg = (reg - old stack.hi) + current stack.hi
			b.Inst(assembler.ASUBQ, reg, prevStackHi),
			common.MovGToR12(b),
			b.Inst(assembler.AADDQ, reg, assembler.Mem{Base: x86asm.R12, Disp: common.StackHiOffset}),
			b.Label(endLabel),
			b.PsuedoNop(),
		)
		if err != nil {
			return err
		}
	}

	return nil
}
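
For orientation, here is a minimal usage sketch; it is not part of regbackup_amd64.go. It relies only on the identifiers defined above (Backup, NewGenerator, GenerateRegBackup, GenerateRegRestore) plus the assembler and rookoutErrors imports. The helper name emitHookWithBackup, the "hookFailed" label, and the pool size are illustrative assumptions; the caller is assumed to obtain the *assembler.Builder elsewhere and to define the fail label that GenerateRegBackup jumps to when every slot is taken.

package regbackup

import (
	"github.com/Rookout/GoSDK/pkg/rookoutErrors"
	"github.com/Rookout/GoSDK/pkg/services/assembler"
)

// backupPool must outlive the emitted code: generateFindFreeBackupSlot bakes its
// address into the generated instructions as an immediate, so a short-lived local
// slice would not be safe here. Size 8 is an arbitrary illustrative choice.
var backupPool = make([]Backup, 8)

// emitHookWithBackup is a hypothetical helper (not part of the SDK) showing the
// intended call order: back up registers on entry, emit the hook body, then
// restore and release the slot on the way out.
func emitHookWithBackup(b *assembler.Builder, regsToUpdate []assembler.Reg) rookoutErrors.RookoutError {
	gen := NewGenerator(backupPool, "hookFailed", regsToUpdate)

	// Claim a backup slot and save the caller's registers (and stack bounds,
	// when registers will need rebasing after a stack copy).
	if err := gen.GenerateRegBackup(b); err != nil {
		return err
	}

	// ... the hook body would be emitted here ...

	// Restore the registers, release the slot, and pop the reserved stack space.
	return gen.GenerateRegRestore(b)
}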