github.com/Rookout/GoSDK@v0.1.48/pkg/services/instrumentation/hooker/regbackup/regbackup_arm64.go

package regbackup

import (
	"fmt"
	"unsafe"

	"github.com/Rookout/GoSDK/pkg/rookoutErrors"
	"github.com/Rookout/GoSDK/pkg/services/assembler"
	"github.com/Rookout/GoSDK/pkg/services/assembler/common"
	"golang.org/x/arch/arm64/arm64asm"
)

// Q holds the contents of a 128-bit SIMD (Q) register as two 64-bit halves.
type Q struct {
	A uintptr
	B uintptr
}

// Backup is a single register-backup slot. Lock is acquired atomically
// (non-zero means taken) before registers are saved into the slot.
type Backup struct {
	Lock uintptr
	X0   uintptr
	X1   uintptr
	X2   uintptr
	X3   uintptr
	X4   uintptr
	X5   uintptr
	X6   uintptr
	X7   uintptr
	X8   uintptr
	X9   uintptr
	X10  uintptr
	X11  uintptr
	X12  uintptr
	X13  uintptr
	X14  uintptr
	X15  uintptr
	X29  uintptr
	X30  uintptr
	_    uintptr // padding, so that the Q registers below start 16-byte aligned
	Q0   Q
	Q1   Q
	Q2   Q
	Q3   Q
	Q4   Q
	Q5   Q
	Q6   Q
	Q7   Q
	Q8   Q
	Q9   Q
	Q10  Q
	Q11  Q
	Q12  Q
	Q13  Q
	Q14  Q
	Q15  Q
}

// Stack slots (relative to SP after generateBackupSlot has pushed the frame)
// that hold the goroutine's stack bounds at backup time.
var prevStackLoStorage = assembler.Mem{Base: arm64asm.SP, Disp: 0x10}
var prevStackHiStorage = assembler.Mem{Base: arm64asm.SP, Disp: 0x18}

// X16/X17 hold the saved stack bounds while registers are being fixed up;
// X19 holds the address of the acquired backup slot.
const prevStackHiReg = arm64asm.X16
const prevStackLoReg = arm64asm.X17
const backupSlotAddrReg = arm64asm.X19

// smallRegRegToOffsetInBackup maps each general-purpose register pair
// (stored and loaded with STP/LDP) to its offset inside Backup.
var smallRegRegToOffsetInBackup = map[assembler.Arg]uintptr{
	assembler.RegReg(arm64asm.X0, arm64asm.X1):   unsafe.Offsetof(Backup{}.X0),
	assembler.RegReg(arm64asm.X2, arm64asm.X3):   unsafe.Offsetof(Backup{}.X2),
	assembler.RegReg(arm64asm.X4, arm64asm.X5):   unsafe.Offsetof(Backup{}.X4),
	assembler.RegReg(arm64asm.X6, arm64asm.X7):   unsafe.Offsetof(Backup{}.X6),
	assembler.RegReg(arm64asm.X8, arm64asm.X9):   unsafe.Offsetof(Backup{}.X8),
	assembler.RegReg(arm64asm.X10, arm64asm.X11): unsafe.Offsetof(Backup{}.X10),
	assembler.RegReg(arm64asm.X12, arm64asm.X13): unsafe.Offsetof(Backup{}.X12),
	assembler.RegReg(arm64asm.X14, arm64asm.X15): unsafe.Offsetof(Backup{}.X14),
	assembler.RegReg(arm64asm.X29, arm64asm.X30): unsafe.Offsetof(Backup{}.X29),
}

// bigRegRegToOffsetInBackup does the same for the Q (SIMD) register pairs.
var bigRegRegToOffsetInBackup = map[assembler.Arg]uintptr{
	assembler.RegReg(arm64asm.Q0, arm64asm.Q1):   unsafe.Offsetof(Backup{}.Q0),
	assembler.RegReg(arm64asm.Q2, arm64asm.Q3):   unsafe.Offsetof(Backup{}.Q2),
	assembler.RegReg(arm64asm.Q4, arm64asm.Q5):   unsafe.Offsetof(Backup{}.Q4),
	assembler.RegReg(arm64asm.Q6, arm64asm.Q7):   unsafe.Offsetof(Backup{}.Q6),
	assembler.RegReg(arm64asm.Q8, arm64asm.Q9):   unsafe.Offsetof(Backup{}.Q8),
	assembler.RegReg(arm64asm.Q10, arm64asm.Q11): unsafe.Offsetof(Backup{}.Q10),
	assembler.RegReg(arm64asm.Q12, arm64asm.Q13): unsafe.Offsetof(Backup{}.Q12),
	assembler.RegReg(arm64asm.Q14, arm64asm.Q15): unsafe.Offsetof(Backup{}.Q14),
}

// Generator emits the assembly that backs up and restores registers around a hook.
type Generator struct {
	stackUsage   int
	backupBuffer []Backup
	onFailLabel  string
	regsToUpdate []assembler.Reg
}

func NewGenerator(backupBuffer []Backup, onFailLabel string, regsToUpdate []assembler.Reg) *Generator {
	stackUsage := 0x20
	if len(regsToUpdate) > 0 {
		stackUsage += 0x10 // reserve extra frame space when the stack bounds also need to be saved
	}

	return &Generator{
		backupBuffer: backupBuffer,
		stackUsage:   stackUsage,
		onFailLabel:  onFailLabel,
		regsToUpdate: regsToUpdate,
	}
}

// generateFindFreeBackupSlot emits a loop that scans backupBuffer for a free
// slot, acquires it atomically and leaves its address in backupSlotAddrReg.
// If every slot is taken, it branches to onFailLabel.
func (g *Generator) generateFindFreeBackupSlot(b *assembler.Builder) rookoutErrors.RookoutError {
	backupBufferAddr := uintptr(unsafe.Pointer(&g.backupBuffer[0]))
	backupSlotIndexReg := arm64asm.X20

	return b.AddInstructions(
		// Start at the first slot.
		b.Inst(assembler.AMOVD, backupSlotAddrReg, assembler.Imm(uint64(backupBufferAddr))),
		// Indices are 1-based so that a taken slot's Lock is never zero.
		b.Inst(assembler.AMOVD, backupSlotIndexReg, assembler.Imm(1)),

		b.Label("findFreeBackupLoop"),
		b.Cmp(backupSlotIndexReg, assembler.Imm(uint64(len(g.backupBuffer)))),
		// Past the last slot - nothing is free, give up.
		b.BranchToLabel(assembler.ABGT, g.onFailLabel),
		// Atomically swap our index into the slot's Lock, receiving the previous value.
		b.Swpal(backupSlotIndexReg, backupSlotIndexReg, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(unsafe.Offsetof(Backup{}.Lock))}),
		// The previous value was zero - the slot was free and is now ours.
		b.BranchToLabel(assembler.ACBZ, "backupRegs", backupSlotIndexReg),

		// The slot is taken; advance to the next one. (A taken slot's Lock holds the
		// index its owner swapped in, which equals our loop index, so the index
		// register still counts correctly after the swap.)
		b.Inst(assembler.AADD, backupSlotAddrReg, assembler.Imm(uint64(unsafe.Sizeof(g.backupBuffer[0])))),
		// Move to the next index and try again.
		b.Inst(assembler.AADD, backupSlotIndexReg, assembler.Imm(1)),
		b.BranchToLabel(assembler.AJMP, "findFreeBackupLoop"),
	)
}

// generateBackup emits code that stores every backed-up register pair into the
// acquired slot (whose address is in backupSlotAddrReg).
func (g *Generator) generateBackup(b *assembler.Builder) rookoutErrors.RookoutError {
	err := b.AddInstructions(
		b.Label("backupRegs"),
	)
	if err != nil {
		return err
	}

	// Store the general-purpose register pairs.
	for arg, offset := range smallRegRegToOffsetInBackup {
		err := b.AddInstructions(
			b.Inst(assembler.ASTP, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(offset)}, arg),
		)
		if err != nil {
			return err
		}
	}
	// Store the Q (SIMD) register pairs.
	for arg, offset := range bigRegRegToOffsetInBackup {
		err := b.AddInstructions(
			b.Inst(assembler.AFSTPQ, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(offset)}, arg),
		)
		if err != nil {
			return err
		}
	}

	return nil
}

// generateBackupStackAddrs emits code that loads the current goroutine's stack
// bounds (g.stack.lo/hi) and saves them on the frame, so that registers pointing
// into the stack can be rebased later if the stack moves.
func (g *Generator) generateBackupStackAddrs(b *assembler.Builder) rookoutErrors.RookoutError {
	return b.AddInstructions(
		common.MovGToX20(b),
		b.Inst(assembler.AMOVD, arm64asm.X19, assembler.Mem{Base: arm64asm.X20, Disp: common.StackLoOffset}),
		b.Inst(assembler.AMOVD, prevStackLoStorage, arm64asm.X19),
		b.Inst(assembler.AMOVD, arm64asm.X19, assembler.Mem{Base: arm64asm.X20, Disp: common.StackHiOffset}),
		b.Inst(assembler.AMOVD, prevStackHiStorage, arm64asm.X19),
	)
}

// GenerateRegBackup emits the full backup sequence: acquire a free slot, copy
// all registers into it, push the backup frame, and, when registers will need
// fixing up later, record the current stack bounds.
func (g *Generator) GenerateRegBackup(b *assembler.Builder) rookoutErrors.RookoutError {
	err := g.generateFindFreeBackupSlot(b)
	if err != nil {
		return err
	}
	err = g.generateBackup(b)
	if err != nil {
		return err
	}

	err = g.generateBackupSlot(b)
	if err != nil {
		return err
	}
	if len(g.regsToUpdate) > 0 {
		err = g.generateBackupStackAddrs(b)
		if err != nil {
			return err
		}
	}

	return nil
}

// generateRestore emits code that loads every register pair back from the backup slot.
func (g *Generator) generateRestore(b *assembler.Builder) rookoutErrors.RookoutError {
	for arg, offset := range smallRegRegToOffsetInBackup {
		err := b.AddInstructions(
			b.Inst(assembler.ALDP, arg, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(offset)}),
		)
		if err != nil {
			return err
		}
	}
	for arg, offset := range bigRegRegToOffsetInBackup {
		err := b.AddInstructions(
			b.Inst(assembler.AFLDPQ, arg, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(offset)}),
		)
		if err != nil {
			return err
		}
	}

	return nil
}

// generateReleaseBackupSlot emits code that atomically clears the slot's Lock,
// returning the slot to the pool.
func (g *Generator) generateReleaseBackupSlot(b *assembler.Builder) rookoutErrors.RookoutError {
	return b.AddInstructions(
		b.Swpal(assembler.RegZero, assembler.RegZero, assembler.Mem{Base: backupSlotAddrReg, Disp: int64(unsafe.Offsetof(Backup{}.Lock))}),
	)
}

// GenerateRegRestore emits the full restore sequence: reload the saved stack
// bounds (if needed), pop the backup frame, restore all registers from the
// slot, release the slot, and rebase any registers that point into a stack
// that may have moved.
func (g *Generator) GenerateRegRestore(b *assembler.Builder) rookoutErrors.RookoutError {
	if len(g.regsToUpdate) > 0 {
		err := b.AddInstructions(
			b.Inst(assembler.AMOVD, prevStackLoReg, prevStackLoStorage),
			b.Inst(assembler.AMOVD, prevStackHiReg, prevStackHiStorage),
		)
		if err != nil {
			return err
		}
	}
	err := g.generateRestoreSlot(b)
	if err != nil {
		return err
	}
	err = g.generateRestore(b)
	if err != nil {
		return err
	}
	err = g.generateReleaseBackupSlot(b)
	if err != nil {
		return err
	}
	err = g.generateRegUpdate(b)
	if err != nil {
		return err
	}

	return nil
}

// generateBackupSlot emits code that pushes X30 and the backup-slot address onto
// the stack (reserving stackUsage bytes), then saves the previous frame pointer
// just below the new SP and points X29 at it, keeping the frame-pointer chain intact.
func (g *Generator) generateBackupSlot(b *assembler.Builder) rookoutErrors.RookoutError {
	return b.AddInstructions(
		b.Inst(assembler.ASTP, assembler.Mem{Base: arm64asm.SP, Disp: -int64(g.stackUsage)}, assembler.RegReg(arm64asm.X30, backupSlotAddrReg), assembler.C_XPRE),
		b.Inst(assembler.AMOVD, assembler.Mem{Base: arm64asm.SP, Disp: -0x8}, arm64asm.X29),
		b.Sub3(arm64asm.X29, arm64asm.SP, assembler.Imm(0x8)),
	)
}

// generateRestoreSlot emits code that pops X30 and the backup-slot address back
// off the stack, releasing the stackUsage bytes reserved by generateBackupSlot.
func (g *Generator) generateRestoreSlot(b *assembler.Builder) rookoutErrors.RookoutError {
	return b.AddInstructions(
		b.Inst(assembler.ALDP, assembler.RegReg(arm64asm.X30, backupSlotAddrReg), assembler.Mem{Base: arm64asm.SP, Disp: int64(g.stackUsage)}, assembler.C_XPOST),
	)
}

// generateRegUpdate emits, for every register in regsToUpdate, a check-and-rebase
// sequence: if the register's value lies within the stack bounds saved at backup
// time, it is moved to the same offset from the top of the goroutine's current
// stack; values outside the old stack are left untouched.
func (g *Generator) generateRegUpdate(b *assembler.Builder) rookoutErrors.RookoutError {
	for _, reg := range g.regsToUpdate {
		startLabel := fmt.Sprintf("%sBackupStart", reg.String())
		endLabel := fmt.Sprintf("%sBackupEnd", reg.String())
		err := b.AddInstructions(
			b.Label(startLabel),
			// Skip registers that don't point into the old stack.
			b.Cmp(reg, prevStackLoReg),
			b.BranchToLabel(assembler.ABLT, endLabel),
			b.Cmp(reg, prevStackHiReg),
			b.BranchToLabel(assembler.ABGT, endLabel),
			// reg = reg - old stack.hi + current g.stack.hi
			b.Inst(assembler.ASUB, reg, prevStackHiReg),
			common.MovGToX20(b),
			b.Inst(assembler.AMOVD, arm64asm.X20, assembler.Mem{Base: arm64asm.X20, Disp: common.StackHiOffset}),
			b.Inst(assembler.AADD, reg, arm64asm.X20),
			b.Label(endLabel),
			b.PsuedoNop(),
		)
		if err != nil {
			return err
		}
	}

	return nil
}
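
// The rebasing rule that generateRegUpdate encodes in assembly can be written in
// plain Go as the sketch below. It is illustrative only and is not used by the
// generated code: rebaseStackPointer is a hypothetical helper, with oldLo/oldHi
// standing for the stack bounds saved at backup time and newHi for the current
// g.stack.hi.
func rebaseStackPointer(p, oldLo, oldHi, newHi uintptr) uintptr {
	// Values that don't point into the old stack are left untouched.
	if p < oldLo || p > oldHi {
		return p
	}
	// Keep the same offset from the top of the stack on the (possibly moved) new stack.
	return p - oldHi + newHi
}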