github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/ring0/entry_arm64.s

// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "funcdata.h"
#include "textflag.h"

#define CPU_SELF             0   // +checkoffset . CPU.self
#define CPU_REGISTERS        224 // +checkoffset . CPU.registers
#define CPU_ARCH_STATE       16  // +checkoffset . CPU.CPUArchState
#define CPU_STACK_BOTTOM     CPU_ARCH_STATE+0     // +checkoffset . CPUArchState.stack
#define CPU_STACK_TOP        CPU_STACK_BOTTOM+128 // +checksize . CPUArchState.stack
#define CPU_ERROR_CODE       CPU_ARCH_STATE+128   // +checkoffset . CPUArchState.errorCode
#define CPU_ERROR_TYPE       CPU_ARCH_STATE+136   // +checkoffset . CPUArchState.errorType
#define CPU_FAULT_ADDR       CPU_ARCH_STATE+144   // +checkoffset . CPUArchState.faultAddr
#define CPU_FPSTATE_EL0      CPU_ARCH_STATE+152   // +checkoffset . CPUArchState.el0Fp
#define CPU_TTBR0_KVM        CPU_ARCH_STATE+160   // +checkoffset . CPUArchState.ttbr0Kvm
#define CPU_TTBR0_APP        CPU_ARCH_STATE+168   // +checkoffset . CPUArchState.ttbr0App
#define CPU_VECTOR_CODE      CPU_ARCH_STATE+176   // +checkoffset . CPUArchState.vecCode
#define CPU_APP_ADDR         CPU_ARCH_STATE+184   // +checkoffset . CPUArchState.appAddr
#define CPU_LAZY_VFP         CPU_ARCH_STATE+192   // +checkoffset . CPUArchState.lazyVFP
#define CPU_APP_ASID         CPU_ARCH_STATE+200   // +checkoffset . CPUArchState.appASID
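// The +checkconst and +checkoffset annotations in this file are consumed by
// gVisor's build-time checks, which are expected to verify these hard-coded
// values against the corresponding Go definitions.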

// Bits.
#define _KERNEL_FLAGS 965 // +checkconst . KernelFlagsSet

// Vectors.
#define El1Sync                 4  // +checkconst . El1Sync
#define El1Irq                  5  // +checkconst . El1Irq
#define El1Fiq                  6  // +checkconst . El1Fiq
#define El1Err                  7  // +checkconst . El1Err
#define El0Sync                 8  // +checkconst . El0Sync
#define El0Irq                  9  // +checkconst . El0Irq
#define El0Fiq                  10 // +checkconst . El0Fiq
#define El0Err                  11 // +checkconst . El0Err
#define El1SyncDa               16 // +checkconst . El1SyncDa
#define El1SyncIa               17 // +checkconst . El1SyncIa
#define El1SyncSpPc             18 // +checkconst . El1SyncSpPc
#define El1SyncUndef            19 // +checkconst . El1SyncUndef
#define El1SyncDbg              20 // +checkconst . El1SyncDbg
#define El1SyncInv              21 // +checkconst . El1SyncInv
#define El0SyncSVC              22 // +checkconst . El0SyncSVC
#define El0SyncDa               23 // +checkconst . El0SyncDa
#define El0SyncIa               24 // +checkconst . El0SyncIa
#define El0SyncFpsimdAcc        25 // +checkconst . El0SyncFpsimdAcc
#define El0SyncSveAcc           26 // +checkconst . El0SyncSveAcc
#define El0SyncFpsimdExc        27 // +checkconst . El0SyncFpsimdExc
#define El0SyncSys              28 // +checkconst . El0SyncSys
#define El0SyncSpPc             29 // +checkconst . El0SyncSpPc
#define El0SyncUndef            30 // +checkconst . El0SyncUndef
#define El0SyncDbg              31 // +checkconst . El0SyncDbg
#define El0SyncWfx              32 // +checkconst . El0SyncWfx
#define El0SyncInv              33 // +checkconst . El0SyncInv
#define El0ErrNMI               34 // +checkconst . El0ErrNMI
#define PageFault               23 // +checkconst . PageFault
#define Syscall                 22 // +checkconst . Syscall
#define VirtualizationException 35 // +checkconst . VirtualizationException

#define PTRACE_REGS     0 // +checkoffset linux PtraceRegs.Regs
#define PTRACE_R0       (PTRACE_REGS + 0*8)
#define PTRACE_R1       (PTRACE_REGS + 1*8)
#define PTRACE_R2       (PTRACE_REGS + 2*8)
#define PTRACE_R3       (PTRACE_REGS + 3*8)
#define PTRACE_R4       (PTRACE_REGS + 4*8)
#define PTRACE_R5       (PTRACE_REGS + 5*8)
#define PTRACE_R6       (PTRACE_REGS + 6*8)
#define PTRACE_R7       (PTRACE_REGS + 7*8)
#define PTRACE_R8       (PTRACE_REGS + 8*8)
#define PTRACE_R9       (PTRACE_REGS + 9*8)
#define PTRACE_R10      (PTRACE_REGS + 10*8)
#define PTRACE_R11      (PTRACE_REGS + 11*8)
#define PTRACE_R12      (PTRACE_REGS + 12*8)
#define PTRACE_R13      (PTRACE_REGS + 13*8)
#define PTRACE_R14      (PTRACE_REGS + 14*8)
#define PTRACE_R15      (PTRACE_REGS + 15*8)
#define PTRACE_R16      (PTRACE_REGS + 16*8)
#define PTRACE_R17      (PTRACE_REGS + 17*8)
#define PTRACE_R18      (PTRACE_REGS + 18*8)
#define PTRACE_R19      (PTRACE_REGS + 19*8)
#define PTRACE_R20      (PTRACE_REGS + 20*8)
#define PTRACE_R21      (PTRACE_REGS + 21*8)
#define PTRACE_R22      (PTRACE_REGS + 22*8)
#define PTRACE_R23      (PTRACE_REGS + 23*8)
#define PTRACE_R24      (PTRACE_REGS + 24*8)
#define PTRACE_R25      (PTRACE_REGS + 25*8)
#define PTRACE_R26      (PTRACE_REGS + 26*8)
#define PTRACE_R27      (PTRACE_REGS + 27*8)
#define PTRACE_R28      (PTRACE_REGS + 28*8)
#define PTRACE_R29      (PTRACE_REGS + 29*8)
#define PTRACE_R30      (PTRACE_REGS + 30*8)
#define PTRACE_SP       248 // +checkoffset linux PtraceRegs.Sp
#define PTRACE_PC       256 // +checkoffset linux PtraceRegs.Pc
#define PTRACE_PSTATE   264 // +checkoffset linux PtraceRegs.Pstate
#define PTRACE_TLS      272 // +checkoffset arch Registers.TPIDR_EL0

// ERET returns using the ELR and SPSR for the current exception level.
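// The raw WORD below is the ERET instruction encoding (0xd69f03e0). The
// trailing DSB NSH ($7) and ISB SY ($15) barriers likely serve to keep the
// CPU from speculating past the exception return, as in the Linux kernel.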
#define ERET() \
  WORD $0xd69f03e0; \
  DSB $7; \
  ISB $15;

// RSV_REG is a register that holds el1 information temporarily.
#define RSV_REG 	R18_PLATFORM

// RSV_REG_APP is a register that holds el0 information temporarily.
#define RSV_REG_APP 	R19

#define FPEN_NOTRAP 	0x3
#define FPEN_SHIFT 	20

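// FPEN_ENABLE sets CPACR_EL1.FPEN (bits [21:20]) to 0b11, so that FP/SIMD
// accesses are not trapped at EL0 or EL1.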
#define FPEN_ENABLE (FPEN_NOTRAP << FPEN_SHIFT)

// Saves a register set.
//
// This is a macro because it may need to be executed in contexts where a
// stack is not available for calls.
//
// The following registers are not saved: R18, R19.
#define REGISTERS_SAVE(reg, offset) \
  STP (R0, R1), offset+PTRACE_R0(reg); \
  STP (R2, R3), offset+PTRACE_R2(reg); \
  STP (R4, R5), offset+PTRACE_R4(reg); \
  STP (R6, R7), offset+PTRACE_R6(reg); \
  STP (R8, R9), offset+PTRACE_R8(reg); \
  STP (R10, R11), offset+PTRACE_R10(reg); \
  STP (R12, R13), offset+PTRACE_R12(reg); \
  STP (R14, R15), offset+PTRACE_R14(reg); \
  STP (R16, R17), offset+PTRACE_R16(reg); \
  STP (R20, R21), offset+PTRACE_R20(reg); \
  STP (R22, R23), offset+PTRACE_R22(reg); \
  STP (R24, R25), offset+PTRACE_R24(reg); \
  STP (R26, R27), offset+PTRACE_R26(reg); \
  STP (g, R29), offset+PTRACE_R28(reg); \
  MOVD R30, offset+PTRACE_R30(reg);

// Loads a register set.
//
// This is a macro because it may need to be executed in contexts where a
// stack is not available for calls.
//
// The following registers are not loaded: R18, R19.
#define REGISTERS_LOAD(reg, offset) \
  LDP offset+PTRACE_R0(reg), (R0, R1); \
  LDP offset+PTRACE_R2(reg), (R2, R3); \
  LDP offset+PTRACE_R4(reg), (R4, R5); \
  LDP offset+PTRACE_R6(reg), (R6, R7); \
  LDP offset+PTRACE_R8(reg), (R8, R9); \
  LDP offset+PTRACE_R10(reg), (R10, R11); \
  LDP offset+PTRACE_R12(reg), (R12, R13); \
  LDP offset+PTRACE_R14(reg), (R14, R15); \
  LDP offset+PTRACE_R16(reg), (R16, R17); \
  LDP offset+PTRACE_R20(reg), (R20, R21); \
  LDP offset+PTRACE_R22(reg), (R22, R23); \
  LDP offset+PTRACE_R24(reg), (R24, R25); \
  LDP offset+PTRACE_R26(reg), (R26, R27); \
  LDP offset+PTRACE_R28(reg), (g, R29); \
  MOVD offset+PTRACE_R30(reg), R30;

// Loads the application's fpstate.
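// FPSR and FPCR are restored first; the raw WORD encodings that follow are
// LDP instructions restoring the SIMD registers q0..q31 in pairs from
// successive 32-byte offsets off x18 (only the first pair is annotated; the
// rest continue the same pattern).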
#define FPSTATE_EL0_LOAD() \
  MRS TPIDR_EL1, RSV_REG; \
  MOVD CPU_FPSTATE_EL0(RSV_REG), RSV_REG; \
  MOVD 0(RSV_REG), RSV_REG_APP; \
  MOVD RSV_REG_APP, FPSR; \
  MOVD 8(RSV_REG), RSV_REG_APP; \
  MOVD RSV_REG_APP, FPCR; \
  ADD $16, RSV_REG, RSV_REG; \
  WORD $0xad400640; \ // ldp q0, q1, [x18]
  WORD $0xad410e42; \
  WORD $0xad421644; \
  WORD $0xad431e46; \
  WORD $0xad442648; \
  WORD $0xad452e4a; \
  WORD $0xad46364c; \
  WORD $0xad473e4e; \
  WORD $0xad484650; \
  WORD $0xad494e52; \
  WORD $0xad4a5654; \
  WORD $0xad4b5e56; \
  WORD $0xad4c6658; \
  WORD $0xad4d6e5a; \
  WORD $0xad4e765c; \
  WORD $0xad4f7e5e;

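// The ESR_ELx_* definitions below mirror the Linux kernel's
// arch/arm64/include/asm/esr.h.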
#define ESR_ELx_EC_UNKNOWN	(0x00)
#define ESR_ELx_EC_WFx		(0x01)
/* Unallocated EC: 0x02 */
#define ESR_ELx_EC_CP15_32	(0x03)
#define ESR_ELx_EC_CP15_64	(0x04)
#define ESR_ELx_EC_CP14_MR	(0x05)
#define ESR_ELx_EC_CP14_LS	(0x06)
#define ESR_ELx_EC_FP_ASIMD	(0x07)
#define ESR_ELx_EC_CP10_ID	(0x08)	/* EL2 only */
#define ESR_ELx_EC_PAC		(0x09)	/* EL2 and above */
/* Unallocated EC: 0x0A - 0x0B */
#define ESR_ELx_EC_CP14_64	(0x0C)
/* Unallocated EC: 0x0d */
#define ESR_ELx_EC_ILL		(0x0E)
/* Unallocated EC: 0x0F - 0x10 */
#define ESR_ELx_EC_SVC32	(0x11)
#define ESR_ELx_EC_HVC32	(0x12)	/* EL2 only */
#define ESR_ELx_EC_SMC32	(0x13)	/* EL2 and above */
/* Unallocated EC: 0x14 */
#define ESR_ELx_EC_SVC64	(0x15)
#define ESR_ELx_EC_HVC64	(0x16)	/* EL2 and above */
#define ESR_ELx_EC_SMC64	(0x17)	/* EL2 and above */
#define ESR_ELx_EC_SYS64	(0x18)
#define ESR_ELx_EC_SVE		(0x19)
/* Unallocated EC: 0x1A - 0x1E */
#define ESR_ELx_EC_IMP_DEF	(0x1f)	/* EL3 only */
#define ESR_ELx_EC_IABT_LOW	(0x20)
#define ESR_ELx_EC_IABT_CUR	(0x21)
#define ESR_ELx_EC_PC_ALIGN	(0x22)
/* Unallocated EC: 0x23 */
#define ESR_ELx_EC_DABT_LOW	(0x24)
#define ESR_ELx_EC_DABT_CUR	(0x25)
#define ESR_ELx_EC_SP_ALIGN	(0x26)
/* Unallocated EC: 0x27 */
#define ESR_ELx_EC_FP_EXC32	(0x28)
/* Unallocated EC: 0x29 - 0x2B */
#define ESR_ELx_EC_FP_EXC64	(0x2C)
/* Unallocated EC: 0x2D - 0x2E */
#define ESR_ELx_EC_SERROR	(0x2F)
#define ESR_ELx_EC_BREAKPT_LOW	(0x30)
#define ESR_ELx_EC_BREAKPT_CUR	(0x31)
#define ESR_ELx_EC_SOFTSTP_LOW	(0x32)
#define ESR_ELx_EC_SOFTSTP_CUR	(0x33)
#define ESR_ELx_EC_WATCHPT_LOW	(0x34)
#define ESR_ELx_EC_WATCHPT_CUR	(0x35)
/* Unallocated EC: 0x36 - 0x37 */
#define ESR_ELx_EC_BKPT32	(0x38)
/* Unallocated EC: 0x39 */
#define ESR_ELx_EC_VECTOR32	(0x3A)	/* EL2 only */
/* Unallocated EC: 0x3B */
#define ESR_ELx_EC_BRK64	(0x3C)
/* Unallocated EC: 0x3D - 0x3F */
#define ESR_ELx_EC_MAX		(0x3F)

#define ESR_ELx_EC_SHIFT	(26)
#define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

#define ESR_ELx_IL_SHIFT	(25)
#define ESR_ELx_IL		(UL(1) << ESR_ELx_IL_SHIFT)
#define ESR_ELx_ISS_MASK	(ESR_ELx_IL - 1)

/* ISS field definitions shared by different classes */
#define ESR_ELx_WNR_SHIFT	(6)
#define ESR_ELx_WNR		(UL(1) << ESR_ELx_WNR_SHIFT)

/* Asynchronous Error Type */
#define ESR_ELx_IDS_SHIFT	(24)
#define ESR_ELx_IDS		(UL(1) << ESR_ELx_IDS_SHIFT)
#define ESR_ELx_AET_SHIFT	(10)
#define ESR_ELx_AET		(UL(0x7) << ESR_ELx_AET_SHIFT)

#define ESR_ELx_AET_UC		(UL(0) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_UEU		(UL(1) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_UEO		(UL(2) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_UER		(UL(3) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_CE		(UL(6) << ESR_ELx_AET_SHIFT)

/* Shared ISS field definitions for Data/Instruction aborts */
#define ESR_ELx_SET_SHIFT	(11)
#define ESR_ELx_SET_MASK	(UL(3) << ESR_ELx_SET_SHIFT)
#define ESR_ELx_FnV_SHIFT	(10)
#define ESR_ELx_FnV		(UL(1) << ESR_ELx_FnV_SHIFT)
#define ESR_ELx_EA_SHIFT	(9)
#define ESR_ELx_EA		(UL(1) << ESR_ELx_EA_SHIFT)
#define ESR_ELx_S1PTW_SHIFT	(7)
#define ESR_ELx_S1PTW		(UL(1) << ESR_ELx_S1PTW_SHIFT)

/* Shared ISS fault status code (IFSC/DFSC) for Data/Instruction aborts */
#define ESR_ELx_FSC		(0x3F)
#define ESR_ELx_FSC_TYPE	(0x3C)
#define ESR_ELx_FSC_EXTABT	(0x10)
#define ESR_ELx_FSC_SERROR	(0x11)
#define ESR_ELx_FSC_ACCESS	(0x08)
#define ESR_ELx_FSC_FAULT	(0x04)
#define ESR_ELx_FSC_PERM	(0x0C)

/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT	(24)
#define ESR_ELx_ISV		(UL(1) << ESR_ELx_ISV_SHIFT)
#define ESR_ELx_SAS_SHIFT	(22)
#define ESR_ELx_SAS		(UL(3) << ESR_ELx_SAS_SHIFT)
#define ESR_ELx_SSE_SHIFT	(21)
#define ESR_ELx_SSE		(UL(1) << ESR_ELx_SSE_SHIFT)
#define ESR_ELx_SRT_SHIFT	(16)
#define ESR_ELx_SRT_MASK	(UL(0x1F) << ESR_ELx_SRT_SHIFT)
#define ESR_ELx_SF_SHIFT	(15)
#define ESR_ELx_SF 		(UL(1) << ESR_ELx_SF_SHIFT)
#define ESR_ELx_AR_SHIFT	(14)
#define ESR_ELx_AR 		(UL(1) << ESR_ELx_AR_SHIFT)
#define ESR_ELx_CM_SHIFT	(8)
#define ESR_ELx_CM 		(UL(1) << ESR_ELx_CM_SHIFT)

/* ISS field definitions for exceptions taken in to Hyp */
#define ESR_ELx_CV		(UL(1) << 24)
#define ESR_ELx_COND_SHIFT	(20)
#define ESR_ELx_COND_MASK	(UL(0xF) << ESR_ELx_COND_SHIFT)
#define ESR_ELx_WFx_ISS_TI	(UL(1) << 0)
#define ESR_ELx_WFx_ISS_WFI	(UL(0) << 0)
#define ESR_ELx_WFx_ISS_WFE	(UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK	((1UL << 16) - 1)

/* ISS field definitions for system error */
#define ESR_ELx_SERR_MASK	(0x1)
#define ESR_ELx_SERR_NMI	(0x1)

// LOAD_KERNEL_ADDRESS loads a kernel address.
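// ORR-ing in 0xffff000000000000 moves the address into the upper
// (TTBR1_EL1) half of the virtual address space, where the kernel is mapped.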
#define LOAD_KERNEL_ADDRESS(from, to) \
	MOVD from, to; \
	ORR $0xffff000000000000, to, to;

// LOAD_KERNEL_STACK loads the kernel temporary stack.
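// Note that RSV_REG is clobbered while computing the stack top, so
// TPIDR_EL1 is re-read at the end to restore it.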
#define LOAD_KERNEL_STACK(from) \
	LOAD_KERNEL_ADDRESS(CPU_SELF(from), RSV_REG); \
	MOVD $CPU_STACK_TOP(RSV_REG), RSV_REG; \
	MOVD RSV_REG, RSP; \
	WORD $0xd538d092;   //MRS   TPIDR_EL1, R18

// SWITCH_TO_APP_PAGETABLE sets a new pagetable for a container application.
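// The BFI inserts the 16-bit ASID into bits [63:48] of the TTBR0_EL1 value,
// so the translation table base and the ASID are switched in a single
// register write.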
#define SWITCH_TO_APP_PAGETABLE() \
	MOVD CPU_APP_ASID(RSV_REG), RSV_REG_APP; \
	MOVD CPU_TTBR0_APP(RSV_REG), RSV_REG; \
	BFI $48, RSV_REG_APP, $16, RSV_REG; \
	MSR RSV_REG, TTBR0_EL1; \
	ISB $15;

// SWITCH_TO_KVM_PAGETABLE sets the kvm pagetable.
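// ASID 1 appears to be reserved for the sentry/KVM page tables here,
// keeping them distinct from application ASIDs.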
#define SWITCH_TO_KVM_PAGETABLE() \
	MOVD CPU_TTBR0_KVM(RSV_REG), RSV_REG; \
	MOVD $1, RSV_REG_APP; \
	BFI $48, RSV_REG_APP, $16, RSV_REG; \
	MSR RSV_REG, TTBR0_EL1; \
	ISB $15;

// FPSIMDDisableTrap disables the trap for accessing fpsimd.
TEXT ·FPSIMDDisableTrap(SB),NOSPLIT,$0
	MOVD $FPEN_ENABLE, R0
	MSR R0, CPACR_EL1
	ISB $15
	RET

// FPSIMDEnableTrap enables the trap for accessing fpsimd.
TEXT ·FPSIMDEnableTrap(SB),NOSPLIT,$0
	MSR $0, CPACR_EL1
	ISB $15
	RET

// FPSIMD_DISABLE_TRAP disables the trap for accessing fpsimd.
#define FPSIMD_DISABLE_TRAP(reg) \
	MOVD $FPEN_ENABLE, reg; \
	MSR reg, CPACR_EL1; \
	ISB $15;

// FPSIMD_ENABLE_TRAP enables the trap for accessing fpsimd.
#define FPSIMD_ENABLE_TRAP(reg) \
	MSR $0, CPACR_EL1; \
	ISB $15;

// KERNEL_ENTRY_FROM_EL0 is the entry code of the vcpu from el0 to el1.
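// On entry, R18 (RSV_REG) and R19 (RSV_REG_APP) still hold application
// values, so they are spilled to the temporary kernel stack before being
// reused as scratch registers.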
#define KERNEL_ENTRY_FROM_EL0 \
	SUB $16, RSP, RSP; \		// step1, save r18, r19 into kernel temporary stack.
	STP (RSV_REG, RSV_REG_APP), 16*0(RSP); \
	WORD $0xd538d092; \    // MRS   TPIDR_EL1, R18
	MOVD CPU_APP_ADDR(RSV_REG), RSV_REG_APP; \ // step2, load app context pointer.
	REGISTERS_SAVE(RSV_REG_APP, 0); \          // step3, save app context.
	MOVD RSV_REG_APP, R20; \
	LDP 16*0(RSP), (RSV_REG, RSV_REG_APP); \
	ADD $16, RSP, RSP; \
	STP (RSV_REG, RSV_REG_APP), PTRACE_R18(R20); \
	MRS TPIDR_EL0, R3; \
	MOVD R3, PTRACE_TLS(R20); \
	WORD $0xd5384003; \      //  MRS SPSR_EL1, R3
	MOVD R3, PTRACE_PSTATE(R20); \
	MRS ELR_EL1, R3; \
	MOVD R3, PTRACE_PC(R20); \
	WORD $0xd5384103; \      //  MRS SP_EL0, R3
	MOVD R3, PTRACE_SP(R20);

// KERNEL_ENTRY_FROM_EL1 is the entry code of the vcpu from el1 to el1.
#define KERNEL_ENTRY_FROM_EL1 \
	WORD $0xd538d092; \   //MRS   TPIDR_EL1, R18
	REGISTERS_SAVE(RSV_REG, CPU_REGISTERS); \	// Save sentry context.
	MOVD RSV_REG_APP, CPU_REGISTERS+PTRACE_R19(RSV_REG); \
	MRS TPIDR_EL0, R4; \
	MOVD R4, CPU_REGISTERS+PTRACE_TLS(RSV_REG); \
	WORD $0xd5384004; \    //    MRS SPSR_EL1, R4
	MOVD R4, CPU_REGISTERS+PTRACE_PSTATE(RSV_REG); \
	MRS ELR_EL1, R4; \
	MOVD R4, CPU_REGISTERS+PTRACE_PC(RSV_REG); \
	MOVD RSP, R4; \
	MOVD R4, CPU_REGISTERS+PTRACE_SP(RSV_REG); \
	LOAD_KERNEL_STACK(RSV_REG);  // Load the temporary stack.

// EXCEPTION_EL0 is a common el0 exception handler function.
#define EXCEPTION_EL0(vector) \
	WORD $0xd538d092; \	//MRS   TPIDR_EL1, R18
	WORD $0xd538601a; \	//MRS   FAR_EL1, R26
	MOVD R26, CPU_FAULT_ADDR(RSV_REG); \
	MOVD $1, R3; \
	MOVD R3, CPU_ERROR_TYPE(RSV_REG); \	// Set error type to user.
	MOVD $vector, R3; \
	MOVD R3, CPU_VECTOR_CODE(RSV_REG); \
	MRS ESR_EL1, R3; \
	MOVD R3, CPU_ERROR_CODE(RSV_REG); \
	B ·kernelExitToEl1(SB);

// EXCEPTION_EL1 is a common el1 exception handler function.
#define EXCEPTION_EL1(vector) \
	MOVD $vector, R3; \
	MOVD R3, 8(RSP); \
	B ·HaltEl1ExceptionAndResume(SB);

// storeEl0Fpstate writes the address of the application's fpstate.
TEXT ·storeEl0Fpstate(SB),NOSPLIT,$0-8
	MOVD value+0(FP), R1
	ORR $0xffff000000000000, R1, R1
	MRS  TPIDR_EL1, RSV_REG
	MOVD R1, CPU_FPSTATE_EL0(RSV_REG)
	RET

// storeAppASID writes the application's asid value.
TEXT ·storeAppASID(SB),NOSPLIT,$0-8
	MOVD asid+0(FP), R1
	MRS  TPIDR_EL1, RSV_REG
	MOVD R1, CPU_APP_ASID(RSV_REG)
	RET

// Halt halts execution.
TEXT ·Halt(SB),NOSPLIT,$0
	// Disable fpsimd.
	WORD $0xd5381041 // MRS CPACR_EL1, R1
	MOVD R1, CPU_LAZY_VFP(RSV_REG)
	DSB $15

	FPSIMD_ENABLE_TRAP(RSV_REG)

	// Trigger MMIO_EXIT/_KVM_HYPERCALL_VMEXIT.
	//
	// To keep it simple, I use the address of the exception vector table as
	// the MMIO base address, so that I can trigger an MMIO exit by forcibly
	// writing to this read-only space. The region is also long enough to
	// cover a sufficient number of hypercall IDs. In host user space, I can
	// then calculate the faulting address to find out which hypercall was
	// issued.
	MRS VBAR_EL1, R9
	MOVD R0, 0x0(R9)

	RET

// HaltAndResume halts execution and then branches to the resume function.
TEXT ·HaltAndResume(SB),NOSPLIT,$0
	BL ·Halt(SB)
	B ·kernelExitToEl1(SB) // Resume.

// HaltEl1SvcAndResume calls Hooks.KernelSyscall and resumes.
TEXT ·HaltEl1SvcAndResume(SB),NOSPLIT,$0
	WORD $0xd538d092            // MRS TPIDR_EL1, R18
	MOVD CPU_SELF(RSV_REG), R3  // Load vCPU.
	MOVD R3, 8(RSP)             // First argument (vCPU).
	CALL ·kernelSyscall(SB)     // Call the trampoline.
	B ·kernelExitToEl1(SB)      // Resume.

// HaltEl1ExceptionAndResume calls Hooks.KernelException and resumes.
TEXT ·HaltEl1ExceptionAndResume(SB),NOSPLIT,$0
	WORD $0xd538d092            // MRS TPIDR_EL1, R18
	MOVD CPU_SELF(RSV_REG), R3  // Load vCPU.
	MOVD R3, 8(RSP)             // First argument (vCPU).
	MOVD vector+0(FP), R3
	MOVD R3, 16(RSP)            // Second argument (vector).
	CALL ·kernelException(SB)   // Call the trampoline.
	B ·kernelExitToEl1(SB)      // Resume.

// Shutdown stops the guest.
TEXT ·Shutdown(SB),NOSPLIT,$0
	// PSCI SYSTEM_RESET event (function ID 0x84000009).
	MOVD $0x84000009, R0
	HVC $0

#define STACK_FRAME_SIZE 32

// kernelExitToEl0 is the entrypoint for the application in guest_el0.
// It prepares the vCPU environment for the container application.
TEXT ·kernelExitToEl0(SB),NOSPLIT,$0
	// Step1, save sentry context into memory.
	MRS TPIDR_EL1, RSV_REG
	REGISTERS_SAVE(RSV_REG, CPU_REGISTERS)
	MOVD RSV_REG_APP, CPU_REGISTERS+PTRACE_R19(RSV_REG)
	MRS TPIDR_EL0, R3
	MOVD R3, CPU_REGISTERS+PTRACE_TLS(RSV_REG)

	WORD $0xd5384003    //    MRS SPSR_EL1, R3
	MOVD R3, CPU_REGISTERS+PTRACE_PSTATE(RSV_REG)
	MOVD R30, CPU_REGISTERS+PTRACE_PC(RSV_REG)
	MOVD RSP, R3
	MOVD R3, CPU_REGISTERS+PTRACE_SP(RSV_REG)

	MOVD CPU_REGISTERS+PTRACE_R3(RSV_REG), R3

	// Step2, switch to temporary stack.
	LOAD_KERNEL_STACK(RSV_REG)

	// Step3, load app context pointer.
	MOVD CPU_APP_ADDR(RSV_REG), RSV_REG_APP

	// Step4, prepare the environment for container application.
	// set sp_el0.
	MOVD PTRACE_SP(RSV_REG_APP), R1
	WORD $0xd5184101        //MSR R1, SP_EL0
	// set pc.
	MOVD PTRACE_PC(RSV_REG_APP), R1
	MSR R1, ELR_EL1
	// set pstate.
	MOVD PTRACE_PSTATE(RSV_REG_APP), R1
	WORD $0xd5184001  //MSR R1, SPSR_EL1

	// We need to use a kernel-space address to execute the code below,
	// since after SWITCH_TO_APP_PAGETABLE the ASID is changed to the
	// app's ASID.
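	// The raw WORD below encodes ADR R1, .+12, i.e. the address of
	// do_exit_to_el0 just past the following ORR and JMP.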
	WORD $0x10000061		// ADR R1, do_exit_to_el0
	ORR $0xffff000000000000, R1, R1
	JMP (R1)

do_exit_to_el0:
	// RSV_REG & RSV_REG_APP will be loaded at the end.
	REGISTERS_LOAD(RSV_REG_APP, 0)
	MOVD PTRACE_TLS(RSV_REG_APP), RSV_REG
	MSR RSV_REG, TPIDR_EL0

	// switch to user pagetable.
	LDP PTRACE_R18(RSV_REG_APP), (RSV_REG, RSV_REG_APP)

	SUB $STACK_FRAME_SIZE, RSP, RSP
	STP (RSV_REG, RSV_REG_APP), 16*0(RSP)
	STP (R0, R1), 16*1(RSP)

	WORD $0xd538d092    //MRS   TPIDR_EL1, R18

	SWITCH_TO_APP_PAGETABLE()

	LDP 16*1(RSP), (R0, R1)
	LDP 16*0(RSP), (RSV_REG, RSV_REG_APP)
	ADD $STACK_FRAME_SIZE, RSP, RSP

	ERET()

// kernelExitToEl1 is the entrypoint for the sentry in guest_el1.
// It prepares the vCPU environment for the sentry.
TEXT ·kernelExitToEl1(SB),NOSPLIT,$0
	WORD $0xd538d092     //MRS   TPIDR_EL1, R18
	MOVD CPU_REGISTERS+PTRACE_PSTATE(RSV_REG), R1
	WORD $0xd5184001  //MSR R1, SPSR_EL1

	MOVD CPU_REGISTERS+PTRACE_PC(RSV_REG), R1
	MSR R1, ELR_EL1

	// restore sentry's tls.
	MOVD CPU_REGISTERS+PTRACE_TLS(RSV_REG), R1
	MSR R1, TPIDR_EL0

	MOVD CPU_REGISTERS+PTRACE_SP(RSV_REG), R1
	MOVD R1, RSP

	REGISTERS_LOAD(RSV_REG, CPU_REGISTERS)
	SWITCH_TO_KVM_PAGETABLE()
	MRS TPIDR_EL1, RSV_REG

	MOVD CPU_REGISTERS+PTRACE_R19(RSV_REG), RSV_REG_APP

	ERET()

TEXT ·start(SB),NOSPLIT,$0
	DSB $7          // dsb(nsh)
	ISB $15
	B ·kernelExitToEl1(SB)

// func AddrOfStart() uintptr
TEXT ·AddrOfStart(SB), $0-8
	MOVD	$·start(SB), R0
	MOVD	R0, ret+0(FP)
	RET

// El1_sync_invalid is the handler for an invalid EL1_sync.
TEXT ·El1_sync_invalid(SB),NOSPLIT,$0
	B ·Shutdown(SB)

// El1_irq_invalid is the handler for an invalid El1_irq.
TEXT ·El1_irq_invalid(SB),NOSPLIT,$0
	B ·Shutdown(SB)

// El1_fiq_invalid is the handler for an invalid El1_fiq.
TEXT ·El1_fiq_invalid(SB),NOSPLIT,$0
	B ·Shutdown(SB)

// El1_error_invalid is the handler for an invalid El1_error.
TEXT ·El1_error_invalid(SB),NOSPLIT,$0
	B ·Shutdown(SB)

// El1_sync is the handler for El1_sync.
TEXT ·El1_sync(SB),NOSPLIT,$0
	KERNEL_ENTRY_FROM_EL1
	MRS ESR_EL1, R25                  // read the syndrome register
	LSR  $ESR_ELx_EC_SHIFT, R25, R24  // exception class
	CMP $ESR_ELx_EC_DABT_CUR, R24
	BEQ el1_da                        // data abort in EL1
	CMP $ESR_ELx_EC_IABT_CUR, R24
	BEQ el1_ia                        // instruction abort in EL1
	CMP $ESR_ELx_EC_FP_ASIMD, R24
	BEQ el1_fpsimd_acc                // FP/ASIMD access
	CMP $ESR_ELx_EC_SVE, R24
	BEQ el1_sve_acc                   // SVE access
	CMP $ESR_ELx_EC_SP_ALIGN, R24
	BEQ el1_sp_pc                     // stack alignment exception
	CMP $ESR_ELx_EC_PC_ALIGN, R24
	BEQ el1_sp_pc                     // pc alignment exception
	CMP $ESR_ELx_EC_UNKNOWN, R24
	BEQ el1_undef                     // unknown exception in EL1
	CMP $ESR_ELx_EC_SVC64, R24
	BEQ el1_svc                       // SVC in 64-bit state
	CMP $ESR_ELx_EC_BREAKPT_CUR, R24
	BEQ el1_dbg                       // debug exception in EL1
	B el1_invalid

el1_da:
	EXCEPTION_EL1(El1SyncDa)
el1_ia:
	EXCEPTION_EL1(El1SyncIa)
el1_sp_pc:
	EXCEPTION_EL1(El1SyncSpPc)
el1_undef:
	EXCEPTION_EL1(El1SyncUndef)
el1_svc:
	B ·HaltEl1SvcAndResume(SB)
el1_dbg:
	EXCEPTION_EL1(El1SyncDbg)
el1_fpsimd_acc:
el1_sve_acc:
	FPSIMD_DISABLE_TRAP(RSV_REG)

	// Restore context.
	MRS TPIDR_EL1, RSV_REG

	// Restore sp.
	MOVD CPU_REGISTERS+PTRACE_SP(RSV_REG), R1
	MOVD R1, RSP

	// Restore common registers.
	REGISTERS_LOAD(RSV_REG, CPU_REGISTERS)
	MOVD CPU_REGISTERS+PTRACE_R19(RSV_REG), RSV_REG_APP

	ERET()	// return to el1.

el1_invalid:
	EXCEPTION_EL1(El1SyncInv)

// El1_irq is the handler for El1_irq.
TEXT ·El1_irq(SB),NOSPLIT,$0
	B ·Shutdown(SB)

// El1_fiq is the handler for El1_fiq.
TEXT ·El1_fiq(SB),NOSPLIT,$0
	B ·Shutdown(SB)

// El1_error is the handler for El1_error.
TEXT ·El1_error(SB),NOSPLIT,$0
	B ·Shutdown(SB)

// El0_sync is the handler for El0_sync.
TEXT ·El0_sync(SB),NOSPLIT,$0
	KERNEL_ENTRY_FROM_EL0
	MRS ESR_EL1, R25                  // read the syndrome register
	LSR  $ESR_ELx_EC_SHIFT, R25, R24  // exception class
	CMP $ESR_ELx_EC_SVC64, R24
	BEQ el0_svc                       // SVC in 64-bit state
	CMP $ESR_ELx_EC_DABT_LOW, R24
	BEQ el0_da                        // data abort in EL0
	CMP $ESR_ELx_EC_IABT_LOW, R24
	BEQ el0_ia                        // instruction abort in EL0
	CMP $ESR_ELx_EC_FP_ASIMD, R24
	BEQ el0_fpsimd_acc                // FP/ASIMD access
	CMP $ESR_ELx_EC_SVE, R24
	BEQ el0_sve_acc                   // SVE access
	CMP $ESR_ELx_EC_FP_EXC64, R24
	BEQ el0_fpsimd_exc                // FP/ASIMD exception
	CMP $ESR_ELx_EC_SP_ALIGN, R24
	BEQ el0_sp_pc                     // stack alignment exception
	CMP $ESR_ELx_EC_PC_ALIGN, R24
	BEQ el0_sp_pc                     // pc alignment exception
	CMP $ESR_ELx_EC_UNKNOWN, R24
	BEQ el0_undef                     // unknown exception in EL0
	CMP $ESR_ELx_EC_BREAKPT_LOW, R24
	BEQ el0_dbg                       // debug exception in EL0
	CMP $ESR_ELx_EC_SYS64, R24
	BEQ el0_sys                       // configurable trap
	CMP $ESR_ELx_EC_WFx, R24
	BEQ el0_wfx                       // WFX trap
	B   el0_invalid

el0_svc:
	WORD $0xd538d092     //MRS   TPIDR_EL1, R18

	MOVD $0, CPU_ERROR_CODE(RSV_REG) // Clear error code.

	MOVD $1, R3
	MOVD R3, CPU_ERROR_TYPE(RSV_REG) // Set error type to user.

	MOVD $Syscall, R3
	MOVD R3, CPU_VECTOR_CODE(RSV_REG)

	B ·kernelExitToEl1(SB)

el0_da:
el0_ia:
	EXCEPTION_EL0(PageFault)
el0_fpsimd_acc:
el0_sve_acc:
	FPSIMD_DISABLE_TRAP(RSV_REG)
	FPSTATE_EL0_LOAD()

	// Restore context.
	MRS TPIDR_EL1, RSV_REG
	MOVD CPU_APP_ADDR(RSV_REG), RSV_REG_APP

	// Restore R0-R30
	REGISTERS_LOAD(RSV_REG_APP, 0)
	MOVD PTRACE_R18(RSV_REG_APP), RSV_REG
	MOVD PTRACE_R19(RSV_REG_APP), RSV_REG_APP

	ERET()  // return to el0.
el0_fpsimd_exc:
	EXCEPTION_EL0(El0SyncFpsimdExc)
el0_sp_pc:
	EXCEPTION_EL0(El0SyncSpPc)
el0_undef:
	EXCEPTION_EL0(El0SyncUndef)
el0_dbg:
	EXCEPTION_EL0(El0SyncDbg)
el0_sys:
	EXCEPTION_EL0(El0SyncSys)
el0_wfx:
	EXCEPTION_EL0(El0SyncWfx)
el0_invalid:
	EXCEPTION_EL0(El0SyncInv)

TEXT ·El0_irq(SB),NOSPLIT,$0
	B ·Shutdown(SB)

TEXT ·El0_fiq(SB),NOSPLIT,$0
	B ·Shutdown(SB)

TEXT ·El0_error(SB),NOSPLIT,$0
	KERNEL_ENTRY_FROM_EL0
	WORD $0xd5385219        // MRS ESR_EL1, R25
	AND $ESR_ELx_SERR_MASK, R25, R24
	CMP $ESR_ELx_SERR_NMI, R24
	BEQ el0_nmi
	B el0_bounce

el0_nmi:
	EXCEPTION_EL0(El0ErrNMI)
el0_bounce:
	EXCEPTION_EL0(VirtualizationException)

TEXT ·El0_sync_invalid(SB),NOSPLIT,$0
	B ·Shutdown(SB)

TEXT ·El0_irq_invalid(SB),NOSPLIT,$0
	B ·Shutdown(SB)

TEXT ·El0_fiq_invalid(SB),NOSPLIT,$0
	B ·Shutdown(SB)

TEXT ·El0_error_invalid(SB),NOSPLIT,$0
	B ·Shutdown(SB)

// vectors implements the exception vector table.
// The start address of the exception vector table must be 2KB-aligned
// (2^11 bytes). For details, see the Arm developer documentation:
// https://developer.arm.com/documentation/100933/0100/AArch64-exception-vector-table
// See also the Linux kernel code in arch/arm64/kernel/entry.S.
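// The table below has 16 entries of 128 bytes each (2KB in total): four
// groups of {sync, irq, fiq, serror} for, in order, current EL with SP0,
// current EL with SPx, lower EL using AArch64, and lower EL using AArch32.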
TEXT ·vectors(SB),NOSPLIT,$0
	PCALIGN $2048
	B ·El1_sync_invalid(SB)
	PCALIGN $128
	B ·El1_irq_invalid(SB)
	PCALIGN $128
	B ·El1_fiq_invalid(SB)
	PCALIGN $128
	B ·El1_error_invalid(SB)

	PCALIGN $128
	B ·El1_sync(SB)
	PCALIGN $128
	B ·El1_irq(SB)
	PCALIGN $128
	B ·El1_fiq(SB)
	PCALIGN $128
	B ·El1_error(SB)

	PCALIGN $128
	B ·El0_sync(SB)
	PCALIGN $128
	B ·El0_irq(SB)
	PCALIGN $128
	B ·El0_fiq(SB)
	PCALIGN $128
	B ·El0_error(SB)

	PCALIGN $128
	B ·El0_sync_invalid(SB)
	PCALIGN $128
	B ·El0_irq_invalid(SB)
	PCALIGN $128
	B ·El0_fiq_invalid(SB)
	PCALIGN $128
	B ·El0_error_invalid(SB)

// func AddrOfVectors() uintptr
TEXT ·AddrOfVectors(SB), $0-8
	MOVD	$·vectors(SB), R0
	MOVD	R0, ret+0(FP)
	RET