// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "funcdata.h"
#include "textflag.h"

// NB: Offsets are programmatically generated (see BUILD).
//
// This file is concatenated with the definitions.

// ERET returns using the ELR and SPSR for the current exception level.
#define ERET() \
        WORD $0xd69f03e0; \ // eret
        DSB $7; \ // dsb nsh
        ISB $15; // isb sy

// RSV_REG is a register that holds el1 information temporarily.
#define RSV_REG R18_PLATFORM

// RSV_REG_APP is a register that holds el0 information temporarily.
#define RSV_REG_APP R19

#define FPEN_NOTRAP 0x3
#define FPEN_SHIFT 20

// FPEN_ENABLE sets CPACR_EL1.FPEN[21:20] to 0b11, i.e. FP/SIMD accesses are
// not trapped.
#define FPEN_ENABLE (FPEN_NOTRAP << FPEN_SHIFT)

// sctlr_el1: system control register el1.
#define SCTLR_M 1 << 0 // MMU enable.
#define SCTLR_C 1 << 2 // Data cache enable.
#define SCTLR_I 1 << 12 // Instruction cache enable.
#define SCTLR_DZE 1 << 14 // Allow DC ZVA at EL0.
#define SCTLR_UCT 1 << 15 // Allow CTR_EL0 access at EL0.
#define SCTLR_UCI 1 << 26 // Allow cache maintenance at EL0.

#define SCTLR_EL1_DEFAULT (SCTLR_M | SCTLR_C | SCTLR_I | SCTLR_UCT | SCTLR_UCI | SCTLR_DZE)

// cntkctl_el1: counter-timer kernel control register el1.
#define CNTKCTL_EL0PCTEN 1 << 0 // EL0 access to the physical counter.
#define CNTKCTL_EL0VCTEN 1 << 1 // EL0 access to the virtual counter.

#define CNTKCTL_EL1_DEFAULT (CNTKCTL_EL0PCTEN | CNTKCTL_EL0VCTEN)

// Saves a register set.
//
// This is a macro because it may need to be executed in contexts where a
// stack is not available for calls.
//
// The following registers are not saved: R18, R19.
#define REGISTERS_SAVE(reg, offset) \
        STP (R0, R1), offset+PTRACE_R0(reg); \
        STP (R2, R3), offset+PTRACE_R2(reg); \
        STP (R4, R5), offset+PTRACE_R4(reg); \
        STP (R6, R7), offset+PTRACE_R6(reg); \
        STP (R8, R9), offset+PTRACE_R8(reg); \
        STP (R10, R11), offset+PTRACE_R10(reg); \
        STP (R12, R13), offset+PTRACE_R12(reg); \
        STP (R14, R15), offset+PTRACE_R14(reg); \
        STP (R16, R17), offset+PTRACE_R16(reg); \
        STP (R20, R21), offset+PTRACE_R20(reg); \
        STP (R22, R23), offset+PTRACE_R22(reg); \
        STP (R24, R25), offset+PTRACE_R24(reg); \
        STP (R26, R27), offset+PTRACE_R26(reg); \
        STP (g, R29), offset+PTRACE_R28(reg); \
        MOVD R30, offset+PTRACE_R30(reg);
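// For illustration only: with reg=R20 and offset=0, the first line of
// REGISTERS_SAVE above expands to
//
//      STP (R0, R1), PTRACE_R0(R20)
//
// i.e. R0 and R1 are stored at the generated PTRACE_R0 offset within the
// register block pointed to by R20. R18 (RSV_REG) and R19 (RSV_REG_APP) are
// deliberately skipped because they are live as scratch registers on entry;
// callers such as KERNEL_ENTRY_FROM_EL0 save them separately.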
// Loads a register set.
//
// This is a macro because it may need to be executed in contexts where a
// stack is not available for calls.
//
// The following registers are not loaded: R18, R19.
#define REGISTERS_LOAD(reg, offset) \
        LDP offset+PTRACE_R0(reg), (R0, R1); \
        LDP offset+PTRACE_R2(reg), (R2, R3); \
        LDP offset+PTRACE_R4(reg), (R4, R5); \
        LDP offset+PTRACE_R6(reg), (R6, R7); \
        LDP offset+PTRACE_R8(reg), (R8, R9); \
        LDP offset+PTRACE_R10(reg), (R10, R11); \
        LDP offset+PTRACE_R12(reg), (R12, R13); \
        LDP offset+PTRACE_R14(reg), (R14, R15); \
        LDP offset+PTRACE_R16(reg), (R16, R17); \
        LDP offset+PTRACE_R20(reg), (R20, R21); \
        LDP offset+PTRACE_R22(reg), (R22, R23); \
        LDP offset+PTRACE_R24(reg), (R24, R25); \
        LDP offset+PTRACE_R26(reg), (R26, R27); \
        LDP offset+PTRACE_R28(reg), (g, R29); \
        MOVD offset+PTRACE_R30(reg), R30;

// Loads the application's fpstate.
#define FPSTATE_EL0_LOAD() \
        MRS TPIDR_EL1, RSV_REG; \
        MOVD CPU_FPSTATE_EL0(RSV_REG), RSV_REG; \
        MOVD 0(RSV_REG), RSV_REG_APP; \
        MOVD RSV_REG_APP, FPSR; \
        MOVD 8(RSV_REG), RSV_REG_APP; \
        MOVD RSV_REG_APP, FPCR; \
        ADD $16, RSV_REG, RSV_REG; \
        WORD $0xad400640; \ // ldp q0, q1, [x18]
        WORD $0xad410e42; \ // ldp q2, q3, [x18, #32]
        WORD $0xad421644; \ // ldp q4, q5, [x18, #64]
        WORD $0xad431e46; \ // ldp q6, q7, [x18, #96]
        WORD $0xad442648; \ // ldp q8, q9, [x18, #128]
        WORD $0xad452e4a; \ // ldp q10, q11, [x18, #160]
        WORD $0xad46364c; \ // ldp q12, q13, [x18, #192]
        WORD $0xad473e4e; \ // ldp q14, q15, [x18, #224]
        WORD $0xad484650; \ // ldp q16, q17, [x18, #256]
        WORD $0xad494e52; \ // ldp q18, q19, [x18, #288]
        WORD $0xad4a5654; \ // ldp q20, q21, [x18, #320]
        WORD $0xad4b5e56; \ // ldp q22, q23, [x18, #352]
        WORD $0xad4c6658; \ // ldp q24, q25, [x18, #384]
        WORD $0xad4d6e5a; \ // ldp q26, q27, [x18, #416]
        WORD $0xad4e765c; \ // ldp q28, q29, [x18, #448]
        WORD $0xad4f7e5e; // ldp q30, q31, [x18, #480]

#define ESR_ELx_EC_UNKNOWN (0x00)
#define ESR_ELx_EC_WFx (0x01)
/* Unallocated EC: 0x02 */
#define ESR_ELx_EC_CP15_32 (0x03)
#define ESR_ELx_EC_CP15_64 (0x04)
#define ESR_ELx_EC_CP14_MR (0x05)
#define ESR_ELx_EC_CP14_LS (0x06)
#define ESR_ELx_EC_FP_ASIMD (0x07)
#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
/* Unallocated EC: 0x0A - 0x0B */
#define ESR_ELx_EC_CP14_64 (0x0C)
/* Unallocated EC: 0x0D */
#define ESR_ELx_EC_ILL (0x0E)
/* Unallocated EC: 0x0F - 0x10 */
#define ESR_ELx_EC_SVC32 (0x11)
#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
/* Unallocated EC: 0x14 */
#define ESR_ELx_EC_SVC64 (0x15)
#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
#define ESR_ELx_EC_SYS64 (0x18)
#define ESR_ELx_EC_SVE (0x19)
/* Unallocated EC: 0x1A - 0x1E */
#define ESR_ELx_EC_IMP_DEF (0x1F) /* EL3 only */
#define ESR_ELx_EC_IABT_LOW (0x20)
#define ESR_ELx_EC_IABT_CUR (0x21)
#define ESR_ELx_EC_PC_ALIGN (0x22)
/* Unallocated EC: 0x23 */
#define ESR_ELx_EC_DABT_LOW (0x24)
#define ESR_ELx_EC_DABT_CUR (0x25)
#define ESR_ELx_EC_SP_ALIGN (0x26)
/* Unallocated EC: 0x27 */
#define ESR_ELx_EC_FP_EXC32 (0x28)
/* Unallocated EC: 0x29 - 0x2B */
#define ESR_ELx_EC_FP_EXC64 (0x2C)
/* Unallocated EC: 0x2D - 0x2E */
#define ESR_ELx_EC_SERROR (0x2F)
#define ESR_ELx_EC_BREAKPT_LOW (0x30)
#define ESR_ELx_EC_BREAKPT_CUR (0x31)
#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
#define ESR_ELx_EC_WATCHPT_LOW (0x34)
#define ESR_ELx_EC_WATCHPT_CUR (0x35)
/* Unallocated EC: 0x36 - 0x37 */
#define ESR_ELx_EC_BKPT32 (0x38)
/* Unallocated EC: 0x39 */
#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
/* Unallocated EC: 0x3B */
#define ESR_ELx_EC_BRK64 (0x3C)
/* Unallocated EC: 0x3D - 0x3F */
#define ESR_ELx_EC_MAX (0x3F)
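// A worked example of the EC encoding (assuming the standard ESR_EL1
// layout): an SVC from AArch64 reports ESR_EL1 = 0x56000000 | imm16, since
// (0x56000000 >> 26) & 0x3F = 0x15 = ESR_ELx_EC_SVC64, with the IL bit (25)
// set for a 32-bit instruction. The handlers below extract the class with:
//
//      MRS ESR_EL1, R25
//      LSR $ESR_ELx_EC_SHIFT, R25, R24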
#define ESR_ELx_EC_SHIFT (26)
#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

#define ESR_ELx_IL_SHIFT (25)
#define ESR_ELx_IL (UL(1) << ESR_ELx_IL_SHIFT)
#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)

/* ISS field definitions shared by different classes */
#define ESR_ELx_WNR_SHIFT (6)
#define ESR_ELx_WNR (UL(1) << ESR_ELx_WNR_SHIFT)

/* Asynchronous Error Type */
#define ESR_ELx_IDS_SHIFT (24)
#define ESR_ELx_IDS (UL(1) << ESR_ELx_IDS_SHIFT)
#define ESR_ELx_AET_SHIFT (10)
#define ESR_ELx_AET (UL(0x7) << ESR_ELx_AET_SHIFT)

#define ESR_ELx_AET_UC (UL(0) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_UEU (UL(1) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_UEO (UL(2) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_UER (UL(3) << ESR_ELx_AET_SHIFT)
#define ESR_ELx_AET_CE (UL(6) << ESR_ELx_AET_SHIFT)

/* Shared ISS field definitions for Data/Instruction aborts */
#define ESR_ELx_SET_SHIFT (11)
#define ESR_ELx_SET_MASK (UL(3) << ESR_ELx_SET_SHIFT)
#define ESR_ELx_FnV_SHIFT (10)
#define ESR_ELx_FnV (UL(1) << ESR_ELx_FnV_SHIFT)
#define ESR_ELx_EA_SHIFT (9)
#define ESR_ELx_EA (UL(1) << ESR_ELx_EA_SHIFT)
#define ESR_ELx_S1PTW_SHIFT (7)
#define ESR_ELx_S1PTW (UL(1) << ESR_ELx_S1PTW_SHIFT)

/* Shared ISS fault status code (IFSC/DFSC) for Data/Instruction aborts */
#define ESR_ELx_FSC (0x3F)
#define ESR_ELx_FSC_TYPE (0x3C)
#define ESR_ELx_FSC_EXTABT (0x10)
#define ESR_ELx_FSC_SERROR (0x11)
#define ESR_ELx_FSC_ACCESS (0x08)
#define ESR_ELx_FSC_FAULT (0x04)
#define ESR_ELx_FSC_PERM (0x0C)

/* ISS field definitions for Data Aborts */
#define ESR_ELx_ISV_SHIFT (24)
#define ESR_ELx_ISV (UL(1) << ESR_ELx_ISV_SHIFT)
#define ESR_ELx_SAS_SHIFT (22)
#define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT)
#define ESR_ELx_SSE_SHIFT (21)
#define ESR_ELx_SSE (UL(1) << ESR_ELx_SSE_SHIFT)
#define ESR_ELx_SRT_SHIFT (16)
#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
#define ESR_ELx_SF_SHIFT (15)
#define ESR_ELx_SF (UL(1) << ESR_ELx_SF_SHIFT)
#define ESR_ELx_AR_SHIFT (14)
#define ESR_ELx_AR (UL(1) << ESR_ELx_AR_SHIFT)
#define ESR_ELx_CM_SHIFT (8)
#define ESR_ELx_CM (UL(1) << ESR_ELx_CM_SHIFT)

/* ISS field definitions for exceptions taken in to Hyp */
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
#define ESR_ELx_WFx_ISS_TI (UL(1) << 0)
#define ESR_ELx_WFx_ISS_WFI (UL(0) << 0)
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)

/* ISS field definitions for system error */
#define ESR_ELx_SERR_MASK (0x1)
#define ESR_ELx_SERR_NMI (0x1)

// LOAD_KERNEL_ADDRESS loads a kernel address.
#define LOAD_KERNEL_ADDRESS(from, to) \
        MOVD from, to; \
        ORR $0xffff000000000000, to, to;

// LOAD_KERNEL_STACK loads the kernel temporary stack.
#define LOAD_KERNEL_STACK(from) \
        LOAD_KERNEL_ADDRESS(CPU_SELF(from), RSV_REG); \
        MOVD $CPU_STACK_TOP(RSV_REG), RSV_REG; \
        MOVD RSV_REG, RSP; \
        WORD $0xd538d092; // MRS TPIDR_EL1, R18
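// For illustration only: LOAD_KERNEL_ADDRESS(CPU_SELF(R0), R1) expands to
//
//      MOVD CPU_SELF(R0), R1
//      ORR $0xffff000000000000, R1, R1
//
// Setting the top 16 bits places the pointer in the upper (TTBR1_EL1) half
// of the address space, which remains mapped across the TTBR0_EL1 page-table
// switches performed below.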
// SWITCH_TO_APP_PAGETABLE sets a new pagetable for a container application.
#define SWITCH_TO_APP_PAGETABLE() \
        MOVD CPU_APP_ASID(RSV_REG), RSV_REG_APP; \
        MOVD CPU_TTBR0_APP(RSV_REG), RSV_REG; \
        BFI $48, RSV_REG_APP, $16, RSV_REG; \ // Insert the 16-bit ASID into TTBR0_EL1[63:48].
        MSR RSV_REG, TTBR0_EL1; \
        ISB $15;

// SWITCH_TO_KVM_PAGETABLE sets the kvm pagetable.
#define SWITCH_TO_KVM_PAGETABLE() \
        MOVD CPU_TTBR0_KVM(RSV_REG), RSV_REG; \
        MOVD $1, RSV_REG_APP; \ // The kvm pagetable uses ASID 1.
        BFI $48, RSV_REG_APP, $16, RSV_REG; \
        MSR RSV_REG, TTBR0_EL1; \
        ISB $15;

// FPSIMDDisableTrap disables the trap for accessing fpsimd.
TEXT ·FPSIMDDisableTrap(SB),NOSPLIT,$0
        MOVD $FPEN_ENABLE, R0
        MSR R0, CPACR_EL1
        ISB $15
        RET

// FPSIMDEnableTrap enables the trap for accessing fpsimd.
TEXT ·FPSIMDEnableTrap(SB),NOSPLIT,$0
        MSR $0, CPACR_EL1
        ISB $15
        RET

// FPSIMD_DISABLE_TRAP disables the trap for accessing fpsimd.
#define FPSIMD_DISABLE_TRAP(reg) \
        MOVD $FPEN_ENABLE, reg; \
        MSR reg, CPACR_EL1; \
        ISB $15;

// FPSIMD_ENABLE_TRAP enables the trap for accessing fpsimd.
// Note: the reg parameter is unused; it is kept for symmetry with
// FPSIMD_DISABLE_TRAP.
#define FPSIMD_ENABLE_TRAP(reg) \
        MSR $0, CPACR_EL1; \
        ISB $15;

// KERNEL_ENTRY_FROM_EL0 is the entry code of the vcpu from el0 to el1.
#define KERNEL_ENTRY_FROM_EL0 \
        SUB $16, RSP, RSP; \ // Step 1: save R18, R19 onto the kernel temporary stack.
        STP (RSV_REG, RSV_REG_APP), 16*0(RSP); \
        WORD $0xd538d092; \ // MRS TPIDR_EL1, R18
        MOVD CPU_APP_ADDR(RSV_REG), RSV_REG_APP; \ // Step 2: load the app context pointer.
        REGISTERS_SAVE(RSV_REG_APP, 0); \ // Step 3: save the app context.
        MOVD RSV_REG_APP, R20; \
        LDP 16*0(RSP), (RSV_REG, RSV_REG_APP); \
        ADD $16, RSP, RSP; \
        STP (RSV_REG, RSV_REG_APP), PTRACE_R18(R20); \
        MRS TPIDR_EL0, R3; \
        MOVD R3, PTRACE_TLS(R20); \
        WORD $0xd5384003; \ // MRS SPSR_EL1, R3
        MOVD R3, PTRACE_PSTATE(R20); \
        MRS ELR_EL1, R3; \
        MOVD R3, PTRACE_PC(R20); \
        WORD $0xd5384103; \ // MRS SP_EL0, R3
        MOVD R3, PTRACE_SP(R20);

// KERNEL_ENTRY_FROM_EL1 is the entry code of the vcpu from el1 to el1.
#define KERNEL_ENTRY_FROM_EL1 \
        WORD $0xd538d092; \ // MRS TPIDR_EL1, R18
        REGISTERS_SAVE(RSV_REG, CPU_REGISTERS); \ // Save sentry context.
        MOVD RSV_REG_APP, CPU_REGISTERS+PTRACE_R19(RSV_REG); \
        MRS TPIDR_EL0, R4; \
        MOVD R4, CPU_REGISTERS+PTRACE_TLS(RSV_REG); \
        WORD $0xd5384004; \ // MRS SPSR_EL1, R4
        MOVD R4, CPU_REGISTERS+PTRACE_PSTATE(RSV_REG); \
        MRS ELR_EL1, R4; \
        MOVD R4, CPU_REGISTERS+PTRACE_PC(RSV_REG); \
        MOVD RSP, R4; \
        MOVD R4, CPU_REGISTERS+PTRACE_SP(RSV_REG); \
        LOAD_KERNEL_STACK(RSV_REG); // Load the temporary stack.

// EXCEPTION_EL0 is a common el0 exception handler function.
#define EXCEPTION_EL0(vector) \
        WORD $0xd538d092; \ // MRS TPIDR_EL1, R18
        WORD $0xd538601a; \ // MRS FAR_EL1, R26
        MOVD R26, CPU_FAULT_ADDR(RSV_REG); \
        MOVD $1, R3; \
        MOVD R3, CPU_ERROR_TYPE(RSV_REG); \ // Set error type to user.
        MOVD $vector, R3; \
        MOVD R3, CPU_VECTOR_CODE(RSV_REG); \
        MRS ESR_EL1, R3; \
        MOVD R3, CPU_ERROR_CODE(RSV_REG); \
        B ·kernelExitToEl1(SB);

// EXCEPTION_EL1 is a common el1 exception handler function.
#define EXCEPTION_EL1(vector) \
        MOVD $vector, R3; \
        MOVD R3, 8(RSP); \
        B ·HaltEl1ExceptionAndResume(SB);
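// Control flow sketch (derived from the macros above): on an EL0 exception,
// EXCEPTION_EL0 records FAR_EL1, the vector code and ESR_EL1 in the CPU
// struct and branches to kernelExitToEl1, handing the event to the sentry.
// On an EL1 exception, EXCEPTION_EL1 passes the vector on the stack to
// HaltEl1ExceptionAndResume, which calls the Hooks.KernelException
// trampoline and then resumes.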
// storeEl0Fpstate writes the address of the application's fpstate.
TEXT ·storeEl0Fpstate(SB),NOSPLIT,$0-8
        MOVD value+0(FP), R1
        ORR $0xffff000000000000, R1, R1
        MRS TPIDR_EL1, RSV_REG
        MOVD R1, CPU_FPSTATE_EL0(RSV_REG)
        RET

// storeAppASID writes the application's asid value.
TEXT ·storeAppASID(SB),NOSPLIT,$0-8
        MOVD asid+0(FP), R1
        MRS TPIDR_EL1, RSV_REG
        MOVD R1, CPU_APP_ASID(RSV_REG)
        RET

// Halt halts execution.
TEXT ·Halt(SB),NOSPLIT,$0
        // Disable fpsimd.
        WORD $0xd5381041 // MRS CPACR_EL1, R1
        MOVD R1, CPU_LAZY_VFP(RSV_REG)
        DSB $15

        FPSIMD_ENABLE_TRAP(RSV_REG)

        // Trigger MMIO_EXIT/_KVM_HYPERCALL_VMEXIT.
        //
        // To keep things simple, the address of the exception vector table
        // is used as the MMIO base address, so an MMIO-exit can be triggered
        // by forcibly writing to that read-only region. The region is large
        // enough to cover the full range of hypercall IDs, and host user
        // space can recover the hypercall number from the faulting address.
        MRS VBAR_EL1, R9
        MOVD R0, 0x0(R9)

        RET

// HaltAndResume halts execution and resumes via kernelExitToEl1.
TEXT ·HaltAndResume(SB),NOSPLIT,$0
        BL ·Halt(SB)
        B ·kernelExitToEl1(SB) // Resume.

// HaltEl1SvcAndResume calls Hooks.KernelSyscall and resumes.
TEXT ·HaltEl1SvcAndResume(SB),NOSPLIT,$0
        WORD $0xd538d092 // MRS TPIDR_EL1, R18
        MOVD CPU_SELF(RSV_REG), R3 // Load vCPU.
        MOVD R3, 8(RSP) // First argument (vCPU).
        CALL ·kernelSyscall(SB) // Call the trampoline.
        B ·kernelExitToEl1(SB) // Resume.

// HaltEl1ExceptionAndResume calls Hooks.KernelException and resumes.
TEXT ·HaltEl1ExceptionAndResume(SB),NOSPLIT,$0-8
        WORD $0xd538d092 // MRS TPIDR_EL1, R18
        MOVD CPU_SELF(RSV_REG), R3 // Load vCPU.
        MOVD R3, 8(RSP) // First argument (vCPU).
        MOVD vector+0(FP), R3
        MOVD R3, 16(RSP) // Second argument (vector).
        CALL ·kernelException(SB) // Call the trampoline.
        B ·kernelExitToEl1(SB) // Resume.

// Shutdown stops the guest.
TEXT ·Shutdown(SB),NOSPLIT,$0
        // PSCI SYSTEM_RESET event.
        MOVD $0x84000009, R0
        HVC $0

// See kernel.go.
TEXT ·Current(SB),NOSPLIT,$0-8
        MOVD CPU_SELF(RSV_REG), R8
        MOVD R8, ret+0(FP)
        RET

#define STACK_FRAME_SIZE 32
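// The EL1->EL0 exit below proceeds roughly as follows:
//
//      1. Save the sentry context into CPU_REGISTERS.
//      2. Switch to the per-CPU temporary stack.
//      3. Program SP_EL0, ELR_EL1 and SPSR_EL1 from the application's
//         ptrace registers.
//      4. Jump to the kernel-space alias of do_exit_to_el0, switch TTBR0_EL1
//         to the application page tables, and ERET into EL0.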
// kernelExitToEl0 is the entrypoint for the application in guest_el0.
// It prepares the vcpu environment for the container application.
TEXT ·kernelExitToEl0(SB),NOSPLIT,$0
        // Step 1: save the sentry context into memory.
        MRS TPIDR_EL1, RSV_REG
        REGISTERS_SAVE(RSV_REG, CPU_REGISTERS)
        MOVD RSV_REG_APP, CPU_REGISTERS+PTRACE_R19(RSV_REG)
        MRS TPIDR_EL0, R3
        MOVD R3, CPU_REGISTERS+PTRACE_TLS(RSV_REG)

        WORD $0xd5384003 // MRS SPSR_EL1, R3
        MOVD R3, CPU_REGISTERS+PTRACE_PSTATE(RSV_REG)
        MOVD R30, CPU_REGISTERS+PTRACE_PC(RSV_REG)
        MOVD RSP, R3
        MOVD R3, CPU_REGISTERS+PTRACE_SP(RSV_REG)

        MOVD CPU_REGISTERS+PTRACE_R3(RSV_REG), R3

        // Step 2: switch to the temporary stack.
        LOAD_KERNEL_STACK(RSV_REG)

        // Step 3: load the app context pointer.
        MOVD CPU_APP_ADDR(RSV_REG), RSV_REG_APP

        // Step 4: prepare the environment for the container application.
        // Set sp_el0.
        MOVD PTRACE_SP(RSV_REG_APP), R1
        WORD $0xd5184101 // MSR R1, SP_EL0
        // Set pc.
        MOVD PTRACE_PC(RSV_REG_APP), R1
        MSR R1, ELR_EL1
        // Set pstate.
        MOVD PTRACE_PSTATE(RSV_REG_APP), R1
        WORD $0xd5184001 // MSR R1, SPSR_EL1

        // We must use a kernel-space address to execute the code below,
        // since after SWITCH_TO_APP_PAGETABLE the ASID is changed to the
        // app's ASID.
        WORD $0x10000061 // ADR R1, do_exit_to_el0
        ORR $0xffff000000000000, R1, R1
        JMP (R1)

do_exit_to_el0:
        // RSV_REG & RSV_REG_APP will be loaded at the end.
        REGISTERS_LOAD(RSV_REG_APP, 0)
        MOVD PTRACE_TLS(RSV_REG_APP), RSV_REG
        MSR RSV_REG, TPIDR_EL0

        // Switch to the user pagetable.
        LDP PTRACE_R18(RSV_REG_APP), (RSV_REG, RSV_REG_APP)

        SUB $STACK_FRAME_SIZE, RSP, RSP
        STP (RSV_REG, RSV_REG_APP), 16*0(RSP)
        STP (R0, R1), 16*1(RSP)

        WORD $0xd538d092 // MRS TPIDR_EL1, R18

        SWITCH_TO_APP_PAGETABLE()

        LDP 16*1(RSP), (R0, R1)
        LDP 16*0(RSP), (RSV_REG, RSV_REG_APP)
        ADD $STACK_FRAME_SIZE, RSP, RSP

        ERET()

// kernelExitToEl1 is the entrypoint for the sentry in guest_el1.
// It prepares the vcpu environment for the sentry.
TEXT ·kernelExitToEl1(SB),NOSPLIT,$0
        WORD $0xd538d092 // MRS TPIDR_EL1, R18
        MOVD CPU_REGISTERS+PTRACE_PSTATE(RSV_REG), R1
        WORD $0xd5184001 // MSR R1, SPSR_EL1

        MOVD CPU_REGISTERS+PTRACE_PC(RSV_REG), R1
        MSR R1, ELR_EL1

        // Restore the sentry's tls.
        MOVD CPU_REGISTERS+PTRACE_TLS(RSV_REG), R1
        MSR R1, TPIDR_EL0

        MOVD CPU_REGISTERS+PTRACE_SP(RSV_REG), R1
        MOVD R1, RSP

        REGISTERS_LOAD(RSV_REG, CPU_REGISTERS)
        SWITCH_TO_KVM_PAGETABLE()
        MRS TPIDR_EL1, RSV_REG

        MOVD CPU_REGISTERS+PTRACE_R19(RSV_REG), RSV_REG_APP

        ERET()

// Start is the CPU entrypoint.
TEXT ·Start(SB),NOSPLIT,$0
        // Init.
        WORD $0xd508871f // tlbi vmalle1
        DSB $7 // dsb(nsh)

        MOVD $1<<12, R1 // Reset mdscr_el1 and disable
        MSR R1, MDSCR_EL1 // access to the DCC from EL0.
        ISB $15

        MRS TTBR1_EL1, R1
        MSR R1, TTBR0_EL1
        ISB $15

        MOVD $CNTKCTL_EL1_DEFAULT, R1
        MSR R1, CNTKCTL_EL1

        MOVD R8, RSV_REG
        ORR $0xffff000000000000, RSV_REG, RSV_REG
        WORD $0xd518d092 // MSR R18, TPIDR_EL1

        // Enable the trap for accessing fpsimd.
        MSR $0, CPACR_EL1

        // Init.
        MOVD $SCTLR_EL1_DEFAULT, R1 // Re-enable the MMU.
        MSR R1, SCTLR_EL1
        ISB $15
        WORD $0xd508751f // ic iallu

        DSB $7 // dsb(nsh)
        ISB $15

        B ·kernelExitToEl1(SB)

// El1_sync_invalid is the handler for an invalid El1_sync.
TEXT ·El1_sync_invalid(SB),NOSPLIT,$0
        B ·Shutdown(SB)

// El1_irq_invalid is the handler for an invalid El1_irq.
TEXT ·El1_irq_invalid(SB),NOSPLIT,$0
        B ·Shutdown(SB)

// El1_fiq_invalid is the handler for an invalid El1_fiq.
TEXT ·El1_fiq_invalid(SB),NOSPLIT,$0
        B ·Shutdown(SB)

// El1_error_invalid is the handler for an invalid El1_error.
TEXT ·El1_error_invalid(SB),NOSPLIT,$0
        B ·Shutdown(SB)
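// Dispatch overview: El1_sync below reads ESR_EL1, extracts the exception
// class and branches accordingly. Data/instruction aborts, FP/SIMD and SVE
// access traps, SP/PC alignment faults, undefined instructions, SVCs and
// debug exceptions each have a dedicated label; anything else falls through
// to el1_invalid.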
// El1_sync is the handler for El1_sync.
TEXT ·El1_sync(SB),NOSPLIT,$0
        KERNEL_ENTRY_FROM_EL1
        MRS ESR_EL1, R25 // read the syndrome register
        LSR $ESR_ELx_EC_SHIFT, R25, R24 // exception class
        CMP $ESR_ELx_EC_DABT_CUR, R24
        BEQ el1_da // data abort in EL1
        CMP $ESR_ELx_EC_IABT_CUR, R24
        BEQ el1_ia // instruction abort in EL1
        CMP $ESR_ELx_EC_FP_ASIMD, R24
        BEQ el1_fpsimd_acc // FP/ASIMD access
        CMP $ESR_ELx_EC_SVE, R24
        BEQ el1_sve_acc // SVE access
        CMP $ESR_ELx_EC_SP_ALIGN, R24
        BEQ el1_sp_pc // stack alignment exception
        CMP $ESR_ELx_EC_PC_ALIGN, R24
        BEQ el1_sp_pc // pc alignment exception
        CMP $ESR_ELx_EC_UNKNOWN, R24
        BEQ el1_undef // unknown exception in EL1
        CMP $ESR_ELx_EC_SVC64, R24
        BEQ el1_svc // SVC in 64-bit state
        CMP $ESR_ELx_EC_BREAKPT_CUR, R24
        BEQ el1_dbg // debug exception in EL1
        B el1_invalid

el1_da:
        EXCEPTION_EL1(El1SyncDa)
el1_ia:
        EXCEPTION_EL1(El1SyncIa)
el1_sp_pc:
        EXCEPTION_EL1(El1SyncSpPc)
el1_undef:
        EXCEPTION_EL1(El1SyncUndef)
el1_svc:
        B ·HaltEl1SvcAndResume(SB)
el1_dbg:
        EXCEPTION_EL1(El1SyncDbg)
el1_fpsimd_acc:
el1_sve_acc:
        FPSIMD_DISABLE_TRAP(RSV_REG)

        // Restore context.
        MRS TPIDR_EL1, RSV_REG

        // Restore sp.
        MOVD CPU_REGISTERS+PTRACE_SP(RSV_REG), R1
        MOVD R1, RSP

        // Restore common registers.
        REGISTERS_LOAD(RSV_REG, CPU_REGISTERS)
        MOVD CPU_REGISTERS+PTRACE_R19(RSV_REG), RSV_REG_APP

        ERET() // return to el1.

el1_invalid:
        EXCEPTION_EL1(El1SyncInv)

// El1_irq is the handler for El1_irq.
TEXT ·El1_irq(SB),NOSPLIT,$0
        B ·Shutdown(SB)

// El1_fiq is the handler for El1_fiq.
TEXT ·El1_fiq(SB),NOSPLIT,$0
        B ·Shutdown(SB)

// El1_error is the handler for El1_error.
TEXT ·El1_error(SB),NOSPLIT,$0
        B ·Shutdown(SB)

// El0_sync is the handler for El0_sync.
TEXT ·El0_sync(SB),NOSPLIT,$0
        KERNEL_ENTRY_FROM_EL0
        MRS ESR_EL1, R25 // read the syndrome register
        LSR $ESR_ELx_EC_SHIFT, R25, R24 // exception class
        CMP $ESR_ELx_EC_SVC64, R24
        BEQ el0_svc // SVC in 64-bit state
        CMP $ESR_ELx_EC_DABT_LOW, R24
        BEQ el0_da // data abort in EL0
        CMP $ESR_ELx_EC_IABT_LOW, R24
        BEQ el0_ia // instruction abort in EL0
        CMP $ESR_ELx_EC_FP_ASIMD, R24
        BEQ el0_fpsimd_acc // FP/ASIMD access
        CMP $ESR_ELx_EC_SVE, R24
        BEQ el0_sve_acc // SVE access
        CMP $ESR_ELx_EC_FP_EXC64, R24
        BEQ el0_fpsimd_exc // FP/ASIMD exception
        CMP $ESR_ELx_EC_SP_ALIGN, R24
        BEQ el0_sp_pc // stack alignment exception
        CMP $ESR_ELx_EC_PC_ALIGN, R24
        BEQ el0_sp_pc // pc alignment exception
        CMP $ESR_ELx_EC_UNKNOWN, R24
        BEQ el0_undef // unknown exception in EL0
        CMP $ESR_ELx_EC_BREAKPT_LOW, R24
        BEQ el0_dbg // debug exception in EL0
        CMP $ESR_ELx_EC_SYS64, R24
        BEQ el0_sys // configurable trap
        CMP $ESR_ELx_EC_WFx, R24
        BEQ el0_wfx // WFX trap
        B el0_invalid

el0_svc:
        WORD $0xd538d092 // MRS TPIDR_EL1, R18

        MOVD $0, CPU_ERROR_CODE(RSV_REG) // Clear error code.

        MOVD $1, R3
        MOVD R3, CPU_ERROR_TYPE(RSV_REG) // Set error type to user.

        MOVD $Syscall, R3
        MOVD R3, CPU_VECTOR_CODE(RSV_REG)

        B ·kernelExitToEl1(SB)

el0_da:
el0_ia:
        EXCEPTION_EL0(PageFault)
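// On a lazy FP/SIMD access trap from EL0, the path below disables the trap,
// reloads the application's fpstate (whose address was recorded via
// storeEl0Fpstate), restores the full general-purpose register file from the
// app context and ERETs directly back to EL0, without a round trip through
// the sentry.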
el0_fpsimd_acc:
el0_sve_acc:
        FPSIMD_DISABLE_TRAP(RSV_REG)
        FPSTATE_EL0_LOAD()

        // Restore context.
        MRS TPIDR_EL1, RSV_REG
        MOVD CPU_APP_ADDR(RSV_REG), RSV_REG_APP

        // Restore R0-R30.
        REGISTERS_LOAD(RSV_REG_APP, 0)
        MOVD PTRACE_R18(RSV_REG_APP), RSV_REG
        MOVD PTRACE_R19(RSV_REG_APP), RSV_REG_APP

        ERET() // return to el0.
el0_fpsimd_exc:
        EXCEPTION_EL0(El0SyncFpsimdExc)
el0_sp_pc:
        EXCEPTION_EL0(El0SyncSpPc)
el0_undef:
        EXCEPTION_EL0(El0SyncUndef)
el0_dbg:
        EXCEPTION_EL0(El0SyncDbg)
el0_sys:
        EXCEPTION_EL0(El0SyncSys)
el0_wfx:
        EXCEPTION_EL0(El0SyncWfx)
el0_invalid:
        EXCEPTION_EL0(El0SyncInv)

TEXT ·El0_irq(SB),NOSPLIT,$0
        B ·Shutdown(SB)

TEXT ·El0_fiq(SB),NOSPLIT,$0
        B ·Shutdown(SB)

TEXT ·El0_error(SB),NOSPLIT,$0
        KERNEL_ENTRY_FROM_EL0
        WORD $0xd5385219 // MRS ESR_EL1, R25
        AND $ESR_ELx_SERR_MASK, R25, R24
        CMP $ESR_ELx_SERR_NMI, R24
        BEQ el0_nmi
        B el0_bounce

el0_nmi:
        EXCEPTION_EL0(El0ErrNMI)
el0_bounce:
        EXCEPTION_EL0(VirtualizationException)

TEXT ·El0_sync_invalid(SB),NOSPLIT,$0
        B ·Shutdown(SB)

TEXT ·El0_irq_invalid(SB),NOSPLIT,$0
        B ·Shutdown(SB)

TEXT ·El0_fiq_invalid(SB),NOSPLIT,$0
        B ·Shutdown(SB)

TEXT ·El0_error_invalid(SB),NOSPLIT,$0
        B ·Shutdown(SB)

// Vectors implements the exception vector table.
// The start address of the exception vector table must be 2KB-aligned
// (2^11 bytes). For details, refer to the Arm developer documentation:
// https://developer.arm.com/documentation/100933/0100/AArch64-exception-vector-table
// See also the Linux kernel code in arch/arm64/kernel/entry.S.
TEXT ·Vectors(SB),NOSPLIT,$0
        PCALIGN $2048
        B ·El1_sync_invalid(SB)
        PCALIGN $128
        B ·El1_irq_invalid(SB)
        PCALIGN $128
        B ·El1_fiq_invalid(SB)
        PCALIGN $128
        B ·El1_error_invalid(SB)

        PCALIGN $128
        B ·El1_sync(SB)
        PCALIGN $128
        B ·El1_irq(SB)
        PCALIGN $128
        B ·El1_fiq(SB)
        PCALIGN $128
        B ·El1_error(SB)

        PCALIGN $128
        B ·El0_sync(SB)
        PCALIGN $128
        B ·El0_irq(SB)
        PCALIGN $128
        B ·El0_fiq(SB)
        PCALIGN $128
        B ·El0_error(SB)

        PCALIGN $128
        B ·El0_sync_invalid(SB)
        PCALIGN $128
        B ·El0_irq_invalid(SB)
        PCALIGN $128
        B ·El0_fiq_invalid(SB)
        PCALIGN $128
        B ·El0_error_invalid(SB)
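// Layout note: the table above has the 16 architectural vector slots, each
// 128 bytes (32 instructions) apart, so a single branch fits comfortably in
// each. PCALIGN $2048 provides the table-base alignment required when the
// address is written to VBAR_EL1.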