github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/sentry/platform/ptrace/subprocess_amd64.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build amd64
// +build amd64

package ptrace

import (
	"fmt"
	"strings"

	"golang.org/x/sys/unix"
	"github.com/nicocha30/gvisor-ligolo/pkg/abi/linux"
	"github.com/nicocha30/gvisor-ligolo/pkg/seccomp"
	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/arch"
)

const (
	// initRegsRipAdjustment is the size of the syscall instruction.
	initRegsRipAdjustment = 2
)

// resetSysemuRegs sets up emulation registers.
//
// This should be called prior to calling sysemu.
func (t *thread) resetSysemuRegs(regs *arch.Registers) {
	regs.Cs = t.initRegs.Cs
	regs.Ss = t.initRegs.Ss
	regs.Ds = t.initRegs.Ds
	regs.Es = t.initRegs.Es
	regs.Fs = t.initRegs.Fs
	regs.Gs = t.initRegs.Gs
}

// createSyscallRegs sets up syscall registers.
//
// This should be called to generate registers for a system call.
func createSyscallRegs(initRegs *arch.Registers, sysno uintptr, args ...arch.SyscallArgument) arch.Registers {
	// Copy initial registers.
	regs := *initRegs

	// Set our syscall number.
	regs.Rax = uint64(sysno)
	if len(args) >= 1 {
		regs.Rdi = args[0].Uint64()
	}
	if len(args) >= 2 {
		regs.Rsi = args[1].Uint64()
	}
	if len(args) >= 3 {
		regs.Rdx = args[2].Uint64()
	}
	if len(args) >= 4 {
		regs.R10 = args[3].Uint64()
	}
	if len(args) >= 5 {
		regs.R8 = args[4].Uint64()
	}
	if len(args) >= 6 {
		regs.R9 = args[5].Uint64()
	}

	return regs
}

// isSingleStepping determines if the registers indicate single-stepping.
func isSingleStepping(regs *arch.Registers) bool {
	return (regs.Eflags & arch.X86TrapFlag) != 0
}

// updateSyscallRegs updates registers after finishing sysemu.
func updateSyscallRegs(regs *arch.Registers) {
	// Ptrace puts -ENOSYS in rax on syscall-enter-stop.
	regs.Rax = regs.Orig_rax
}

// syscallReturnValue extracts a sensible return from registers.
func syscallReturnValue(regs *arch.Registers) (uintptr, error) {
	rval := int64(regs.Rax)
	if rval < 0 {
		return 0, unix.Errno(-rval)
	}
	return uintptr(rval), nil
}

func dumpRegs(regs *arch.Registers) string {
	var m strings.Builder

	fmt.Fprintf(&m, "Registers:\n")
	fmt.Fprintf(&m, "\tR15\t = %016x\n", regs.R15)
	fmt.Fprintf(&m, "\tR14\t = %016x\n", regs.R14)
	fmt.Fprintf(&m, "\tR13\t = %016x\n", regs.R13)
	fmt.Fprintf(&m, "\tR12\t = %016x\n", regs.R12)
	fmt.Fprintf(&m, "\tRbp\t = %016x\n", regs.Rbp)
	fmt.Fprintf(&m, "\tRbx\t = %016x\n", regs.Rbx)
	fmt.Fprintf(&m, "\tR11\t = %016x\n", regs.R11)
	fmt.Fprintf(&m, "\tR10\t = %016x\n", regs.R10)
	fmt.Fprintf(&m, "\tR9\t = %016x\n", regs.R9)
	fmt.Fprintf(&m, "\tR8\t = %016x\n", regs.R8)
	fmt.Fprintf(&m, "\tRax\t = %016x\n", regs.Rax)
	fmt.Fprintf(&m, "\tRcx\t = %016x\n", regs.Rcx)
	fmt.Fprintf(&m, "\tRdx\t = %016x\n", regs.Rdx)
	fmt.Fprintf(&m, "\tRsi\t = %016x\n", regs.Rsi)
	fmt.Fprintf(&m, "\tRdi\t = %016x\n", regs.Rdi)
	fmt.Fprintf(&m, "\tOrig_rax = %016x\n", regs.Orig_rax)
	fmt.Fprintf(&m, "\tRip\t = %016x\n", regs.Rip)
	fmt.Fprintf(&m, "\tCs\t = %016x\n", regs.Cs)
	fmt.Fprintf(&m, "\tEflags\t = %016x\n", regs.Eflags)
	fmt.Fprintf(&m, "\tRsp\t = %016x\n", regs.Rsp)
	fmt.Fprintf(&m, "\tSs\t = %016x\n", regs.Ss)
	fmt.Fprintf(&m, "\tFs_base\t = %016x\n", regs.Fs_base)
	fmt.Fprintf(&m, "\tGs_base\t = %016x\n", regs.Gs_base)
	fmt.Fprintf(&m, "\tDs\t = %016x\n", regs.Ds)
	fmt.Fprintf(&m, "\tEs\t = %016x\n", regs.Es)
	fmt.Fprintf(&m, "\tFs\t = %016x\n", regs.Fs)
	fmt.Fprintf(&m, "\tGs\t = %016x\n", regs.Gs)

	return m.String()
}

// adjustInitRegsRip adjusts the saved RIP so that it points just before the
// system call instruction that was executed.
func (t *thread) adjustInitRegsRip() {
	t.initRegs.Rip -= initRegsRipAdjustment
}

// Pass the expected PPID to the child via R15 when creating stub process.
func initChildProcessPPID(initregs *arch.Registers, ppid int32) {
	initregs.R15 = uint64(ppid)
	// Rbx has to be set to 1 when creating stub process.
	initregs.Rbx = 1
}

// patchSignalInfo patches the signal info to account for hitting the seccomp
// filters from vsyscall emulation, specified below. We allow for SIGSYS as a
// synchronous trap, but patch the structure to appear like a SIGSEGV with the
// Rip as the faulting address.
//
// Note that this should only be called after verifying that the signalInfo has
// been generated by the kernel.
func patchSignalInfo(regs *arch.Registers, signalInfo *linux.SignalInfo) {
	if linux.Signal(signalInfo.Signo) == linux.SIGSYS {
		signalInfo.Signo = int32(linux.SIGSEGV)

		// Unwind the kernel emulation, if any has occurred. A SIGSYS is delivered
		// with the si_call_addr field pointing to the current RIP. This field
		// aligns with the si_addr field for a SIGSEGV, so we don't need to touch
		// anything there. We do need to unwind emulation however, so we set the
		// instruction pointer to the faulting value, and "unpop" the stack.
		regs.Rip = signalInfo.Addr()
		regs.Rsp -= 8
	}
}

// enableCpuidFault enables cpuid-faulting.
//
// This may fail on older kernels or hardware, so we just disregard the result.
// Host CPUID will be enabled.
//
// This is safe to call in an afterFork context.
//
//go:norace
//go:nosplit
func enableCpuidFault() {
	unix.RawSyscall6(unix.SYS_ARCH_PRCTL, linux.ARCH_SET_CPUID, 0, 0, 0, 0, 0)
}

// appendArchSeccompRules appends architecture-specific seccomp rules when creating the BPF program.
// See attachedThread() for more details.
func appendArchSeccompRules(rules []seccomp.RuleSet, defaultAction linux.BPFAction) []seccomp.RuleSet {
	rules = append(rules,
		// Rules for trapping vsyscall access.
		seccomp.RuleSet{
			Rules: seccomp.SyscallRules{
				unix.SYS_GETTIMEOFDAY: {},
				unix.SYS_TIME:         {},
				unix.SYS_GETCPU:       {}, // SYS_GETCPU was not defined in package syscall on amd64.
			},
			Action:   linux.SECCOMP_RET_TRAP,
			Vsyscall: true,
		})
	if defaultAction != linux.SECCOMP_RET_ALLOW {
		rules = append(rules,
			seccomp.RuleSet{
				Rules: seccomp.SyscallRules{
					unix.SYS_ARCH_PRCTL: []seccomp.Rule{
						{seccomp.EqualTo(linux.ARCH_SET_CPUID), seccomp.EqualTo(0)},
					},
				},
				Action: linux.SECCOMP_RET_ALLOW,
			})
	}
	return rules
}

// probeSeccomp returns true iff seccomp is run after ptrace notifications,
// which is generally the case for kernel version >= 4.8. This check is dynamic
// because the behavior has been backported to older kernels.
//
// See createStub for more information.
//
// Precondition: the runtime OS thread must be locked.
func probeSeccomp() bool {
	// Create a completely new, destroyable process.
	t, err := attachedThread(0, linux.SECCOMP_RET_ERRNO)
	if err != nil {
		panic(fmt.Sprintf("seccomp probe failed: %v", err))
	}
	defer t.destroy()

	// Set registers to the yield system call. This call is not allowed
	// by the filters specified in the attachedThread function.
	regs := createSyscallRegs(&t.initRegs, unix.SYS_SCHED_YIELD)
	if err := t.setRegs(&regs); err != nil {
		panic(fmt.Sprintf("ptrace set regs failed: %v", err))
	}

	for {
		// Attempt an emulation.
		if _, _, errno := unix.RawSyscall6(unix.SYS_PTRACE, unix.PTRACE_SYSEMU, uintptr(t.tid), 0, 0, 0, 0); errno != 0 {
			panic(fmt.Sprintf("ptrace syscall-enter failed: %v", errno))
		}

		sig := t.wait(stopped)
		if sig == (syscallEvent | unix.SIGTRAP) {
			// Did the seccomp errno hook already run? This would
			// indicate that seccomp is first in line and we're
			// less than 4.8.
			if err := t.getRegs(&regs); err != nil {
				panic(fmt.Sprintf("ptrace get-regs failed: %v", err))
			}
			if _, err := syscallReturnValue(&regs); err == nil {
				// The seccomp errno mode ran first, and reset
				// the error in the registers.
				return false
			}
			// The seccomp hook did not run yet, and therefore it
			// is safe to use RET_KILL mode for dispatched calls.
			return true
		}
	}
}

func (s *subprocess) arm64SyscallWorkaround(t *thread, regs *arch.Registers) {
}
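
// The helpers above are driven by the generic subprocess code elsewhere in
// this package (not shown in this file). A minimal, illustrative sketch of
// that register round trip for a remote system call, assuming a *thread t
// that is already attached and stopped (unix.SYS_GETPID is just an example
// syscall number), looks roughly like:
//
//	regs := createSyscallRegs(&t.initRegs, unix.SYS_GETPID)
//	if err := t.setRegs(&regs); err != nil {
//		// handle the ptrace error
//	}
//	// Resume the stub with PTRACE_SYSEMU (as in probeSeccomp above) and
//	// wait for the syscall-enter stop before reading the registers back.
//	if err := t.getRegs(&regs); err != nil {
//		// handle the ptrace error
//	}
//	updateSyscallRegs(&regs) // restore Rax from Orig_rax after sysemu
//	rval, err := syscallReturnValue(&regs)
//
// This is a sketch only; the real dispatch path also deals with signals and
// single-stepping (see isSingleStepping).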