github.com/MerlinKodo/gvisor@v0.0.0-20231110090155-957f62ecf90e/pkg/sentry/syscalls/linux/sys_time.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package linux

import (
	"fmt"
	"time"

	"github.com/MerlinKodo/gvisor/pkg/abi/linux"
	"github.com/MerlinKodo/gvisor/pkg/errors/linuxerr"
	"github.com/MerlinKodo/gvisor/pkg/hostarch"
	"github.com/MerlinKodo/gvisor/pkg/marshal/primitive"
	"github.com/MerlinKodo/gvisor/pkg/sentry/arch"
	"github.com/MerlinKodo/gvisor/pkg/sentry/kernel"
	ktime "github.com/MerlinKodo/gvisor/pkg/sentry/kernel/time"
)

// The most significant 29 bits hold either a pid or a file descriptor.
func pidOfClockID(c int32) kernel.ThreadID {
	return kernel.ThreadID(^(c >> 3))
}

// whichCPUClock returns one of CPUCLOCK_PROF, CPUCLOCK_VIRT, CPUCLOCK_SCHED or
// CLOCK_FD.
func whichCPUClock(c int32) int32 {
	return c & linux.CPUCLOCK_CLOCK_MASK
}

// isCPUClockPerThread returns true if the CPUCLOCK_PERTHREAD bit is set in the
// clock id.
func isCPUClockPerThread(c int32) bool {
	return c&linux.CPUCLOCK_PERTHREAD_MASK != 0
}

// isValidCPUClock checks that the CPU clock id is valid.
func isValidCPUClock(c int32) bool {
	// Bits 0, 1, and 2 cannot all be set.
	if c&7 == 7 {
		return false
	}
	if whichCPUClock(c) >= linux.CPUCLOCK_MAX {
		return false
	}
	return true
}

// targetTask returns the kernel.Task for the given clock id.
func targetTask(t *kernel.Task, c int32) *kernel.Task {
	pid := pidOfClockID(c)
	if pid == 0 {
		return t
	}
	return t.PIDNamespace().TaskWithID(pid)
}

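// Illustrative sketch, not part of the original file: the Linux encoding
// decoded by the helpers above packs a pid (or file descriptor) and a clock
// type into a single clockid_t as roughly ((~pid) << 3) | type, where the low
// two bits pick CPUCLOCK_PROF/VIRT/SCHED and bit 2 (CPUCLOCK_PERTHREAD_MASK)
// selects a per-thread clock. The function name below is hypothetical; it
// only exercises pidOfClockID, whichCPUClock, isCPUClockPerThread, and
// isValidCPUClock on a sample value.
func exampleDecodeCPUClockID() {
	pid := int32(1234)
	// Per-process CPUCLOCK_PROF clock for pid 1234. The result is negative,
	// which is how getClock below recognizes a dynamic CPU clock ID.
	clockID := (^pid << 3) | linux.CPUCLOCK_PROF

	fmt.Println(pidOfClockID(clockID))        // 1234
	fmt.Println(whichCPUClock(clockID))       // 0 (CPUCLOCK_PROF)
	fmt.Println(isCPUClockPerThread(clockID)) // false: per-process
	fmt.Println(isValidCPUClock(clockID))     // true
}
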
// ClockGetres implements linux syscall clock_getres(2).
func ClockGetres(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	clockID := int32(args[0].Int())
	addr := args[1].Pointer()
	r := linux.Timespec{
		Sec:  0,
		Nsec: 1,
	}

	if _, err := getClock(t, clockID); err != nil {
		return 0, nil, linuxerr.EINVAL
	}

	if addr == 0 {
		// Don't need to copy out.
		return 0, nil, nil
	}

	return 0, nil, copyTimespecOut(t, addr, &r)
}

type cpuClocker interface {
	UserCPUClock() ktime.Clock
	CPUClock() ktime.Clock
}

func getClock(t *kernel.Task, clockID int32) (ktime.Clock, error) {
	if clockID < 0 {
		if !isValidCPUClock(clockID) {
			return nil, linuxerr.EINVAL
		}

		targetTask := targetTask(t, clockID)
		if targetTask == nil {
			return nil, linuxerr.EINVAL
		}

		var target cpuClocker
		if isCPUClockPerThread(clockID) {
			target = targetTask
		} else {
			target = targetTask.ThreadGroup()
		}

		switch whichCPUClock(clockID) {
		case linux.CPUCLOCK_VIRT:
			return target.UserCPUClock(), nil
		case linux.CPUCLOCK_PROF, linux.CPUCLOCK_SCHED:
			// CPUCLOCK_SCHED is approximated by CPUCLOCK_PROF.
			return target.CPUClock(), nil
		default:
			return nil, linuxerr.EINVAL
		}
	}

	switch clockID {
	case linux.CLOCK_REALTIME, linux.CLOCK_REALTIME_COARSE:
		return t.Kernel().RealtimeClock(), nil
	case linux.CLOCK_MONOTONIC, linux.CLOCK_MONOTONIC_COARSE,
		linux.CLOCK_MONOTONIC_RAW, linux.CLOCK_BOOTTIME:
		// CLOCK_MONOTONIC approximates CLOCK_MONOTONIC_RAW.
		// CLOCK_BOOTTIME is internally mapped to CLOCK_MONOTONIC, as:
		//   - CLOCK_BOOTTIME should behave as CLOCK_MONOTONIC while also
		//     including suspend time.
		//   - gVisor has no concept of suspend/resume.
		//   - CLOCK_MONOTONIC already includes save/restore time, which is
		//     the closest to suspend time.
		return t.Kernel().MonotonicClock(), nil
	case linux.CLOCK_PROCESS_CPUTIME_ID:
		return t.ThreadGroup().CPUClock(), nil
	case linux.CLOCK_THREAD_CPUTIME_ID:
		return t.CPUClock(), nil
	default:
		return nil, linuxerr.EINVAL
	}
}

// ClockGettime implements linux syscall clock_gettime(2).
func ClockGettime(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	clockID := int32(args[0].Int())
	addr := args[1].Pointer()

	c, err := getClock(t, clockID)
	if err != nil {
		return 0, nil, err
	}
	ts := c.Now().Timespec()
	return 0, nil, copyTimespecOut(t, addr, &ts)
}

// ClockSettime implements linux syscall clock_settime(2).
func ClockSettime(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	return 0, nil, linuxerr.EPERM
}

// Time implements linux syscall time(2).
func Time(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()

	r := t.Kernel().RealtimeClock().Now().TimeT()
	if addr == hostarch.Addr(0) {
		return uintptr(r), nil, nil
	}

	if _, err := r.CopyOut(t, addr); err != nil {
		return 0, nil, err
	}
	return uintptr(r), nil, nil
}

// Nanosleep implements linux syscall nanosleep(2).
func Nanosleep(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	addr := args[0].Pointer()
	rem := args[1].Pointer()

	ts, err := copyTimespecIn(t, addr)
	if err != nil {
		return 0, nil, err
	}

	if !ts.Valid() {
		return 0, nil, linuxerr.EINVAL
	}

	// Just like Linux, we cap the timeout at the maximum value an int64 can
	// represent, which is roughly 292 years.
	dur := time.Duration(ts.ToNsecCapped()) * time.Nanosecond
	c := t.Kernel().MonotonicClock()
	return 0, nil, clockNanosleepUntil(t, c, c.Now().Add(dur), rem, true)
}

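// Illustrative sketch, not part of the original file: a quick check of the
// "roughly 292 years" figure in the comment above. ToNsecCapped clamps the
// requested timespec to the largest int64 nanosecond count, which is also the
// largest representable time.Duration. The function name is hypothetical.
func exampleSleepCapYears() {
	const maxDur = time.Duration(1<<63 - 1) // math.MaxInt64 nanoseconds
	years := maxDur.Hours() / 24 / 365.25
	fmt.Printf("longest possible sleep ≈ %.0f years\n", years) // ≈ 292
}
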
// ClockNanosleep implements linux syscall clock_nanosleep(2).
func ClockNanosleep(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	clockID := int32(args[0].Int())
	flags := args[1].Int()
	addr := args[2].Pointer()
	rem := args[3].Pointer()

	req, err := copyTimespecIn(t, addr)
	if err != nil {
		return 0, nil, err
	}

	if !req.Valid() {
		return 0, nil, linuxerr.EINVAL
	}

	// Only allow clock constants also allowed by Linux. (CLOCK_TAI is
	// unimplemented.)
	if clockID > 0 {
		if clockID != linux.CLOCK_REALTIME &&
			clockID != linux.CLOCK_MONOTONIC &&
			clockID != linux.CLOCK_BOOTTIME &&
			clockID != linux.CLOCK_PROCESS_CPUTIME_ID {
			return 0, nil, linuxerr.EINVAL
		}
	}

	c, err := getClock(t, clockID)
	if err != nil {
		return 0, nil, err
	}

	if flags&linux.TIMER_ABSTIME != 0 {
		return 0, nil, clockNanosleepUntil(t, c, ktime.FromTimespec(req), 0, false)
	}

	dur := time.Duration(req.ToNsecCapped()) * time.Nanosecond
	return 0, nil, clockNanosleepUntil(t, c, c.Now().Add(dur), rem, true)
}

// clockNanosleepUntil blocks until a specified time.
//
// If blocking is interrupted, the syscall is restarted with the original
// arguments.
func clockNanosleepUntil(t *kernel.Task, c ktime.Clock, end ktime.Time, rem hostarch.Addr, needRestartBlock bool) error {
	var err error
	if c == t.Kernel().MonotonicClock() {
		err = t.BlockWithDeadline(nil, true, end)
	} else {
		notifier, tchan := ktime.NewChannelNotifier()
		timer := ktime.NewTimer(c, notifier)
		timer.Swap(ktime.Setting{
			Period:  0,
			Enabled: true,
			Next:    end,
		})
		err = t.BlockWithTimer(nil, tchan)
		timer.Destroy()
	}

	switch {
	case linuxerr.Equals(linuxerr.ETIMEDOUT, err):
		// Slept for entire timeout.
		return nil
	case err == linuxerr.ErrInterrupted:
		// Interrupted.
		remaining := end.Sub(c.Now())
		if remaining <= 0 {
			return nil
		}

		// Copy out remaining time.
		if rem != 0 {
			timeleft := linux.NsecToTimespec(remaining.Nanoseconds())
			if err := copyTimespecOut(t, rem, &timeleft); err != nil {
				return err
			}
		}
		if needRestartBlock {
			// Arrange for a restart with the remaining duration.
			t.SetSyscallRestartBlock(&clockNanosleepRestartBlock{
				c:   c,
				end: end,
				rem: rem,
			})
			return linuxerr.ERESTART_RESTARTBLOCK
		}
		return linuxerr.ERESTARTNOHAND
	default:
		panic(fmt.Sprintf("Impossible BlockWithTimer error %v", err))
	}
}

// clockNanosleepRestartBlock encapsulates the state required to restart
// clock_nanosleep(2) via restart_syscall(2).
//
// +stateify savable
type clockNanosleepRestartBlock struct {
	c   ktime.Clock
	end ktime.Time
	rem hostarch.Addr
}

// Restart implements kernel.SyscallRestartBlock.Restart.
func (n *clockNanosleepRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
	return 0, clockNanosleepUntil(t, n.c, n.end, n.rem, true)
}

// Gettimeofday implements linux syscall gettimeofday(2).
func Gettimeofday(t *kernel.Task, sysno uintptr, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	tv := args[0].Pointer()
	tz := args[1].Pointer()

	if tv != hostarch.Addr(0) {
		nowTv := t.Kernel().RealtimeClock().Now().Timeval()
		if err := copyTimevalOut(t, tv, &nowTv); err != nil {
			return 0, nil, err
		}
	}

	if tz != hostarch.Addr(0) {
		// Ask the time package for the timezone.
		_, offset := time.Now().Zone()
		// This int32 array mimics linux's struct timezone.
		timezone := []int32{-int32(offset) / 60, 0}
		_, err := primitive.CopyInt32SliceOut(t, tz, timezone)
		return 0, nil, err
	}
	return 0, nil, nil
}

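// Illustrative sketch, not part of the original file: the two int32s that
// Gettimeofday copies out above correspond to Linux's struct timezone
// (tz_minuteswest, tz_dsttime). Go's Zone() reports the offset in seconds
// east of UTC, so negating and dividing by 60 yields minutes west of
// Greenwich; tz_dsttime is left as 0. The function name and sample offset
// are hypothetical.
func exampleMinutesWest() {
	offsetEastSec := -18000 // e.g. UTC-05:00, as Zone() would report it
	minutesWest := -int32(offsetEastSec) / 60
	fmt.Println(minutesWest) // 300 minutes west of Greenwich
}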