github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sentry/syscalls/linux/sys_time.go (about)

     1  // Copyright 2018 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package linux
    16  
    17  import (
    18  	"fmt"
    19  	"time"
    20  
    21  	"github.com/SagerNet/gvisor/pkg/abi/linux"
    22  	"github.com/SagerNet/gvisor/pkg/errors/linuxerr"
    23  	"github.com/SagerNet/gvisor/pkg/hostarch"
    24  	"github.com/SagerNet/gvisor/pkg/marshal/primitive"
    25  	"github.com/SagerNet/gvisor/pkg/sentry/arch"
    26  	"github.com/SagerNet/gvisor/pkg/sentry/kernel"
    27  	ktime "github.com/SagerNet/gvisor/pkg/sentry/kernel/time"
    28  	"github.com/SagerNet/gvisor/pkg/syserror"
    29  )
    30  
    31  // The most significant 29 bits hold either a pid or a file descriptor.
    32  func pidOfClockID(c int32) kernel.ThreadID {
    33  	return kernel.ThreadID(^(c >> 3))
    34  }
    35  
    36  // whichCPUClock returns one of CPUCLOCK_PERF, CPUCLOCK_VIRT, CPUCLOCK_SCHED or
    37  // CLOCK_FD.
    38  func whichCPUClock(c int32) int32 {
    39  	return c & linux.CPUCLOCK_CLOCK_MASK
    40  }
    41  
    42  // isCPUClockPerThread returns true if the CPUCLOCK_PERTHREAD bit is set in the
    43  // clock id.
    44  func isCPUClockPerThread(c int32) bool {
    45  	return c&linux.CPUCLOCK_PERTHREAD_MASK != 0
    46  }
    47  
    48  // isValidCPUClock returns checks that the cpu clock id is valid.
    49  func isValidCPUClock(c int32) bool {
    50  	// Bits 0, 1, and 2 cannot all be set.
    51  	if c&7 == 7 {
    52  		return false
    53  	}
    54  	if whichCPUClock(c) >= linux.CPUCLOCK_MAX {
    55  		return false
    56  	}
    57  	return true
    58  }
    59  
    60  // targetTask returns the kernel.Task for the given clock id.
    61  func targetTask(t *kernel.Task, c int32) *kernel.Task {
    62  	pid := pidOfClockID(c)
    63  	if pid == 0 {
    64  		return t
    65  	}
    66  	return t.PIDNamespace().TaskWithID(pid)
    67  }
    68  
    69  // ClockGetres implements linux syscall clock_getres(2).
    70  func ClockGetres(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
    71  	clockID := int32(args[0].Int())
    72  	addr := args[1].Pointer()
    73  	r := linux.Timespec{
    74  		Sec:  0,
    75  		Nsec: 1,
    76  	}
    77  
    78  	if _, err := getClock(t, clockID); err != nil {
    79  		return 0, nil, linuxerr.EINVAL
    80  	}
    81  
    82  	if addr == 0 {
    83  		// Don't need to copy out.
    84  		return 0, nil, nil
    85  	}
    86  
    87  	return 0, nil, copyTimespecOut(t, addr, &r)
    88  }
    89  
// cpuClocker abstracts the per-task and per-thread-group CPU clock accessors
// so getClock can handle CPUCLOCK_PERTHREAD and per-process clocks uniformly
// (both kernel.Task and kernel.ThreadGroup satisfy it — see getClock).
type cpuClocker interface {
	// UserCPUClock returns the user-time CPU clock.
	UserCPUClock() ktime.Clock
	// CPUClock returns the combined CPU clock (used for both CPUCLOCK_PROF
	// and, as an approximation, CPUCLOCK_SCHED).
	CPUClock() ktime.Clock
}
    94  
    95  func getClock(t *kernel.Task, clockID int32) (ktime.Clock, error) {
    96  	if clockID < 0 {
    97  		if !isValidCPUClock(clockID) {
    98  			return nil, linuxerr.EINVAL
    99  		}
   100  
   101  		targetTask := targetTask(t, clockID)
   102  		if targetTask == nil {
   103  			return nil, linuxerr.EINVAL
   104  		}
   105  
   106  		var target cpuClocker
   107  		if isCPUClockPerThread(clockID) {
   108  			target = targetTask
   109  		} else {
   110  			target = targetTask.ThreadGroup()
   111  		}
   112  
   113  		switch whichCPUClock(clockID) {
   114  		case linux.CPUCLOCK_VIRT:
   115  			return target.UserCPUClock(), nil
   116  		case linux.CPUCLOCK_PROF, linux.CPUCLOCK_SCHED:
   117  			// CPUCLOCK_SCHED is approximated by CPUCLOCK_PROF.
   118  			return target.CPUClock(), nil
   119  		default:
   120  			return nil, linuxerr.EINVAL
   121  		}
   122  	}
   123  
   124  	switch clockID {
   125  	case linux.CLOCK_REALTIME, linux.CLOCK_REALTIME_COARSE:
   126  		return t.Kernel().RealtimeClock(), nil
   127  	case linux.CLOCK_MONOTONIC, linux.CLOCK_MONOTONIC_COARSE,
   128  		linux.CLOCK_MONOTONIC_RAW, linux.CLOCK_BOOTTIME:
   129  		// CLOCK_MONOTONIC approximates CLOCK_MONOTONIC_RAW.
   130  		// CLOCK_BOOTTIME is internally mapped to CLOCK_MONOTONIC, as:
   131  		// - CLOCK_BOOTTIME should behave as CLOCK_MONOTONIC while also
   132  		//   including suspend time.
   133  		// - gVisor has no concept of suspend/resume.
   134  		// - CLOCK_MONOTONIC already includes save/restore time, which is
   135  		//   the closest to suspend time.
   136  		return t.Kernel().MonotonicClock(), nil
   137  	case linux.CLOCK_PROCESS_CPUTIME_ID:
   138  		return t.ThreadGroup().CPUClock(), nil
   139  	case linux.CLOCK_THREAD_CPUTIME_ID:
   140  		return t.CPUClock(), nil
   141  	default:
   142  		return nil, linuxerr.EINVAL
   143  	}
   144  }
   145  
   146  // ClockGettime implements linux syscall clock_gettime(2).
   147  func ClockGettime(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
   148  	clockID := int32(args[0].Int())
   149  	addr := args[1].Pointer()
   150  
   151  	c, err := getClock(t, clockID)
   152  	if err != nil {
   153  		return 0, nil, err
   154  	}
   155  	ts := c.Now().Timespec()
   156  	return 0, nil, copyTimespecOut(t, addr, &ts)
   157  }
   158  
// ClockSettime implements linux syscall clock_settime(2).
//
// Setting any sentry clock is unsupported, so this unconditionally fails
// with EPERM (the error Linux returns to callers without CAP_SYS_TIME).
func ClockSettime(*kernel.Task, arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
	return 0, nil, linuxerr.EPERM
}
   163  
   164  // Time implements linux syscall time(2).
   165  func Time(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
   166  	addr := args[0].Pointer()
   167  
   168  	r := t.Kernel().RealtimeClock().Now().TimeT()
   169  	if addr == hostarch.Addr(0) {
   170  		return uintptr(r), nil, nil
   171  	}
   172  
   173  	if _, err := r.CopyOut(t, addr); err != nil {
   174  		return 0, nil, err
   175  	}
   176  	return uintptr(r), nil, nil
   177  }
   178  
// clockNanosleepRestartBlock encapsulates the state required to restart
// clock_nanosleep(2) via restart_syscall(2).
//
// +stateify savable
type clockNanosleepRestartBlock struct {
	// c is the clock the sleep is measured against.
	c ktime.Clock
	// end is the absolute time on c at which the sleep completes.
	end ktime.Time
	// rem is the user address to copy the remaining time to on interruption,
	// or 0 if no remaining time should be reported.
	rem hostarch.Addr
}
   188  
// Restart implements kernel.SyscallRestartBlock.Restart.
//
// It resumes sleeping toward the original absolute end time; passing
// needRestartBlock=true lets an interrupted restart arrange yet another
// restart with the same state.
func (n *clockNanosleepRestartBlock) Restart(t *kernel.Task) (uintptr, error) {
	return 0, clockNanosleepUntil(t, n.c, n.end, n.rem, true)
}
   193  
// clockNanosleepUntil blocks until time end on clock c.
//
// If blocking is interrupted, the remaining time is copied out to rem (when
// rem is non-zero), and the syscall is restarted with the original
// arguments: via a restart block carrying the same absolute end time when
// needRestartBlock is true (ERESTART_RESTARTBLOCK), or only if no signal
// handler runs otherwise (ERESTARTNOHAND).
func clockNanosleepUntil(t *kernel.Task, c ktime.Clock, end ktime.Time, rem hostarch.Addr, needRestartBlock bool) error {
	notifier, tchan := ktime.NewChannelNotifier()
	timer := ktime.NewTimer(c, notifier)

	// Turn on the timer: one-shot (Period 0), firing at the absolute time end.
	timer.Swap(ktime.Setting{
		Period:  0,
		Enabled: true,
		Next:    end,
	})

	err := t.BlockWithTimer(nil, tchan)

	// The timer must be destroyed whether it fired or the block was
	// interrupted.
	timer.Destroy()

	switch {
	case linuxerr.Equals(linuxerr.ETIMEDOUT, err):
		// Slept for entire timeout.
		return nil
	case err == syserror.ErrInterrupted:
		// Interrupted. If the deadline has passed anyway, treat the sleep as
		// complete.
		remaining := end.Sub(c.Now())
		if remaining <= 0 {
			return nil
		}

		// Copy out remaining time.
		if rem != 0 {
			timeleft := linux.NsecToTimespec(remaining.Nanoseconds())
			if err := copyTimespecOut(t, rem, &timeleft); err != nil {
				return err
			}
		}
		if needRestartBlock {
			// Arrange for a restart with the remaining duration.
			t.SetSyscallRestartBlock(&clockNanosleepRestartBlock{
				c:   c,
				end: end,
				rem: rem,
			})
			return syserror.ERESTART_RESTARTBLOCK
		}
		return syserror.ERESTARTNOHAND
	default:
		// BlockWithTimer with a timer channel only times out or is
		// interrupted; anything else is a sentry bug.
		panic(fmt.Sprintf("Impossible BlockWithTimer error %v", err))
	}
}
   245  
   246  // Nanosleep implements linux syscall Nanosleep(2).
   247  func Nanosleep(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
   248  	addr := args[0].Pointer()
   249  	rem := args[1].Pointer()
   250  
   251  	ts, err := copyTimespecIn(t, addr)
   252  	if err != nil {
   253  		return 0, nil, err
   254  	}
   255  
   256  	if !ts.Valid() {
   257  		return 0, nil, linuxerr.EINVAL
   258  	}
   259  
   260  	// Just like linux, we cap the timeout with the max number that int64 can
   261  	// represent which is roughly 292 years.
   262  	dur := time.Duration(ts.ToNsecCapped()) * time.Nanosecond
   263  	c := t.Kernel().MonotonicClock()
   264  	return 0, nil, clockNanosleepUntil(t, c, c.Now().Add(dur), rem, true)
   265  }
   266  
   267  // ClockNanosleep implements linux syscall clock_nanosleep(2).
   268  func ClockNanosleep(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
   269  	clockID := int32(args[0].Int())
   270  	flags := args[1].Int()
   271  	addr := args[2].Pointer()
   272  	rem := args[3].Pointer()
   273  
   274  	req, err := copyTimespecIn(t, addr)
   275  	if err != nil {
   276  		return 0, nil, err
   277  	}
   278  
   279  	if !req.Valid() {
   280  		return 0, nil, linuxerr.EINVAL
   281  	}
   282  
   283  	// Only allow clock constants also allowed by Linux.
   284  	if clockID > 0 {
   285  		if clockID != linux.CLOCK_REALTIME &&
   286  			clockID != linux.CLOCK_MONOTONIC &&
   287  			clockID != linux.CLOCK_PROCESS_CPUTIME_ID {
   288  			return 0, nil, linuxerr.EINVAL
   289  		}
   290  	}
   291  
   292  	c, err := getClock(t, clockID)
   293  	if err != nil {
   294  		return 0, nil, err
   295  	}
   296  
   297  	if flags&linux.TIMER_ABSTIME != 0 {
   298  		return 0, nil, clockNanosleepUntil(t, c, ktime.FromTimespec(req), 0, false)
   299  	}
   300  
   301  	dur := time.Duration(req.ToNsecCapped()) * time.Nanosecond
   302  	return 0, nil, clockNanosleepUntil(t, c, c.Now().Add(dur), rem, true)
   303  }
   304  
   305  // Gettimeofday implements linux syscall gettimeofday(2).
   306  func Gettimeofday(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {
   307  	tv := args[0].Pointer()
   308  	tz := args[1].Pointer()
   309  
   310  	if tv != hostarch.Addr(0) {
   311  		nowTv := t.Kernel().RealtimeClock().Now().Timeval()
   312  		if err := copyTimevalOut(t, tv, &nowTv); err != nil {
   313  			return 0, nil, err
   314  		}
   315  	}
   316  
   317  	if tz != hostarch.Addr(0) {
   318  		// Ask the time package for the timezone.
   319  		_, offset := time.Now().Zone()
   320  		// This int32 array mimics linux's struct timezone.
   321  		timezone := []int32{-int32(offset) / 60, 0}
   322  		_, err := primitive.CopyInt32SliceOut(t, tz, timezone)
   323  		return 0, nil, err
   324  	}
   325  	return 0, nil, nil
   326  }