github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/pkg/sentry/kernel/task_acct.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kernel

// Accounting, limits, timers.

import (
	"github.com/SagerNet/gvisor/pkg/abi/linux"
	"github.com/SagerNet/gvisor/pkg/errors/linuxerr"
	ktime "github.com/SagerNet/gvisor/pkg/sentry/kernel/time"
	"github.com/SagerNet/gvisor/pkg/sentry/limits"
	"github.com/SagerNet/gvisor/pkg/sentry/usage"
)

// Getitimer implements getitimer(2).
//
// Preconditions: The caller must be running on the task goroutine.
func (t *Task) Getitimer(id int32) (linux.ItimerVal, error) {
	var tm ktime.Time
	var s ktime.Setting
	switch id {
	case linux.ITIMER_REAL:
		tm, s = t.tg.itimerRealTimer.Get()
	case linux.ITIMER_VIRTUAL:
		tm = t.tg.UserCPUClock().Now()
		t.tg.signalHandlers.mu.Lock()
		s, _ = t.tg.itimerVirtSetting.At(tm)
		t.tg.signalHandlers.mu.Unlock()
	case linux.ITIMER_PROF:
		tm = t.tg.CPUClock().Now()
		t.tg.signalHandlers.mu.Lock()
		s, _ = t.tg.itimerProfSetting.At(tm)
		t.tg.signalHandlers.mu.Unlock()
	default:
		return linux.ItimerVal{}, linuxerr.EINVAL
	}
	val, iv := ktime.SpecFromSetting(tm, s)
	return linux.ItimerVal{
		Value:    linux.DurationToTimeval(val),
		Interval: linux.DurationToTimeval(iv),
	}, nil
}
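
// A minimal usage sketch (not part of the original file): a caller on the
// task goroutine reading the real-time interval timer. The helper name
// readRealTimer is hypothetical and exists only for illustration.
func readRealTimer(t *Task) (linux.ItimerVal, error) {
	// Per the precondition on Getitimer, this must run on t's own task
	// goroutine.
	return t.Getitimer(linux.ITIMER_REAL)
}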

// Setitimer implements setitimer(2).
//
// Preconditions: The caller must be running on the task goroutine.
func (t *Task) Setitimer(id int32, newitv linux.ItimerVal) (linux.ItimerVal, error) {
	var tm ktime.Time
	var olds ktime.Setting
	switch id {
	case linux.ITIMER_REAL:
		news, err := ktime.SettingFromSpec(newitv.Value.ToDuration(), newitv.Interval.ToDuration(), t.tg.itimerRealTimer.Clock())
		if err != nil {
			return linux.ItimerVal{}, err
		}
		tm, olds = t.tg.itimerRealTimer.Swap(news)
	case linux.ITIMER_VIRTUAL:
		c := t.tg.UserCPUClock()
		var err error
		t.k.cpuClockTicker.Atomically(func() {
			tm = c.Now()
			var news ktime.Setting
			news, err = ktime.SettingFromSpecAt(newitv.Value.ToDuration(), newitv.Interval.ToDuration(), tm)
			if err != nil {
				return
			}
			t.tg.signalHandlers.mu.Lock()
			olds = t.tg.itimerVirtSetting
			t.tg.itimerVirtSetting = news
			t.tg.updateCPUTimersEnabledLocked()
			t.tg.signalHandlers.mu.Unlock()
		})
		if err != nil {
			return linux.ItimerVal{}, err
		}
	case linux.ITIMER_PROF:
		c := t.tg.CPUClock()
		var err error
		t.k.cpuClockTicker.Atomically(func() {
			tm = c.Now()
			var news ktime.Setting
			news, err = ktime.SettingFromSpecAt(newitv.Value.ToDuration(), newitv.Interval.ToDuration(), tm)
			if err != nil {
				return
			}
			t.tg.signalHandlers.mu.Lock()
			olds = t.tg.itimerProfSetting
			t.tg.itimerProfSetting = news
			t.tg.updateCPUTimersEnabledLocked()
			t.tg.signalHandlers.mu.Unlock()
		})
		if err != nil {
			return linux.ItimerVal{}, err
		}
	default:
		return linux.ItimerVal{}, linuxerr.EINVAL
	}
	oldval, oldiv := ktime.SpecFromSetting(tm, olds)
	return linux.ItimerVal{
		Value:    linux.DurationToTimeval(oldval),
		Interval: linux.DurationToTimeval(oldiv),
	}, nil
}
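
// A minimal usage sketch (not part of the original file): arming a one-shot
// 100ms ITIMER_VIRTUAL timer from the task goroutine. The helper name
// armVirtualTimer is hypothetical, and the returned ItimerVal holding the
// previous setting is discarded here.
func armVirtualTimer(t *Task) error {
	newitv := linux.ItimerVal{
		// 100ms until the first (and only) expiration; leaving
		// Interval zero means the timer disarms after firing once.
		Value: linux.Timeval{Usec: 100000},
	}
	_, err := t.Setitimer(linux.ITIMER_VIRTUAL, newitv)
	return err
}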

// IOUsage returns the I/O usage of the thread.
func (t *Task) IOUsage() *usage.IO {
	return t.ioUsage
}

// IOUsage returns the total I/O usage of all dead and live threads in the
// group.
func (tg *ThreadGroup) IOUsage() *usage.IO {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()

	io := *tg.ioUsage
	// Account for active tasks.
	for t := tg.tasks.Front(); t != nil; t = t.Next() {
		io.Accumulate(t.IOUsage())
	}
	return &io
}

// Name returns t's name.
func (t *Task) Name() string {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.image.Name
}

// SetName changes t's name.
func (t *Task) SetName(name string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.image.Name = name
	t.Debugf("Set thread name to %q", name)
}

// Limits implements context.Context.Limits.
func (t *Task) Limits() *limits.LimitSet {
	return t.ThreadGroup().Limits()
}

// StartTime returns t's start time.
func (t *Task) StartTime() ktime.Time {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.startTime
}

// MaxRSS returns the maximum resident set size of the task in bytes. The
// which argument should be one of RUSAGE_SELF, RUSAGE_CHILDREN,
// RUSAGE_THREAD, or RUSAGE_BOTH; see getrusage(2) for documentation on the
// behavior of these flags.
func (t *Task) MaxRSS(which int32) uint64 {
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()

	switch which {
	case linux.RUSAGE_SELF, linux.RUSAGE_THREAD:
		// If there's an active mm we can use its value.
		if mm := t.MemoryManager(); mm != nil {
			if mmMaxRSS := mm.MaxResidentSetSize(); mmMaxRSS > t.tg.maxRSS {
				return mmMaxRSS
			}
		}
		return t.tg.maxRSS
	case linux.RUSAGE_CHILDREN:
		return t.tg.childMaxRSS
	case linux.RUSAGE_BOTH:
		maxRSS := t.tg.maxRSS
		if maxRSS < t.tg.childMaxRSS {
			maxRSS = t.tg.childMaxRSS
		}
		if mm := t.MemoryManager(); mm != nil {
			if mmMaxRSS := mm.MaxResidentSetSize(); mmMaxRSS > maxRSS {
				return mmMaxRSS
			}
		}
		return maxRSS
	default:
		// We'll only get here if which is invalid.
		return 0
	}
}
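
// A minimal usage sketch (not part of the original file): computing the
// ru_maxrss field of a getrusage(2)-style result for the calling thread.
// The helper name maxRSSKilobytes is hypothetical; Linux reports ru_maxrss
// in kilobytes, while MaxRSS returns bytes.
func maxRSSKilobytes(t *Task) uint64 {
	return t.MaxRSS(linux.RUSAGE_SELF) / 1024
}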