github.com/nicocha30/gvisor-ligolo@v0.0.0-20230726075806-989fa2c0a413/pkg/sentry/kernel/task_acct.go

// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package kernel

// Accounting, limits, timers.

import (
	"github.com/nicocha30/gvisor-ligolo/pkg/abi/linux"
	"github.com/nicocha30/gvisor-ligolo/pkg/errors/linuxerr"
	ktime "github.com/nicocha30/gvisor-ligolo/pkg/sentry/kernel/time"
	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/limits"
	"github.com/nicocha30/gvisor-ligolo/pkg/sentry/usage"
)

// Getitimer implements getitimer(2).
//
// Preconditions: The caller must be running on the task goroutine.
func (t *Task) Getitimer(id int32) (linux.ItimerVal, error) {
	var tm ktime.Time
	var s ktime.Setting
	switch id {
	case linux.ITIMER_REAL:
		tm, s = t.tg.itimerRealTimer.Get()
	case linux.ITIMER_VIRTUAL:
		tm = t.tg.UserCPUClock().Now()
		t.tg.signalHandlers.mu.Lock()
		s, _ = t.tg.itimerVirtSetting.At(tm)
		t.tg.signalHandlers.mu.Unlock()
	case linux.ITIMER_PROF:
		tm = t.tg.CPUClock().Now()
		t.tg.signalHandlers.mu.Lock()
		s, _ = t.tg.itimerProfSetting.At(tm)
		t.tg.signalHandlers.mu.Unlock()
	default:
		return linux.ItimerVal{}, linuxerr.EINVAL
	}
	val, iv := ktime.SpecFromSetting(tm, s)
	return linux.ItimerVal{
		Value:    linux.DurationToTimeval(val),
		Interval: linux.DurationToTimeval(iv),
	}, nil
}
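// Illustrative sketch of how a syscall-layer caller might combine Getitimer
// above and Setitimer below: read the new value in from userspace, install
// it, and copy the returned old value back out. The helpers copyItimerValIn
// and copyItimerValOut are hypothetical names used only for this sketch (not
// APIs of this package), and hostarch.Addr stands in for the sentry's
// userspace address type.
//
//	func setitimerSketch(t *Task, id int32, newAddr, oldAddr hostarch.Addr) error {
//		var newitv linux.ItimerVal
//		if err := copyItimerValIn(t, newAddr, &newitv); err != nil { // hypothetical helper
//			return err
//		}
//		olditv, err := t.Setitimer(id, newitv)
//		if err != nil {
//			return err
//		}
//		if oldAddr == 0 {
//			// A NULL old-value pointer means the caller does not want
//			// the previous setting reported.
//			return nil
//		}
//		return copyItimerValOut(t, oldAddr, &olditv) // hypothetical helper
//	}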
// Setitimer implements setitimer(2).
//
// Preconditions: The caller must be running on the task goroutine.
func (t *Task) Setitimer(id int32, newitv linux.ItimerVal) (linux.ItimerVal, error) {
	var tm ktime.Time
	var olds ktime.Setting
	switch id {
	case linux.ITIMER_REAL:
		news, err := ktime.SettingFromSpec(newitv.Value.ToDuration(), newitv.Interval.ToDuration(), t.tg.itimerRealTimer.Clock())
		if err != nil {
			return linux.ItimerVal{}, err
		}
		tm, olds = t.tg.itimerRealTimer.Swap(news)
	case linux.ITIMER_VIRTUAL:
		c := t.tg.UserCPUClock()
		t.k.cpuClockMu.Lock()
		defer t.k.cpuClockMu.Unlock()
		tm = c.Now()
		news, err := ktime.SettingFromSpecAt(newitv.Value.ToDuration(), newitv.Interval.ToDuration(), tm)
		if err != nil {
			return linux.ItimerVal{}, err
		}
		t.tg.signalHandlers.mu.Lock()
		olds = t.tg.itimerVirtSetting
		t.tg.itimerVirtSetting = news
		t.tg.updateCPUTimersEnabledLocked()
		t.tg.signalHandlers.mu.Unlock()
	case linux.ITIMER_PROF:
		c := t.tg.CPUClock()
		t.k.cpuClockMu.Lock()
		defer t.k.cpuClockMu.Unlock()
		tm = c.Now()
		news, err := ktime.SettingFromSpecAt(newitv.Value.ToDuration(), newitv.Interval.ToDuration(), tm)
		if err != nil {
			return linux.ItimerVal{}, err
		}
		t.tg.signalHandlers.mu.Lock()
		olds = t.tg.itimerProfSetting
		t.tg.itimerProfSetting = news
		t.tg.updateCPUTimersEnabledLocked()
		t.tg.signalHandlers.mu.Unlock()
	default:
		return linux.ItimerVal{}, linuxerr.EINVAL
	}
	oldval, oldiv := ktime.SpecFromSetting(tm, olds)
	return linux.ItimerVal{
		Value:    linux.DurationToTimeval(oldval),
		Interval: linux.DurationToTimeval(oldiv),
	}, nil
}

// IOUsage returns the io usage of the thread.
func (t *Task) IOUsage() *usage.IO {
	return t.ioUsage
}

// IOUsage returns the total io usage of all dead and live threads in the group.
func (tg *ThreadGroup) IOUsage() *usage.IO {
	tg.pidns.owner.mu.RLock()
	defer tg.pidns.owner.mu.RUnlock()

	var io usage.IO
	tg.ioUsage.Clone(&io)
	// Account for active tasks.
	for t := tg.tasks.Front(); t != nil; t = t.Next() {
		io.Accumulate(t.IOUsage())
	}
	return &io
}

// Name returns t's name.
func (t *Task) Name() string {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.image.Name
}

// SetName changes t's name.
func (t *Task) SetName(name string) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.image.Name = name
	t.Debugf("Set thread name to %q", name)
}

// Limits implements context.Context.Limits.
func (t *Task) Limits() *limits.LimitSet {
	return t.ThreadGroup().Limits()
}

// StartTime returns t's start time.
func (t *Task) StartTime() ktime.Time {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.startTime
}
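// Usage sketch for a hypothetical caller (for example a /proc-style reader)
// showing how the two IOUsage accessors above differ: the task accessor
// returns the thread's live counters, while the thread-group accessor returns
// a freshly accumulated copy.
//
//	func ioSnapshotSketch(t *Task) (live, snapshot *usage.IO) {
//		// Task.IOUsage hands back the thread's live counters; they keep
//		// changing while the task runs.
//		live = t.IOUsage()
//		// ThreadGroup.IOUsage returns a fresh copy: the group's counters
//		// for exited tasks plus every live task's counters at the time of
//		// the call, so the result can be read without further locking.
//		snapshot = t.ThreadGroup().IOUsage()
//		return live, snapshot
//	}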
// MaxRSS returns the maximum resident set size of the task in bytes. The
// which argument should be one of RUSAGE_SELF, RUSAGE_CHILDREN,
// RUSAGE_THREAD, or RUSAGE_BOTH. See getrusage(2) for documentation on the
// behavior of these flags.
func (t *Task) MaxRSS(which int32) uint64 {
	t.tg.pidns.owner.mu.RLock()
	defer t.tg.pidns.owner.mu.RUnlock()

	switch which {
	case linux.RUSAGE_SELF, linux.RUSAGE_THREAD:
		// If there's an active mm we can use its value.
		if mm := t.MemoryManager(); mm != nil {
			if mmMaxRSS := mm.MaxResidentSetSize(); mmMaxRSS > t.tg.maxRSS {
				return mmMaxRSS
			}
		}
		return t.tg.maxRSS
	case linux.RUSAGE_CHILDREN:
		return t.tg.childMaxRSS
	case linux.RUSAGE_BOTH:
		maxRSS := t.tg.maxRSS
		if maxRSS < t.tg.childMaxRSS {
			maxRSS = t.tg.childMaxRSS
		}
		if mm := t.MemoryManager(); mm != nil {
			if mmMaxRSS := mm.MaxResidentSetSize(); mmMaxRSS > maxRSS {
				return mmMaxRSS
			}
		}
		return maxRSS
	default:
		// We'll only get here if which is invalid.
		return 0
	}
}
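// A hypothetical getrusage-style caller might consume MaxRSS as sketched
// below. MaxRSS reports bytes, while Linux's struct rusage reports ru_maxrss
// in kilobytes, so the syscall boundary has to convert units; the function
// name here is illustrative only.
//
//	func maxRSSKilobytesSketch(t *Task, which int32) int64 {
//		// MaxRSS already picks the larger of the active mm's high-water
//		// mark and the thread group's recorded maximum, so the caller only
//		// has to convert bytes to kilobytes.
//		return int64(t.MaxRSS(which) / 1024)
//	}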