github.com/inspektor-gadget/inspektor-gadget@v0.28.1/pkg/gadgets/profile/cpu/tracer/tracer.go

// Copyright 2019-2023 The Inspektor Gadget authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !withoutebpf

package tracer

import (
	"encoding/json"
	"errors"
	"fmt"
	"runtime"
	"sort"
	"unsafe"

	"github.com/cilium/ebpf"
	log "github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"

	gadgetcontext "github.com/inspektor-gadget/inspektor-gadget/pkg/gadget-context"
	"github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets"
	"github.com/inspektor-gadget/inspektor-gadget/pkg/gadgets/profile/cpu/types"
	"github.com/inspektor-gadget/inspektor-gadget/pkg/kallsyms"
	eventtypes "github.com/inspektor-gadget/inspektor-gadget/pkg/types"
)

//go:generate go run github.com/cilium/ebpf/cmd/bpf2go -target $TARGET -type key_t -cc clang -cflags ${CFLAGS} profile ./bpf/profile.bpf.c -- -I./bpf/

type Config struct {
	MountnsMap      *ebpf.Map
	UserStackOnly   bool
	KernelStackOnly bool
}

type Tracer struct {
	enricher gadgets.DataEnricherByMntNs
	objs     profileObjects
	perfFds  []int
	config   *Config
}

const (
	perfMaxStackDepth = 127
	perfSampleFreq    = 49
	// In C, struct perf_event_attr has a freq field, which is a single bit in a
	// 64-bit-long bitfield.
	// In Go, golang.org/x/sys/unix exposes this bitfield as the 64-bit Bits
	// field of PerfEventAttr.
	// From the C layout, we can deduce that freq (which selects sampling by
	// frequency rather than by period) is the 10th bit.
	frequencyBit = 1 << 10
)
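// NewTracer starts CPU profiling as soon as it is created: it loads the eBPF
// program and attaches it to a software CPU-clock perf event on every CPU.
// Sampling continues until Stop is called.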
func NewTracer(enricher gadgets.DataEnricherByMntNs, config *Config) (*Tracer, error) {
	t := &Tracer{
		enricher: enricher,
		config:   config,
	}

	if err := t.install(); err != nil {
		t.Stop()
		return nil, err
	}

	return t, nil
}

type keyCount struct {
	key   profileKeyT
	value uint64
}

func (t *Tracer) readCountsMap() ([]keyCount, error) {
	var prev *profileKeyT = nil
	counts := t.objs.profileMaps.Counts
	keysCounts := []keyCount{}
	key := profileKeyT{}

	if t.objs.profileMaps.Counts == nil {
		return nil, fmt.Errorf("counts map was not created at the moment of stop")
	}

	i := 0
	for {
		if err := counts.NextKey(unsafe.Pointer(prev), unsafe.Pointer(&key)); err != nil {
			if errors.Is(err, ebpf.ErrKeyNotExist) {
				break
			}
			return nil, fmt.Errorf("getting next key: %w", err)
		}

		var value uint64
		err := counts.Lookup(key, unsafe.Pointer(&value))
		if err != nil {
			return nil, err
		}

		kv := keyCount{
			key:   key,
			value: value,
		}

		if i < len(keysCounts)-1 {
			keysCounts[i] = kv
		} else {
			keysCounts = append(keysCounts, kv)
		}

		if value == 0 {
			continue
		}

		prev = &key
		i++
	}

	return keysCounts, nil
}

func getReport(t *Tracer, kAllSyms *kallsyms.KAllSyms, stack *ebpf.Map, keyCount keyCount) (types.Report, error) {
	kernelInstructionPointers := [perfMaxStackDepth]uint64{}
	userInstructionPointers := [perfMaxStackDepth]uint64{}
	v := keyCount.value
	k := keyCount.key

	// if (!env.kernel_stacks_only && k->user_stack_id >= 0) {
	if k.UserStackId >= 0 {
		err := stack.Lookup(k.UserStackId, unsafe.Pointer(&userInstructionPointers))
		if err != nil {
			return types.Report{}, err
		}
	}

	// if (!env.user_stacks_only && k->kern_stack_id >= 0) {
	if k.KernStackId >= 0 {
		err := stack.Lookup(k.KernStackId, unsafe.Pointer(&kernelInstructionPointers))
		if err != nil {
			return types.Report{}, err
		}
	}

	userSymbols := []string{}
	for _, ip := range userInstructionPointers {
		if ip == 0 {
			break
		}

		// Resolving userland symbols is not supported.
		userSymbols = append(userSymbols, "[unknown]")
	}

	kernelSymbols := []string{}
	for _, ip := range kernelInstructionPointers {
		if ip == 0 {
			break
		}

		kernelSymbols = append(kernelSymbols, kAllSyms.LookupByInstructionPointer(ip))
	}

	report := types.Report{
		Comm:        gadgets.FromCString(k.Name[:]),
		Pid:         k.Pid,
		UserStack:   userSymbols,
		KernelStack: kernelSymbols,
		Count:       v,
	}

	if t.enricher != nil {
		t.enricher.EnrichByMntNs(&report.CommonData, k.MntnsId)
	}

	return report, nil
}
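// Stop stops profiling, collects the sampled stack traces into a JSON-encoded
// report and releases the perf events and eBPF resources.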
func (t *Tracer) Stop() (string, error) {
	defer t.close()

	result, err := t.collectResult()
	if err != nil {
		return "", err
	}
	return string(result), nil
}

// close disables and closes the per-CPU perf event file descriptors and
// releases the eBPF objects.
func (t *Tracer) close() {
	t.objs.Close()

	for _, fd := range t.perfFds {
		// Disable the perf event.
		err := unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_DISABLE, 0)
		if err != nil {
			log.Errorf("Failed to disable perf fd: %v", err)
		}

		err = unix.Close(fd)
		if err != nil {
			log.Errorf("Failed to close perf fd: %v", err)
		}
	}
}

func (t *Tracer) collectResult() ([]byte, error) {
	keysCounts, err := t.readCountsMap()
	if err != nil {
		return nil, err
	}

	// Sort by increasing count.
	sort.Slice(keysCounts, func(i, j int) bool {
		return keysCounts[i].value < keysCounts[j].value
	})

	kAllSyms, err := kallsyms.NewKAllSyms()
	if err != nil {
		return nil, err
	}

	reports := make([]types.Report, len(keysCounts))
	for i, keyVal := range keysCounts {
		report, err := getReport(t, kAllSyms, t.objs.profileMaps.Stackmap, keyVal)
		if err != nil {
			return nil, err
		}

		reports[i] = report
	}

	return json.Marshal(reports)
}

func (t *Tracer) install() error {
	spec, err := loadProfile()
	if err != nil {
		return fmt.Errorf("loading ebpf program: %w", err)
	}

	consts := map[string]interface{}{
		"kernel_stacks_only": t.config.KernelStackOnly,
		"user_stacks_only":   t.config.UserStackOnly,
	}

	if err := gadgets.LoadeBPFSpec(t.config.MountnsMap, spec, consts, &t.objs); err != nil {
		return fmt.Errorf("loading ebpf spec: %w", err)
	}

	for cpu := 0; cpu < runtime.NumCPU(); cpu++ {
		// Highly inspired by:
		// https://gist.github.com/florianl/5d9cc9dbb3822e03f6f65a073ffbedbb#file-main-go-L101
		// https://github.com/iovisor/bcc/pull/3782/commits/8ee4449fa091c70f3c60cbe95929481c0d6711d1#diff-61b9f61545aedae166fcc06305a62f12699219aed0eb1e1fb4abe74fa31cb3d7R196
		// https://github.com/libbpf/libbpf/blob/645500dd7d2d6b5bb76e4c0375d597d4f0c4814e/src/libbpf.c#L10546
		fd, err := unix.PerfEventOpen(
			&unix.PerfEventAttr{
				Type:        unix.PERF_TYPE_SOFTWARE,
				Config:      unix.PERF_COUNT_SW_CPU_CLOCK,
				Sample_type: unix.PERF_SAMPLE_RAW,
				Sample:      perfSampleFreq,
				Bits:        frequencyBit,
			},
			-1,
			cpu,
			-1,
			unix.PERF_FLAG_FD_CLOEXEC,
		)
		if err != nil {
			return fmt.Errorf("creating the perf fd: %w", err)
		}

		t.perfFds = append(t.perfFds, fd)

		// Attach the eBPF program to the perf event.
		if err := unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_SET_BPF, t.objs.IgProfCpu.FD()); err != nil {
			return fmt.Errorf("attaching eBPF program to perf fd: %w", err)
		}

		// Start the perf event.
		if err := unix.IoctlSetInt(fd, unix.PERF_EVENT_IOC_ENABLE, 0); err != nil {
			return fmt.Errorf("enabling perf fd: %w", err)
		}
	}

	return nil
}
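// A minimal usage sketch of the standalone Tracer API above (illustrative
// only; passing a nil enricher and an empty Config is assumed to disable
// container enrichment and mount namespace filtering):
//
//	tracer, err := NewTracer(nil, &Config{})
//	if err != nil {
//		// handle error
//	}
//	// ... let the sampling run for the desired duration ...
//	report, err := tracer.Stop() // JSON-encoded []types.Report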
// ---

// TracerWrap wraps Tracer to implement the interfaces expected by the gadget
// framework.
type TracerWrap struct {
	Tracer
	enricherFunc  func(ev any) error
	eventCallback func(ev *types.Report)
}

func (t *TracerWrap) Run(gadgetCtx gadgets.GadgetContext) error {
	params := gadgetCtx.GadgetParams()
	t.config.UserStackOnly = params.Get(ParamUserStack).AsBool()
	t.config.KernelStackOnly = params.Get(ParamKernelStack).AsBool()

	defer t.close()
	if err := t.install(); err != nil {
		return fmt.Errorf("installing tracer: %w", err)
	}

	gadgetcontext.WaitForTimeoutOrDone(gadgetCtx)

	res, err := t.collectResult()
	if err != nil {
		return fmt.Errorf("collecting result: %w", err)
	}

	var reports []*types.Report
	if err = json.Unmarshal(res, &reports); err != nil {
		return fmt.Errorf("unmarshaling report: %w", err)
	}
	for _, report := range reports {
		t.eventCallback(report)
	}

	return nil
}

func (t *TracerWrap) SetEventHandler(handler any) {
	nh, ok := handler.(func(ev *types.Report))
	if !ok {
		panic("event handler invalid")
	}
	t.eventCallback = nh
}

func (t *TracerWrap) SetEventEnricher(enricher func(ev any) error) {
	t.enricherFunc = enricher
	t.Tracer.enricher = t
}

func (t *TracerWrap) EnrichByMntNs(event *eventtypes.CommonData, mountnsid uint64) {
	// TODO: This is ugly as it temporarily wraps and unwraps the event; it should
	// be changed in the original gadget code after the full migration to
	// NewInstance().
	wrap := &types.Report{CommonData: *event, MntnsID: mountnsid}
	t.enricherFunc(wrap)
	*event = wrap.CommonData
}

func (t *TracerWrap) SetMountNsMap(mountNsMap *ebpf.Map) {
	t.Tracer.config.MountnsMap = mountNsMap
}

func (g *GadgetDesc) NewInstance() (gadgets.Gadget, error) {
	tracer := &TracerWrap{
		Tracer: Tracer{
			config: &Config{},
		},
	}
	return tracer, nil
}