// github.com/cilium/ebpf@v0.10.0/link/kprobe_test.go

package link

import (
	"errors"
	"fmt"
	"os"
	"testing"

	qt "github.com/frankban/quicktest"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/internal"
	"github.com/cilium/ebpf/internal/testutils"
	"github.com/cilium/ebpf/internal/unix"
)

// Global symbol, present on all tested kernels.
var ksym = "vprintk"

// Collection of various symbols present in all tested kernels.
// Compiler optimizations result in different names for these symbols.
var symTests = []string{
	"echo_char.isra.0",          // function optimized by -fipa-sra
	"proc_get_long.constprop.0", // optimized function with constant operands
	"unregister_kprobes.part.0", // function body that was split and partially inlined
}

func TestKprobe(t *testing.T) {
	prog := mustLoadProgram(t, ebpf.Kprobe, 0, "")

	for _, tt := range symTests {
		t.Run(tt, func(t *testing.T) {
			k, err := Kprobe(tt, prog, nil)
			if err != nil {
				t.Fatal(err)
			}
			defer k.Close()
		})
	}

	c := qt.New(t)

	k, err := Kprobe("bogus", prog, nil)
	c.Assert(err, qt.ErrorIs, os.ErrNotExist, qt.Commentf("got error: %s", err))
	if k != nil {
		k.Close()
	}

	k, err = Kprobe(ksym, prog, nil)
	c.Assert(err, qt.IsNil)
	defer k.Close()

	testLink(t, k, prog)
}

func TestKprobeOffset(t *testing.T) {
	prog := mustLoadProgram(t, ebpf.Kprobe, 0, "")

	// The layout of a function is compiler and arch dependent, so we try to
	// find a valid attach target in the first few bytes of the function.
	for i := uint64(1); i < 16; i++ {
		k, err := Kprobe("inet6_release", prog, &KprobeOptions{Offset: i})
		if err != nil {
			continue
		}
		k.Close()
		return
	}

	t.Fatal("Can't attach with non-zero offset")
}

func TestKretprobeMaxActive(t *testing.T) {
	prog := mustLoadProgram(t, ebpf.Kprobe, 0, "")

	k, err := Kprobe("do_sys_open", prog, &KprobeOptions{RetprobeMaxActive: 4096})
	if !errors.Is(err, errInvalidMaxActive) {
		t.Fatal("Expected errInvalidMaxActive, got", err)
	}
	if k != nil {
		k.Close()
	}

	k, err = Kretprobe("__put_task_struct", prog, &KprobeOptions{RetprobeMaxActive: 4096})
	if testutils.MustKernelVersion().Less(internal.Version{4, 12, 0}) {
		if !errors.Is(err, ErrNotSupported) {
			t.Fatal("Expected ErrNotSupported, got", err)
		}
	} else if err != nil {
		t.Fatal("Kretprobe with maxactive returned an error:", err)
	}
	if k != nil {
		k.Close()
	}
}

func TestKretprobe(t *testing.T) {
	prog := mustLoadProgram(t, ebpf.Kprobe, 0, "")

	for _, tt := range symTests {
		t.Run(tt, func(t *testing.T) {
			k, err := Kretprobe(tt, prog, nil)
			if err != nil {
				t.Fatal(err)
			}
			defer k.Close()
		})
	}

	c := qt.New(t)

	k, err := Kretprobe("bogus", prog, nil)
	if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL)) {
		t.Fatal(err)
	}
	if k != nil {
		k.Close()
	}

	k, err = Kretprobe(ksym, prog, nil)
	c.Assert(err, qt.IsNil)
	defer k.Close()

	testLink(t, k, prog)
}

func TestKprobeErrors(t *testing.T) {
	c := qt.New(t)

	// Invalid Kprobe incantations. Kretprobe uses the same code paths
	// with a different ret flag.
133 _, err := Kprobe("", nil, nil) // empty symbol 134 c.Assert(errors.Is(err, errInvalidInput), qt.IsTrue) 135 136 _, err = Kprobe("_", nil, nil) // empty prog 137 c.Assert(errors.Is(err, errInvalidInput), qt.IsTrue) 138 139 _, err = Kprobe(".", &ebpf.Program{}, nil) // illegal chars in symbol 140 c.Assert(errors.Is(err, errInvalidInput), qt.IsTrue) 141 142 _, err = Kprobe("foo", &ebpf.Program{}, nil) // wrong prog type 143 c.Assert(errors.Is(err, errInvalidInput), qt.IsTrue) 144 } 145 146 // Test k(ret)probe creation using perf_kprobe PMU. 147 func TestKprobeCreatePMU(t *testing.T) { 148 // Requires at least 4.17 (e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU") 149 testutils.SkipOnOldKernel(t, "4.17", "perf_kprobe PMU") 150 151 c := qt.New(t) 152 153 // kprobe happy path. printk is always present. 154 pk, err := pmuKprobe(probeArgs{symbol: ksym}) 155 c.Assert(err, qt.IsNil) 156 defer pk.Close() 157 158 c.Assert(pk.typ, qt.Equals, kprobeEvent) 159 160 // kretprobe happy path. 161 pr, err := pmuKprobe(probeArgs{symbol: ksym, ret: true}) 162 c.Assert(err, qt.IsNil) 163 defer pr.Close() 164 165 c.Assert(pr.typ, qt.Equals, kretprobeEvent) 166 167 // Expect os.ErrNotExist when specifying a non-existent kernel symbol 168 // on kernels 4.17 and up. 169 _, err = pmuKprobe(probeArgs{symbol: "bogus"}) 170 c.Assert(errors.Is(err, os.ErrNotExist), qt.IsTrue, qt.Commentf("got error: %s", err)) 171 172 // A kernel bug was fixed in 97c753e62e6c where EINVAL was returned instead 173 // of ENOENT, but only for kretprobes. 174 _, err = pmuKprobe(probeArgs{symbol: "bogus", ret: true}) 175 c.Assert(errors.Is(err, os.ErrNotExist), qt.IsTrue, qt.Commentf("got error: %s", err)) 176 } 177 178 // Test fallback behaviour on kernels without perf_kprobe PMU available. 179 func TestKprobePMUUnavailable(t *testing.T) { 180 c := qt.New(t) 181 182 pk, err := pmuKprobe(probeArgs{symbol: ksym}) 183 if err == nil { 184 pk.Close() 185 t.Skipf("Kernel supports perf_kprobe PMU, not asserting error.") 186 } 187 188 // Only allow a PMU creation with a valid kernel symbol to fail with ErrNotSupported. 189 c.Assert(errors.Is(err, ErrNotSupported), qt.IsTrue, qt.Commentf("got error: %s", err)) 190 } 191 192 func BenchmarkKprobeCreatePMU(b *testing.B) { 193 for n := 0; n < b.N; n++ { 194 pr, err := pmuKprobe(probeArgs{symbol: ksym}) 195 if err != nil { 196 b.Error("error creating perf_kprobe PMU:", err) 197 } 198 199 if err := pr.Close(); err != nil { 200 b.Error("error closing perf_kprobe PMU:", err) 201 } 202 } 203 } 204 205 // Test tracefs k(ret)probe creation on all kernel versions. 206 func TestKprobeTraceFS(t *testing.T) { 207 c := qt.New(t) 208 209 // Open and close tracefs k(ret)probes, checking all errors. 210 kp, err := tracefsKprobe(probeArgs{symbol: ksym}) 211 c.Assert(err, qt.IsNil) 212 c.Assert(kp.Close(), qt.IsNil) 213 c.Assert(kp.typ, qt.Equals, kprobeEvent) 214 215 kp, err = tracefsKprobe(probeArgs{symbol: ksym, ret: true}) 216 c.Assert(err, qt.IsNil) 217 c.Assert(kp.Close(), qt.IsNil) 218 c.Assert(kp.typ, qt.Equals, kretprobeEvent) 219 220 // Create two identical trace events, ensure their IDs differ. 221 k1, err := tracefsKprobe(probeArgs{symbol: ksym}) 222 c.Assert(err, qt.IsNil) 223 defer k1.Close() 224 c.Assert(k1.tracefsID, qt.Not(qt.Equals), 0) 225 226 k2, err := tracefsKprobe(probeArgs{symbol: ksym}) 227 c.Assert(err, qt.IsNil) 228 defer k2.Close() 229 c.Assert(k2.tracefsID, qt.Not(qt.Equals), 0) 230 231 // Compare the kprobes' tracefs IDs. 
	c.Assert(k1.tracefsID, qt.Not(qt.CmpEquals()), k2.tracefsID)

	// Prepare probe args.
	args := probeArgs{group: "testgroup", symbol: "symbol"}

	// Write a k(ret)probe event for a non-existing symbol.
	_, err = createTraceFSProbeEvent(kprobeType, args)
	c.Assert(errors.Is(err, os.ErrNotExist), qt.IsTrue, qt.Commentf("got error: %s", err))

	// A kernel bug was fixed in 97c753e62e6c where EINVAL was returned instead
	// of ENOENT, but only for kretprobes.
	args.ret = true
	_, err = createTraceFSProbeEvent(kprobeType, args)
	if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL)) {
		t.Fatal(err)
	}
}

func BenchmarkKprobeCreateTraceFS(b *testing.B) {
	for n := 0; n < b.N; n++ {
		// Include <tracefs>/kprobe_events operations in the benchmark loop
		// because we create one per perf event.
		pr, err := tracefsKprobe(probeArgs{symbol: ksym})
		if err != nil {
			b.Error("error creating tracefs perf event:", err)
		}

		if err := pr.Close(); err != nil {
			b.Error("error closing tracefs perf event:", err)
		}
	}
}

// Test k(ret)probe creation writing directly to <tracefs>/kprobe_events.
func TestKprobeCreateTraceFS(t *testing.T) {
	c := qt.New(t)

	pg, _ := randomGroup("ebpftest")
	rg, _ := randomGroup("ebpftest")

	// Tee up cleanups in case any of the Asserts abort the function.
	defer func() {
		_ = closeTraceFSProbeEvent(kprobeType, pg, ksym)
		_ = closeTraceFSProbeEvent(kprobeType, rg, ksym)
	}()

	// Prepare probe args.
	args := probeArgs{group: pg, symbol: ksym}

	// Create a kprobe.
	_, err := createTraceFSProbeEvent(kprobeType, args)
	c.Assert(err, qt.IsNil)

	// Attempt to create an identical kprobe using tracefs,
	// expect it to fail with os.ErrExist.
	_, err = createTraceFSProbeEvent(kprobeType, args)
	c.Assert(errors.Is(err, os.ErrExist), qt.IsTrue,
		qt.Commentf("expected consecutive kprobe creation to contain os.ErrExist, got: %v", err))

	// Expect a successful close of the kprobe.
	c.Assert(closeTraceFSProbeEvent(kprobeType, pg, ksym), qt.IsNil)

	args.group = rg
	args.ret = true

	// Same test for a kretprobe.
	_, err = createTraceFSProbeEvent(kprobeType, args)
	c.Assert(err, qt.IsNil)

	_, err = createTraceFSProbeEvent(kprobeType, args)
	c.Assert(errors.Is(err, os.ErrExist), qt.IsTrue,
		qt.Commentf("expected consecutive kretprobe creation to contain os.ErrExist, got: %v", err))

	// Expect a successful close of the kretprobe.
	c.Assert(closeTraceFSProbeEvent(kprobeType, rg, ksym), qt.IsNil)
}

func TestKprobeTraceFSGroup(t *testing.T) {
	c := qt.New(t)

	// Expect <prefix>_<16 random hex chars>.
	g, err := randomGroup("ebpftest")
	c.Assert(err, qt.IsNil)
	c.Assert(g, qt.Matches, `ebpftest_[a-f0-9]{16}`)

	// Expect error when the generator's output exceeds 63 characters.
	p := make([]byte, 47) // 63 - 17 (length of the random suffix and underscore) + 1
	for i := range p {
		p[i] = byte('a')
	}
	_, err = randomGroup(string(p))
	c.Assert(err, qt.Not(qt.IsNil))

	// Reject non-alphanumeric characters.
	_, err = randomGroup("/")
	c.Assert(err, qt.Not(qt.IsNil))
}

func TestKprobeProgramCall(t *testing.T) {
	m, p := newUpdaterMapProg(t, ebpf.Kprobe, 0)

	// Open Kprobe on `sys_getpid` and attach it
	// to the ebpf program created above.
	k, err := Kprobe("sys_getpid", p, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Trigger ebpf program call.
	unix.Getpid()

	// Assert that the value at index 0 has been updated to 1.
	assertMapValue(t, m, 0, 1)

	// Detach the Kprobe.
	if err := k.Close(); err != nil {
		t.Fatal(err)
	}

	// Reset map value to 0 at index 0.
	if err := m.Update(uint32(0), uint32(0), ebpf.UpdateExist); err != nil {
		t.Fatal(err)
	}

	// Retrigger the ebpf program call.
	unix.Getpid()

	// Assert that this time the value has not been updated.
	assertMapValue(t, m, 0, 0)
}

func newUpdaterMapProg(t *testing.T, typ ebpf.ProgramType, attach ebpf.AttachType) (*ebpf.Map, *ebpf.Program) {
	// Create ebpf map. Will contain only one key with initial value 0.
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Array,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 1,
	})
	if err != nil {
		t.Fatal(err)
	}

	// Create ebpf program. When called, will set the value of key 0 in
	// the map created above to 1.
	p, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Type: typ,
		Instructions: asm.Instructions{
			// u32 key = 0
			asm.Mov.Imm(asm.R1, 0),
			asm.StoreMem(asm.RFP, -4, asm.R1, asm.Word),

			// u32 val = 1
			asm.Mov.Imm(asm.R1, 1),
			asm.StoreMem(asm.RFP, -8, asm.R1, asm.Word),

			// bpf_map_update_elem(...)
			asm.Mov.Reg(asm.R2, asm.RFP),
			asm.Add.Imm(asm.R2, -4),
			asm.Mov.Reg(asm.R3, asm.RFP),
			asm.Add.Imm(asm.R3, -8),
			asm.LoadMapPtr(asm.R1, m.FD()),
			asm.Mov.Imm(asm.R4, 0),
			asm.FnMapUpdateElem.Call(),

			// exit 0
			asm.Mov.Imm(asm.R0, 0),
			asm.Return(),
		},
		AttachType: attach,
		License:    "Dual MIT/GPL",
	})
	if err != nil {
		t.Fatal(err)
	}

	// Close the program and map on test teardown.
	t.Cleanup(func() {
		m.Close()
		p.Close()
	})

	return m, p
}

func assertMapValue(t *testing.T, m *ebpf.Map, k, v uint32) {
	var val uint32
	if err := m.Lookup(k, &val); err != nil {
		t.Fatal(err)
	}
	if val != v {
		t.Fatalf("unexpected value: want '%d', got '%d'", v, val)
	}
}

func TestKprobeCookie(t *testing.T) {
	testutils.SkipOnOldKernel(t, "5.15", "bpf_perf_link")

	prog := mustLoadProgram(t, ebpf.Kprobe, 0, "")
	k, err := Kprobe(ksym, prog, &KprobeOptions{Cookie: 1000})
	if err != nil {
		t.Fatal(err)
	}
	k.Close()
}

func TestKprobeToken(t *testing.T) {
	tests := []struct {
		args     probeArgs
		expected string
	}{
		{probeArgs{symbol: "symbol"}, "symbol"},
		{probeArgs{symbol: "symbol", offset: 1}, "symbol+0x1"},
		{probeArgs{symbol: "symbol", offset: 65535}, "symbol+0xffff"},
		{probeArgs{symbol: "symbol", offset: 65536}, "symbol+0x10000"},
	}

	for i, tt := range tests {
		t.Run(fmt.Sprint(i), func(t *testing.T) {
			po := kprobeToken(tt.args)
			if tt.expected != po {
				t.Errorf("Expected symbol+offset to be '%s', got '%s'", tt.expected, po)
			}
		})
	}
}
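
// Editorial sketch, not part of the upstream file: a minimal, hedged
// illustration of how the public Kprobe/Kretprobe API exercised by the tests
// above is typically used. The example name (ExampleKprobe_minimal) is an
// illustrative assumption; the code reuses only identifiers that already
// appear in this file (ebpf.NewProgram, asm, ksym, Kprobe, Kretprobe) and
// needs kprobe support plus sufficient privileges to actually attach.
func ExampleKprobe_minimal() {
	// Load a trivial BPF_PROG_TYPE_KPROBE program that just returns 0,
	// roughly what mustLoadProgram provides to the tests above.
	prog, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Type: ebpf.Kprobe,
		Instructions: asm.Instructions{
			asm.Mov.Imm(asm.R0, 0),
			asm.Return(),
		},
		License: "Dual MIT/GPL",
	})
	if err != nil {
		fmt.Println("loading program:", err)
		return
	}
	defer prog.Close()

	// Attach the program to the entry of ksym ("vprintk"). Closing the
	// returned link detaches the probe.
	kp, err := Kprobe(ksym, prog, nil)
	if err != nil {
		fmt.Println("attaching kprobe:", err)
		return
	}
	defer kp.Close()

	// The same program type can also be attached on function return.
	krp, err := Kretprobe(ksym, prog, nil)
	if err != nil {
		fmt.Println("attaching kretprobe:", err)
		return
	}
	defer krp.Close()
}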