github.com/cilium/ebpf@v0.15.1-0.20240517100537-8079b37aa138/link/kprobe_test.go

package link

import (
	"errors"
	"os"
	"testing"

	"github.com/go-quicktest/qt"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/asm"
	"github.com/cilium/ebpf/internal/testutils"
	"github.com/cilium/ebpf/internal/tracefs"
	"github.com/cilium/ebpf/internal/unix"
)

// Global symbol, present on all tested kernels.
var ksym = "vprintk"

// Collection of various symbols present in all tested kernels.
// Compiler optimizations result in different names for these symbols.
var symTests = []string{
	"echo_char.isra.0",          // function optimized by -fipa-sra
	"proc_get_long.constprop.0", // optimized function with constant operands
	"unregister_kprobes.part.0", // function body that was split and partially inlined
}

func TestKprobe(t *testing.T) {
	prog := mustLoadProgram(t, ebpf.Kprobe, 0, "")

	for _, tt := range symTests {
		t.Run(tt, func(t *testing.T) {
			k, err := Kprobe(tt, prog, nil)
			if err != nil {
				t.Fatal(err)
			}
			defer k.Close()
		})
	}

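	// Attaching to a symbol that doesn't exist in the kernel must fail
	// with os.ErrNotExist.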
	k, err := Kprobe("bogus", prog, nil)
	qt.Assert(t, qt.ErrorIs(err, os.ErrNotExist), qt.Commentf("got error: %s", err))
	if k != nil {
		k.Close()
	}

	k, err = Kprobe(ksym, prog, nil)
	qt.Assert(t, qt.IsNil(err))
	defer k.Close()

	testLink(t, k, prog)
}

func TestKprobeOffset(t *testing.T) {
	prog := mustLoadProgram(t, ebpf.Kprobe, 0, "")

	// The layout of a function is compiler and arch dependent, so we try to
	// find a valid attach target in the first few bytes of the function.
	for i := uint64(1); i < 16; i++ {
		k, err := Kprobe("inet6_release", prog, &KprobeOptions{Offset: i})
		if err != nil {
			continue
		}
		k.Close()
		return
	}

	t.Fatal("Can't attach with non-zero offset")
}

func TestKretprobeMaxActive(t *testing.T) {
	prog := mustLoadProgram(t, ebpf.Kprobe, 0, "")
	defer prog.Close()

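	// RetprobeMaxActive only applies to kretprobes; setting it on a plain
	// kprobe must be rejected.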
	_, err := Kprobe("do_sys_open", prog, &KprobeOptions{RetprobeMaxActive: 4096})
	if !errors.Is(err, tracefs.ErrInvalidMaxActive) {
		t.Fatal("Expected ErrInvalidMaxActive, got", err)
	}

	k, err := Kretprobe("__put_task_struct", prog, &KprobeOptions{RetprobeMaxActive: 4096})
	if testutils.IsKernelLessThan(t, "4.12") && errors.Is(err, ErrNotSupported) {
		t.Skip("Kernel doesn't support maxactive")
	}
	if err != nil {
		t.Fatal("Kretprobe with maxactive returned an error:", err)
	}
	if err := k.Close(); err != nil {
		t.Fatal("Closing kretprobe:", err)
	}
}

func TestKretprobe(t *testing.T) {
	prog := mustLoadProgram(t, ebpf.Kprobe, 0, "")

	for _, tt := range symTests {
		t.Run(tt, func(t *testing.T) {
			k, err := Kretprobe(tt, prog, nil)
			if err != nil {
				t.Fatal(err)
			}
			defer k.Close()
		})
	}

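	// Attaching to a non-existent symbol fails with ENOENT, or with EINVAL
	// on kernels that predate the fix in 97c753e62e6c (kretprobes only).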
	k, err := Kretprobe("bogus", prog, nil)
	if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL)) {
		t.Fatal(err)
	}
	if k != nil {
		k.Close()
	}

	k, err = Kretprobe(ksym, prog, nil)
	qt.Assert(t, qt.IsNil(err))
	defer k.Close()

	testLink(t, k, prog)
}

func TestKprobeErrors(t *testing.T) {
	// Invalid Kprobe incantations. Kretprobe uses the same code paths
	// with a different ret flag.
	_, err := Kprobe("", nil, nil) // empty symbol
	qt.Assert(t, qt.ErrorIs(err, errInvalidInput))

	_, err = Kprobe("_", nil, nil) // nil prog
	qt.Assert(t, qt.ErrorIs(err, errInvalidInput))

	_, err = Kprobe(".", &ebpf.Program{}, nil) // illegal chars in symbol
	qt.Assert(t, qt.ErrorIs(err, errInvalidInput))

	_, err = Kprobe("foo", &ebpf.Program{}, nil) // wrong prog type
	qt.Assert(t, qt.ErrorIs(err, errInvalidInput))
}

// Test k(ret)probe creation using perf_kprobe PMU.
func TestKprobeCreatePMU(t *testing.T) {
	// Requires at least 4.17 (e12f03d7031a "perf/core: Implement the 'perf_kprobe' PMU")
	testutils.SkipOnOldKernel(t, "4.17", "perf_kprobe PMU")

	// kprobe happy path. vprintk is always present.
	pk, err := pmuProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: ksym})
	qt.Assert(t, qt.IsNil(err))
	defer pk.Close()

	// kretprobe happy path.
	pr, err := pmuProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: ksym, Ret: true})
	qt.Assert(t, qt.IsNil(err))
	defer pr.Close()

	// Expect os.ErrNotExist when specifying a non-existent kernel symbol
	// on kernels 4.17 and up.
	_, err = pmuProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: "bogus"})
	qt.Assert(t, qt.ErrorIs(err, os.ErrNotExist), qt.Commentf("got error: %s", err))

	// A kernel bug was fixed in 97c753e62e6c where EINVAL was returned instead
	// of ENOENT, but only for kretprobes.
	_, err = pmuProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: "bogus", Ret: true})
	qt.Assert(t, qt.ErrorIs(err, os.ErrNotExist), qt.Commentf("got error: %s", err))
}

// Test fallback behaviour on kernels without perf_kprobe PMU available.
func TestKprobePMUUnavailable(t *testing.T) {
	pk, err := pmuProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: ksym})
	if err == nil {
		pk.Close()
		t.Skipf("Kernel supports perf_kprobe PMU, not asserting error.")
	}

	// Only allow a PMU creation with a valid kernel symbol to fail with ErrNotSupported.
	qt.Assert(t, qt.ErrorIs(err, ErrNotSupported), qt.Commentf("got error: %s", err))
}

func BenchmarkKprobeCreatePMU(b *testing.B) {
	for n := 0; n < b.N; n++ {
		pr, err := pmuProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: ksym})
		if err != nil {
			b.Fatal("error creating perf_kprobe PMU:", err)
		}

		if err := pr.Close(); err != nil {
			b.Error("error closing perf_kprobe PMU:", err)
		}
	}
}

// Test tracefs k(ret)probe creation on all kernel versions.
func TestKprobeTraceFS(t *testing.T) {
	// Open and close tracefs k(ret)probes, checking all errors.
	kp, err := tracefsProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: ksym})
	qt.Assert(t, qt.IsNil(err))
	qt.Assert(t, qt.IsNil(kp.Close()))

	kp, err = tracefsProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: ksym, Ret: true})
	qt.Assert(t, qt.IsNil(err))
	qt.Assert(t, qt.IsNil(kp.Close()))

	// Create two identical trace events, ensure their IDs differ.
	k1, err := tracefsProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: ksym})
	qt.Assert(t, qt.IsNil(err))
	defer k1.Close()
	qt.Assert(t, qt.IsNotNil(k1.tracefsEvent))

	k2, err := tracefsProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: ksym})
	qt.Assert(t, qt.IsNil(err))
	defer k2.Close()
	qt.Assert(t, qt.IsNotNil(k2.tracefsEvent))

	// Compare the kprobes' tracefs IDs.
	qt.Assert(t, qt.Not(qt.Equals(k1.tracefsEvent.ID(), k2.tracefsEvent.ID())))

	// Expect an error when supplying an invalid custom group name
	_, err = tracefsProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: ksym, Group: "/"})
	qt.Assert(t, qt.Not(qt.IsNil(err)))

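	// A valid custom group name is accepted; the resulting group gets a
	// unique hex suffix, matched against the pattern below.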
	cg := "customgroup"
	k3, err := tracefsProbe(tracefs.ProbeArgs{Type: tracefs.Kprobe, Symbol: ksym, Group: cg})
	qt.Assert(t, qt.IsNil(err))
	defer k3.Close()
	qt.Assert(t, qt.Matches(k3.tracefsEvent.Group(), `customgroup_[a-f0-9]{16}`))

	// Prepare probe args.
	args := tracefs.ProbeArgs{Type: tracefs.Kprobe, Group: "testgroup", Symbol: "symbol"}

	// Write a k(ret)probe event for a non-existing symbol.
	_, err = tracefs.NewEvent(args)
	qt.Assert(t, qt.ErrorIs(err, os.ErrNotExist), qt.Commentf("got error: %s", err))

	// A kernel bug was fixed in 97c753e62e6c where EINVAL was returned instead
	// of ENOENT, but only for kretprobes.
	args.Ret = true
	_, err = tracefs.NewEvent(args)
	if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, unix.EINVAL)) {
		t.Fatal(err)
	}
}

func BenchmarkKprobeCreateTraceFS(b *testing.B) {
	for n := 0; n < b.N; n++ {
		// Include <tracefs>/kprobe_events operations in the benchmark loop
		// because we create one per perf event.
		pr, err := tracefsProbe(tracefs.ProbeArgs{Symbol: ksym})
		if err != nil {
			b.Fatal("error creating tracefs perf event:", err)
		}

		if err := pr.Close(); err != nil {
			b.Error("error closing tracefs perf event:", err)
		}
	}
}

func TestKprobeProgramCall(t *testing.T) {
	m, p := newUpdaterMapProg(t, ebpf.Kprobe, 0)

	// Open Kprobe on `sys_getpid` and attach it
	// to the ebpf program created above.
	k, err := Kprobe("sys_getpid", p, nil)
	if err != nil {
		t.Fatal(err)
	}

	// Trigger ebpf program call.
	unix.Getpid()

	// Assert that the value got incremented to at least 1, while allowing
	// for bigger values, because we could race with other getpid callers.
	assertMapValueGE(t, m, 0, 1)

	// Detach the Kprobe.
	if err := k.Close(); err != nil {
		t.Fatal(err)
	}

	// Reset map value to 0 at index 0.
	if err := m.Update(uint32(0), uint32(0), ebpf.UpdateExist); err != nil {
		t.Fatal(err)
	}

	// Retrigger the ebpf program call.
	unix.Getpid()

	// Assert that this time the value has not been updated.
	assertMapValue(t, m, 0, 0)
}

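// newUpdaterMapProg returns a single-entry array map and a program of the
// given type that increments the map's value at key 0 every time it runs.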
func newUpdaterMapProg(t *testing.T, typ ebpf.ProgramType, attach ebpf.AttachType) (*ebpf.Map, *ebpf.Program) {
	// Create ebpf map. Will contain only one key with initial value 0.
	m, err := ebpf.NewMap(&ebpf.MapSpec{
		Type:       ebpf.Array,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 1,
	})
	if err != nil {
		t.Fatal(err)
	}

	// Create ebpf program. When called, will increase the value of key 0 by 1
	// in the map created above.
	p, err := ebpf.NewProgram(&ebpf.ProgramSpec{
		Type: typ,
		Instructions: asm.Instructions{
			// R1 map
			asm.LoadMapPtr(asm.R1, m.FD()),

			// R2 key
			asm.Mov.Reg(asm.R2, asm.R10),
			asm.Add.Imm(asm.R2, -4),
			asm.StoreImm(asm.R2, 0, 0, asm.Word),

			// Lookup map[0]
			asm.FnMapLookupElem.Call(),
			asm.JEq.Imm(asm.R0, 0, "ret"),

			// u32 val = *R0 + 1, stored at FP-8
			asm.LoadMem(asm.R1, asm.R0, 0, asm.Word),
			asm.Add.Imm(asm.R1, 1),
			asm.StoreMem(asm.RFP, -8, asm.R1, asm.Word),

			// u32 key = 0
			asm.Mov.Imm(asm.R1, 0),
			asm.StoreMem(asm.RFP, -4, asm.R1, asm.Word),

			// bpf_map_update_elem(...)
			asm.Mov.Reg(asm.R2, asm.RFP),
			asm.Add.Imm(asm.R2, -4),
			asm.Mov.Reg(asm.R3, asm.RFP),
			asm.Add.Imm(asm.R3, -8),
			asm.LoadMapPtr(asm.R1, m.FD()),
			asm.Mov.Imm(asm.R4, 0),
			asm.FnMapUpdateElem.Call(),

			// exit 0
			asm.Mov.Imm(asm.R0, 0),
			asm.Return().WithSymbol("ret"),
		},
		AttachType: attach,
		License:    "Dual MIT/GPL",
	})
	if err != nil {
		t.Fatal(err)
	}

	// Close the program and map on test teardown.
	t.Cleanup(func() {
		m.Close()
		p.Close()
	})

	return m, p
}

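// assertMapValue fails the test if key k in m does not hold exactly v.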
func assertMapValue(t *testing.T, m *ebpf.Map, k, v uint32) {
	var val uint32
	if err := m.Lookup(k, &val); err != nil {
		t.Fatal(err)
	}
	if val != v {
		t.Fatalf("unexpected value: want '%d', got '%d'", v, val)
	}
}

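// assertMapValueGE fails the test if the value of key k in m is less than v.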
func assertMapValueGE(t *testing.T, m *ebpf.Map, k, v uint32) {
	var val uint32
	if err := m.Lookup(k, &val); err != nil {
		t.Fatal(err)
	}
	if val < v {
		t.Fatalf("unexpected value: want >= '%d', got '%d'", v, val)
	}
}

func TestKprobeCookie(t *testing.T) {
	testutils.SkipOnOldKernel(t, "5.15", "bpf_perf_link")

	prog := mustLoadProgram(t, ebpf.Kprobe, 0, "")
	k, err := Kprobe(ksym, prog, &KprobeOptions{Cookie: 1000})
	if err != nil {
		t.Fatal(err)
	}
	k.Close()
}