github.com/inspektor-gadget/inspektor-gadget@v0.28.1/pkg/gadgets/trace/bind/tracer/bpf/bindsnoop.bpf.c (about)

     1  /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
     2  /* Copyright (c) 2021 Hengqi Chen */
     3  #include <vmlinux.h>
     4  #include <bpf/bpf_helpers.h>
     5  #include <bpf/bpf_core_read.h>
     6  #include <bpf/bpf_tracing.h>
     7  #include <bpf/bpf_endian.h>
     8  #include "bindsnoop.h"
     9  #include <gadget/mntns_filter.h>
    10  
#define MAX_ENTRIES 10240 /* capacity of the in-flight `sockets` map */
#define MAX_PORTS 1024 /* capacity of the `ports` filter map */

/* Load-time configuration: userspace patches these constants before the
 * program is loaded; `const volatile` keeps them rewritable at load time
 * while letting the verifier treat them as constants afterwards. */
const volatile pid_t target_pid = 0; /* 0 means trace every process */
const volatile bool ignore_errors = true; /* drop events for failed bind() calls */
const volatile bool filter_by_port = false; /* only report ports present in `ports` */

// we need this to make sure the compiler doesn't remove our struct
const struct bind_event *unusedbindevent __attribute__((unused));
    20  
/* In-flight bind() calls: socket pointer stashed by the entry kprobe,
 * keyed by thread id, consumed and deleted by the matching kretprobe. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, struct socket *);
} sockets SEC(".maps");
    27  
/* Port allow-list consulted when filter_by_port is set; presumably
 * populated by userspace with host-order port numbers as keys —
 * TODO confirm against the loader. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_PORTS);
	__type(key, __u16);
	__type(value, __u16);
} ports SEC(".maps");
    34  
/* Perf ring buffer used to stream `struct bind_event` records to userspace. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");
    40  
    41  static int probe_entry(struct pt_regs *ctx, struct socket *socket)
    42  {
    43  	__u64 pid_tgid = bpf_get_current_pid_tgid();
    44  	__u32 pid = pid_tgid >> 32;
    45  	__u32 tid = (__u32)pid_tgid;
    46  
    47  	if (target_pid && target_pid != pid)
    48  		return 0;
    49  
    50  	bpf_map_update_elem(&sockets, &tid, &socket, BPF_ANY);
    51  	return 0;
    52  };
    53  
/*
 * probe_exit - shared kretprobe handler for inet_bind() and inet6_bind().
 *
 * Retrieves the socket stashed by probe_entry for this thread, applies the
 * mount-namespace / error / port filters, then assembles a bind_event
 * (pid, port, address, socket options, uid/gid, timestamps, ...) and emits
 * it through the `events` perf buffer.
 *
 * @ver: IP version of the bind call (4 or 6), chosen by the attach point.
 *
 * Always returns 0; the stashed socket is deleted on every path that found
 * one (cleanup label).
 */
static int probe_exit(struct pt_regs *ctx, short ver)
{
	__u64 pid_tgid = bpf_get_current_pid_tgid();
	__u32 pid = pid_tgid >> 32;
	__u32 tid = (__u32)pid_tgid;
	__u64 uid_gid = bpf_get_current_uid_gid();
	u64 mntns_id;
	struct socket **socketp, *socket;
	struct inet_sock *inet_sock;
	struct sock *sock;
	union bind_options opts;
	struct bind_event event = {};
	__u16 sport = 0, *port;
	int ret;

	/* No entry for this tid: probe_entry filtered it out (or we missed
	 * the entry probe), so there is nothing to report. */
	socketp = bpf_map_lookup_elem(&sockets, &tid);
	if (!socketp)
		return 0;

	mntns_id = gadget_get_mntns_id();

	/* Honor the gadget's mount-namespace filter (container scoping). */
	if (gadget_should_discard_mntns_id(mntns_id))
		goto cleanup;

	/* Return value of the traced bind(); optionally skip failures. */
	ret = PT_REGS_RC(ctx);
	if (ignore_errors && ret != 0)
		goto cleanup;

	socket = *socketp;
	sock = BPF_CORE_READ(socket, sk);
	inet_sock = (struct inet_sock *)sock;

	/* Bound source port, converted from network to host byte order. */
	sport = bpf_ntohs(BPF_CORE_READ(inet_sock, inet_sport));
	port = bpf_map_lookup_elem(&ports, &sport);
	if (filter_by_port && !port)
		goto cleanup;

	/* Pack the socket-option flags into the bind_options bitfield;
	 * reuse/reuseport live in bitfields of struct sock, hence the
	 * CO-RE bitfield probed reads. */
	opts.fields.freebind = get_inet_sock_freebind(inet_sock);
	opts.fields.transparent = get_inet_sock_transparent(inet_sock);
	opts.fields.bind_address_no_port =
		get_inet_sock_bind_address_no_port(inet_sock);
	opts.fields.reuseaddress =
		BPF_CORE_READ_BITFIELD_PROBED(sock, __sk_common.skc_reuse);
	opts.fields.reuseport =
		BPF_CORE_READ_BITFIELD_PROBED(sock, __sk_common.skc_reuseport);
	event.opts = opts.data;
	event.ts_us = bpf_ktime_get_ns() / 1000;
	event.pid = pid;
	event.port = sport;
	event.bound_dev_if = BPF_CORE_READ(sock, __sk_common.skc_bound_dev_if);
	event.ret = ret;
	event.proto = BPF_CORE_READ_BITFIELD_PROBED(sock, sk_protocol);
	event.mount_ns_id = mntns_id;
	event.timestamp = bpf_ktime_get_boot_ns();
	/* bpf_get_current_uid_gid() packs uid in the low 32 bits and gid in
	 * the high 32 bits (see bpf-helpers(7)). */
	event.uid = (u32)uid_gid;
	event.gid = (u32)(uid_gid >> 32);
	bpf_get_current_comm(&event.task, sizeof(event.task));
	if (ver == 4) {
		event.ver = ver;
		/* NOTE(review): copies sizeof(event.addr) bytes starting at
		 * inet_saddr; for IPv4 only the first 4 bytes are the
		 * address — mirrors upstream bindsnoop, verify event.addr
		 * is sized for IPv6. */
		bpf_probe_read_kernel(&event.addr, sizeof(event.addr),
				      &inet_sock->inet_saddr);
	} else { /* ver == 6 */
		event.ver = ver;
		bpf_probe_read_kernel(
			&event.addr, sizeof(event.addr),
			sock->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
	}
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &event,
			      sizeof(event));

cleanup:
	/* Always drop the per-thread stash, whether or not we emitted. */
	bpf_map_delete_elem(&sockets, &tid);
	return 0;
}
   128  
   129  SEC("kprobe/inet_bind")
   130  int BPF_KPROBE(ig_bind_ipv4_e, struct socket *socket)
   131  {
   132  	return probe_entry(ctx, socket);
   133  }
   134  
   135  SEC("kretprobe/inet_bind")
   136  int BPF_KRETPROBE(ig_bind_ipv4_x)
   137  {
   138  	return probe_exit(ctx, 4);
   139  }
   140  
   141  SEC("kprobe/inet6_bind")
   142  int BPF_KPROBE(ig_bind_ipv6_e, struct socket *socket)
   143  {
   144  	return probe_entry(ctx, socket);
   145  }
   146  
   147  SEC("kretprobe/inet6_bind")
   148  int BPF_KRETPROBE(ig_bind_ipv6_x)
   149  {
   150  	return probe_exit(ctx, 6);
   151  }
   152  
   153  char LICENSE[] SEC("license") = "Dual BSD/GPL";