github.com/inspektor-gadget/inspektor-gadget@v0.28.1/pkg/gadgets/profile/block-io/tracer/bpf/biolatency.bpf.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Wenbo Zhang
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>
#include "biolatency.h"
#include <gadget/bits.bpf.h>
#include <gadget/core_fixes.bpf.h>

#define MAX_ENTRIES 10240

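/*
 * Runtime-configurable flags. As const volatile globals they land in
 * the object's .rodata section; the gadget's userspace side presumably
 * sets them through the BPF skeleton before load, letting the verifier
 * dead-code-eliminate the disabled branches.
 */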
const volatile bool filter_cg = false;
const volatile bool targ_per_disk = false;
const volatile bool targ_per_flag = false;
const volatile bool targ_queued = false;
const volatile bool targ_ms = false;
const volatile bool insert_arg_single = true;
const volatile bool issue_arg_single = true;
const volatile bool filter_dev = false;
const volatile __u32 targ_dev = 0;

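/*
 * Single-slot cgroup array. When filter_cg is set, userspace is
 * expected to store the target cgroup at index 0 so the programs can
 * filter with bpf_current_task_under_cgroup().
 */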
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__type(key, u32);
	__type(value, u32);
	__uint(max_entries, 1);
} cgroup_map SEC(".maps");

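/* In-flight requests: request pointer -> insert/issue timestamp in ns. */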
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct request *);
	__type(value, u64);
} start SEC(".maps");

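/*
 * Latency histograms keyed by struct hist_key (dev and cmd_flags,
 * defined in biolatency.h). The key fields stay zero unless
 * targ_per_disk/targ_per_flag is set, so by default everything lands
 * in one histogram. initial_hist is a zeroed template for new entries.
 */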
static struct hist initial_hist;

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct hist_key);
	__type(value, struct hist);
} hists SEC(".maps");

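/*
 * Stamp the start time for a request. On the issue hook (issue != 0)
 * with targ_queued set, requests that went through an I/O scheduler
 * (q->elevator != NULL) are skipped so the earlier block_rq_insert
 * timestamp, which includes queueing time, is not overwritten.
 */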
static __always_inline int trace_rq_start(struct request *rq, int issue)
{
	u64 ts;

	if (issue && targ_queued && BPF_CORE_READ(rq, q, elevator))
		return 0;

	ts = bpf_ktime_get_ns();

	if (filter_dev) {
		struct gendisk *disk = get_disk(rq);
		u32 dev;

		dev = disk ? MKDEV(BPF_CORE_READ(disk, major),
				   BPF_CORE_READ(disk, first_minor)) :
			     0;
		if (targ_dev != dev)
			return 0;
	}
	bpf_map_update_elem(&start, &rq, &ts, 0);
	return 0;
}

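/*
 * Raw tracepoints hand their arguments to BPF as an array of u64.
 * insert_arg_single/issue_arg_single are presumably set by userspace
 * after inspecting the running kernel, since the request pointer moved
 * from ctx[1] to ctx[0] in v5.11 (see the comment below).
 */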
SEC("raw_tp/block_rq_insert")
int ig_profio_ins(u64 *ctx)
{
	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	/**
	 * commit a54895fa (v5.11-rc1) changed tracepoint argument list
	 * from TP_PROTO(struct request_queue *q, struct request *rq)
	 * to TP_PROTO(struct request *rq)
	 */
	if (!insert_arg_single)
		return trace_rq_start((void *)ctx[1], false);
	else
		return trace_rq_start((void *)ctx[0], false);
}

SEC("raw_tp/block_rq_issue")
int ig_profio_iss(u64 *ctx)
{
	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	/**
	 * commit a54895fa (v5.11-rc1) changed tracepoint argument list
	 * from TP_PROTO(struct request_queue *q, struct request *rq)
	 * to TP_PROTO(struct request *rq)
	 */
	if (!issue_arg_single)
		return trace_rq_start((void *)ctx[1], true);
	else
		return trace_rq_start((void *)ctx[0], true);
}

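/*
 * BPF_PROG() unpacks the raw tracepoint's u64 argument array into
 * typed parameters; for block_rq_complete the request is the first
 * argument.
 */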
SEC("raw_tp/block_rq_complete")
int BPF_PROG(ig_profio_done, struct request *rq, int error,
	     unsigned int nr_bytes)
{
	u64 slot, *tsp, ts = bpf_ktime_get_ns();
	struct hist_key hkey = {};
	struct hist *histp;
	s64 delta;

	if (filter_cg && !bpf_current_task_under_cgroup(&cgroup_map, 0))
		return 0;

	tsp = bpf_map_lookup_elem(&start, &rq);
	if (!tsp)
		return 0;
	delta = (s64)(ts - *tsp);
	if (delta < 0)
		goto cleanup;

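	/*
	 * Build the histogram key: MKDEV() packs the disk's major and
	 * first_minor when per-disk histograms were requested; cmd_flags
	 * is recorded when per-flag histograms were requested.
	 */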
	if (targ_per_disk) {
		struct gendisk *disk = get_disk(rq);

		hkey.dev = disk ? MKDEV(BPF_CORE_READ(disk, major),
					BPF_CORE_READ(disk, first_minor)) :
				  0;
	}
	if (targ_per_flag)
		hkey.cmd_flags = BPF_CORE_READ(rq, cmd_flags);

	histp = bpf_map_lookup_elem(&hists, &hkey);
	if (!histp) {
		bpf_map_update_elem(&hists, &hkey, &initial_hist, 0);
		histp = bpf_map_lookup_elem(&hists, &hkey);
		if (!histp)
			goto cleanup;
	}

	if (targ_ms)
		delta /= 1000000U;
	else
		delta /= 1000U;
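	/*
	 * Bucket the latency (now in us or ms) into a log2 slot, clamped
	 * to the last slot so outliers cannot index past the array; the
	 * increment is atomic since completions can run on any CPU.
	 */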
	slot = log2l(delta);
	if (slot >= MAX_SLOTS)
		slot = MAX_SLOTS - 1;
	__sync_fetch_and_add(&histp->slots[slot], 1);

cleanup:
	bpf_map_delete_elem(&start, &rq);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";