github.com/kubeshark/ebpf@v0.9.2/btf/testdata/bpf_core_read.h

/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_CORE_READ_H__
#define __BPF_CORE_READ_H__

/*
 * enum bpf_field_info_kind is passed as the second argument to the
 * __builtin_preserve_field_info() built-in to get a specific aspect of
 * a field, captured as the first argument. __builtin_preserve_field_info(field,
 * info_kind) returns a __u32 integer and produces a BTF field relocation,
 * which is understood and processed by libbpf during BPF object loading. See
 * selftests/bpf for examples.
 */
enum bpf_field_info_kind {
	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
	BPF_FIELD_BYTE_SIZE = 1,
	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
	BPF_FIELD_SIGNED = 3,
	BPF_FIELD_LSHIFT_U64 = 4,
	BPF_FIELD_RSHIFT_U64 = 5,
};
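
/*
 * A minimal usage sketch, assuming a BPF program compiled with Clang against
 * vmlinux.h, so that struct task_struct is a known local BTF type:
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	__u32 off = __builtin_preserve_field_info(task->pid, BPF_FIELD_BYTE_OFFSET);
 *	__u32 sz = __builtin_preserve_field_info(task->pid, BPF_FIELD_BYTE_SIZE);
 *
 * Each call records a CO-RE relocation that libbpf resolves at load time
 * against the running kernel's BTF.
 */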

/* second argument to __builtin_btf_type_id() built-in */
enum bpf_type_id_kind {
	BPF_TYPE_ID_LOCAL = 0,		/* BTF type ID in local program */
	BPF_TYPE_ID_TARGET = 1,		/* BTF type ID in target kernel */
};

/* second argument to __builtin_preserve_type_info() built-in */
enum bpf_type_info_kind {
	BPF_TYPE_EXISTS = 0,		/* type existence in target kernel */
	BPF_TYPE_SIZE = 1,		/* type size in target kernel */
};

/* second argument to __builtin_preserve_enum_value() built-in */
enum bpf_enum_value_kind {
	BPF_ENUMVAL_EXISTS = 0,		/* enum value existence in kernel */
	BPF_ENUMVAL_VALUE = 1,		/* enum value integer value */
};

#define __CORE_RELO(src, field, info)					      \
	__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
	bpf_probe_read_kernel(						      \
			(void *)dst,					      \
			__CORE_RELO(src, fld, BYTE_SIZE),		      \
			(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_U64 assume loading values into low-ordered bytes, so
 * for big-endian we need to adjust the destination pointer accordingly,
 * based on field byte size
 */
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
	bpf_probe_read_kernel(						      \
			(void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
			__CORE_RELO(src, fld, BYTE_SIZE),		      \
			(const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * All of this is done in a relocatable manner, so bitfield changes such as
 * signedness, bit size, or offset are handled automatically.
 * This version of the macro uses bpf_probe_read_kernel() to read the
 * underlying integer storage. The macro functions as an expression and
 * evaluates to the extracted bitfield value; the error code of
 * bpf_probe_read_kernel() is not propagated (on failure the result is 0,
 * because the local storage starts out zeroed).
 */
#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({			      \
	unsigned long long val = 0;					      \
									      \
	__CORE_BITFIELD_PROBE_READ(&val, s, field);			      \
	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
	if (__CORE_RELO(s, field, SIGNED))				      \
		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
	else								      \
		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
	val;								      \
})
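
/*
 * A hypothetical usage sketch, assuming a kernel struct with a bitfield
 * (struct and field names are illustrative and may differ across kernels):
 *
 *	struct tcp_sock *tp = ...;
 *	unsigned long long is_mptcp;
 *
 *	is_mptcp = BPF_CORE_READ_BITFIELD_PROBED(tp, is_mptcp);
 *
 * This works from any program type that may call bpf_probe_read_kernel().
 */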

/*
 * Extract bitfield, identified by s->field, and return its value as u64.
 * This version of the macro uses direct memory reads and should be used from
 * BPF program types that support such functionality (e.g., typed raw
 * tracepoints).
 */
#define BPF_CORE_READ_BITFIELD(s, field) ({				      \
	const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
	unsigned long long val;						      \
									      \
	/* This is a so-called barrier_var() operation that makes the	      \
	 * specified variable "a black box" for the optimizing compiler.     \
	 * It forces the compiler to perform the BYTE_OFFSET relocation on p \
	 * and use its calculated value in the switch below, instead of      \
	 * applying the same relocation 4 times for each individual memory   \
	 * load.							      \
	 */								      \
	asm volatile("" : "=r"(p) : "0"(p));				      \
									      \
	switch (__CORE_RELO(s, field, BYTE_SIZE)) {			      \
	case 1: val = *(const unsigned char *)p; break;			      \
	case 2: val = *(const unsigned short *)p; break;		      \
	case 4: val = *(const unsigned int *)p; break;			      \
	case 8: val = *(const unsigned long long *)p; break;		      \
	}								      \
	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
	if (__CORE_RELO(s, field, SIGNED))				      \
		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
	else								      \
		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
	val;								      \
})
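
/*
 * A hypothetical sketch for a program type with direct memory access,
 * assuming vmlinux.h plus the SEC()/BPF_PROG() helpers from bpf_helpers.h and
 * bpf_tracing.h, and a 'pkt_type' bitfield in the target kernel's sk_buff:
 *
 *	SEC("tp_btf/kfree_skb")
 *	int BPF_PROG(on_kfree_skb, struct sk_buff *skb)
 *	{
 *		unsigned long long pkt_type;
 *
 *		pkt_type = BPF_CORE_READ_BITFIELD(skb, pkt_type);
 *		return 0;
 *	}
 */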

/*
 * Convenience macro to check that a field actually exists in the target
 * kernel.
 * Returns:
 *    1, if matching field is present in target kernel;
 *    0, if no matching field found.
 */
#define bpf_core_field_exists(field)					    \
	__builtin_preserve_field_info(field, BPF_FIELD_EXISTS)

/*
 * Convenience macro to get the byte size of a field. Works for integers,
 * struct/unions, pointers, arrays, and enums.
 */
#define bpf_core_field_size(field)					    \
	__builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
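
/*
 * A hypothetical sketch of kernel-version-tolerant field access, assuming a
 * recent vmlinux.h (task_struct::state was renamed to __state in v5.14; the
 * ___old suffix defines a CO-RE "flavor" that is ignored during matching):
 *
 *	struct task_struct___old {
 *		long state;
 *	} __attribute__((preserve_access_index));
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	long state;
 *
 *	if (bpf_core_field_exists(task->__state))
 *		state = BPF_CORE_READ(task, __state);
 *	else
 *		state = BPF_CORE_READ((struct task_struct___old *)task, state);
 *
 *	int comm_sz = bpf_core_field_size(task->comm);
 */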

/*
 * Convenience macro to get the BTF type ID of a specified type, using local
 * BTF information. Returns a 32-bit unsigned integer with the type ID from
 * the program's own BTF. Always succeeds.
 */
#define bpf_core_type_id_local(type)					    \
	__builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)

/*
 * Convenience macro to get the BTF type ID of a target kernel's type that
 * matches the specified local type.
 * Returns:
 *    - valid 32-bit unsigned type ID in kernel BTF;
 *    - 0, if no matching type was found in a target kernel BTF.
 */
#define bpf_core_type_id_kernel(type)					    \
	__builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
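
/*
 * A hypothetical sketch: the type-ID macros are typically paired with helpers
 * that take a kernel BTF type ID, e.g. bpf_snprintf_btf():
 *
 *	struct btf_ptr ptr = {
 *		.ptr = task,
 *		.type_id = bpf_core_type_id_kernel(struct task_struct),
 *	};
 *	char buf[256];
 *
 *	bpf_snprintf_btf(buf, sizeof(buf), &ptr, sizeof(ptr), 0);
 *
 *	__u32 local_id = bpf_core_type_id_local(struct task_struct);
 */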

/*
 * Convenience macro to check that provided named type
 * (struct/union/enum/typedef) exists in a target kernel.
 * Returns:
 *    1, if such type is present in target kernel's BTF;
 *    0, if no matching type is found.
 */
#define bpf_core_type_exists(type)					    \
	__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)

/*
 * Convenience macro to get the byte size of a provided named type
 * (struct/union/enum/typedef) in a target kernel.
 * Returns:
 *    >= 0 size (in bytes), if type is present in target kernel's BTF;
 *    0, if no matching type is found.
 */
#define bpf_core_type_size(type)					    \
	__builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
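
/*
 * A hypothetical sketch, probing for a type that only exists in some kernel
 * versions (the type name is illustrative):
 *
 *	__u32 sz = 0;
 *
 *	if (bpf_core_type_exists(struct mptcp_sock))
 *		sz = bpf_core_type_size(struct mptcp_sock);
 */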

/*
 * Convenience macro to check that provided enumerator value is defined in
 * a target kernel.
 * Returns:
 *    1, if specified enum type and its enumerator value are present in target
 *    kernel's BTF;
 *    0, if no matching enum and/or enum value within that enum is found.
 */
#define bpf_core_enum_value_exists(enum_type, enum_value)		    \
	__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)

/*
 * Convenience macro to get the integer value of an enumerator value in
 * a target kernel.
 * Returns:
 *    64-bit value, if specified enum type and its enumerator value are
 *    present in target kernel's BTF;
 *    0, if no matching enum and/or enum value within that enum is found.
 */
#define bpf_core_enum_value(enum_type, enum_value)			    \
	__builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
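
/*
 * A hypothetical sketch, resolving an enumerator whose presence and value may
 * differ between kernel versions (enum and enumerator names are illustrative):
 *
 *	int memcg_id = -1;
 *
 *	if (bpf_core_enum_value_exists(enum cgroup_subsys_id, memory_cgrp_id))
 *		memcg_id = bpf_core_enum_value(enum cgroup_subsys_id, memory_cgrp_id);
 */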

/*
 * bpf_core_read() abstracts away the bpf_probe_read_kernel() call and
 * captures the offset relocation for the source address using the
 * __builtin_preserve_access_index() built-in, provided by Clang.
 *
 * __builtin_preserve_access_index() takes as an argument an expression that
 * takes the address of a field within a struct/union. It makes the compiler
 * emit a relocation, which records the BTF type ID describing the root
 * struct/union and an accessor string which describes the exact embedded
 * field that was used to take the address. See the detailed description of
 * this relocation format and semantics in comments to struct bpf_field_reloc
 * in libbpf_internal.h.
 *
 * This relocation allows libbpf to adjust the BPF instruction to use the
 * correct actual field offset, based on the target kernel BTF type that
 * matches the original (local) BTF used to record the relocation.
 */
#define bpf_core_read(dst, sz, src)					    \
	bpf_probe_read_kernel(dst, sz, (const void *)__builtin_preserve_access_index(src))

/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types to use. */
#define bpf_core_read_user(dst, sz, src)				    \
	bpf_probe_read_user(dst, sz, (const void *)__builtin_preserve_access_index(src))

/*
 * bpf_core_read_str() is a thin wrapper around bpf_probe_read_kernel_str()
 * additionally emitting a BPF CO-RE field relocation for the specified source
 * argument.
 */
#define bpf_core_read_str(dst, sz, src)					    \
	bpf_probe_read_kernel_str(dst, sz, (const void *)__builtin_preserve_access_index(src))

/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types to use. */
#define bpf_core_read_user_str(dst, sz, src)				    \
	bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
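
/*
 * A hypothetical sketch of the low-level single-read wrappers (the
 * BPF_CORE_READ() family below is usually more convenient):
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	pid_t pid;
 *	char comm[16];
 *
 *	bpf_core_read(&pid, sizeof(pid), &task->pid);
 *	bpf_core_read_str(&comm, sizeof(comm), &task->comm);
 */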

#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
#define ___nth(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, __11, N, ...) N

/*
 * return the number of provided arguments; used for switch-based variadic
 * macro definitions (see ___last, ___arrow, etc. below)
 */
#define ___narg(...) ___nth(_, ##__VA_ARGS__, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
/*
 * return 0 if no arguments are passed, N otherwise; used for
 * recursively-defined macros to specify the termination (0) case and the
 * generic (N) case (e.g., ___read_ptrs, ___core_read)
 */
#define ___empty(...) ___nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
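
/*
 * For illustration, the argument-counting helpers above expand roughly as:
 *
 *	___narg()        -> 0
 *	___narg(a)       -> 1
 *	___narg(a, b, c) -> 3
 *	___empty()       -> 0
 *	___empty(a, b)   -> N
 *
 * which lets ___apply() paste the count onto a macro family name and pick,
 * e.g., ___last3 or ___core_readN.
 */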

#define ___last1(x) x
#define ___last2(a, x) x
#define ___last3(a, b, x) x
#define ___last4(a, b, c, x) x
#define ___last5(a, b, c, d, x) x
#define ___last6(a, b, c, d, e, x) x
#define ___last7(a, b, c, d, e, f, x) x
#define ___last8(a, b, c, d, e, f, g, x) x
#define ___last9(a, b, c, d, e, f, g, h, x) x
#define ___last10(a, b, c, d, e, f, g, h, i, x) x
#define ___last(...) ___apply(___last, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___nolast2(a, _) a
#define ___nolast3(a, b, _) a, b
#define ___nolast4(a, b, c, _) a, b, c
#define ___nolast5(a, b, c, d, _) a, b, c, d
#define ___nolast6(a, b, c, d, e, _) a, b, c, d, e
#define ___nolast7(a, b, c, d, e, f, _) a, b, c, d, e, f
#define ___nolast8(a, b, c, d, e, f, g, _) a, b, c, d, e, f, g
#define ___nolast9(a, b, c, d, e, f, g, h, _) a, b, c, d, e, f, g, h
#define ___nolast10(a, b, c, d, e, f, g, h, i, _) a, b, c, d, e, f, g, h, i
#define ___nolast(...) ___apply(___nolast, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___arrow1(a) a
#define ___arrow2(a, b) a->b
#define ___arrow3(a, b, c) a->b->c
#define ___arrow4(a, b, c, d) a->b->c->d
#define ___arrow5(a, b, c, d, e) a->b->c->d->e
#define ___arrow6(a, b, c, d, e, f) a->b->c->d->e->f
#define ___arrow7(a, b, c, d, e, f, g) a->b->c->d->e->f->g
#define ___arrow8(a, b, c, d, e, f, g, h) a->b->c->d->e->f->g->h
#define ___arrow9(a, b, c, d, e, f, g, h, i) a->b->c->d->e->f->g->h->i
#define ___arrow10(a, b, c, d, e, f, g, h, i, j) a->b->c->d->e->f->g->h->i->j
#define ___arrow(...) ___apply(___arrow, ___narg(__VA_ARGS__))(__VA_ARGS__)

#define ___type(...) typeof(___arrow(__VA_ARGS__))

#define ___read(read_fn, dst, src_type, src, accessor)			    \
	read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)

/* "recursively" read a sequence of inner pointers using local __t var */
#define ___rd_first(fn, src, a) ___read(fn, &__t, ___type(src), src, a);
#define ___rd_last(fn, ...)						    \
	___read(fn, &__t, ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
#define ___rd_p1(fn, ...) const void *__t; ___rd_first(fn, __VA_ARGS__)
#define ___rd_p2(fn, ...) ___rd_p1(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p3(fn, ...) ___rd_p2(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p4(fn, ...) ___rd_p3(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p5(fn, ...) ___rd_p4(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p6(fn, ...) ___rd_p5(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p7(fn, ...) ___rd_p6(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p8(fn, ...) ___rd_p7(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___rd_p9(fn, ...) ___rd_p8(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
#define ___read_ptrs(fn, src, ...)					    \
	___apply(___rd_p, ___narg(__VA_ARGS__))(fn, src, __VA_ARGS__)

#define ___core_read0(fn, fn_ptr, dst, src, a)				    \
	___read(fn, dst, ___type(src), src, a);
#define ___core_readN(fn, fn_ptr, dst, src, ...)			    \
	___read_ptrs(fn_ptr, src, ___nolast(__VA_ARGS__))		    \
	___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t,	    \
		___last(__VA_ARGS__));
#define ___core_read(fn, fn_ptr, dst, src, a, ...)			    \
	___apply(___core_read, ___empty(__VA_ARGS__))(fn, fn_ptr, dst,	    \
						      src, a, ##__VA_ARGS__)

/*
 * BPF_CORE_READ_INTO() is a more performance-conscious variant of
 * BPF_CORE_READ(), in which the final field is read into user-provided
 * storage. See BPF_CORE_READ() below for more details on general usage.
 */
#define BPF_CORE_READ_INTO(dst, src, a, ...) ({				    \
	___core_read(bpf_core_read, bpf_core_read,			    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})
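
/*
 * A hypothetical usage sketch, reading task->group_leader->comm into a local
 * buffer (the expression evaluates to the result of the final read):
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	char comm[16];
 *	int err;
 *
 *	err = BPF_CORE_READ_INTO(&comm, task, group_leader, comm);
 */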

/*
 * Variant of BPF_CORE_READ_INTO() for reading from user-space memory.
 *
 * NOTE: see comments for BPF_CORE_READ_USER() about the proper types to use.
 */
#define BPF_CORE_READ_USER_INTO(dst, src, a, ...) ({			    \
	___core_read(bpf_core_read_user, bpf_core_read_user,		    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/* Non-CO-RE variant of BPF_CORE_READ_INTO() */
#define BPF_PROBE_READ_INTO(dst, src, a, ...) ({			    \
	___core_read(bpf_probe_read, bpf_probe_read,			    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/* Non-CO-RE variant of BPF_CORE_READ_USER_INTO().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER_INTO(dst, src, a, ...) ({			    \
	___core_read(bpf_probe_read_user, bpf_probe_read_user,		    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/*
 * BPF_CORE_READ_STR_INTO() does the same "pointer chasing" as BPF_CORE_READ()
 * for intermediate pointers, but then executes (and returns the result of)
 * bpf_core_read_str() for the final string read.
 */
#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) ({			    \
	___core_read(bpf_core_read_str, bpf_core_read,			    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})
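
/*
 * A hypothetical usage sketch, copying a NUL-terminated string through the
 * pointer chain:
 *
 *	struct task_struct *task = (void *)bpf_get_current_task();
 *	char name[16];
 *	long ret;
 *
 *	ret = BPF_CORE_READ_STR_INTO(&name, task, group_leader, comm);
 */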

/*
 * Variant of BPF_CORE_READ_STR_INTO() for reading from user-space memory.
 *
 * NOTE: see comments for BPF_CORE_READ_USER() about the proper types to use.
 */
#define BPF_CORE_READ_USER_STR_INTO(dst, src, a, ...) ({		    \
	___core_read(bpf_core_read_user_str, bpf_core_read_user,	    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */
#define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({			    \
	___core_read(bpf_probe_read_str, bpf_probe_read,		    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/*
 * Non-CO-RE variant of BPF_CORE_READ_USER_STR_INTO().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER_STR_INTO(dst, src, a, ...) ({		    \
	___core_read(bpf_probe_read_user_str, bpf_probe_read_user,	    \
		     dst, (src), a, ##__VA_ARGS__)			    \
})

/*
 * BPF_CORE_READ() is used to simplify BPF CO-RE relocatable reads, especially
 * when there are a few pointer-chasing steps.
 * E.g., what in the non-BPF world (or in BPF with BCC) would be something like:
 *	int x = s->a.b.c->d.e->f->g;
 * can be succinctly achieved using BPF_CORE_READ as:
 *	int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
 *
 * BPF_CORE_READ will decompose the above statement into 4 bpf_core_read()
 * (a BPF CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically
 * equivalent to:
 * 1. const void *__t = s->a.b.c;
 * 2. __t = __t->d.e;
 * 3. __t = __t->f;
 * 4. return __t->g;
 *
 * The equivalence is only logical, because heavy type casting/preservation is
 * involved and all the reads happen through bpf_probe_read_kernel() calls
 * using __builtin_preserve_access_index() to emit CO-RE relocations.
 *
 * N.B. Only up to 9 "field accessors" are supported, which should be more
 * than enough for any practical purpose.
 */
#define BPF_CORE_READ(src, a, ...) ({					    \
	___type((src), a, ##__VA_ARGS__) __r;				    \
	BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__);		    \
	__r;								    \
})

/*
 * Variant of BPF_CORE_READ() for reading from user-space memory.
 *
 * NOTE: all the source types involved are still *kernel types* and need to
 * exist in kernel (or kernel module) BTF, otherwise CO-RE relocation will
 * fail. Custom user types are not relocatable with CO-RE.
 * The typical situation in which BPF_CORE_READ_USER() might be used is to
 * read kernel UAPI types from the user-space memory passed in as a syscall
 * input argument.
 */
#define BPF_CORE_READ_USER(src, a, ...) ({				    \
	___type((src), a, ##__VA_ARGS__) __r;				    \
	BPF_CORE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__);	    \
	__r;								    \
})
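
/*
 * A hypothetical sketch, reading a UAPI struct from a user-space pointer
 * passed to a syscall (context and field names follow vmlinux.h conventions
 * and are illustrative):
 *
 *	SEC("tracepoint/syscalls/sys_enter_nanosleep")
 *	int on_nanosleep(struct trace_event_raw_sys_enter *ctx)
 *	{
 *		struct __kernel_timespec *ts = (void *)ctx->args[0];
 *		__s64 sec = BPF_CORE_READ_USER(ts, tv_sec);
 *
 *		return 0;
 *	}
 */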

/* Non-CO-RE variant of BPF_CORE_READ() */
#define BPF_PROBE_READ(src, a, ...) ({					    \
	___type((src), a, ##__VA_ARGS__) __r;				    \
	BPF_PROBE_READ_INTO(&__r, (src), a, ##__VA_ARGS__);		    \
	__r;								    \
})

/*
 * Non-CO-RE variant of BPF_CORE_READ_USER().
 *
 * As no CO-RE relocations are emitted, source types can be arbitrary and are
 * not restricted to kernel types only.
 */
#define BPF_PROBE_READ_USER(src, a, ...) ({				    \
	___type((src), a, ##__VA_ARGS__) __r;				    \
	BPF_PROBE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__);	    \
	__r;								    \
})
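
/*
 * A hypothetical sketch: with no relocations emitted, the pointed-to types
 * can be an application's own (non-kernel) structs, e.g. in a uprobe
 * (PT_REGS_PARM1() and BPF_KPROBE() come from bpf_tracing.h; the types are
 * illustrative):
 *
 *	struct app_conn { int fd; };
 *	struct app_req { struct app_conn *conn; };
 *
 *	SEC("uprobe/handle_request")
 *	int BPF_KPROBE(on_handle_request)
 *	{
 *		struct app_req *req = (void *)PT_REGS_PARM1(ctx);
 *		int fd = BPF_PROBE_READ_USER(req, conn, fd);
 *
 *		return 0;
 *	}
 */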

#endif