github.com/datadog/cilium@v1.6.12/bpf/lib/utils.h (about)

     1  /*
     2   *  Copyright (C) 2016-2019 Authors of Cilium
     3   *
     4   *  This program is free software; you can redistribute it and/or modify
     5   *  it under the terms of the GNU General Public License as published by
     6   *  the Free Software Foundation; either version 2 of the License, or
     7   *  (at your option) any later version.
     8   *
     9   *  This program is distributed in the hope that it will be useful,
    10   *  but WITHOUT ANY WARRANTY; without even the implied warranty of
    11   *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    12   *  GNU General Public License for more details.
    13   *
    14   *  You should have received a copy of the GNU General Public License
    15   *  along with this program; if not, write to the Free Software
    16   *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
    17   */
    18  #ifndef __LIB_UTILS_H_
    19  #define __LIB_UTILS_H_
    20  
    21  #include <bpf/api.h>
    22  
/* min() - evaluate x and y exactly once and yield the smaller value.
 * Statement-expression form avoids the double-evaluation hazard of a
 * naive ternary macro. The (void)(&_x == &_y) pointer comparison is a
 * compile-time type check: it makes the compiler warn when x and y
 * have incompatible types, without generating any code.
 */
#define min(x, y)		\
({				\
	typeof(x) _x = (x);	\
	typeof(y) _y = (y);	\
	(void) (&_x == &_y);	\
	_x < _y ? _x : _y;	\
})
    30  
/* max() - evaluate x and y exactly once and yield the larger value.
 * Same single-evaluation and type-check scheme as min() above.
 */
#define max(x, y)		\
({				\
	typeof(x) _x = (x);	\
	typeof(y) _y = (y);	\
	(void) (&_x == &_y);	\
	_x > _y ? _x : _y;	\
})
    38  
/* bpf_barrier() - compiler-level memory barrier (no CPU fence).
 * The empty asm with a "memory" clobber prevents the compiler from
 * reordering, caching, or folding memory accesses across this point.
 */
static inline void bpf_barrier(void)
{
	/* Workaround to avoid verifier complaint:
	 * "dereference of modified ctx ptr R5 off=48+0, ctx+const is allowed, ctx+const+const is not"
	 */
	asm volatile("" ::: "memory");
}
    46  
/* __{READ,WRITE}_ONCE() - force a single access of the exact width of x
 * by going through a volatile-qualified pointer, so the compiler cannot
 * tear, merge, or elide the load/store.
 */
#ifndef __READ_ONCE
# define __READ_ONCE(x)		(*(volatile typeof(x) *)&x)
#endif
#ifndef __WRITE_ONCE
# define __WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&x) = (v)
#endif

/* {READ,WRITE}_ONCE() with verifier workaround via bpf_barrier(). */
#ifndef READ_ONCE
/* Yields the value read; the trailing barrier keeps the compiler from
 * rearranging surrounding accesses (see bpf_barrier() above). */
# define READ_ONCE(x)		\
	({ typeof(x) __val; __val = __READ_ONCE(x); bpf_barrier(); __val; })
#endif
#ifndef WRITE_ONCE
/* Yields the value written, mirroring READ_ONCE(). */
# define WRITE_ONCE(x, v)	\
	({ typeof(x) __val = (v); __WRITE_ONCE(x, __val); bpf_barrier(); __val; })
#endif
    63  
    64  /* Clear CB values */
    65  static inline void bpf_clear_cb(struct __sk_buff *skb)
    66  {
    67  	__u32 zero = 0;
    68  	skb->cb[0] = zero;
    69  	skb->cb[1] = zero;
    70  	skb->cb[2] = zero;
    71  	skb->cb[3] = zero;
    72  	skb->cb[4] = zero;
    73  }
    74  
/* Nanoseconds per second; unsigned long to keep 64-bit division unsigned. */
#define NSEC_PER_SEC	1000000000UL

/* Monotonic clock, scalar format. */
/* Thin wrapper over the ktime_get_ns() BPF helper: nanoseconds since
 * boot as a raw scalar. */
static inline __u64 bpf_ktime_get_nsec(void)
{
	return ktime_get_ns();
}
    82  
    83  static inline __u32 bpf_ktime_get_sec(void)
    84  {
    85  	/* Ignores remainder subtraction as we'd do in
    86  	 * ns_to_timespec(), but good enough here.
    87  	 */
    88  	return (__u64)(bpf_ktime_get_nsec() / NSEC_PER_SEC);
    89  }
    90  
/* Byte-order conversion primitives for non-constant values: on
 * little-endian targets host<->network order needs a byte swap, on
 * big-endian targets it is the identity. */
#if __BYTE_ORDER == __LITTLE_ENDIAN
# define __bpf_ntohs(x)		__builtin_bswap16(x)
# define __bpf_htons(x)		__builtin_bswap16(x)
# define __bpf_ntohl(x)		__builtin_bswap32(x)
# define __bpf_htonl(x)		__builtin_bswap32(x)
#elif __BYTE_ORDER == __BIG_ENDIAN
# define __bpf_ntohs(x)		(x)
# define __bpf_htons(x)		(x)
# define __bpf_ntohl(x)		(x)
# define __bpf_htonl(x)		(x)
#else
# error "Fix your __BYTE_ORDER?!"
#endif
   104  
/* bpf_{hton,ntoh}{s,l}() - dispatch at compile time: constant arguments
 * go through the __constant_* forms so the conversion folds into an
 * immediate; runtime values use the __bpf_* bswap builtins above. */
#define bpf_htons(x)				\
	(__builtin_constant_p(x) ?		\
	 __constant_htons(x) : __bpf_htons(x))
#define bpf_ntohs(x)				\
	(__builtin_constant_p(x) ?		\
	 __constant_ntohs(x) : __bpf_ntohs(x))

#define bpf_htonl(x)				\
	(__builtin_constant_p(x) ?		\
	 __constant_htonl(x) : __bpf_htonl(x))
#define bpf_ntohl(x)				\
	(__builtin_constant_p(x) ?		\
	 __constant_ntohl(x) : __bpf_ntohl(x))
   118  
#ifndef __fetch
/* Take the address of static datum x as a 32-bit scalar.
 * NOTE(review): truncating the address to __u32 presumably relies on the
 * loader patching these references at ELF relocation time — confirm. */
# define __fetch(x) (__u32)(&(x))
#endif

#ifndef build_bug_on
/* Compile-time assertion: when e is true the array size becomes
 * negative, which is a hard compile error; (void)sizeof emits no code. */
# define build_bug_on(e) ((void)sizeof(char[1 - 2*!!(e)]))
#endif
   126  
/* fetch_* macros assist in fetching variously sized static data */
/* fetch_u32_i(x, i) references the i-th word x_<i> as declared by the
 * DEFINE_U32_I()-based macros below; fetch_ipv6()/fetch_mac() expand to
 * the word lists matching DEFINE_IPV6()/DEFINE_MAC(). */
#define fetch_u32(x) __fetch(x)
#define fetch_u32_i(x, i) __fetch(x ## _ ## i)
#define fetch_ipv6(x) fetch_u32_i(x, 1), fetch_u32_i(x, 2), fetch_u32_i(x, 3), fetch_u32_i(x, 4)
#define fetch_mac(x) { { fetch_u32_i(x, 1), (__u16)fetch_u32_i(x, 2) } }
   132  
   133  /* DEFINE_* macros help to declare static data. */
   134  #define DEFINE_U32(NAME, value) uint32_t NAME = value
   135  #define DEFINE_U32_I(NAME, i) uint32_t NAME ## _ ## i
   136  #define DEFINE_IPV6(NAME, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16)	\
   137  DEFINE_U32_I(NAME, 1) = bpf_htonl( (a1) << 24 |  (a2) << 16 |  (a3) << 8 |  (a4));			\
   138  DEFINE_U32_I(NAME, 2) = bpf_htonl( (a5) << 24 |  (a6) << 16 |  (a7) << 8 |  (a8));			\
   139  DEFINE_U32_I(NAME, 3) = bpf_htonl( (a9) << 24 | (a10) << 16 | (a11) << 8 | (a12));			\
   140  DEFINE_U32_I(NAME, 4) = bpf_htonl((a13) << 24 | (a14) << 16 | (a15) << 8 | (a16))
   141  
   142  #define DEFINE_MAC(NAME, a1, a2, a3, a4, a5, a6)			\
   143  DEFINE_U32_I(NAME, 1) = (a1) << 24 | (a2) << 16 |  (a3) << 8 | (a4);	\
   144  DEFINE_U32_I(NAME, 2) =                            (a5) << 8 | (a6)
   145  
   146  #endif