github.com/cilium/cilium@v1.16.2/bpf/lib/overloadable_xdp.h

/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright Authors of Cilium */

#pragma once

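/* XDP implementations of Cilium's "overloadable" context helpers; the
 * TC/skb counterparts live in overloadable_skb.h. Shared datapath code
 * includes one flavor or the other depending on the attachment point.
 */
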
#include <linux/udp.h>
#include <linux/ip.h>

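/* The mark/identity, encryption-key and cluster-ID helpers below operate on
 * skb fields (mark, cb[]) that have no XDP equivalent, so they are no-op
 * stubs which only exist to keep shared datapath code compiling here.
 */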
static __always_inline __maybe_unused void
bpf_clear_meta(struct xdp_md *ctx __maybe_unused)
{
}

static __always_inline __maybe_unused int
get_identity(struct xdp_md *ctx __maybe_unused)
{
	return 0;
}

static __always_inline __maybe_unused void
set_identity_mark(struct xdp_md *ctx __maybe_unused, __u32 identity __maybe_unused,
		  __u32 magic __maybe_unused)
{
}

static __always_inline __maybe_unused void
set_identity_meta(struct xdp_md *ctx __maybe_unused,
		__u32 identity __maybe_unused)
{
}

static __always_inline __maybe_unused void
set_encrypt_key_mark(struct xdp_md *ctx __maybe_unused, __u8 key __maybe_unused,
		     __u32 node_id __maybe_unused)
{
}

static __always_inline __maybe_unused void
set_encrypt_key_meta(struct xdp_md *ctx __maybe_unused, __u8 key __maybe_unused,
		     __u32 node_id __maybe_unused)
{
}

static __always_inline __maybe_unused void
ctx_set_cluster_id_mark(struct xdp_md *ctx __maybe_unused, __u32 cluster_id __maybe_unused)
{
}

static __always_inline __maybe_unused __u32
ctx_get_cluster_id_mark(struct xdp_md *ctx __maybe_unused)
{
	return 0;
}

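/* At XDP, "redirect to self" maps to XDP_TX: the frame is sent back out of
 * the interface it was received on.
 */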
static __always_inline __maybe_unused int
redirect_self(struct xdp_md *ctx __maybe_unused)
{
	return XDP_TX;
}

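/* bpf_redirect_neigh() is a TC-only helper, so XDP cannot ask the kernel to
 * resolve the neighbor: the stub drops the packet, and
 * neigh_resolver_available() below reports false accordingly.
 */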
static __always_inline __maybe_unused int
redirect_neigh(int ifindex __maybe_unused,
	       struct bpf_redir_neigh *params __maybe_unused,
	       int plen __maybe_unused,
	       __u32 flags __maybe_unused)
{
	return XDP_DROP;
}

static __always_inline __maybe_unused bool
neigh_resolver_available(void)
{
	return false;
}

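/* Slot indices in the per-packet metadata scratch space accessed through
 * ctx_store_meta()/ctx_load_meta():
 */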
#define RECIRC_MARKER	5 /* tail call recirculation */
#define XFER_MARKER	6 /* xdp -> skb meta transfer */

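/* The RECIRC_MARKER slot records whether NodePort handling already ran, so
 * that a tail-call recirculation can skip it. Without ENABLE_NODEPORT there
 * is nothing to recirculate and ctx_skip_nodeport() is always true.
 */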
static __always_inline __maybe_unused void
ctx_skip_nodeport_clear(struct xdp_md *ctx __maybe_unused)
{
#ifdef ENABLE_NODEPORT
	ctx_store_meta(ctx, RECIRC_MARKER, 0);
#endif
}

static __always_inline __maybe_unused void
ctx_skip_nodeport_set(struct xdp_md *ctx __maybe_unused)
{
#ifdef ENABLE_NODEPORT
	ctx_store_meta(ctx, RECIRC_MARKER, 1);
#endif
}

static __always_inline __maybe_unused bool
ctx_skip_nodeport(struct xdp_md *ctx __maybe_unused)
{
#ifdef ENABLE_NODEPORT
	return ctx_load_meta(ctx, RECIRC_MARKER);
#else
	return true;
#endif
}

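/* XFER_MARKER accumulates XFER_* flags while the packet traverses the XDP
 * program; ctx_move_xfer() then prepends them to the packet as data_meta so
 * that the TC/skb stage can pick them up after the hand-off.
 */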
static __always_inline __maybe_unused __u32
ctx_get_xfer(struct xdp_md *ctx __maybe_unused, __u32 off __maybe_unused)
{
	return 0; /* Only intended for SKB context. */
}

static __always_inline __maybe_unused void ctx_set_xfer(struct xdp_md *ctx,
							__u32 meta)
{
	__u32 val = ctx_load_meta(ctx, XFER_MARKER);

	val |= meta;
	ctx_store_meta(ctx, XFER_MARKER, val);
}

static __always_inline __maybe_unused void ctx_move_xfer(struct xdp_md *ctx)
{
	__u32 meta_xfer = ctx_load_meta(ctx, XFER_MARKER);

	/* We transfer data from XFER_MARKER. This specifically
	 * does not break packet trains in GRO.
	 */
	if (meta_xfer) {
		if (!ctx_adjust_meta(ctx, -(int)sizeof(meta_xfer))) {
			__u32 *data_meta = ctx_data_meta(ctx);
			__u32 *data = ctx_data(ctx);

			if (!ctx_no_room(data_meta + 1, data))
				data_meta[XFER_FLAGS] = meta_xfer;
		}
	}
}

static __always_inline __maybe_unused int
ctx_change_head(struct xdp_md *ctx __maybe_unused,
		__u32 head_room __maybe_unused,
		__u64 flags __maybe_unused)
{
	return 0; /* Only intended for SKB context. */
}

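/* The SNAT-done flag is carried in the xfer flags so that it survives the
 * hand-off from XDP to the TC/skb stage.
 */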
static __always_inline void ctx_snat_done_set(struct xdp_md *ctx)
{
	ctx_set_xfer(ctx, XFER_PKT_SNAT_DONE);
}

static __always_inline bool ctx_snat_done(struct xdp_md *ctx)
{
	/* shouldn't be needed, there's no relevant Egress hook in XDP */
	return ctx_load_meta(ctx, XFER_MARKER) & XFER_PKT_SNAT_DONE;
}

#ifdef HAVE_ENCAP
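/* Build the outer encapsulation headers in place: Ethernet (14) + IPv4 (20)
 * + UDP (8) + VXLAN/GENEVE (8) = 50 bytes, plus any GENEVE options. The
 * security identity travels in the tunnel VNI. On success the packet is
 * ready to be redirected towards the underlay, hence CTX_ACT_REDIRECT.
 */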
static __always_inline __maybe_unused int
ctx_set_encap_info(struct xdp_md *ctx, __u32 src_ip, __be16 src_port,
		   __u32 daddr, __u32 seclabel __maybe_unused,
		   __u32 vni __maybe_unused, void *opt, __u32 opt_len)
{
	__u32 inner_len = ctx_full_len(ctx);
	__u32 tunnel_hdr_len = 8; /* geneve / vxlan */
	void *data, *data_end;
	struct ethhdr *eth;
	struct udphdr *udp;
	struct iphdr *ip4;
	__u32 outer_len;

	/* Add space in front (50 bytes + options) */
	outer_len = sizeof(*eth) + sizeof(*ip4) + sizeof(*udp) + tunnel_hdr_len + opt_len;

	if (ctx_adjust_hroom(ctx, outer_len, BPF_ADJ_ROOM_NET, ctx_adjust_hroom_flags()))
		return DROP_INVALID;

	/* validate access to outer headers: */
	data = ctx_data(ctx);
	data_end = ctx_data_end(ctx);

	if (data + outer_len > data_end)
		return DROP_INVALID;

	eth = data;
	ip4 = (void *)eth + sizeof(*eth);
	udp = (void *)ip4 + sizeof(*ip4);

	memset(data, 0, sizeof(*eth) + sizeof(*ip4) + sizeof(*udp) + tunnel_hdr_len);

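	/* Both VXLAN and GENEVE carry the identity in the 24-bit VNI: shift
	 * the tunnel ID into the upper three bytes and copy the resulting
	 * big-endian word over the VNI field (the fourth byte lands in the
	 * adjacent reserved byte, which was zeroed above).
	 */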
	switch (TUNNEL_PROTOCOL) {
	case TUNNEL_PROTOCOL_GENEVE:
		{
			struct genevehdr *geneve = (void *)udp + sizeof(*udp);

			if (opt_len > 0)
				memcpy((void *)geneve + sizeof(*geneve), opt, opt_len);

			geneve->opt_len = (__u8)(opt_len >> 2);
			geneve->protocol_type = bpf_htons(ETH_P_TEB);

			seclabel = bpf_htonl(get_tunnel_id(seclabel) << 8);
			memcpy(&geneve->vni, &seclabel, sizeof(__u32));
		}
		break;
	case TUNNEL_PROTOCOL_VXLAN:
		if (opt_len > 0)
			return DROP_INVALID;

		{
			struct vxlanhdr *vxlan = (void *)udp + sizeof(*udp);

			/* set the VNI-valid ("I") flag: */
			vxlan->vx_flags = bpf_htonl(1U << 27);

			seclabel = bpf_htonl(get_tunnel_id(seclabel) << 8);
			memcpy(&vxlan->vx_vni, &seclabel, sizeof(__u32));
		}
		break;
	default:
		__throw_build_bug();
	}

	udp->source = src_port;
	udp->dest = bpf_htons(TUNNEL_PORT);
	udp->len = bpf_htons((__u16)(sizeof(*udp) + tunnel_hdr_len + opt_len + inner_len));
	udp->check = 0; /* a zero UDP checksum is valid over IPv4 */

	ip4->ihl = 5;
	ip4->version = IPVERSION;
	ip4->tot_len = bpf_htons((__u16)(sizeof(*ip4) + bpf_ntohs(udp->len)));
	ip4->ttl = IPDEFTTL;
	ip4->protocol = IPPROTO_UDP;
	ip4->saddr = src_ip;
	ip4->daddr = daddr; /* already in network byte order, like src_ip */
	ip4->check = csum_fold(csum_diff(NULL, 0, ip4, sizeof(*ip4), 0));

	eth->h_proto = bpf_htons(ETH_P_IP);

	return CTX_ACT_REDIRECT;
}

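/* Hypothetical caller sketch, for illustration only (the names below are
 * invented here; actual call sites live in the encap/nodeport datapath):
 *
 *	int ret = ctx_set_encap_info(ctx, router_ipv4, tunnel_src_port,
 *				     tunnel_endpoint, seclabel, 0, NULL, 0);
 *	if (ret == CTX_ACT_REDIRECT)
 *		ret = ctx_redirect(ctx, underlay_dev_ifindex, 0);
 */

/* Append GENEVE options to an already-encapsulated packet: grow the packet,
 * write the options right behind the fixed GENEVE header, then bump the
 * header's opt_len, which counts options in 4-byte words.
 */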
static __always_inline __maybe_unused int
ctx_set_tunnel_opt(struct xdp_md *ctx, void *opt, __u32 opt_len)
{
	const __u32 geneve_off = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct udphdr);
	struct genevehdr geneve;

	/* add free space after GENEVE header: */
	if (ctx_adjust_hroom(ctx, opt_len, BPF_ADJ_ROOM_MAC, ctx_adjust_hroom_flags()) < 0)
		return DROP_INVALID;

	/* write the options */
	if (ctx_store_bytes(ctx, geneve_off + sizeof(geneve), opt, opt_len, 0) < 0)
		return DROP_WRITE_ERROR;

	/* update the options length in the GENEVE header: */
	if (ctx_load_bytes(ctx, geneve_off, &geneve, sizeof(geneve)) < 0)
		return DROP_INVALID;

	geneve.opt_len += (__u8)(opt_len >> 2);

	if (ctx_store_bytes(ctx, geneve_off, &geneve, sizeof(geneve), 0) < 0)
		return DROP_WRITE_ERROR;

	return 0;
}
#endif /* HAVE_ENCAP */