github.com/cilium/cilium@v1.16.2/bpf/tests/pktgen.h (about)

     1  /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
     2  /* Copyright Authors of Cilium */
     3  
     4  #pragma once
     5  
     6  #include <bpf/compiler.h>
     7  #include <bpf/builtins.h>
     8  #include <bpf/helpers.h>
     9  
    10  #include <lib/endian.h>
    11  #include <lib/tunnel.h>
    12  
    13  #include <linux/byteorder.h>
    14  #include <linux/ip.h>
    15  #include <linux/ipv6.h>
    16  #include <linux/in.h>
    17  #include <linux/if_arp.h>
    18  #include <linux/if_ether.h>
    19  #include <linux/tcp.h>
    20  #include <linux/udp.h>
    21  #include <linux/icmpv6.h>
    22  
    23  /* A collection of pre-defined Ethernet MAC addresses, so tests can reuse them
    24   * without having to come up with custom addresses.
    25   *
    26   * These are declared as volatile const to make them end up in .rodata. Cilium
    27   * inlines global data from .data into bytecode as immediate values for compat
    28   * with kernels before 5.2 that lack read-only map support. This test suite
    29   * doesn't make the same assumptions, so disable the static data inliner by
    30   * putting variables in another section.
    31   */
/* Six distinct unicast MACs plus an all-zero MAC, usable as src/dst in
 * pushed Ethernet headers.
 */
static volatile const __u8 mac_one[] =   {0xDE, 0xAD, 0xBE, 0xEF, 0xDE, 0xEF};
static volatile const __u8 mac_two[] =   {0x13, 0x37, 0x13, 0x37, 0x13, 0x37};
static volatile const __u8 mac_three[] = {0x31, 0x41, 0x59, 0x26, 0x35, 0x89};
static volatile const __u8 mac_four[] =  {0x0D, 0x1D, 0x22, 0x59, 0xA9, 0xC2};
static volatile const __u8 mac_five[] =  {0x15, 0x21, 0x39, 0x45, 0x4D, 0x5D};
static volatile const __u8 mac_six[] =   {0x08, 0x14, 0x1C, 0x32, 0x52, 0x7E};
/* All-zero MAC address */
static volatile const __u8 mac_zero[] =  {0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
    39  
/* A collection of pre-defined IP addresses, so tests can reuse them without
 * having to come up with custom IPs.
 */
    43  
/* Build an IPv4 address in network byte order from its four dotted-quad
 * components; each argument must fit in one byte (0-255).
 */
#define IPV4(a, b, c, d) __bpf_htonl(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))

/* IPv4 addresses for hosts, external to the cluster */
#define v4_ext_one	IPV4(110, 0, 11, 1)
#define v4_ext_two	IPV4(120, 0, 12, 2)
#define v4_ext_three	IPV4(130, 0, 13, 3)

/* IPv4 addresses for nodes in the cluster */
#define v4_node_one	IPV4(10, 0, 10, 1)
#define v4_node_two	IPV4(10, 0, 10, 2)
#define v4_node_three	IPV4(10, 0, 10, 3)

/* IPv4 addresses for services in the cluster */
#define v4_svc_one	IPV4(172, 16, 10, 1)
#define v4_svc_two	IPV4(172, 16, 10, 2)
#define v4_svc_three	IPV4(172, 16, 10, 3)

/* IPv4 addresses for pods in the cluster */
#define v4_pod_one	IPV4(192, 168, 0, 1)
#define v4_pod_two	IPV4(192, 168, 0, 2)
#define v4_pod_three	IPV4(192, 168, 0, 3)

/* Wildcard / unspecified address (0.0.0.0) */
#define v4_all	IPV4(0, 0, 0, 0)
    67  
    68  /* IPv6 addresses for pods in the cluster */
/* IPv6 addresses for pods in the cluster.
 * Placed in .rodata explicitly for the same reason as the MAC constants
 * above: keep them out of the static-data inliner's reach.
 */
static volatile const __section(".rodata") __u8 v6_pod_one[] = {0xfd, 0x04, 0, 0, 0, 0, 0, 0,
					   0, 0, 0, 0, 0, 0, 0, 1};
static volatile const __section(".rodata") __u8 v6_pod_two[] = {0xfd, 0x04, 0, 0, 0, 0, 0, 0,
					   0, 0, 0, 0, 0, 0, 0, 2};
static volatile const __section(".rodata") __u8 v6_pod_three[] = {0xfd, 0x04, 0, 0, 0, 0, 0, 0,
					   0, 0, 0, 0, 0, 0, 0, 3};

/* IPv6 addresses for nodes in the cluster */
static volatile const __section(".rodata") __u8 v6_node_one[] = {0xfd, 0x05, 0, 0, 0, 0, 0, 0,
					   0, 0, 0, 0, 0, 0, 0, 1};
static volatile const __section(".rodata") __u8 v6_node_two[] = {0xfd, 0x06, 0, 0, 0, 0, 0, 0,
					   0, 0, 0, 0, 0, 0, 0, 2};
static volatile const __section(".rodata") __u8 v6_node_three[] = {0xfd, 0x07, 0, 0, 0, 0, 0, 0,
					   0, 0, 0, 0, 0, 0, 0, 3};
    83  
/* Source ports to be used by a client (network byte order) */
#define tcp_src_one	__bpf_htons(22330)
#define tcp_src_two	__bpf_htons(33440)
#define tcp_src_three	__bpf_htons(44550)

/* Destination ports (network byte order) */
#define tcp_dst_one	__bpf_htons(22331)
#define tcp_dst_two	__bpf_htons(33441)
#define tcp_dst_three	__bpf_htons(44551)

/* Well-known service ports: HTTP, HTTPS, DNS (network byte order) */
#define tcp_svc_one	__bpf_htons(80)
#define tcp_svc_two	__bpf_htons(443)
#define tcp_svc_three	__bpf_htons(53)

/* Default packet payload; the literal text signals that tests expect it
 * to pass through the datapath unmodified.
 */
#define default_data "Should not change!!"
    98  
/* IPv6 next-header protocol numbers used when chaining headers. */
#define NEXTHDR_HOP             0       /* Hop-by-hop option header. */
#define NEXTHDR_TCP             6       /* TCP segment. */
#define NEXTHDR_UDP             17      /* UDP message. */
#define NEXTHDR_IPV6            41      /* IPv6 in IPv6 */
#define NEXTHDR_ROUTING         43      /* Routing header. */
#define NEXTHDR_FRAGMENT        44      /* Fragmentation/reassembly header. */
#define NEXTHDR_GRE             47      /* GRE header. */
#define NEXTHDR_ESP             50      /* Encapsulating security payload. */
#define NEXTHDR_AUTH            51      /* Authentication header. */
#define NEXTHDR_ICMP            58      /* ICMP for IPv6. */
#define NEXTHDR_NONE            59      /* No next header */
#define NEXTHDR_DEST            60      /* Destination options header. */
#define NEXTHDR_SCTP            132     /* SCTP message. */
#define NEXTHDR_MOBILITY        135     /* Mobility header. */

#define NEXTHDR_MAX             255
   115  
/* Define SCTP header here because this is all we need. */
struct sctphdr {
	__be16 source;		/* source port */
	__be16 dest;		/* destination port */
	__be32 vtag;		/* verification tag */
	__le32 checksum;	/* note: little-endian, unlike the fields above */
};
   123  
/* Define Ethernet variant ARP header: the fixed arphdr fields followed
 * by the Ethernet/IPv4 address payload. __packed so the layout matches
 * the on-wire format exactly.
 */
struct arphdreth {
	__be16		ar_hrd;		  /* format of hardware address	*/
	__be16		ar_pro;		  /* format of protocol address	*/
	unsigned char	ar_hln;		  /* length of hardware address	*/
	unsigned char	ar_pln;		  /* length of protocol address	*/
	__be16		ar_op;		  /* ARP opcode (command)	*/
	unsigned char	ar_sha[ETH_ALEN]; /* source ethernet address	*/
	__be32		ar_sip;		  /* source IPv4 address	*/
	unsigned char	ar_tha[ETH_ALEN]; /* target ethernet address	*/
	__be32		ar_tip;		  /* target IPv4 address	*/
} __packed;
   136  
/* Layer types the packet builder can record; the pktgen__finish_* helpers
 * use the type of layer i+1 to patch next-header/protocol fields of layer i.
 */
enum pkt_layer {
	PKT_LAYER_NONE,

	/* L2 layers */
	PKT_LAYER_ETH,
	PKT_LAYER_8021Q,

	/* L3 layers */
	PKT_LAYER_IPV4,
	PKT_LAYER_IPV6,
	PKT_LAYER_ARP,

	/* IPv6 extension headers */
	PKT_LAYER_IPV6_HOP_BY_HOP,
	PKT_LAYER_IPV6_AUTH,
	PKT_LAYER_IPV6_DEST,

	/* L4 layers */
	PKT_LAYER_TCP,
	PKT_LAYER_UDP,
	PKT_LAYER_ICMP,
	PKT_LAYER_ICMPV6,
	PKT_LAYER_SCTP,
	PKT_LAYER_ESP,

	/* Tunnel layers */
	PKT_LAYER_GENEVE,
	PKT_LAYER_VXLAN,

	/* Packet data */
	PKT_LAYER_DATA,
};
   169  
/* Default hop limit written into generated IPv6 headers. */
#define IPV6_DEFAULT_HOPLIMIT 64

/* 3 outer headers + {VXLAN, GENEVE} + 3 inner headers. */
#define PKT_BUILDER_LAYERS 7

/* Maximum offset from ctx->data that still fits in a u16. */
#define MAX_PACKET_OFF 0xffff
   176  
/* Packet builder: tracks the ctx being filled, the current write offset,
 * and the type and start offset of every layer pushed so far.
 */
struct pktgen {
	struct __ctx_buff *ctx;
	__u64 cur_off;					/* current end-of-packet offset */
	__u64 layer_offsets[PKT_BUILDER_LAYERS];	/* start offset of each layer */
	enum pkt_layer layers[PKT_BUILDER_LAYERS];	/* type of each layer slot */
};
   184  
   185  static __always_inline
   186  void pktgen__init(struct pktgen *builder, struct __ctx_buff *ctx)
   187  {
   188  	builder->cur_off = 0;
   189  	builder->ctx = ctx;
   190  	#pragma unroll
   191  	for (int i = 0; i < PKT_BUILDER_LAYERS; i++) {
   192  		builder->layers[i] = PKT_LAYER_NONE;
   193  		builder->layer_offsets[i] = 0;
   194  	}
   195  };
   196  
   197  static __always_inline
   198  int pktgen__free_layer(const struct pktgen *builder)
   199  {
   200  	#pragma unroll
   201  	for (int i = 0; i < PKT_BUILDER_LAYERS; i++) {
   202  		if (builder->layers[i] == PKT_LAYER_NONE)
   203  			return i;
   204  	}
   205  
   206  	return -1;
   207  }
   208  
/* Reserve hdrsize bytes at the current write offset and record them as a
 * layer of the given type.
 *
 * Grows the packet tailroom as needed, bounds-checks the resulting
 * pointer against ctx data_end and against MAX_PACKET_OFF, then claims
 * the next free layer slot and advances cur_off.
 *
 * Returns a pointer to the start of the reserved header, or NULL when
 * there is no room in the packet or in the layer table.
 */
static __always_inline
__attribute__((warn_unused_result))
void *pktgen__push_rawhdr(struct pktgen *builder, __u32 hdrsize, enum pkt_layer type)
{
	struct __ctx_buff *ctx = builder->ctx;
	void *layer = NULL;
	int layer_idx;

	/* Request additional tailroom, and check that we got it. */
	ctx_adjust_troom(ctx, builder->cur_off + hdrsize - ctx_full_len(ctx));
	if (ctx_data(ctx) + builder->cur_off + hdrsize > ctx_data_end(ctx))
		return NULL;

	/* Check that any value within the struct will not exceed a u16 which
	 * is the max allowed offset within a packet from ctx->data.
	 */
	if (builder->cur_off >= MAX_PACKET_OFF - hdrsize)
		return NULL;

	/* Re-derive the pointer and re-check bounds so the verifier can
	 * track that [layer, layer + hdrsize) is within the packet.
	 */
	layer = ctx_data(ctx) + builder->cur_off;
	if ((void *)layer + hdrsize > ctx_data_end(ctx))
		return NULL;

	layer_idx = pktgen__free_layer(builder);
	if (layer_idx < 0)
		return NULL;

	builder->layers[layer_idx] = type;
	builder->layer_offsets[layer_idx] = builder->cur_off;
	builder->cur_off += hdrsize;

	return layer;
}
   242  
   243  /* Push an empty ethernet header onto the packet */
   244  static __always_inline
   245  __attribute__((warn_unused_result))
   246  struct ethhdr *pktgen__push_ethhdr(struct pktgen *builder)
   247  {
   248  	return pktgen__push_rawhdr(builder, sizeof(struct ethhdr), PKT_LAYER_ETH);
   249  }
   250  
   251  /* helper to set the source and destination mac address at the same time */
   252  static __always_inline
   253  void ethhdr__set_macs(struct ethhdr *l2, unsigned char *src, unsigned char *dst)
   254  {
   255  	memcpy(l2->h_source, src, ETH_ALEN);
   256  	memcpy(l2->h_dest, dst, ETH_ALEN);
   257  }
   258  
   259  /* Push an empty IPv4 header onto the packet */
   260  static __always_inline
   261  __attribute__((warn_unused_result))
   262  struct iphdr *pktgen__push_iphdr(struct pktgen *builder, __u32 option_bytes)
   263  {
   264  	__u32 length = sizeof(struct iphdr) + option_bytes;
   265  
   266  	if (option_bytes > MAX_IPOPTLEN)
   267  		return 0;
   268  
   269  	return pktgen__push_rawhdr(builder, length, PKT_LAYER_IPV4);
   270  }
   271  
   272  /* helper to set the source and destination ipv6 address at the same time */
   273  static __always_inline
   274  void ipv6hdr__set_addrs(struct ipv6hdr *l3, __u8 *src, __u8 *dst)
   275  {
   276  	memcpy((__u8 *)&l3->saddr, src, 16);
   277  	memcpy((__u8 *)&l3->daddr, dst, 16);
   278  }
   279  
   280  static __always_inline
   281  __attribute__((warn_unused_result))
   282  struct ipv6hdr *pktgen__push_ipv6hdr(struct pktgen *builder)
   283  {
   284  	return pktgen__push_rawhdr(builder, sizeof(struct ipv6hdr), PKT_LAYER_IPV6);
   285  }
   286  
   287  /* Push a IPv4 header with sane defaults and options onto the packet */
   288  static __always_inline
   289  __attribute__((warn_unused_result))
   290  struct iphdr *pktgen__push_default_iphdr_with_options(struct pktgen *builder,
   291  						      __u8 option_words)
   292  {
   293  	__u32 length = option_words * 4;
   294  
   295  	struct iphdr *hdr = pktgen__push_iphdr(builder, length);
   296  
   297  	if (!hdr)
   298  		return NULL;
   299  
   300  	hdr->version = 4;
   301  	hdr->ihl = 5 + option_words;
   302  	hdr->ttl = 64;
   303  	/* No fragmentation by default */
   304  	hdr->frag_off = 0;
   305  
   306  	return hdr;
   307  }
   308  
   309  static __always_inline
   310  __attribute__((warn_unused_result))
   311  struct iphdr *pktgen__push_default_iphdr(struct pktgen *builder)
   312  {
   313  	return pktgen__push_default_iphdr_with_options(builder, 0);
   314  }
   315  
/* Append an IPv6 extension header of the given nexthdr type.
 *
 * For HOP and AUTH the length parameter is ignored and a fixed size is
 * used; for DEST the caller-supplied length (in bytes) is used. The
 * extension header's hdrlen field is filled in; its own nexthdr field is
 * patched later by pktgen__finish_ipv6_opt().
 *
 * Returns the new header, or NULL for unsupported types / no room.
 */
static __always_inline
__attribute__((warn_unused_result))
struct ipv6_opt_hdr *pktgen__append_ipv6_extension_header(struct pktgen *builder,
							  __u8 nexthdr,
							  __u8 length)
{
	struct ipv6_opt_hdr *hdr = NULL;
	__u8 hdrlen = 0;

	/* TODO improve */
	switch (nexthdr) {
	case NEXTHDR_HOP:
		/* Fixed 8 bytes; hdrlen stays 0 (units of 8 bytes past the first 8). */
		length = (0 + 1) << 3;
		hdr = pktgen__push_rawhdr(builder, length, PKT_LAYER_IPV6_HOP_BY_HOP);
		break;
	case NEXTHDR_AUTH:
		/* Fixed 16 bytes; AH hdrlen is in 4-byte units minus 2. */
		length = (2 + 2) << 2;
		hdr = pktgen__push_rawhdr(builder, length, PKT_LAYER_IPV6_AUTH);
		hdrlen = 2;
		break;
	case NEXTHDR_DEST:
		/* NOTE(review): length < 8 would make (length - 8) / 8 wrap in
		 * the __u8 assignment below — callers must pass a multiple of
		 * 8 that is >= 8. TODO confirm all call sites do.
		 */
		hdr = pktgen__push_rawhdr(builder, length, PKT_LAYER_IPV6_DEST);
		hdrlen = (length - 8) / 8;
		break;
	default:
		break;
	}

	if (!hdr)
		return NULL;

	hdr->hdrlen = hdrlen;

	return hdr;
}
   351  
   352  static __always_inline
   353  __attribute__((warn_unused_result))
   354  struct ipv6hdr *pktgen__push_default_ipv6hdr(struct pktgen *builder)
   355  {
   356  	struct ipv6hdr *hdr = pktgen__push_rawhdr(builder,
   357  			sizeof(struct ipv6hdr), PKT_LAYER_IPV6);
   358  
   359  	if (!hdr)
   360  		return NULL;
   361  
   362  	memset(hdr, 0, sizeof(struct ipv6hdr));
   363  	hdr->version = 6;
   364  	hdr->hop_limit = IPV6_DEFAULT_HOPLIMIT;
   365  
   366  	return hdr;
   367  }
   368  
   369  /* Push an empty ARP header onto the packet */
   370  static __always_inline
   371  __attribute__((warn_unused_result))
   372  struct arphdreth *pktgen__push_arphdr_ethernet(struct pktgen *builder)
   373  {
   374  	return pktgen__push_rawhdr(builder, sizeof(struct arphdreth), PKT_LAYER_ARP);
   375  }
   376  
   377  static __always_inline
   378  __attribute__((warn_unused_result))
   379  struct arphdreth *pktgen__push_default_arphdr_ethernet(struct pktgen *builder)
   380  {
   381  	struct arphdreth *arp = pktgen__push_arphdr_ethernet(builder);
   382  
   383  	if (!arp)
   384  		return NULL;
   385  
   386  	arp->ar_hrd = bpf_htons(ARPHRD_ETHER);
   387  	arp->ar_hln = ETH_ALEN;
   388  	arp->ar_pln = 4; /* Size of an IPv4 address */
   389  
   390  	return arp;
   391  }
   392  
   393  /* Push an empty TCP header onto the packet */
   394  static __always_inline
   395  __attribute__((warn_unused_result))
   396  struct tcphdr *pktgen__push_tcphdr(struct pktgen *builder)
   397  {
   398  	return pktgen__push_rawhdr(builder, sizeof(struct tcphdr), PKT_LAYER_TCP);
   399  }
   400  
   401  /* Push a TCP header with sane defaults onto the packet */
   402  static __always_inline
   403  __attribute__((warn_unused_result))
   404  struct tcphdr *pktgen__push_default_tcphdr(struct pktgen *builder)
   405  {
   406  	struct tcphdr *hdr = pktgen__push_tcphdr(builder);
   407  
   408  	if (!hdr)
   409  		return 0;
   410  
   411  	hdr->syn = 1;
   412  	hdr->seq = 123456;
   413  	hdr->window = 65535;
   414  
   415  	/* In most cases the doff is 5, so a good default if we can't
   416  	 * calc the actual offset
   417  	 */
   418  	hdr->doff = 5;
   419  
   420  	return hdr;
   421  }
   422  
   423  static __always_inline
   424  __attribute__((warn_unused_result))
   425  struct icmp6hdr *pktgen__push_icmp6hdr(struct pktgen *builder)
   426  {
   427  	return pktgen__push_rawhdr(builder, sizeof(struct icmp6hdr), PKT_LAYER_ICMPV6);
   428  }
   429  
   430  /* Push an empty ESP header onto the packet */
   431  static __always_inline
   432  __attribute__((warn_unused_result))
   433  struct ip_esp_hdr *pktgen__push_esphdr(struct pktgen *builder)
   434  {
   435  	return pktgen__push_rawhdr(builder, sizeof(struct ip_esp_hdr), PKT_LAYER_ESP);
   436  }
   437  
   438  /* Push a ESP header with sane defaults onto the packet */
   439  static __always_inline
   440  __attribute__((warn_unused_result))
   441  struct ip_esp_hdr *pktgen__push_default_esphdr(struct pktgen *builder)
   442  {
   443  	struct ip_esp_hdr *hdr = pktgen__push_esphdr(builder);
   444  
   445  	if (!hdr)
   446  		return 0;
   447  
   448  	hdr->spi = 1;
   449  	hdr->seq_no = 10000;
   450  
   451  	return hdr;
   452  }
   453  
   454  /* Push an empty SCTP header onto the packet */
   455  static __always_inline
   456  __attribute__((warn_unused_result))
   457  struct sctphdr *pktgen__push_sctphdr(struct pktgen *builder)
   458  {
   459  	return pktgen__push_rawhdr(builder, sizeof(struct sctphdr), PKT_LAYER_SCTP);
   460  }
   461  
   462  /* Push an empty UDP header onto the packet */
   463  static __always_inline
   464  __attribute__((warn_unused_result))
   465  struct udphdr *pktgen__push_udphdr(struct pktgen *builder)
   466  {
   467  	return pktgen__push_rawhdr(builder, sizeof(struct udphdr), PKT_LAYER_UDP);
   468  }
   469  
   470  static __always_inline
   471  __attribute__((warn_unused_result))
   472  struct udphdr *pktgen__push_default_udphdr(struct pktgen *builder)
   473  {
   474  	struct udphdr *hdr = pktgen__push_udphdr(builder);
   475  
   476  	if (!hdr)
   477  		return NULL;
   478  
   479  	memset(hdr, 0, sizeof(*hdr));
   480  
   481  	return hdr;
   482  }
   483  
   484  /* Push an empty VXLAN header onto the packet */
   485  static __always_inline
   486  __attribute__((warn_unused_result))
   487  struct vxlanhdr *pktgen__push_vxlanhdr(struct pktgen *builder)
   488  {
   489  	return pktgen__push_rawhdr(builder, sizeof(struct vxlanhdr), PKT_LAYER_VXLAN);
   490  }
   491  
   492  static __always_inline
   493  __attribute__((warn_unused_result))
   494  struct vxlanhdr *pktgen__push_default_vxlanhdr(struct pktgen *builder)
   495  {
   496  	struct vxlanhdr *hdr = pktgen__push_vxlanhdr(builder);
   497  
   498  	if (!hdr)
   499  		return NULL;
   500  
   501  	memset(hdr, 0, sizeof(*hdr));
   502  
   503  	hdr->vx_flags = bpf_htonl(1U << 27);
   504  
   505  	return hdr;
   506  }
   507  
   508  /* Push an empty GENEVE header onto the packet */
   509  static __always_inline
   510  __attribute__((warn_unused_result))
   511  struct genevehdr *pktgen__push_genevehdr(struct pktgen *builder,
   512  					 __u8 option_bytes)
   513  {
   514  	__u32 length = sizeof(struct genevehdr) + option_bytes;
   515  
   516  	return pktgen__push_rawhdr(builder, length, PKT_LAYER_GENEVE);
   517  }
   518  
   519  static __always_inline
   520  __attribute__((warn_unused_result))
   521  struct genevehdr *pktgen__push_default_genevehdr_with_options(struct pktgen *builder,
   522  							      __u8 option_bytes)
   523  {
   524  	struct genevehdr *hdr = pktgen__push_genevehdr(builder, option_bytes);
   525  
   526  	if (!hdr)
   527  		return NULL;
   528  
   529  	memset(hdr, 0, sizeof(*hdr) + option_bytes);
   530  
   531  	return hdr;
   532  }
   533  
   534  static __always_inline
   535  __attribute__((warn_unused_result))
   536  struct genevehdr *pktgen__push_default_genevehdr(struct pktgen *builder)
   537  {
   538  	struct genevehdr *hdr = pktgen__push_default_genevehdr_with_options(builder, 0);
   539  
   540  	if (!hdr)
   541  		return NULL;
   542  
   543  	memset(hdr, 0, sizeof(*hdr));
   544  
   545  	return hdr;
   546  }
   547  
/* Push room for len bytes of (uninitialized) data onto the packet.
 *
 * Mirrors pktgen__push_rawhdr(): grow the tailroom, bounds-check against
 * data_end and MAX_PACKET_OFF, then record the region as a
 * PKT_LAYER_DATA layer. Returns a pointer to the start of the room, or
 * 0 (NULL) when there is no room in the packet or the layer table.
 */
static __always_inline
__attribute__((warn_unused_result))
void *pktgen__push_data_room(struct pktgen *builder, int len)
{
	struct __ctx_buff *ctx = builder->ctx;
	void *layer;
	int layer_idx;

	/* Request additional tailroom, and check that we got it. */
	ctx_adjust_troom(ctx, builder->cur_off + len - ctx_full_len(ctx));
	if (ctx_data(ctx) + builder->cur_off + len > ctx_data_end(ctx))
		return 0;

	/* Check that any value within the struct will not exceed a u16 which
	 * is the max allowed offset within a packet from ctx->data.
	 */
	if ((__s64)builder->cur_off >= MAX_PACKET_OFF - len)
		return 0;

	layer = ctx_data(ctx) + builder->cur_off;
	layer_idx = pktgen__free_layer(builder);

	if (layer_idx < 0)
		return 0;

	builder->layers[layer_idx] = PKT_LAYER_DATA;
	builder->layer_offsets[layer_idx] = builder->cur_off;
	builder->cur_off += len;

	return layer;
}
   580  
   581  /* Push data onto the packet */
   582  static __always_inline
   583  __attribute__((warn_unused_result))
   584  void *pktgen__push_data(struct pktgen *builder, void *data, int len)
   585  {
   586  	void *pkt_data = pktgen__push_data_room(builder, len);
   587  
   588  	if (!pkt_data)
   589  		return 0;
   590  	if (pkt_data + len > ctx_data_end(builder->ctx))
   591  		return 0;
   592  
   593  	memcpy(pkt_data, data, len);
   594  
   595  	return pkt_data;
   596  }
   597  
   598  static __always_inline struct iphdr *
   599  pktgen__push_ipv4_packet(struct pktgen *builder,
   600  			 __u8 *smac, __u8 *dmac,
   601  			 __be32 saddr, __be32 daddr)
   602  {
   603  	struct ethhdr *l2;
   604  	struct iphdr *l3;
   605  
   606  	l2 = pktgen__push_ethhdr(builder);
   607  	if (!l2)
   608  		return NULL;
   609  
   610  	ethhdr__set_macs(l2, smac, dmac);
   611  
   612  	l3 = pktgen__push_default_iphdr(builder);
   613  	if (!l3)
   614  		return NULL;
   615  
   616  	l3->saddr = saddr;
   617  	l3->daddr = daddr;
   618  
   619  	return l3;
   620  }
   621  
   622  static __always_inline struct tcphdr *
   623  pktgen__push_ipv4_tcp_packet(struct pktgen *builder,
   624  			     __u8 *smac, __u8 *dmac,
   625  			     __be32 saddr, __be32 daddr,
   626  			     __be16 sport, __be16 dport)
   627  {
   628  	struct tcphdr *l4;
   629  	struct iphdr *l3;
   630  
   631  	l3 = pktgen__push_ipv4_packet(builder, smac, dmac, saddr, daddr);
   632  	if (!l3)
   633  		return NULL;
   634  
   635  	l4 = pktgen__push_default_tcphdr(builder);
   636  	if (!l4)
   637  		return NULL;
   638  
   639  	l4->source = sport;
   640  	l4->dest = dport;
   641  
   642  	return l4;
   643  }
   644  
   645  static __always_inline struct udphdr *
   646  pktgen__push_ipv4_udp_packet(struct pktgen *builder,
   647  			     __u8 *smac, __u8 *dmac,
   648  			     __be32 saddr, __be32 daddr,
   649  			     __be16 sport, __be16 dport)
   650  {
   651  	struct udphdr *l4;
   652  	struct iphdr *l3;
   653  
   654  	l3 = pktgen__push_ipv4_packet(builder, smac, dmac, saddr, daddr);
   655  	if (!l3)
   656  		return NULL;
   657  
   658  	l4 = pktgen__push_default_udphdr(builder);
   659  	if (!l4)
   660  		return NULL;
   661  
   662  	l4->source = sport;
   663  	l4->dest = dport;
   664  
   665  	return l4;
   666  }
   667  
   668  static __always_inline struct vxlanhdr *
   669  pktgen__push_ipv4_vxlan_packet(struct pktgen *builder,
   670  			       __u8 *smac, __u8 *dmac,
   671  			       __be32 saddr, __be32 daddr,
   672  			       __be16 sport, __be16 dport)
   673  {
   674  	struct udphdr *l4;
   675  
   676  	l4 = pktgen__push_ipv4_udp_packet(builder, smac, dmac, saddr, daddr,
   677  					  sport, dport);
   678  	if (!l4)
   679  		return NULL;
   680  
   681  	return pktgen__push_default_vxlanhdr(builder);
   682  }
   683  
   684  static __always_inline struct tcphdr *
   685  pktgen__push_ipv6_tcp_packet(struct pktgen *builder,
   686  			     __u8 *smac, __u8 *dmac,
   687  			     __u8 *saddr, __u8 *daddr,
   688  			     __be16 sport, __be16 dport)
   689  {
   690  	struct ipv6hdr *l3;
   691  	struct tcphdr *l4;
   692  	struct ethhdr *l2;
   693  
   694  	l2 = pktgen__push_ethhdr(builder);
   695  	if (!l2)
   696  		return NULL;
   697  
   698  	ethhdr__set_macs(l2, smac, dmac);
   699  
   700  	l3 = pktgen__push_default_ipv6hdr(builder);
   701  	if (!l3)
   702  		return NULL;
   703  
   704  	ipv6hdr__set_addrs(l3, saddr, daddr);
   705  
   706  	l4 = pktgen__push_default_tcphdr(builder);
   707  	if (!l4)
   708  		return NULL;
   709  
   710  	l4->source = sport;
   711  	l4->dest = dport;
   712  
   713  	return l4;
   714  }
   715  
   716  static __always_inline struct icmp6hdr *
   717  pktgen__push_ipv6_icmp6_packet(struct pktgen *builder,
   718  			       __u8 *smac, __u8 *dmac,
   719  			       __u8 *saddr, __u8 *daddr,
   720  			       __u8 icmp6_type)
   721  {
   722  	struct ethhdr *l2;
   723  	struct ipv6hdr *l3;
   724  	struct icmp6hdr *l4;
   725  
   726  	l2 = pktgen__push_ethhdr(builder);
   727  	if (!l2)
   728  		return NULL;
   729  
   730  	ethhdr__set_macs(l2, smac, dmac);
   731  
   732  	l3 = pktgen__push_default_ipv6hdr(builder);
   733  	if (!l3)
   734  		return NULL;
   735  
   736  	ipv6hdr__set_addrs(l3, saddr, daddr);
   737  
   738  	l4 = pktgen__push_icmp6hdr(builder);
   739  	if (!l4)
   740  		return NULL;
   741  
   742  	l4->icmp6_type = icmp6_type;
   743  	l4->icmp6_code = 0;
   744  	l4->icmp6_cksum = 0;
   745  
   746  	return l4;
   747  }
   748  
/* Patch up the Ethernet header recorded at layer slot i: set h_proto
 * according to the type of the layer that follows it. Silently does
 * nothing if the header is out of bounds or there is no next layer.
 */
static __always_inline void pktgen__finish_eth(const struct pktgen *builder, int i)
{
	struct ethhdr *eth_layer;
	__u64 layer_off;

	layer_off = builder->layer_offsets[i];
	/* Check that any value within the struct will not exceed a u16 which
	 * is the max allowed offset within a packet from ctx->data.
	 */
	if (layer_off >= MAX_PACKET_OFF - sizeof(struct ethhdr))
		return;

	eth_layer = ctx_data(builder->ctx) + layer_off;
	if ((void *)eth_layer + sizeof(struct ethhdr) > ctx_data_end(builder->ctx))
		return;

	if (i + 1 >= PKT_BUILDER_LAYERS)
		return;

	/* Set the proper next hdr value */
	switch (builder->layers[i + 1]) {
	case PKT_LAYER_IPV4:
		eth_layer->h_proto = __bpf_htons(ETH_P_IP);
		break;
	case PKT_LAYER_IPV6:
		eth_layer->h_proto = __bpf_htons(ETH_P_IPV6);
		break;
	case PKT_LAYER_ARP:
		eth_layer->h_proto = __bpf_htons(ETH_P_ARP);
		break;
	default:
		/* Unknown follower: leave h_proto untouched. */
		break;
	}
}
   783  
/* Patch up the IPv4 header recorded at layer slot i: set protocol from
 * the following layer, then fill in tot_len and the header checksum.
 */
static __always_inline void pktgen__finish_ipv4(const struct pktgen *builder, int i)
{
	struct iphdr *ipv4_layer;
	__u64 layer_off;
	__u16 v4len;

	layer_off = builder->layer_offsets[i];
	/* Check that any value within the struct will not exceed a u16 which
	 * is the max allowed offset within a packet from ctx->data.
	 */
	if (layer_off >= MAX_PACKET_OFF - sizeof(struct iphdr))
		return;

	ipv4_layer = ctx_data(builder->ctx) + layer_off;
	if ((void *)ipv4_layer + sizeof(struct iphdr) > ctx_data_end(builder->ctx))
		return;

	if (i + 1 >= PKT_BUILDER_LAYERS)
		return;

	switch (builder->layers[i + 1]) {
	case PKT_LAYER_TCP:
		ipv4_layer->protocol = IPPROTO_TCP;
		break;
	case PKT_LAYER_UDP:
		ipv4_layer->protocol = IPPROTO_UDP;
		break;
	case PKT_LAYER_ICMP:
		ipv4_layer->protocol = IPPROTO_ICMP;
		break;
	case PKT_LAYER_SCTP:
		ipv4_layer->protocol = IPPROTO_SCTP;
		break;
	case PKT_LAYER_ESP:
		ipv4_layer->protocol = IPPROTO_ESP;
		break;
	default:
		break;
	}

	v4len = (__be16)(builder->cur_off - builder->layer_offsets[i]);
	/* Calculate total length, which is IPv4 hdr + all layers after it */
	ipv4_layer->tot_len = __bpf_htons(v4len);
	/* NOTE(review): the checksum covers only sizeof(struct iphdr)
	 * (20 bytes) and is computed over the header's current contents —
	 * IPv4 options, if pushed, are excluded, and hdr->check is assumed
	 * to be zero beforehand. TODO confirm both are intended.
	 */
	ipv4_layer->check = csum_fold(csum_diff(NULL, 0, ipv4_layer, sizeof(struct iphdr), 0));
}
   829  
   830  static __always_inline void pktgen__finish_ipv6(const struct pktgen *builder, int i)
   831  {
   832  	struct ipv6hdr *ipv6_layer;
   833  	__u64 layer_off;
   834  	__u16 v6len;
   835  
   836  	layer_off = builder->layer_offsets[i];
   837  	/* Check that any value within the struct will not exceed a u16 which
   838  	 * is the max allowed offset within a packet from ctx->data.
   839  	 */
   840  	if (layer_off >= MAX_PACKET_OFF - sizeof(struct ipv6hdr))
   841  		return;
   842  
   843  	ipv6_layer = ctx_data(builder->ctx) + builder->layer_offsets[i];
   844  	if ((void *)ipv6_layer + sizeof(struct ipv6hdr) >
   845  		ctx_data_end(builder->ctx))
   846  		return;
   847  
   848  	if (i + 1 >= PKT_BUILDER_LAYERS)
   849  		return;
   850  
   851  	switch (builder->layers[i + 1]) {
   852  	case PKT_LAYER_IPV6_HOP_BY_HOP:
   853  		ipv6_layer->nexthdr = NEXTHDR_HOP;
   854  		break;
   855  	case PKT_LAYER_IPV6_AUTH:
   856  		ipv6_layer->nexthdr = NEXTHDR_AUTH;
   857  		break;
   858  	case PKT_LAYER_IPV6_DEST:
   859  		ipv6_layer->nexthdr = NEXTHDR_DEST;
   860  		break;
   861  	case PKT_LAYER_TCP:
   862  		ipv6_layer->nexthdr = IPPROTO_TCP;
   863  		break;
   864  	case PKT_LAYER_UDP:
   865  		ipv6_layer->nexthdr = IPPROTO_UDP;
   866  		break;
   867  	case PKT_LAYER_ICMPV6:
   868  		ipv6_layer->nexthdr = IPPROTO_ICMPV6;
   869  		break;
   870  	case PKT_LAYER_SCTP:
   871  		ipv6_layer->nexthdr = IPPROTO_SCTP;
   872  		break;
   873  	case PKT_LAYER_ESP:
   874  		ipv6_layer->nexthdr = IPPROTO_ESP;
   875  		break;
   876  	default:
   877  		break;
   878  	}
   879  
   880  	v6len = (__be16)(builder->cur_off + sizeof(struct ipv6hdr) -
   881  		builder->layer_offsets[i]);
   882  
   883  	/* Calculate payload length, which doesn't include the header size */
   884  	ipv6_layer->payload_len = __bpf_htons(v6len);
   885  }
   886  
/* Patch up the IPv6 extension header recorded at layer slot i: set its
 * nexthdr field from the type of the layer that follows it.
 */
static __always_inline void pktgen__finish_ipv6_opt(const struct pktgen *builder, int i)
{
	struct ipv6_opt_hdr *ipv6_opt_layer;
	__u64 layer_off;

	layer_off = builder->layer_offsets[i];
	/* Keep the offset within the u16 packet-offset limit. */
	if (layer_off >= MAX_PACKET_OFF - sizeof(struct ipv6_opt_hdr))
		return;

	ipv6_opt_layer = ctx_data(builder->ctx) + layer_off;
	if ((void *)(ipv6_opt_layer + 1) > ctx_data_end(builder->ctx))
		return;

	if (i + 1 >= PKT_BUILDER_LAYERS)
		return;

	switch (builder->layers[i + 1]) {
	case PKT_LAYER_IPV6_HOP_BY_HOP:
		ipv6_opt_layer->nexthdr = NEXTHDR_HOP;
		break;
	case PKT_LAYER_IPV6_AUTH:
		ipv6_opt_layer->nexthdr = NEXTHDR_AUTH;
		break;
	case PKT_LAYER_IPV6_DEST:
		ipv6_opt_layer->nexthdr = NEXTHDR_DEST;
		break;
	case PKT_LAYER_TCP:
		ipv6_opt_layer->nexthdr = IPPROTO_TCP;
		break;
	case PKT_LAYER_UDP:
		ipv6_opt_layer->nexthdr = IPPROTO_UDP;
		break;
	case PKT_LAYER_ICMPV6:
		ipv6_opt_layer->nexthdr = IPPROTO_ICMPV6;
		break;
	case PKT_LAYER_SCTP:
		ipv6_opt_layer->nexthdr = IPPROTO_SCTP;
		break;
	default:
		break;
	}
}
   929  
/* Patch up the TCP header recorded at layer slot i: recompute doff (the
 * data offset, i.e. header length in 32-bit words) from the distance to
 * the next layer, or to the end of the packet if nothing follows.
 */
static __always_inline void pktgen__finish_tcp(const struct pktgen *builder, int i)
{
	struct tcphdr *tcp_layer;
	__u64 layer_off;
	__u64 hdr_size;

	layer_off = builder->layer_offsets[i];
	/* Check that any value within the struct will not exceed a u16 which
	 * is the max allowed offset within a packet from ctx->data.
	 */
	if (layer_off >= MAX_PACKET_OFF - sizeof(struct tcphdr))
		return;

	tcp_layer = ctx_data(builder->ctx) + layer_off;
	if ((void *)tcp_layer + sizeof(struct tcphdr) >
		ctx_data_end(builder->ctx))
		return;

	if (i + 1 >= PKT_BUILDER_LAYERS)
		return;

	/* Calculate the data offset, this is the diff between start of header
	 * and start of data in 32-bit words (bytes divided by 4).
	 */

	if (builder->layers[i + 1] == PKT_LAYER_NONE) {
		/* If no data or next header exists, calc using the current offset */
		hdr_size = builder->cur_off - builder->layer_offsets[i];
	} else {
		hdr_size = builder->layer_offsets[i + 1] -
				builder->layer_offsets[i];
	}

	tcp_layer->doff = (__u16)hdr_size / 4;
}
   965  
   966  static __always_inline void pktgen__finish_udp(const struct pktgen *builder, int i)
   967  {
   968  	struct udphdr *udp_layer;
   969  	__u64 layer_off;
   970  
   971  	layer_off = builder->layer_offsets[i];
   972  	/* Check that any value within the struct will not exceed a u16 which
   973  	 * is the max allowed offset within a packet from ctx->data.
   974  	 */
   975  	if (layer_off >= MAX_PACKET_OFF - sizeof(struct udphdr))
   976  		return;
   977  
   978  	udp_layer = ctx_data(builder->ctx) + layer_off;
   979  	if ((void *)udp_layer + sizeof(struct udphdr) >
   980  		ctx_data_end(builder->ctx))
   981  		return;
   982  }
   983  
   984  static __always_inline void pktgen__finish_geneve(const struct pktgen *builder, int i)
   985  {
   986  	struct genevehdr *geneve_layer;
   987  	__u64 layer_off;
   988  
   989  	layer_off = builder->layer_offsets[i];
   990  	/* Check that any value within the struct will not exceed a u16 which
   991  	 * is the max allowed offset within a packet from ctx->data.
   992  	 */
   993  	if (layer_off >= MAX_PACKET_OFF - sizeof(struct genevehdr))
   994  		return;
   995  
   996  	geneve_layer = ctx_data(builder->ctx) + layer_off;
   997  	if ((void *)geneve_layer + sizeof(struct genevehdr) >
   998  		ctx_data_end(builder->ctx))
   999  		return;
  1000  
  1001  	if (i + 1 >= PKT_BUILDER_LAYERS)
  1002  		return;
  1003  
  1004  	switch (builder->layers[i + 1]) {
  1005  	case PKT_LAYER_ETH:
  1006  		geneve_layer->protocol_type = __bpf_htons(ETH_P_TEB);
  1007  		break;
  1008  	default:
  1009  		break;
  1010  	}
  1011  }
  1012  
  1013  /* Do a finishing pass on all the layers, which will set correct next layer
  1014   * fields and length values. TODO checksum calculation?
  1015   */
  1016  static __always_inline
  1017  void pktgen__finish(const struct pktgen *builder)
  1018  {
  1019  	#pragma unroll
  1020  	for (int i = 0; i < PKT_BUILDER_LAYERS; i++) {
  1021  		switch (builder->layers[i]) {
  1022  		case PKT_LAYER_NONE:
  1023  			/* A none signals the end of the layer stack */
  1024  			return;
  1025  
  1026  		case PKT_LAYER_ETH:
  1027  			pktgen__finish_eth(builder, i);
  1028  			break;
  1029  
  1030  		case PKT_LAYER_8021Q:
  1031  			/* TODO set next protocol once 802.1Q is added */
  1032  			break;
  1033  
  1034  		case PKT_LAYER_IPV4:
  1035  			pktgen__finish_ipv4(builder, i);
  1036  			break;
  1037  
  1038  		case PKT_LAYER_IPV6:
  1039  			pktgen__finish_ipv6(builder, i);
  1040  			break;
  1041  
  1042  		case PKT_LAYER_IPV6_HOP_BY_HOP:
  1043  		case PKT_LAYER_IPV6_AUTH:
  1044  		case PKT_LAYER_IPV6_DEST:
  1045  			pktgen__finish_ipv6_opt(builder, i);
  1046  			break;
  1047  
  1048  		case PKT_LAYER_TCP:
  1049  			pktgen__finish_tcp(builder, i);
  1050  			break;
  1051  
  1052  		case PKT_LAYER_ESP:
  1053  			/* No sizes or checksums for ESP, so nothing to do */
  1054  			break;
  1055  
  1056  		case PKT_LAYER_ARP:
  1057  			/* No sizes or checksums for ARP, so nothing to do */
  1058  			break;
  1059  
  1060  		case PKT_LAYER_UDP:
  1061  			pktgen__finish_udp(builder, i);
  1062  			break;
  1063  
  1064  		case PKT_LAYER_ICMP:
  1065  			/* TODO implement checksum calc? */
  1066  			break;
  1067  
  1068  		case PKT_LAYER_ICMPV6:
  1069  			/* TODO implement checksum calc? */
  1070  			break;
  1071  
  1072  		case PKT_LAYER_SCTP:
  1073  			/* TODO implement checksum calc */
  1074  			break;
  1075  
  1076  		case PKT_LAYER_GENEVE:
  1077  			pktgen__finish_geneve(builder, i);
  1078  			break;
  1079  
  1080  		case PKT_LAYER_VXLAN:
  1081  			break;
  1082  
  1083  		case PKT_LAYER_DATA:
  1084  			/* User defined data, nothing to do */
  1085  			break;
  1086  		}
  1087  	}
  1088  };