github.com/cilium/cilium@v1.16.2/bpf/lib/conntrack_map.h

/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright Authors of Cilium */

#pragma once

#include "common.h"
#include "config.h"

#if defined(CT_MAP_TCP4) && defined(CT_MAP_TCP6)

#ifdef ENABLE_IPV6
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, struct ipv6_ct_tuple);
	__type(value, struct ct_entry);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, CT_MAP_SIZE_TCP);
} CT_MAP_TCP6 __section_maps_btf;

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, struct ipv6_ct_tuple);
	__type(value, struct ct_entry);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, CT_MAP_SIZE_ANY);
} CT_MAP_ANY6 __section_maps_btf;

#ifdef ENABLE_CLUSTER_AWARE_ADDRESSING
/*
 * Per-cluster conntrack maps
 *
 * When IP addresses overlap between clusters, we need to identify
 * network endpoints by IP address + cluster ID. We wanted to add a
 * cluster_id field to struct ip{v4,v6}_ct_tuple, but there were not
 * enough spare bits, and we cannot change the key type of the existing
 * conntrack maps. Instead, we keep a separate conntrack instance per
 * cluster, so that endpoints sharing the same IP but belonging to
 * different clusters can be distinguished by the conntrack instance
 * used for the lookup. Illustrative lookup sketches follow the helper
 * functions below.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__type(key, __u32);
	__type(value, __u32);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, 256);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_LRU_HASH);
		__type(key, struct ipv6_ct_tuple);
		__type(value, struct ct_entry);
		__uint(max_entries, CT_MAP_SIZE_TCP);
	});
} PER_CLUSTER_CT_TCP6 __section_maps_btf;

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__type(key, __u32);
	__type(value, __u32);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, 256);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_LRU_HASH);
		__type(key, struct ipv6_ct_tuple);
		__type(value, struct ct_entry);
		__uint(max_entries, CT_MAP_SIZE_ANY);
	});
} PER_CLUSTER_CT_ANY6 __section_maps_btf;
#endif

static __always_inline void *
get_ct_map6(const struct ipv6_ct_tuple *tuple)
{
	if (tuple->nexthdr == IPPROTO_TCP)
		return &CT_MAP_TCP6;

	return &CT_MAP_ANY6;
}

static __always_inline void *
get_cluster_ct_map6(const struct ipv6_ct_tuple *tuple, __u32 cluster_id __maybe_unused)
{
#ifdef ENABLE_CLUSTER_AWARE_ADDRESSING
	if (cluster_id != 0 && cluster_id != CLUSTER_ID) {
		if (tuple->nexthdr == IPPROTO_TCP)
			return map_lookup_elem(&PER_CLUSTER_CT_TCP6, &cluster_id);

		return map_lookup_elem(&PER_CLUSTER_CT_ANY6, &cluster_id);
	}
#endif

	return get_ct_map6(tuple);
}

static __always_inline void *
get_cluster_ct_any_map6(__u32 cluster_id __maybe_unused)
{
#ifdef ENABLE_CLUSTER_AWARE_ADDRESSING
	if (cluster_id != 0 && cluster_id != CLUSTER_ID)
		return map_lookup_elem(&PER_CLUSTER_CT_ANY6, &cluster_id);
#endif
	return &CT_MAP_ANY6;
}
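
/*
 * Illustrative sketch only, not used by the datapath: a caller is
 * expected to pick the map with one of the helpers above and then look
 * the CT tuple up in it directly. The __example_* name below is
 * hypothetical.
 */
static __always_inline struct ct_entry *
__example_ct_lookup6(const struct ipv6_ct_tuple *tuple, __u32 cluster_id)
{
	void *map = get_cluster_ct_map6(tuple, cluster_id);

	/* map can be NULL when the per-cluster outer map has no inner
	 * map installed for this cluster_id.
	 */
	if (!map)
		return NULL;

	return map_lookup_elem(map, tuple);
}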
#endif

#ifdef ENABLE_IPV4
struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, struct ipv4_ct_tuple);
	__type(value, struct ct_entry);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, CT_MAP_SIZE_TCP);
} CT_MAP_TCP4 __section_maps_btf;

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, struct ipv4_ct_tuple);
	__type(value, struct ct_entry);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, CT_MAP_SIZE_ANY);
} CT_MAP_ANY4 __section_maps_btf;

#ifdef ENABLE_CLUSTER_AWARE_ADDRESSING
struct per_cluster_ct_map4_inner_map {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, struct ipv4_ct_tuple);
	__type(value, struct ct_entry);
	__uint(max_entries, CT_MAP_SIZE_TCP);
#ifndef BPF_TEST
};
#else
} per_cluster_ct_tcp4_1 __section_maps_btf,
  per_cluster_ct_tcp4_2 __section_maps_btf,
  per_cluster_ct_any4_1 __section_maps_btf,
  per_cluster_ct_any4_2 __section_maps_btf;
#endif

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__type(key, __u32);
	__type(value, __u32);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, 256); /* Keep this in sync with ClusterIDMax */
	__array(values, struct per_cluster_ct_map4_inner_map);
#ifndef BPF_TEST
} PER_CLUSTER_CT_TCP4 __section_maps_btf;
#else
} PER_CLUSTER_CT_TCP4 __section_maps_btf = {
	.values = {
		[1] = &per_cluster_ct_tcp4_1,
		[2] = &per_cluster_ct_tcp4_2,
	},
};
#endif

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__type(key, __u32);
	__type(value, __u32);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, 256); /* Keep this in sync with ClusterIDMax */
	__array(values, struct per_cluster_ct_map4_inner_map);
#ifndef BPF_TEST
} PER_CLUSTER_CT_ANY4 __section_maps_btf;
#else
} PER_CLUSTER_CT_ANY4 __section_maps_btf = {
	.values = {
		[1] = &per_cluster_ct_any4_1,
		[2] = &per_cluster_ct_any4_2,
	},
};
#endif
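
/*
 * Illustrative sketch only, not used by the datapath: the per-cluster
 * maps above are map-in-map objects, so a conntrack lookup against them
 * is two-staged. The __example_* name below is hypothetical.
 */
static __always_inline struct ct_entry *
__example_per_cluster_ct_lookup4(const struct ipv4_ct_tuple *tuple,
				 __u32 cluster_id)
{
	void *inner;

	/* Stage 1: cluster_id selects the inner LRU hash from the outer
	 * BPF_MAP_TYPE_ARRAY_OF_MAPS; NULL means no inner map has been
	 * installed for this cluster.
	 */
	inner = map_lookup_elem(&PER_CLUSTER_CT_TCP4, &cluster_id);
	if (!inner)
		return NULL;

	/* Stage 2: the CT tuple is looked up in that cluster's map. */
	return map_lookup_elem(inner, tuple);
}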
#endif

static __always_inline void *
get_ct_map4(const struct ipv4_ct_tuple *tuple)
{
	if (tuple->nexthdr == IPPROTO_TCP)
		return &CT_MAP_TCP4;

	return &CT_MAP_ANY4;
}

static __always_inline void *
get_cluster_ct_map4(const struct ipv4_ct_tuple *tuple, __u32 cluster_id __maybe_unused)
{
#ifdef ENABLE_CLUSTER_AWARE_ADDRESSING
	if (cluster_id != 0 && cluster_id != CLUSTER_ID) {
		if (tuple->nexthdr == IPPROTO_TCP)
			return map_lookup_elem(&PER_CLUSTER_CT_TCP4, &cluster_id);

		return map_lookup_elem(&PER_CLUSTER_CT_ANY4, &cluster_id);
	}
#endif

	return get_ct_map4(tuple);
}

static __always_inline void *
get_cluster_ct_any_map4(__u32 cluster_id __maybe_unused)
{
#ifdef ENABLE_CLUSTER_AWARE_ADDRESSING
	if (cluster_id != 0 && cluster_id != CLUSTER_ID)
		return map_lookup_elem(&PER_CLUSTER_CT_ANY4, &cluster_id);
#endif
	return &CT_MAP_ANY4;
}
#endif
#endif