github.com/cilium/cilium@v1.16.2/bpf/lib/maps.h

/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright Authors of Cilium */

#pragma once

#include "common.h"
#include "ipv6.h"
#include "ids.h"

#include "bpf/compiler.h"

/* Map of local endpoints, keyed by IP address (struct endpoint_key) */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct endpoint_key);
	__type(value, struct endpoint_info);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, ENDPOINTS_MAP_SIZE);
	__uint(map_flags, CONDITIONAL_PREALLOC);
} ENDPOINTS_MAP __section_maps_btf;

/* Per-CPU map of datapath metrics (forward/drop counters) */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__type(key, struct metrics_key);
	__type(value, struct metrics_value);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, METRICS_MAP_SIZE);
	__uint(map_flags, CONDITIONAL_PREALLOC);
} METRICS_MAP __section_maps_btf;

#ifndef SKIP_POLICY_MAP
/* Global map to jump into policy enforcement of receiving endpoint */
struct bpf_elf_map __section_maps POLICY_CALL_MAP = {
	.type		= BPF_MAP_TYPE_PROG_ARRAY,
	.id		= CILIUM_MAP_POLICY,
	.size_key	= sizeof(__u32),
	.size_value	= sizeof(__u32),
	.pinning	= LIBBPF_PIN_BY_NAME,
	.max_elem	= POLICY_PROG_MAP_SIZE,
};

static __always_inline __must_check int
tail_call_policy(struct __ctx_buff *ctx, __u16 endpoint_id)
{
	if (__builtin_constant_p(endpoint_id)) {
		tail_call_static(ctx, POLICY_CALL_MAP, endpoint_id);
	} else {
		tail_call_dynamic(ctx, &POLICY_CALL_MAP, endpoint_id);
	}

	/* When forwarding from a BPF program to some endpoint,
	 * there are inherent races that can result in the endpoint's
	 * policy program being unavailable (e.g. if the endpoint is
	 * terminating).
	 */
	return DROP_EP_NOT_READY;
}
#endif /* SKIP_POLICY_MAP */

#ifdef ENABLE_L7_LB
/* Global map to jump into policy enforcement of sending endpoint */
struct bpf_elf_map __section_maps POLICY_EGRESSCALL_MAP = {
	.type		= BPF_MAP_TYPE_PROG_ARRAY,
	.id		= CILIUM_MAP_EGRESSPOLICY,
	.size_key	= sizeof(__u32),
	.size_value	= sizeof(__u32),
	.pinning	= LIBBPF_PIN_BY_NAME,
	.max_elem	= POLICY_PROG_MAP_SIZE,
};

static __always_inline __must_check int
tail_call_egress_policy(struct __ctx_buff *ctx, __u16 endpoint_id)
{
	tail_call_dynamic(ctx, &POLICY_EGRESSCALL_MAP, endpoint_id);
	/* same issue as for the POLICY_CALL_MAP calls */
	return DROP_EP_NOT_READY;
}

#endif /* ENABLE_L7_LB */
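/*
 * Illustrative usage sketch, not part of the original header: handing a
 * packet to the receiving endpoint's policy program. A successful tail call
 * never returns, so the DROP_EP_NOT_READY return value of tail_call_policy()
 * is only observed when the policy program is absent (e.g. the endpoint is
 * terminating) and the packet must be dropped. The function name and the
 * fixed endpoint ID below are hypothetical, chosen only to show the
 * compile-time constant vs. runtime index paths.
 */
#ifndef SKIP_POLICY_MAP
static __always_inline __maybe_unused int
example_deliver_to_policy(struct __ctx_buff *ctx, __u16 ep_id)
{
	/* Compile-time constant index: tail_call_policy() selects
	 * tail_call_static(), which the verifier can resolve directly.
	 */
	if (ep_id == 42)
		return tail_call_policy(ctx, 42);

	/* Runtime index: falls back to tail_call_dynamic(). Either way,
	 * reaching the return statement means the tail call failed.
	 */
	return tail_call_policy(ctx, ep_id);
}
#endif /* SKIP_POLICY_MAP */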
#ifdef POLICY_MAP
/* Per-endpoint policy enforcement map */
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__type(key, struct policy_key);
	__type(value, struct policy_entry);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, POLICY_MAP_SIZE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} POLICY_MAP __section_maps_btf;
#endif

#ifdef AUTH_MAP
/* Global auth map for enforcing authentication policy */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct auth_key);
	__type(value, struct auth_info);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, AUTH_MAP_SIZE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} AUTH_MAP __section_maps_btf;
#endif

#ifdef CONFIG_MAP
/*
 * CONFIG_MAP is an array containing runtime configuration information for the
 * bpf datapath. Each element in the array is a 64-bit integer, the meaning of
 * which is defined by the source of that index.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, __u32);
	__type(value, __u64);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, CONFIG_MAP_SIZE);
} CONFIG_MAP __section_maps_btf;
#endif

#ifndef SKIP_CALLS_MAP
/* Private per-EP map for internal tail calls. Its bpffs pin is replaced every
 * time the BPF object is loaded. An existing pinned map is never reused.
 */
struct bpf_elf_map __section_maps CALLS_MAP = {
	.type		= BPF_MAP_TYPE_PROG_ARRAY,
	.id		= CILIUM_MAP_CALLS,
	.size_key	= sizeof(__u32),
	.size_value	= sizeof(__u32),
	.pinning	= CILIUM_PIN_REPLACE,
	.max_elem	= CILIUM_CALL_SIZE,
};
#endif /* SKIP_CALLS_MAP */

#if defined(ENABLE_CUSTOM_CALLS) && defined(CUSTOM_CALLS_MAP)
/* Private per-EP map for tail calls to user-defined programs.
 * CUSTOM_CALLS_MAP is a per-EP map name, only defined for programs that need
 * to use the map, so we do not want to compile this definition if
 * CUSTOM_CALLS_MAP has not been #define-d.
 */
struct bpf_elf_map __section_maps CUSTOM_CALLS_MAP = {
	.type		= BPF_MAP_TYPE_PROG_ARRAY,
	.id		= CILIUM_MAP_CUSTOM_CALLS,
	.size_key	= sizeof(__u32),
	.size_value	= sizeof(__u32),
	.pinning	= LIBBPF_PIN_BY_NAME,
	.max_elem	= 4, /* ingress and egress, IPv4 and IPv6 */
};

#define CUSTOM_CALLS_IDX_IPV4_INGRESS	0
#define CUSTOM_CALLS_IDX_IPV4_EGRESS	1
#define CUSTOM_CALLS_IDX_IPV6_INGRESS	2
#define CUSTOM_CALLS_IDX_IPV6_EGRESS	3
#endif /* ENABLE_CUSTOM_CALLS && CUSTOM_CALLS_MAP */

struct ipcache_key {
	struct bpf_lpm_trie_key lpm_key;
	__u16 cluster_id;
	__u8 pad1;
	__u8 family;
	union {
		struct {
			__u32 ip4;
			__u32 pad4;
			__u32 pad5;
			__u32 pad6;
		};
		union v6addr ip6;
	};
} __packed;

/* Global IP -> Identity map for applying egress label-based policy */
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__type(key, struct ipcache_key);
	__type(value, struct remote_endpoint_info);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, IPCACHE_MAP_SIZE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} IPCACHE_MAP __section_maps_btf;

/* Map from node IP addresses (struct node_key) to node IDs */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct node_key);
	__type(value, __u16);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, NODE_MAP_SIZE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} NODE_MAP __section_maps_btf;

struct node_value {
	__u16 id;
	__u8 spi;
	__u8 pad;
};

/* Successor of NODE_MAP: the value carries the node ID together with the SPI */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct node_key);
	__type(value, struct node_value);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, NODE_MAP_SIZE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} NODE_MAP_V2 __section_maps_btf;

struct l2_responder_v4_key {
	__u32 ip4;
	__u32 ifindex;
};

struct l2_responder_v4_stats {
	__u64 responses_sent;
};

/* IPv4 address/interface pairs the datapath answers L2 requests for,
 * with per-entry response statistics.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct l2_responder_v4_key);
	__type(value, struct l2_responder_v4_stats);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, L2_RESPONSER_MAP4_SIZE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} L2_RESPONDER_MAP4 __section_maps_btf;
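/*
 * Illustrative usage sketch, not part of the original header: resolving the
 * remote endpoint info for an IPv4 address via the IPCACHE_MAP LPM trie
 * declared above. The LPM prefix length counts the bits that follow lpm_key,
 * so it must cover the 32 bits of cluster_id/pad1/family in addition to the
 * address prefix. EXAMPLE_IPCACHE_STATIC_PREFIX and the function name are
 * assumptions for illustration, and ENDPOINT_KEY_IPV4 is assumed to be the
 * IPv4 family constant from the included headers; the datapath's real lookup
 * helpers live in lib/eps.h.
 */
#define EXAMPLE_IPCACHE_STATIC_PREFIX						\
	(8 * (sizeof(struct ipcache_key) - sizeof(struct bpf_lpm_trie_key) -	\
	      sizeof(union v6addr)))

static __always_inline __maybe_unused struct remote_endpoint_info *
example_ipcache_lookup4(__be32 addr, __u32 prefix_len, __u32 cluster_id)
{
	struct ipcache_key key = {
		.lpm_key = { EXAMPLE_IPCACHE_STATIC_PREFIX + prefix_len, {} },
		.cluster_id = (__u16)cluster_id,
		.family = ENDPOINT_KEY_IPV4,
		.ip4 = addr,
	};

	/* Longest-prefix match: the most specific entry covering addr wins. */
	return map_lookup_elem(&IPCACHE_MAP, &key);
}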
#ifdef ENABLE_SRV6
# define SRV6_VRF_MAP(IP_FAMILY)				\
struct {							\
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);			\
	__type(key, struct srv6_vrf_key ## IP_FAMILY);		\
	__type(value, __u32);					\
	__uint(pinning, LIBBPF_PIN_BY_NAME);			\
	__uint(max_entries, SRV6_VRF_MAP_SIZE);			\
	__uint(map_flags, BPF_F_NO_PREALLOC);			\
} SRV6_VRF_MAP ## IP_FAMILY __section_maps_btf;

# define SRV6_POLICY_MAP(IP_FAMILY)				\
struct {							\
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);			\
	__type(key, struct srv6_policy_key ## IP_FAMILY);	\
	__type(value, union v6addr);				\
	__uint(pinning, LIBBPF_PIN_BY_NAME);			\
	__uint(max_entries, SRV6_POLICY_MAP_SIZE);		\
	__uint(map_flags, BPF_F_NO_PREALLOC);			\
} SRV6_POLICY_MAP ## IP_FAMILY __section_maps_btf;

# ifdef ENABLE_IPV4
SRV6_VRF_MAP(4)
SRV6_POLICY_MAP(4)
# endif /* ENABLE_IPV4 */

SRV6_VRF_MAP(6)
SRV6_POLICY_MAP(6)

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, union v6addr); /* SID */
	__type(value, __u32);      /* VRF ID */
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, SRV6_SID_MAP_SIZE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} SRV6_SID_MAP __section_maps_btf;
#endif /* ENABLE_SRV6 */

#ifdef ENABLE_VTEP
/* Map of VXLAN tunnel endpoints (VTEP integration) */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct vtep_key);
	__type(value, struct vtep_value);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, VTEP_MAP_SIZE);
	__uint(map_flags, CONDITIONAL_PREALLOC);
} VTEP_MAP __section_maps_btf;
#endif /* ENABLE_VTEP */

struct world_cidrs_key4 {
	struct bpf_lpm_trie_key lpm_key;
	__u32 ip;
} __packed;

#ifdef ENABLE_HIGH_SCALE_IPCACHE
/* Set of IPv4 world CIDRs used by the high-scale ipcache mode */
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__type(key, struct world_cidrs_key4);
	__type(value, __u8);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, WORLD_CIDRS4_MAP_SIZE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} WORLD_CIDRS4_MAP __section_maps_btf;
#endif /* ENABLE_HIGH_SCALE_IPCACHE */

#ifndef SKIP_CALLS_MAP
static __always_inline __must_check int
tail_call_internal(struct __ctx_buff *ctx, const __u32 index, __s8 *ext_err)
{
	tail_call_static(ctx, CALLS_MAP, index);

	if (ext_err)
		*ext_err = (__s8)index;
	return DROP_MISSED_TAIL_CALL;
}
#endif /* SKIP_CALLS_MAP */
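/*
 * Illustrative usage sketch, not part of the original header: jumping to an
 * internal program through CALLS_MAP. A successful tail call never returns;
 * the code below only executes when the requested slot is empty, in which
 * case ext_err carries the missed index and DROP_MISSED_TAIL_CALL is
 * reported. The function name is hypothetical, and CILIUM_CALL_IPV4_FROM_LXC
 * is assumed to be one of the call indices provided by the included headers.
 */
#ifndef SKIP_CALLS_MAP
static __always_inline __maybe_unused int
example_jump_to_ipv4_handler(struct __ctx_buff *ctx, __s8 *ext_err)
{
	/* Only reached if the tail call into CALLS_MAP failed. */
	return tail_call_internal(ctx, CILIUM_CALL_IPV4_FROM_LXC, ext_err);
}
#endif /* SKIP_CALLS_MAP */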