github.com/cilium/cilium@v1.16.2/bpf/lib/edt.h (about)

     1  /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
     2  /* Copyright Authors of Cilium */
     3  
     4  #pragma once
     5  
     6  #include <bpf/ctx/ctx.h>
     7  
     8  #include "common.h"
     9  #include "time.h"
    10  
    11  /* From XDP layer, we neither go through an egress hook nor qdisc
    12   * from here, hence nothing to be set.
    13   */
    14  #if defined(ENABLE_BANDWIDTH_MANAGER) && __ctx_is == __ctx_skb
/* Per-aggregate throttling state. Keyed by struct edt_id — the aggregate
 * identifier stashed in the skb by edt_set_aggregate() and recovered in
 * edt_sched_departure() below. The value (struct edt_info) carries the
 * aggregate's rate (bps), last departure timestamp (t_last) and drop
 * horizon (t_horizon_drop), as read by edt_sched_departure().
 * Pinned by name so the map outlives program reloads and can be
 * populated from userspace — presumably the agent installs the rate
 * entries; confirm against the agent side.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, struct edt_id);
	__type(value, struct edt_info);
	__uint(pinning, LIBBPF_PIN_BY_NAME);
	__uint(max_entries, THROTTLE_MAP_SIZE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} THROTTLE_MAP __section_maps_btf;
    23  
/* Tag the packet with its bandwidth aggregate identifier by stashing it
 * in queue_mapping; edt_sched_departure() later recovers it through
 * edt_get_aggregate() to look up the aggregate's throttle state.
 */
static __always_inline void edt_set_aggregate(struct __ctx_buff *ctx,
					      __u32 aggregate)
{
	/* 16 bit as current used aggregate, and preserved in host ns.
	 * NOTE(review): only 16 bits are currently used per the comment
	 * above — assumes aggregate IDs fit in that width; confirm.
	 */
	ctx->queue_mapping = aggregate;
}
    30  
    31  static __always_inline __u32 edt_get_aggregate(struct __ctx_buff *ctx)
    32  {
    33  	__u32 aggregate = ctx->queue_mapping;
    34  
    35  	/* We need to reset queue mapping here such that new mapping will
    36  	 * be performed based on skb hash. See netdev_pick_tx().
    37  	 */
    38  	ctx->queue_mapping = 0;
    39  
    40  	return aggregate;
    41  }
    42  
/* Earliest-departure-time (EDT) pacing for one packet: compute when the
 * packet may leave given its aggregate's configured rate, stamp that
 * time into ctx->tstamp (honored by the FQ qdisc, see the sch_fq note
 * below), and advance the aggregate's t_last bookkeeping.
 *
 * @ctx:   packet context (skb path only, see the #if guard above)
 * @proto: ethertype in network byte order
 *
 * Returns CTX_ACT_OK to forward the packet (possibly time-stamped for
 * delayed release) or CTX_ACT_DROP when its departure time lies beyond
 * the aggregate's drop horizon.
 */
static __always_inline int
edt_sched_departure(struct __ctx_buff *ctx, __be16 proto)
{
	__u64 delay, now, t, t_next;
	struct edt_id aggregate;
	struct edt_info *info;

	/* Only IPv4/IPv6 traffic is rate-limited; all else passes through. */
	if (!eth_is_supported_ethertype(proto))
		return CTX_ACT_OK;
	if (proto != bpf_htons(ETH_P_IP) &&
	    proto != bpf_htons(ETH_P_IPV6))
		return CTX_ACT_OK;

	/* No aggregate tagged on this packet: not subject to throttling. */
	aggregate.id = edt_get_aggregate(ctx);
	if (!aggregate.id)
		return CTX_ACT_OK;

	/* No throttle state installed for this aggregate. */
	info = map_lookup_elem(&THROTTLE_MAP, &aggregate);
	if (!info)
		return CTX_ACT_OK;

	now = ktime_get_ns();
	/* Packet's current departure time, clamped to be never in the past. */
	t = ctx->tstamp;
	if (t < now)
		t = now;
	/* Wire time this packet occupies at the aggregate's rate.
	 * NOTE(review): assumes installed entries have info->bps != 0; a
	 * zero divisor in BPF yields 0 (i.e. no delay) rather than a trap
	 * — confirm the agent never installs a zero rate.
	 */
	delay = ((__u64)ctx_wire_len(ctx)) * NSEC_PER_SEC / info->bps;
	/* Racy by design: concurrent CPUs may read a stale t_last; the
	 * READ_ONCE/WRITE_ONCE pair only guarantees tear-free accesses.
	 */
	t_next = READ_ONCE(info->t_last) + delay;
	/* Aggregate is under its rate: depart immediately, record t. */
	if (t_next <= t) {
		WRITE_ONCE(info->t_last, t);
		return CTX_ACT_OK;
	}
	/* FQ implements a drop horizon, see also 39d010504e6b ("net_sched:
	 * sch_fq: add horizon attribute"). However, we explicitly need the
	 * drop horizon here to i) avoid having t_last messed up and ii) to
	 * potentially allow for per aggregate control.
	 */
	if (t_next - now >= info->t_horizon_drop)
		return CTX_ACT_DROP;
	/* Delay the packet: FQ releases it once t_next is reached. */
	WRITE_ONCE(info->t_last, t_next);
	ctx->tstamp = t_next;
	return CTX_ACT_OK;
}
    85  #else
/* Bandwidth manager disabled, or running at the XDP layer where no
 * egress hook/qdisc follows (see the comment on the #if above): tagging
 * an aggregate would have no effect, so provide a no-op stub that keeps
 * call sites unconditional.
 */
static __always_inline void
edt_set_aggregate(struct __ctx_buff *ctx __maybe_unused,
		  __u32 aggregate __maybe_unused)
{
}
    91  #endif /* ENABLE_BANDWIDTH_MANAGER */