171634d7fSbrakmo // SPDX-License-Identifier: GPL-2.0
271634d7fSbrakmo /* Copyright (c) 2019 Facebook
371634d7fSbrakmo *
471634d7fSbrakmo * This program is free software; you can redistribute it and/or
571634d7fSbrakmo * modify it under the terms of version 2 of the GNU General Public
671634d7fSbrakmo * License as published by the Free Software Foundation.
771634d7fSbrakmo *
871634d7fSbrakmo * Sample Host Bandwidth Manager (HBM) BPF program.
971634d7fSbrakmo *
1071634d7fSbrakmo * A cgroup skb BPF egress program to limit cgroup output bandwidth.
1171634d7fSbrakmo * It uses a modified virtual token bucket queue to limit average
1271634d7fSbrakmo * egress bandwidth. The implementation uses credits instead of tokens.
1371634d7fSbrakmo * Negative credits imply that queueing would have happened (this is
1471634d7fSbrakmo * a virtual queue, so no queueing is done by it). However, queueing may
1571634d7fSbrakmo * occur at the actual qdisc (which is not used for rate limiting).
1671634d7fSbrakmo *
1771634d7fSbrakmo * This implementation uses 3 thresholds, one to start marking packets and
1871634d7fSbrakmo * the other two to drop packets:
1971634d7fSbrakmo * CREDIT
2071634d7fSbrakmo * - <--------------------------|------------------------> +
2171634d7fSbrakmo * | | | 0
2271634d7fSbrakmo * | Large pkt |
2371634d7fSbrakmo * | drop thresh |
2471634d7fSbrakmo * Small pkt drop Mark threshold
2571634d7fSbrakmo * thresh
2671634d7fSbrakmo *
2771634d7fSbrakmo * The effect of marking depends on the type of packet:
2871634d7fSbrakmo * a) If the packet is ECN enabled and it is a TCP packet, then the packet
2971634d7fSbrakmo * is ECN marked.
3071634d7fSbrakmo * b) If the packet is a TCP packet, then we probabilistically call tcp_cwr
3171634d7fSbrakmo * to reduce the congestion window. The current implementation uses a linear
3271634d7fSbrakmo * distribution (0% probability at marking threshold, 100% probability
3371634d7fSbrakmo * at drop threshold).
3471634d7fSbrakmo * c) If the packet is not a TCP packet, then it is dropped.
3571634d7fSbrakmo *
3671634d7fSbrakmo * If the credit is below the drop threshold, the packet is dropped. If it
3771634d7fSbrakmo * is a TCP packet, then it also calls tcp_cwr since packets dropped by
38*01dea954SShaomin Deng * a cgroup skb BPF program do not automatically trigger a call to
3971634d7fSbrakmo * tcp_cwr in the current kernel code.
4071634d7fSbrakmo *
4171634d7fSbrakmo * This BPF program actually uses 2 drop thresholds, one threshold
4271634d7fSbrakmo * for larger packets (>= 120 bytes) and another for smaller packets. This
4371634d7fSbrakmo * protects smaller packets such as SYNs, ACKs, etc.
4471634d7fSbrakmo *
4571634d7fSbrakmo * The default bandwidth limit is set at 1Gbps but this can be changed by
4671634d7fSbrakmo * a user program through a shared BPF map. In addition, by default this BPF
4771634d7fSbrakmo * program does not limit connections using loopback. This behavior can be
4871634d7fSbrakmo * overwritten by the user program. There is also an option to calculate
4971634d7fSbrakmo * some statistics, such as percent of packets marked or dropped, which
5071634d7fSbrakmo * a user program, such as hbm, can access.
5171634d7fSbrakmo */
5271634d7fSbrakmo
5371634d7fSbrakmo #include "hbm_kern.h"
5471634d7fSbrakmo
5571634d7fSbrakmo SEC("cgroup_skb/egress")
// Egress rate limiter attached as a cgroup skb program.
// Returns ALLOW_PKT or DROP_PKT, optionally OR'ed with CWR to request a
// congestion-window reduction for TCP flows (constants from hbm_kern.h).
_hbm_out_cg(struct __sk_buff * skb)5671634d7fSbrakmo int _hbm_out_cg(struct __sk_buff *skb)
5771634d7fSbrakmo {
5871634d7fSbrakmo 	long long delta = 0, delta_send;
5971634d7fSbrakmo 	unsigned long long curtime, sendtime;
6071634d7fSbrakmo 	struct hbm_queue_stats *qsp = NULL;
6171634d7fSbrakmo 	unsigned int queue_index = 0;
6271634d7fSbrakmo 	bool congestion_flag = false;
6371634d7fSbrakmo 	bool ecn_ce_flag = false;
6471634d7fSbrakmo 	struct hbm_pkt_info pkti = {};
6571634d7fSbrakmo 	struct hbm_vqueue *qdp;
6671634d7fSbrakmo 	bool drop_flag = false;
6771634d7fSbrakmo 	bool cwr_flag = false;
6871634d7fSbrakmo 	int len = skb->len;
6971634d7fSbrakmo 	int rv = ALLOW_PKT;
7071634d7fSbrakmo
// Config/stats map shared with the userspace hbm program; qsp may be NULL
// and every use below is guarded accordingly.
7171634d7fSbrakmo 	qsp = bpf_map_lookup_elem(&queue_stats, &queue_index);
7271634d7fSbrakmo
7371634d7fSbrakmo 	// Check if we should ignore loopback traffic
// NOTE(review): ifindex == 1 is assumed to be the loopback device — true in
// practice on Linux, but not architecturally guaranteed.
7471634d7fSbrakmo 	if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1))
7571634d7fSbrakmo 		return ALLOW_PKT;
7671634d7fSbrakmo
7771634d7fSbrakmo 	hbm_get_pkt_info(skb, &pkti);
7871634d7fSbrakmo
7971634d7fSbrakmo 	// We may want to account for the length of headers in len
8071634d7fSbrakmo 	// calculation, like ETH header + overhead, specially if it
8171634d7fSbrakmo 	// is a gso packet. But I am not doing it right now.
8271634d7fSbrakmo
// Per-cgroup virtual queue state; lasttime == 0 means first packet seen,
// so lazily initialize the EDT virtual queue.
8371634d7fSbrakmo 	qdp = bpf_get_local_storage(&queue_state, 0);
8471634d7fSbrakmo 	if (!qdp)
8571634d7fSbrakmo 		return ALLOW_PKT;
8671634d7fSbrakmo 	if (qdp->lasttime == 0)
8771634d7fSbrakmo 		hbm_init_edt_vqueue(qdp, 1024);
8871634d7fSbrakmo
8971634d7fSbrakmo 	curtime = bpf_ktime_get_ns();
9071634d7fSbrakmo
9171634d7fSbrakmo 	// Begin critical section
9271634d7fSbrakmo 	bpf_spin_lock(&qdp->lock);
// delta > 0: the virtual queue is ahead of real time (a backlog exists);
// delta < 0: unused credit that would allow a burst.
9371634d7fSbrakmo 	delta = qdp->lasttime - curtime;
9471634d7fSbrakmo 	// bound bursts to 100us
9571634d7fSbrakmo 	if (delta < -BURST_SIZE_NS) {
9671634d7fSbrakmo 		// negative delta is a credit that allows bursts
9771634d7fSbrakmo 		qdp->lasttime = curtime - BURST_SIZE_NS;
9871634d7fSbrakmo 		delta = -BURST_SIZE_NS;
9971634d7fSbrakmo 	}
// Advance the virtual clock by this packet's transmission time at the
// configured rate; sendtime becomes the packet's earliest departure time.
10071634d7fSbrakmo 	sendtime = qdp->lasttime;
10171634d7fSbrakmo 	delta_send = BYTES_TO_NS(len, qdp->rate);
10271634d7fSbrakmo 	__sync_add_and_fetch(&(qdp->lasttime), delta_send);
10371634d7fSbrakmo 	bpf_spin_unlock(&qdp->lock);
10471634d7fSbrakmo 	// End critical section
10571634d7fSbrakmo
10671634d7fSbrakmo 	// Set EDT of packet
// NOTE(review): EDT pacing only takes effect with an EDT-aware qdisc
// (e.g. fq) installed on the egress device — confirm in deployment.
10771634d7fSbrakmo 	skb->tstamp = sendtime;
10871634d7fSbrakmo
10971634d7fSbrakmo 	// Check if we should update rate
// NOTE(review): the *128 scale presumably converts qsp->rate units to the
// units BYTES_TO_NS expects (see hbm_kern.h) — verify against the header.
11071634d7fSbrakmo 	if (qsp != NULL && (qsp->rate * 128) != qdp->rate)
11171634d7fSbrakmo 		qdp->rate = qsp->rate * 128;
11271634d7fSbrakmo
11371634d7fSbrakmo 	// Set flags (drop, congestion, cwr)
11471634d7fSbrakmo 	// last packet will be sent in the future, bound latency
// Two drop thresholds: a stricter one for large packets so that small
// control packets (SYNs, ACKs) survive longer (see file header comment).
11571634d7fSbrakmo 	if (delta > DROP_THRESH_NS || (delta > LARGE_PKT_DROP_THRESH_NS &&
11671634d7fSbrakmo 				       len > LARGE_PKT_THRESH)) {
11771634d7fSbrakmo 		drop_flag = true;
11871634d7fSbrakmo 		if (pkti.is_tcp && pkti.ecn == 0)
11971634d7fSbrakmo 			cwr_flag = true;
12071634d7fSbrakmo 	} else if (delta > MARK_THRESH_NS) {
12171634d7fSbrakmo 		if (pkti.is_tcp)
12271634d7fSbrakmo 			congestion_flag = true;
12371634d7fSbrakmo 		else
12471634d7fSbrakmo 			drop_flag = true;
12571634d7fSbrakmo 	}
12671634d7fSbrakmo
12771634d7fSbrakmo 	if (congestion_flag) {
// Prefer ECN CE marking; fall back to probabilistic cwr (TCP) or drop
// (non-TCP large packets) when the packet cannot be CE-marked.
12871634d7fSbrakmo 		if (bpf_skb_ecn_set_ce(skb)) {
12971634d7fSbrakmo 			ecn_ce_flag = true;
13071634d7fSbrakmo 		} else {
13171634d7fSbrakmo 			if (pkti.is_tcp) {
13271634d7fSbrakmo 				unsigned int rand = bpf_get_prandom_u32();
13371634d7fSbrakmo
// Linear probability ramp: 0% at MARK_THRESH_NS, 100% at the top of
// the marking region (matches the file header description).
13471634d7fSbrakmo 				if (delta >= MARK_THRESH_NS +
13571634d7fSbrakmo 				    (rand % MARK_REGION_SIZE_NS)) {
13671634d7fSbrakmo 					// Do congestion control
13771634d7fSbrakmo 					cwr_flag = true;
13871634d7fSbrakmo 				}
13971634d7fSbrakmo 			} else if (len > LARGE_PKT_THRESH) {
14071634d7fSbrakmo 				// Problem if too many small packets?
14171634d7fSbrakmo 				drop_flag = true;
14271634d7fSbrakmo 				congestion_flag = false;
14371634d7fSbrakmo 			}
14471634d7fSbrakmo 		}
14571634d7fSbrakmo 	}
14671634d7fSbrakmo
// With at most one packet in flight, downgrade a drop to a cwr so the
// flow is throttled without being stalled outright.
14771634d7fSbrakmo 	if (pkti.is_tcp && drop_flag && pkti.packets_out <= 1) {
14871634d7fSbrakmo 		drop_flag = false;
14971634d7fSbrakmo 		cwr_flag = true;
15071634d7fSbrakmo 		congestion_flag = false;
15171634d7fSbrakmo 	}
15271634d7fSbrakmo
// User-configurable override: suppress all congestion notification.
15371634d7fSbrakmo 	if (qsp != NULL && qsp->no_cn)
15471634d7fSbrakmo 		cwr_flag = false;
15571634d7fSbrakmo
15671634d7fSbrakmo 	hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag,
15771634d7fSbrakmo 			 cwr_flag, ecn_ce_flag, &pkti, (int) delta);
15871634d7fSbrakmo
15971634d7fSbrakmo 	if (drop_flag) {
// Refund the credit charged above: the packet is not sent, so roll the
// virtual clock back atomically (no spin lock needed for a single add).
16071634d7fSbrakmo 		__sync_add_and_fetch(&(qdp->lasttime), -delta_send);
16171634d7fSbrakmo 		rv = DROP_PKT;
16271634d7fSbrakmo 	}
16371634d7fSbrakmo
16471634d7fSbrakmo 	if (cwr_flag)
16571634d7fSbrakmo 		rv |= CWR;
16671634d7fSbrakmo 	return rv;
16771634d7fSbrakmo }
// License declaration read by the BPF loader; required to use GPL-only helpers.
16871634d7fSbrakmo char _license[] SEC("license") = "GPL";
169