xref: /linux/tools/testing/selftests/bpf/progs/test_tc_edt.c (revision a4eb44a6435d6d8f9e642407a4a06f65eb90ca04)
// SPDX-License-Identifier: GPL-2.0
#include <stdint.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/stddef.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

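/*
 * EDT (earliest departure time) pacing: TCP traffic to destination port 9000
 * is rate limited by stamping each packet with the time at which it is
 * allowed to leave (skb->tstamp). An EDT-aware qdisc such as fq is assumed
 * to be installed on the egress device by the test setup outside this file.
 */
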
/* the maximum delay we are willing to add (drop packets beyond that) */
#define TIME_HORIZON_NS (2000 * 1000 * 1000)
#define NS_PER_SEC 1000000000
/* mark packets with ECN CE once the added delay exceeds this (5 ms) */
#define ECN_HORIZON_NS 5000000
/* pacing rate, used as bytes per second in the delay computation below */
#define THROTTLE_RATE_BPS (5 * 1000 * 1000)

/* flow_key => last departure timestamp (last_tstamp) used for the flow */
struct bpf_map_def SEC("maps") flow_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(uint32_t),
	.value_size = sizeof(uint64_t),
	.max_entries = 1,
};

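/*
 * Decide the fate of one packet of the throttled flow: pass it with an
 * updated departure time in skb->tstamp, additionally mark it with ECN CE
 * if the backlog grows, or drop it once the backlog exceeds the horizon.
 */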
static inline int throttle_flow(struct __sk_buff *skb)
{
	int key = 0;
	uint64_t *last_tstamp = bpf_map_lookup_elem(&flow_map, &key);
	/* time needed to serialize skb->len bytes at THROTTLE_RATE_BPS */
	uint64_t delay_ns = ((uint64_t)skb->len) * NS_PER_SEC /
			THROTTLE_RATE_BPS;
	uint64_t now = bpf_ktime_get_ns();
	uint64_t tstamp, next_tstamp = 0;

	/* earliest departure time that keeps the flow within the rate */
	if (last_tstamp)
		next_tstamp = *last_tstamp + delay_ns;

	/* never schedule a departure in the past */
	tstamp = skb->tstamp;
	if (tstamp < now)
		tstamp = now;

	/* should we throttle? */
	if (next_tstamp <= tstamp) {
		if (bpf_map_update_elem(&flow_map, &key, &tstamp, BPF_ANY))
			return TC_ACT_SHOT;
		return TC_ACT_OK;
	}

	/* do not queue past the time horizon */
	if (next_tstamp - now >= TIME_HORIZON_NS)
		return TC_ACT_SHOT;

	/* set ecn bit, if needed */
	if (next_tstamp - now >= ECN_HORIZON_NS)
		bpf_skb_ecn_set_ce(skb);

	if (bpf_map_update_elem(&flow_map, &key, &next_tstamp, BPF_EXIST))
		return TC_ACT_SHOT;
	skb->tstamp = next_tstamp;

	return TC_ACT_OK;
}

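/* Throttle only TCP flows destined to port 9000; everything else passes. */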
static inline int handle_tcp(struct __sk_buff *skb, struct tcphdr *tcp)
{
	void *data_end = (void *)(long)skb->data_end;

	/* drop malformed packets */
	if ((void *)(tcp + 1) > data_end)
		return TC_ACT_SHOT;

	if (tcp->dest == bpf_htons(9000))
		return throttle_flow(skb);

	return TC_ACT_OK;
}

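/*
 * Validate the Ethernet and IPv4 headers (including IP options) and hand
 * TCP packets to handle_tcp().
 */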
static inline int handle_ipv4(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	struct iphdr *iph;
	uint32_t ihl;

	/* drop malformed packets */
	if (data + sizeof(struct ethhdr) > data_end)
		return TC_ACT_SHOT;
	iph = (struct iphdr *)(data + sizeof(struct ethhdr));
	if ((void *)(iph + 1) > data_end)
		return TC_ACT_SHOT;
	ihl = iph->ihl * 4;
	if (((void *)iph) + ihl > data_end)
		return TC_ACT_SHOT;

	if (iph->protocol == IPPROTO_TCP)
		return handle_tcp(skb, (struct tcphdr *)(((void *)iph) + ihl));

	return TC_ACT_OK;
}

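/*
 * Classifier entry point; only IPv4 traffic is inspected. Illustrative
 * attachment on the egress of a device (names are placeholders; the
 * selftest harness performs the actual setup, pairing this program with
 * an fq root qdisc so that skb->tstamp is honored):
 *
 *   tc qdisc add dev $DEV root fq
 *   tc qdisc add dev $DEV clsact
 *   tc filter add dev $DEV egress bpf da obj test_tc_edt.o sec cls_test
 */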
SEC("cls_test") int tc_prog(struct __sk_buff *skb)
{
	if (skb->protocol == bpf_htons(ETH_P_IP))
		return handle_ipv4(skb);

	return TC_ACT_OK;
}

char __license[] SEC("license") = "GPL";