// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2025 Cloudflare */

#include <vmlinux.h>
#include <errno.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include "bpf_misc.h"
#include "bpf_atomic.h"
#include "progs/lpm_trie.h"

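/*
 * BPF_OBJ_NAME_LEN mirrors the kernel's limit on map names. MAX_ENTRIES
 * only caps the size of the trie map; the number of keys actually
 * exercised is nr_entries, configured from userspace. NR_LOOPS is the
 * number of map operations batched into one program invocation via
 * bpf_loop().
 */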
#define BPF_OBJ_NAME_LEN 16U
#define MAX_ENTRIES 100000000
#define NR_LOOPS 10000

char _license[] SEC("license") = "GPL";

/* Filled by userspace. See fill_map() in bench_lpm_trie_map.c */
struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__type(key, struct trie_key);
	__type(value, __u32);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
} trie_map SEC(".maps");
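
/*
 * A sketch of the expected key layout, assuming progs/lpm_trie.h follows
 * the usual LPM trie convention of a prefix length followed by the data
 * bytes (the authoritative definition lives in that header):
 *
 *	struct trie_key {
 *		__u32 prefixlen;
 *		__u32 data;
 *	};
 *
 * Note that the kernel requires LPM trie maps to be created with
 * BPF_F_NO_PREALLOC.
 */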

long hits;
long duration_ns;

/* Configured from userspace */
__u32 nr_entries;
__u32 prefixlen;
bool random;
__u8 op;

static __u64 latency_free_start;

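/*
 * The fentry/fexit pair below times bpf_map_free_deferred() to measure
 * how long the kernel takes to tear down a trie. The map being freed is
 * identified by name ("trie_free_map", presumably created by the
 * userspace side of the benchmark) since the map ID is gone by the time
 * the free path runs. latency_free_start carries the start timestamp
 * from the fentry probe to the fexit probe.
 */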
SEC("fentry/bpf_map_free_deferred")
int BPF_PROG(trie_free_entry, struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);
	char name[BPF_OBJ_NAME_LEN];
	u32 map_type;

	map_type = BPF_CORE_READ(map, map_type);
	if (map_type != BPF_MAP_TYPE_LPM_TRIE)
		return 0;

	/*
	 * Ideally we'd have access to the map ID but that's already
	 * freed before we enter trie_free().
	 */
	BPF_CORE_READ_STR_INTO(&name, map, name);
	if (bpf_strncmp(name, BPF_OBJ_NAME_LEN, "trie_free_map"))
		return 0;

	latency_free_start = bpf_ktime_get_ns();

	return 0;
}

SEC("fexit/bpf_map_free_deferred")
int BPF_PROG(trie_free_exit, struct work_struct *work)
{
	__u64 val;

	if (!latency_free_start)
		return 0;

	val = bpf_ktime_get_ns() - latency_free_start;
	latency_free_start = 0;

	__sync_add_and_fetch(&duration_ns, val);
	__sync_add_and_fetch(&hits, 1);

	return 0;
}

static __u32 cur_key;

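/*
 * Keys are drawn from [0, nr_entries): either uniformly at random or
 * sequentially via cur_key. cur_key is a plain global, so concurrent
 * producers race on the increment; for a benchmark that only needs
 * roughly sequential coverage this is presumably acceptable.
 */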
static __always_inline void generate_key(struct trie_key *key)
{
	key->prefixlen = prefixlen;

	if (random)
		key->data = bpf_get_prandom_u32() % nr_entries;
	else
		key->data = cur_key++ % nr_entries;
}

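/*
 * noop() measures raw bpf_loop() dispatch overhead and baseline()
 * additionally pays for key generation, so userspace can presumably
 * discount loop and key-generation costs from the map-operation
 * results to isolate the trie operations themselves.
 */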
static int noop(__u32 index, __u32 *unused)
{
	return 0;
}

static int baseline(__u32 index, __u32 *unused)
{
	struct trie_key key;
	__u32 blackbox = 0;

	generate_key(&key);
	/* Avoid compiler optimizing out the modulo */
	barrier_var(blackbox);
	blackbox = READ_ONCE(key.data);

	return 0;
}

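/*
 * Each callback below follows the bpf_loop() contract: returning 0
 * continues the loop, returning 1 stops it early. On early exit the
 * reason is passed back to run_bench() through *retval, either a
 * -errno or one of the LPM_BENCH_* codes from progs/lpm_trie.h.
 */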
static int lookup(__u32 index, int *retval)
{
	struct trie_key key;

	generate_key(&key);
	if (!bpf_map_lookup_elem(&trie_map, &key)) {
		*retval = -ENOENT;
		return 1;
	}

	return 0;
}

static int insert(__u32 index, int *retval)
{
	struct trie_key key;
	u32 val = 1;
	int err;

	generate_key(&key);
	err = bpf_map_update_elem(&trie_map, &key, &val, BPF_NOEXIST);
	if (err) {
		*retval = err;
		return 1;
	}

	/* Is this the last entry? */
	if (key.data == nr_entries - 1) {
		/* For atomicity concerns, see the comment in delete() */
		*retval = LPM_BENCH_REINIT_MAP;
		return 1;
	}

	return 0;
}

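/*
 * update() uses BPF_EXIST so it only overwrites keys that are already
 * present; it relies on the map having been prefilled by userspace
 * (see fill_map() in bench_lpm_trie_map.c).
 */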
static int update(__u32 index, int *retval)
{
	struct trie_key key;
	u32 val = 1;
	int err;

	generate_key(&key);
	err = bpf_map_update_elem(&trie_map, &key, &val, BPF_EXIST);
	if (err) {
		*retval = err;
		return 1;
	}

	return 0;
}

static int delete(__u32 index, int *retval)
{
	struct trie_key key;
	int err;

	generate_key(&key);
	err = bpf_map_delete_elem(&trie_map, &key);
	if (err) {
		*retval = err;
		return 1;
	}

	/* Do we need to refill the map? */
	if (key.data == nr_entries - 1) {
		/*
		 * Atomicity isn't required because DELETE only supports
		 * one producer running concurrently. What we need is a
		 * way to track how many entries have been deleted from
		 * the trie between consecutive invocations of the BPF
		 * prog because a single bpf_loop() call might not
		 * delete all entries, e.g. when NR_LOOPS < nr_entries.
		 */
		*retval = LPM_BENCH_REINIT_MAP;
		return 1;
	}

	return 0;
}

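/*
 * Benchmark entry point, presumably driven from userspace via
 * BPF_PROG_TEST_RUN. It batches NR_LOOPS operations per invocation,
 * accumulating elapsed time in duration_ns and completed iterations in
 * hits (bpf_loop() returns the number of iterations performed), from
 * which userspace can derive an ops/sec rate.
 */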
SEC("xdp")
int BPF_PROG(run_bench)
{
	int err = LPM_BENCH_SUCCESS;
	u64 start, delta;
	int loops;

	start = bpf_ktime_get_ns();

	switch (op) {
	case LPM_OP_NOOP:
		loops = bpf_loop(NR_LOOPS, noop, NULL, 0);
		break;
	case LPM_OP_BASELINE:
		loops = bpf_loop(NR_LOOPS, baseline, NULL, 0);
		break;
	case LPM_OP_LOOKUP:
		loops = bpf_loop(NR_LOOPS, lookup, &err, 0);
		break;
	case LPM_OP_INSERT:
		loops = bpf_loop(NR_LOOPS, insert, &err, 0);
		break;
	case LPM_OP_UPDATE:
		loops = bpf_loop(NR_LOOPS, update, &err, 0);
		break;
	case LPM_OP_DELETE:
		loops = bpf_loop(NR_LOOPS, delete, &err, 0);
		break;
	default:
		bpf_printk("invalid benchmark operation\n");
		return -1;
	}

	delta = bpf_ktime_get_ns() - start;

	__sync_add_and_fetch(&duration_ns, delta);
	__sync_add_and_fetch(&hits, loops);

	return err;
}