xref: /linux/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c (revision 336b78c655c84ce9ce47219185171b3912109c0a)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2023 Isovalent */
3 
4 #include <sys/random.h>
5 #include <argp.h>
6 #include "bench.h"
7 #include "bpf_hashmap_lookup.skel.h"
8 #include "bpf_util.h"
9 
10 /* BPF triggering benchmarks */
11 static struct ctx {
12 	struct bpf_hashmap_lookup *skel;
13 } ctx;
14 
/* Kernel-internal cap on bpf_loop() iterations; only available to kernel
 * code, so define it here. Used as the upper bound for --nr_loops.
 */
#define BPF_MAX_LOOPS (1<<23)

/* Upper bound for --key_size; presumably matches the BPF-side key buffer
 * ("the size of the key map") — TODO confirm against the skeleton.
 */
#define MAX_KEY_SIZE 1024
19 
/* Benchmark parameters, overridable from the command line (see opts[]). */
static struct {
	__u32 key_size;    /* hashmap key size in bytes (1..MAX_KEY_SIZE) */
	__u32 map_flags;   /* flags passed through to BPF_MAP_CREATE */
	__u32 max_entries; /* hashmap capacity */
	__u32 nr_entries;  /* number of entries to insert and look up */
	__u32 nr_loops;    /* total number of lookups for the benchmark */
} args = {
	.key_size = 4,
	.map_flags = 0,
	.max_entries = 1000,
	.nr_entries = 500,
	.nr_loops = 1000000,
};
33 
/* argp option keys; base 8001 is outside the printable-char range and
 * presumably unique across benchmarks sharing the bench binary — TODO
 * confirm against the other bench_*.c files.
 */
enum {
	ARG_KEY_SIZE = 8001,
	ARG_MAP_FLAGS,
	ARG_MAX_ENTRIES,
	ARG_NR_ENTRIES,
	ARG_NR_LOOPS,
};
41 
/* Command-line options; parsed by parse_arg() into args above. */
static const struct argp_option opts[] = {
	{ "key_size", ARG_KEY_SIZE, "KEY_SIZE", 0,
	  "The hashmap key size (max 1024)"},
	{ "map_flags", ARG_MAP_FLAGS, "MAP_FLAGS", 0,
	  "The hashmap flags passed to BPF_MAP_CREATE"},
	{ "max_entries", ARG_MAX_ENTRIES, "MAX_ENTRIES", 0,
	  "The hashmap max entries"},
	{ "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0,
	  "The number of entries to insert/lookup"},
	{ "nr_loops", ARG_NR_LOOPS, "NR_LOOPS", 0,
	  "The number of loops for the benchmark"},
	{},
};
55 
56 static error_t parse_arg(int key, char *arg, struct argp_state *state)
57 {
58 	long ret;
59 
60 	switch (key) {
61 	case ARG_KEY_SIZE:
62 		ret = strtol(arg, NULL, 10);
63 		if (ret < 1 || ret > MAX_KEY_SIZE) {
64 			fprintf(stderr, "invalid key_size");
65 			argp_usage(state);
66 		}
67 		args.key_size = ret;
68 		break;
69 	case ARG_MAP_FLAGS:
70 		ret = strtol(arg, NULL, 0);
71 		if (ret < 0 || ret > UINT_MAX) {
72 			fprintf(stderr, "invalid map_flags");
73 			argp_usage(state);
74 		}
75 		args.map_flags = ret;
76 		break;
77 	case ARG_MAX_ENTRIES:
78 		ret = strtol(arg, NULL, 10);
79 		if (ret < 1 || ret > UINT_MAX) {
80 			fprintf(stderr, "invalid max_entries");
81 			argp_usage(state);
82 		}
83 		args.max_entries = ret;
84 		break;
85 	case ARG_NR_ENTRIES:
86 		ret = strtol(arg, NULL, 10);
87 		if (ret < 1 || ret > UINT_MAX) {
88 			fprintf(stderr, "invalid nr_entries");
89 			argp_usage(state);
90 		}
91 		args.nr_entries = ret;
92 		break;
93 	case ARG_NR_LOOPS:
94 		ret = strtol(arg, NULL, 10);
95 		if (ret < 1 || ret > BPF_MAX_LOOPS) {
96 			fprintf(stderr, "invalid nr_loops: %ld (min=1 max=%u)\n",
97 				ret, BPF_MAX_LOOPS);
98 			argp_usage(state);
99 		}
100 		args.nr_loops = ret;
101 		break;
102 	default:
103 		return ARGP_ERR_UNKNOWN;
104 	}
105 
106 	return 0;
107 }
108 
/* argp description exported to the bench framework's option parser */
const struct argp bench_hashmap_lookup_argp = {
	.options = opts,
	.parser = parse_arg,
};
113 
/*
 * Sanity-check the environment and parsed arguments before setup();
 * exits on any violation.
 */
static void validate(void)
{
	/* this benchmark produces no data via the consumer thread */
	if (env.consumer_cnt != 1) {
		fprintf(stderr, "benchmark doesn't support multi-consumer!\n");
		exit(1);
	}

	/* cannot insert more entries than the map can hold */
	if (args.nr_entries > args.max_entries) {
		fprintf(stderr, "args.nr_entries is too big! (max %u, got %u)\n",
			args.max_entries, args.nr_entries);
		exit(1);
	}
}
127 
/*
 * Producer thread: spin on a cheap syscall forever; the attached BPF
 * program runs on each getpgid invocation and does the actual lookups.
 */
static void *producer(void *input)
{
	for (;;)
		syscall(__NR_getpgid);

	return NULL;
}
136 
/* Consumer thread: nothing to consume; results are read from BPF state
 * at report time.
 */
static void *consumer(void *input)
{
	return NULL;
}
141 
/* No per-interval measurement: sample times are collected by the BPF
 * program and read in hashmap_report_final().
 */
static void measure(struct bench_res *res)
{
}
145 
146 static inline void patch_key(u32 i, u32 *key)
147 {
148 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
149 	*key = i + 1;
150 #else
151 	*key = __builtin_bswap32(i + 1);
152 #endif
153 	/* the rest of key is random */
154 }
155 
156 static void setup(void)
157 {
158 	struct bpf_link *link;
159 	int map_fd;
160 	int ret;
161 	int i;
162 
163 	setup_libbpf();
164 
165 	ctx.skel = bpf_hashmap_lookup__open();
166 	if (!ctx.skel) {
167 		fprintf(stderr, "failed to open skeleton\n");
168 		exit(1);
169 	}
170 
171 	bpf_map__set_max_entries(ctx.skel->maps.hash_map_bench, args.max_entries);
172 	bpf_map__set_key_size(ctx.skel->maps.hash_map_bench, args.key_size);
173 	bpf_map__set_value_size(ctx.skel->maps.hash_map_bench, 8);
174 	bpf_map__set_map_flags(ctx.skel->maps.hash_map_bench, args.map_flags);
175 
176 	ctx.skel->bss->nr_entries = args.nr_entries;
177 	ctx.skel->bss->nr_loops = args.nr_loops / args.nr_entries;
178 
179 	if (args.key_size > 4) {
180 		for (i = 1; i < args.key_size/4; i++)
181 			ctx.skel->bss->key[i] = 2654435761 * i;
182 	}
183 
184 	ret = bpf_hashmap_lookup__load(ctx.skel);
185 	if (ret) {
186 		bpf_hashmap_lookup__destroy(ctx.skel);
187 		fprintf(stderr, "failed to load map: %s", strerror(-ret));
188 		exit(1);
189 	}
190 
191 	/* fill in the hash_map */
192 	map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench);
193 	for (u64 i = 0; i < args.nr_entries; i++) {
194 		patch_key(i, ctx.skel->bss->key);
195 		bpf_map_update_elem(map_fd, ctx.skel->bss->key, &i, BPF_ANY);
196 	}
197 
198 	link = bpf_program__attach(ctx.skel->progs.benchmark);
199 	if (!link) {
200 		fprintf(stderr, "failed to attach program!\n");
201 		exit(1);
202 	}
203 }
204 
205 static inline double events_from_time(u64 time)
206 {
207 	if (time)
208 		return args.nr_loops * 1000000000llu / time / 1000000.0L;
209 
210 	return 0;
211 }
212 
213 static int compute_events(u64 *times, double *events_mean, double *events_stddev, u64 *mean_time)
214 {
215 	int i, n = 0;
216 
217 	*events_mean = 0;
218 	*events_stddev = 0;
219 	*mean_time = 0;
220 
221 	for (i = 0; i < 32; i++) {
222 		if (!times[i])
223 			break;
224 		*mean_time += times[i];
225 		*events_mean += events_from_time(times[i]);
226 		n += 1;
227 	}
228 	if (!n)
229 		return 0;
230 
231 	*mean_time /= n;
232 	*events_mean /= n;
233 
234 	if (n > 1) {
235 		for (i = 0; i < n; i++) {
236 			double events_i = *events_mean - events_from_time(times[i]);
237 			*events_stddev += events_i * events_i / (n - 1);
238 		}
239 		*events_stddev = sqrt(*events_stddev);
240 	}
241 
242 	return n;
243 }
244 
/*
 * Final report: for each possible CPU, fold the per-cpu sample times
 * collected by the BPF program into a mean lookup rate (in millions of
 * events/sec) and print it.  CPUs with no samples are skipped.
 */
static void hashmap_report_final(struct bench_res res[], int res_cnt)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	double events_mean, events_stddev;
	u64 mean_time;
	int i, n;

	for (i = 0; i < nr_cpus; i++) {
		n = compute_events(ctx.skel->bss->percpu_times[i], &events_mean,
				   &events_stddev, &mean_time);
		if (n == 0)
			continue;

		if (env.quiet) {
			/* we expect only one cpu to be present */
			if (env.affinity)
				printf("%.3lf\n", events_mean);
			else
				printf("cpu%02d %.3lf\n", i, events_mean);
		} else {
			/* report ±2 stddev around the mean */
			printf("cpu%02d: lookup %.3lfM ± %.3lfM events/sec"
			       " (approximated from %d samples of ~%lums)\n",
			       i, events_mean, 2*events_stddev,
			       n, mean_time / 1000000);
		}
	}
}
272 
/* Benchmark registration consumed by the bench framework. */
const struct bench bench_bpf_hashmap_lookup = {
	.name = "bpf-hashmap-lookup",
	.argp = &bench_hashmap_lookup_argp,
	.validate = validate,
	.setup = setup,
	.producer_thread = producer,
	.consumer_thread = consumer,
	.measure = measure,          /* no-op: stats come from BPF state */
	.report_progress = NULL,     /* no periodic progress output */
	.report_final = hashmap_report_final,
};
284