// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <argp.h>
#include <stdbool.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <fcntl.h>

#include "bench.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "htab_mem_bench.skel.h"

struct htab_mem_use_case {
	const char *name;
	const char **progs;
	/* Whether to synchronize the addition thread and the deletion thread */
	bool need_sync;
};

static struct htab_mem_ctx {
	const struct htab_mem_use_case *uc;
	struct htab_mem_bench *skel;
	pthread_barrier_t *notify;
	int fd;
} ctx;

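/* NULL-terminated lists of BPF program names which will be auto-loaded for
 * each use case.
 */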
const char *ow_progs[] = {"overwrite", NULL};
const char *batch_progs[] = {"batch_add_batch_del", NULL};
const char *add_del_progs[] = {"add_only", "del_only", NULL};
static const struct htab_mem_use_case use_cases[] = {
	{ .name = "overwrite", .progs = ow_progs },
	{ .name = "batch_add_batch_del", .progs = batch_progs },
	{ .name = "add_del_on_diff_cpu", .progs = add_del_progs, .need_sync = true },
};

static struct htab_mem_args {
	u32 value_size;
	const char *use_case;
	bool preallocated;
} args = {
	.value_size = 8,
	.use_case = "overwrite",
	.preallocated = false,
};

enum {
	ARG_VALUE_SIZE = 10000,
	ARG_USE_CASE = 10001,
	ARG_PREALLOCATED = 10002,
};

static const struct argp_option opts[] = {
	{ "value-size", ARG_VALUE_SIZE, "VALUE_SIZE", 0,
	  "Set the value size of hash map (default 8)" },
	{ "use-case", ARG_USE_CASE, "USE_CASE", 0,
	  "Set the use case of hash map: overwrite|batch_add_batch_del|add_del_on_diff_cpu" },
	{ "preallocated", ARG_PREALLOCATED, NULL, 0, "use preallocated hash map" },
	{},
};

static error_t htab_mem_parse_arg(int key, char *arg, struct argp_state *state)
{
	switch (key) {
	case ARG_VALUE_SIZE:
		args.value_size = strtoul(arg, NULL, 10);
		if (args.value_size > 4096) {
			fprintf(stderr, "too big value size %u\n", args.value_size);
			argp_usage(state);
		}
		break;
	case ARG_USE_CASE:
		args.use_case = strdup(arg);
		if (!args.use_case) {
			fprintf(stderr, "no mem for use-case\n");
			argp_usage(state);
		}
		break;
	case ARG_PREALLOCATED:
		args.preallocated = true;
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}

	return 0;
}

const struct argp bench_htab_mem_argp = {
	.options = opts,
	.parser = htab_mem_parse_arg,
};

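/* add_del_on_diff_cpu pairs each addition thread with a deletion thread,
 * so it requires an even number of producer threads.
 */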
static void htab_mem_validate(void)
{
	if (!strcmp(use_cases[2].name, args.use_case) && env.producer_cnt % 2) {
		fprintf(stderr, "%s needs an even number of producers\n", args.use_case);
		exit(1);
	}
}

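/* Allocate one barrier per addition/deletion thread pair. Barriers are only
 * needed when the selected use case synchronizes the two threads.
 */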
static int htab_mem_bench_init_barriers(void)
{
	pthread_barrier_t *barriers;
	unsigned int i, nr;

	if (!ctx.uc->need_sync)
		return 0;

	nr = (env.producer_cnt + 1) / 2;
	barriers = calloc(nr, sizeof(*barriers));
	if (!barriers)
		return -1;

	/* Used for synchronization between two threads */
	for (i = 0; i < nr; i++)
		pthread_barrier_init(&barriers[i], NULL, 2);

	ctx.notify = barriers;
	return 0;
}

static void htab_mem_bench_exit_barriers(void)
{
	unsigned int i, nr;

	if (!ctx.notify)
		return;

	nr = (env.producer_cnt + 1) / 2;
	for (i = 0; i < nr; i++)
		pthread_barrier_destroy(&ctx.notify[i]);
	free(ctx.notify);
}

static const struct htab_mem_use_case *htab_mem_find_use_case_or_exit(const char *name)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(use_cases); i++) {
		if (!strcmp(name, use_cases[i].name))
			return &use_cases[i];
	}

	fprintf(stderr, "no such use-case: %s\n", name);
	fprintf(stderr, "available use cases:");
	for (i = 0; i < ARRAY_SIZE(use_cases); i++)
		fprintf(stderr, " %s", use_cases[i].name);
	fprintf(stderr, "\n");
	exit(1);
}

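/* Join a dedicated cgroup so that memory.current and memory.peak only account
 * for this benchmark, size the htab so different CPUs operate on different
 * subsets of keys, and auto-load just the BPF programs of the selected use
 * case before loading and attaching the skeleton.
 */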
static void htab_mem_setup(void)
{
	struct bpf_map *map;
	const char **names;
	int err;

	setup_libbpf();

	ctx.uc = htab_mem_find_use_case_or_exit(args.use_case);
	err = htab_mem_bench_init_barriers();
	if (err) {
		fprintf(stderr, "failed to init barrier\n");
		exit(1);
	}

	ctx.fd = cgroup_setup_and_join("/htab_mem");
	if (ctx.fd < 0)
		goto cleanup;

	ctx.skel = htab_mem_bench__open();
	if (!ctx.skel) {
		fprintf(stderr, "failed to open skeleton\n");
		goto cleanup;
	}

	map = ctx.skel->maps.htab;
	bpf_map__set_value_size(map, args.value_size);
	/* Ensure that different CPUs can operate on different subsets */
	bpf_map__set_max_entries(map, MAX(8192, 64 * env.nr_cpus));
	if (args.preallocated)
		bpf_map__set_map_flags(map, bpf_map__map_flags(map) & ~BPF_F_NO_PREALLOC);

	names = ctx.uc->progs;
	while (*names) {
		struct bpf_program *prog;

		prog = bpf_object__find_program_by_name(ctx.skel->obj, *names);
		if (!prog) {
			fprintf(stderr, "no such program %s\n", *names);
			goto cleanup;
		}
		bpf_program__set_autoload(prog, true);
		names++;
	}
	ctx.skel->bss->nr_thread = env.producer_cnt;

	err = htab_mem_bench__load(ctx.skel);
	if (err) {
		fprintf(stderr, "failed to load skeleton\n");
		goto cleanup;
	}
	err = htab_mem_bench__attach(ctx.skel);
	if (err) {
		fprintf(stderr, "failed to attach skeleton\n");
		goto cleanup;
	}
	return;

cleanup:
	htab_mem_bench__destroy(ctx.skel);
	htab_mem_bench_exit_barriers();
	if (ctx.fd >= 0) {
		close(ctx.fd);
		cleanup_cgroup_environment();
	}
	exit(1);
}

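/* The addition thread and the deletion thread alternate through a shared
 * barrier: each getpgid() call drives the addition BPF program and each
 * getppid() call drives the deletion BPF program attached by the skeleton.
 */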
static void htab_mem_add_fn(pthread_barrier_t *notify)
{
	while (true) {
		/* Do addition */
		(void)syscall(__NR_getpgid, 0);
		/* Notify deletion thread to do deletion */
		pthread_barrier_wait(notify);
		/* Wait for deletion to complete */
		pthread_barrier_wait(notify);
	}
}

static void htab_mem_delete_fn(pthread_barrier_t *notify)
{
	while (true) {
		/* Wait for addition to complete */
		pthread_barrier_wait(notify);
		/* Do deletion */
		(void)syscall(__NR_getppid);
		/* Notify addition thread to do addition */
		pthread_barrier_wait(notify);
	}
}

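/* For the synchronized use case, producers are paired: even-numbered threads
 * add entries, odd-numbered threads delete them, and each pair shares the
 * barrier at index seq / 2. For the other use cases every producer simply
 * keeps calling getpgid() to drive the selected BPF program.
 */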
static void *htab_mem_producer(void *arg)
{
	pthread_barrier_t *notify;
	int seq;

	if (!ctx.uc->need_sync) {
		while (true)
			(void)syscall(__NR_getpgid, 0);
		return NULL;
	}

	seq = (long)arg;
	notify = &ctx.notify[seq / 2];
	if (seq & 1)
		htab_mem_delete_fn(notify);
	else
		htab_mem_add_fn(notify);
	return NULL;
}

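/* Read a memory.* statistic from the benchmark cgroup. The value is reported
 * as 0 when the file is missing (e.g. on cgroup v1) or cannot be read.
 */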
static void htab_mem_read_mem_cgrp_file(const char *name, unsigned long *value)
{
	char buf[32];
	ssize_t got;
	int fd;

	fd = openat(ctx.fd, name, O_RDONLY);
	if (fd < 0) {
		/* cgroup v1 ? */
		fprintf(stderr, "no %s\n", name);
		*value = 0;
		return;
	}

	got = read(fd, buf, sizeof(buf) - 1);
	if (got <= 0) {
		*value = 0;
		close(fd);
		return;
	}
	buf[got] = 0;

	*value = strtoull(buf, NULL, 0);

	close(fd);
}

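/* op_cnt accumulates the map operations counted by the BPF programs; swap it
 * to zero on each sample and normalize by the number of producers. Memory
 * usage is sampled from the cgroup's memory.current file.
 */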
static void htab_mem_measure(struct bench_res *res)
{
	res->hits = atomic_swap(&ctx.skel->bss->op_cnt, 0) / env.producer_cnt;
	htab_mem_read_mem_cgrp_file("memory.current", &res->gp_ct);
}

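/* res->hits holds per-producer operations and res->gp_ct holds bytes, so
 * print operations in k/s and memory usage in MiB.
 */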
static void htab_mem_report_progress(int iter, struct bench_res *res, long delta_ns)
{
	double loop, mem;

	loop = res->hits / 1000.0 / (delta_ns / 1000000000.0);
	mem = res->gp_ct / 1048576.0;
	printf("Iter %3d (%7.3lfus): ", iter, (delta_ns - 1000000000) / 1000.0);
	printf("per-prod-op %7.2lfk/s, memory usage %7.2lfMiB\n", loop, mem);
}

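/* Report the mean and the sample standard deviation of the per-iteration
 * results, plus the peak memory usage of the benchmark cgroup (memory.peak).
 */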
static void htab_mem_report_final(struct bench_res res[], int res_cnt)
{
	double mem_mean = 0.0, mem_stddev = 0.0;
	double loop_mean = 0.0, loop_stddev = 0.0;
	unsigned long peak_mem;
	int i;

	for (i = 0; i < res_cnt; i++) {
		loop_mean += res[i].hits / 1000.0 / (0.0 + res_cnt);
		mem_mean += res[i].gp_ct / 1048576.0 / (0.0 + res_cnt);
	}
	if (res_cnt > 1) {
		for (i = 0; i < res_cnt; i++) {
			loop_stddev += (loop_mean - res[i].hits / 1000.0) *
				       (loop_mean - res[i].hits / 1000.0) /
				       (res_cnt - 1.0);
			mem_stddev += (mem_mean - res[i].gp_ct / 1048576.0) *
				      (mem_mean - res[i].gp_ct / 1048576.0) /
				      (res_cnt - 1.0);
		}
		loop_stddev = sqrt(loop_stddev);
		mem_stddev = sqrt(mem_stddev);
	}

	htab_mem_read_mem_cgrp_file("memory.peak", &peak_mem);
	printf("Summary: per-prod-op %7.2lf \u00B1 %7.2lfk/s, memory usage %7.2lf \u00B1 %7.2lfMiB,"
	       " peak memory usage %7.2lfMiB\n",
	       loop_mean, loop_stddev, mem_mean, mem_stddev, peak_mem / 1048576.0);

	close(ctx.fd);
	cleanup_cgroup_environment();
}

const struct bench bench_htab_mem = {
	.name = "htab-mem",
	.argp = &bench_htab_mem_argp,
	.validate = htab_mem_validate,
	.setup = htab_mem_setup,
	.producer_thread = htab_mem_producer,
	.measure = htab_mem_measure,
	.report_progress = htab_mem_report_progress,
	.report_final = htab_mem_report_final,
};