xref: /linux/tools/testing/selftests/bpf/map_tests/task_storage_map.c (revision 7b26bc6582b13a52a42a4a9765e8f30d58a81198)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2022. Huawei Technologies Co., Ltd */
3 #define _GNU_SOURCE
4 #include <sched.h>
5 #include <unistd.h>
6 #include <stdlib.h>
7 #include <stdbool.h>
8 #include <errno.h>
9 #include <string.h>
10 #include <pthread.h>
11 
12 #include <bpf/bpf.h>
13 #include <bpf/libbpf.h>
14 
15 #include "bpf_util.h"
16 #include "test_maps.h"
17 #include "task_local_storage_helpers.h"
18 #include "read_bpf_task_storage_busy.skel.h"
19 
/* Shared state between the main test thread and all lookup worker threads.
 * NOTE(review): start/stop are plain bools written by the main thread and
 * polled by workers without atomics or locks; this relies on eventual
 * visibility of the stores — confirm this is intentional for the test.
 */
struct lookup_ctx {
	bool start;	/* workers spin until the main thread sets this */
	bool stop;	/* set to make workers bail out of the lookup loop early */
	int pid_fd;	/* pidfd of the test process, used as the map lookup key */
	int map_fd;	/* fd of the task local storage map under test */
	int loop;	/* number of lookups each worker performs */
};
27 
28 static void *lookup_fn(void *arg)
29 {
30 	struct lookup_ctx *ctx = arg;
31 	long value;
32 	int i = 0;
33 
34 	while (!ctx->start)
35 		usleep(1);
36 
37 	while (!ctx->stop && i++ < ctx->loop)
38 		bpf_map_lookup_elem(ctx->map_fd, &ctx->pid_fd, &value);
39 	return NULL;
40 }
41 
42 static void abort_lookup(struct lookup_ctx *ctx, pthread_t *tids, unsigned int nr)
43 {
44 	unsigned int i;
45 
46 	ctx->stop = true;
47 	ctx->start = true;
48 	for (i = 0; i < nr; i++)
49 		pthread_join(tids[i], NULL);
50 }
51 
52 void test_task_storage_map_stress_lookup(void)
53 {
54 #define MAX_NR_THREAD 4096
55 	unsigned int i, nr = 256, loop = 8192, cpu = 0;
56 	struct read_bpf_task_storage_busy *skel;
57 	pthread_t tids[MAX_NR_THREAD];
58 	struct lookup_ctx ctx;
59 	cpu_set_t old, new;
60 	const char *cfg;
61 	int err;
62 
63 	cfg = getenv("TASK_STORAGE_MAP_NR_THREAD");
64 	if (cfg) {
65 		nr = atoi(cfg);
66 		if (nr > MAX_NR_THREAD)
67 			nr = MAX_NR_THREAD;
68 	}
69 	cfg = getenv("TASK_STORAGE_MAP_NR_LOOP");
70 	if (cfg)
71 		loop = atoi(cfg);
72 	cfg = getenv("TASK_STORAGE_MAP_PIN_CPU");
73 	if (cfg)
74 		cpu = atoi(cfg);
75 
76 	skel = read_bpf_task_storage_busy__open_and_load();
77 	err = libbpf_get_error(skel);
78 	CHECK(err, "open_and_load", "error %d\n", err);
79 
80 	/* Only for a fully preemptible kernel */
81 	if (!skel->kconfig->CONFIG_PREEMPTION) {
82 		printf("%s SKIP (no CONFIG_PREEMPTION)\n", __func__);
83 		read_bpf_task_storage_busy__destroy(skel);
84 		skips++;
85 		return;
86 	}
87 
88 	/* Save the old affinity setting */
89 	sched_getaffinity(getpid(), sizeof(old), &old);
90 
91 	/* Pinned on a specific CPU */
92 	CPU_ZERO(&new);
93 	CPU_SET(cpu, &new);
94 	sched_setaffinity(getpid(), sizeof(new), &new);
95 
96 	ctx.start = false;
97 	ctx.stop = false;
98 	ctx.pid_fd = sys_pidfd_open(getpid(), 0);
99 	ctx.map_fd = bpf_map__fd(skel->maps.task);
100 	ctx.loop = loop;
101 	for (i = 0; i < nr; i++) {
102 		err = pthread_create(&tids[i], NULL, lookup_fn, &ctx);
103 		if (err) {
104 			abort_lookup(&ctx, tids, i);
105 			CHECK(err, "pthread_create", "error %d\n", err);
106 			goto out;
107 		}
108 	}
109 
110 	ctx.start = true;
111 	for (i = 0; i < nr; i++)
112 		pthread_join(tids[i], NULL);
113 
114 	skel->bss->pid = getpid();
115 	err = read_bpf_task_storage_busy__attach(skel);
116 	CHECK(err, "attach", "error %d\n", err);
117 
118 	/* Trigger program */
119 	sys_gettid();
120 	skel->bss->pid = 0;
121 
122 	CHECK(skel->bss->busy != 0, "bad bpf_task_storage_busy", "got %d\n", skel->bss->busy);
123 out:
124 	read_bpf_task_storage_busy__destroy(skel);
125 	/* Restore affinity setting */
126 	sched_setaffinity(getpid(), sizeof(old), &old);
127 	printf("%s:PASS\n", __func__);
128 }
129