// SPDX-License-Identifier: GPL-2.0
#include <unistd.h>
#include <pthread.h>
#include <sys/mman.h>
#include <stdatomic.h>
#include <test_progs.h>
#include <sys/syscall.h>
#include <linux/module.h>
#include <linux/userfaultfd.h>

#include "ksym_race.skel.h"
#include "bpf_mod_race.skel.h"
#include "kfunc_call_race.skel.h"
#include "testing_helpers.h"

/* This test crafts a race between btf_try_get_module and do_init_module, and
 * checks whether btf_try_get_module correctly handles an invocation for a
 * well-formed but uninitialized module. Unless the module has completed its
 * initcalls, the verifier should fail the program load and return ENXIO.
 *
 * userfaultfd is used to trigger a fault in an fmod_ret program and put it to
 * sleep; the BPF program under test is then loaded and the return value from
 * the verifier is inspected. After this, the userfaultfd is closed so that
 * the module loading thread makes forward progress, and fmod_ret injects an
 * error so that the module load fails and the module is freed.
 *
 * If the verifier succeeded in loading the supplied program, it would end up
 * taking a reference to the freed module and trigger a crash when the
 * program fd is closed later. This is true for both kfuncs and ksyms. In
 * both cases, the crash is triggered inside bpf_prog_free_deferred, when the
 * module reference is finally released.
 */
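
/* The interleaving this test aims for, roughly (a sketch; the exact ordering
 * is enforced by the spin loop and the userfaultfd read below, not assumed):
 *
 *   load_module_thread()                  test_bpf_mod_race_config()
 *   --------------------                  --------------------------
 *   load_bpf_testmod()
 *     do_init_module()
 *       fmod_ret prog (attached earlier)
 *       reads fault_addr, blocks on the
 *       uffd-registered page
 *       (bpf_blocking = 1)
 *                                         config->bpf_open_and_load()
 *                                           btf_try_get_module() must see
 *                                           MODULE_STATE_COMING and fail,
 *                                           so the verifier returns ENXIO
 *                                         close(uffd) resolves the fault
 *       fmod_ret injects -4242
 *     module init fails, module freed
 */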

struct test_config {
	const char *str_open;
	void *(*bpf_open_and_load)();
	void (*bpf_destroy)(void *);
};

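/* State shared between the module-loading thread and the main test thread:
 * load_module_thread() publishes how the module load went, and the main
 * thread spins on it (together with bpf_blocking) below.
 */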
enum bpf_test_state {
	_TS_INVALID,
	TS_MODULE_LOAD,
	TS_MODULE_LOAD_FAIL,
};

static _Atomic enum bpf_test_state state = _TS_INVALID;

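/* Runs in a separate thread: attempt to load bpf_testmod, which is expected
 * to fail because the fmod_ret program injects an error into its init path.
 * TS_MODULE_LOAD reports an (unexpected) successful load, TS_MODULE_LOAD_FAIL
 * the expected failure.
 */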
static void *load_module_thread(void *p)
{
	if (!ASSERT_NEQ(load_bpf_testmod(false), 0, "load_module_thread must fail"))
		atomic_store(&state, TS_MODULE_LOAD);
	else
		atomic_store(&state, TS_MODULE_LOAD_FAIL);
	return p;
}

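/* There is no dedicated libc wrapper for userfaultfd(2), so invoke it
 * directly via syscall(2).
 */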
static int sys_userfaultfd(int flags)
{
	return syscall(__NR_userfaultfd, flags);
}

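/* Register fault_addr's page with a new userfaultfd in MISSING mode: the
 * first access to the never-touched page generates a page fault event and
 * blocks the faulting task until the fault is resolved. Returns the uffd on
 * success, a negative value on error.
 */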
static int test_setup_uffd(void *fault_addr)
{
	struct uffdio_register uffd_register = {};
	struct uffdio_api uffd_api = {};
	int uffd;

	uffd = sys_userfaultfd(O_CLOEXEC);
	if (uffd < 0)
		return -errno;

	uffd_api.api = UFFD_API;
	uffd_api.features = 0;
	if (ioctl(uffd, UFFDIO_API, &uffd_api)) {
		close(uffd);
		return -1;
	}

	uffd_register.range.start = (unsigned long)fault_addr;
	uffd_register.range.len = 4096;
	uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) {
		close(uffd);
		return -1;
	}
	return uffd;
}

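/* Drive one full race iteration for the given config: set up the fault page
 * and the fmod_ret skeleton, kick off the module load in a separate thread,
 * wait for it to block on the fault, then verify that loading the config's
 * BPF program fails with ENXIO while the module is still initializing.
 */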
static void test_bpf_mod_race_config(const struct test_config *config)
{
	void *fault_addr, *skel_fail;
	struct bpf_mod_race *skel;
	struct uffd_msg uffd_msg;
	pthread_t load_mod_thrd;
	_Atomic int *blockingp;
	int uffd, ret;

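	/* Anonymous mapping that is never pre-faulted: the fmod_ret program's
	 * bpf_copy_from_user() on this address is what blocks on the uffd.
	 */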
	fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration"))
		return;

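	/* bpf_testmod must not be resident yet: load_module_thread() below is
	 * the one that races its insertion against the BPF program load.
	 */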
	if (!ASSERT_OK(unload_bpf_testmod(false), "unload bpf_testmod"))
		goto end_mmap;

	skel = bpf_mod_race__open();
	if (!ASSERT_OK_PTR(skel, "bpf_mod_race__open"))
		goto end_module;

	skel->rodata->bpf_mod_race_config.tgid = getpid();
	skel->rodata->bpf_mod_race_config.inject_error = -4242;
	skel->rodata->bpf_mod_race_config.fault_addr = fault_addr;
	if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod_race__load"))
		goto end_destroy;
	blockingp = (_Atomic int *)&skel->bss->bpf_blocking;

	if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_race__attach"))
		goto end_destroy;

	uffd = test_setup_uffd(fault_addr);
	if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address"))
		goto end_destroy;

	if (!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL),
		       "load module thread"))
		goto end_uffd;

	/* Now, we either fail loading the module, or block in the bpf prog;
	 * spin to find out which.
	 */
	while (!atomic_load(&state) && !atomic_load(blockingp))
		;
	if (!ASSERT_EQ(state, _TS_INVALID, "module load should block"))
		goto end_join;
	if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) {
		pthread_kill(load_mod_thrd, SIGKILL);
		goto end_uffd;
	}

	/* We might have set bpf_blocking to 1, but may not have blocked in
	 * bpf_copy_from_user yet. Read from the userfaultfd descriptor to
	 * verify that.
	 */
	if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg),
		       "read uffd block event"))
		goto end_join;
	if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault"))
		goto end_join;

	/* We know that load_mod_thrd is blocked in the fmod_ret program, and
	 * the module state is still MODULE_STATE_COMING because mod->init
	 * hasn't returned. This is the time we try to load a program calling
	 * the kfunc and check whether we get ENXIO from the verifier.
	 */
	skel_fail = config->bpf_open_and_load();
	ret = errno;
	if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) {
		/* Close the uffd to unblock load_mod_thrd */
		close(uffd);
		uffd = -1;
		while (atomic_load(blockingp) != 2)
			;
		ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
		config->bpf_destroy(skel_fail);
		goto end_join;
	}
	ASSERT_EQ(ret, ENXIO, "verifier returns ENXIO");
	ASSERT_EQ(skel->data->res_try_get_module, false, "btf_try_get_module == false");

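	/* Closing the uffd unregisters the page and lets the stalled fault be
	 * serviced normally, so load_mod_thrd resumes and the injected error
	 * fails the module load.
	 */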
	close(uffd);
	uffd = -1;
end_join:
	pthread_join(load_mod_thrd, NULL);
	if (uffd < 0)
		ASSERT_EQ(atomic_load(&state), TS_MODULE_LOAD_FAIL, "load_mod_thrd success");
end_uffd:
	if (uffd >= 0)
		close(uffd);
end_destroy:
	bpf_mod_race__destroy(skel);
	ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
end_module:
	unload_bpf_testmod(false);
	ASSERT_OK(load_bpf_testmod(false), "restore bpf_testmod");
end_mmap:
	munmap(fault_addr, 4096);
	atomic_store(&state, _TS_INVALID);
}

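/* Two flavors of the same race: the ksym variant exercises the module BTF
 * reference held through used_btfs, the kfunc variant the one held through
 * kfunc_btf_tab; either would be a use-after-free if the verifier got the
 * module state check wrong.
 */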
static const struct test_config ksym_config = {
	.str_open = "ksym_race__open_and_load",
	.bpf_open_and_load = (void *)ksym_race__open_and_load,
	.bpf_destroy = (void *)ksym_race__destroy,
};

static const struct test_config kfunc_config = {
	.str_open = "kfunc_call_race__open_and_load",
	.bpf_open_and_load = (void *)kfunc_call_race__open_and_load,
	.bpf_destroy = (void *)kfunc_call_race__destroy,
};

void serial_test_bpf_mod_race(void)
{
	if (test__start_subtest("ksym (used_btfs UAF)"))
		test_bpf_mod_race_config(&ksym_config);
	if (test__start_subtest("kfunc (kfunc_btf_tab UAF)"))
		test_bpf_mod_race_config(&kfunc_config);
}