// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2026 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <network_helpers.h>

#include "map_kptr_race.skel.h"

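/* Return the ID of the BPF map behind map_fd, or -1 on failure. */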
static int get_map_id(int map_fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);

	if (!ASSERT_OK(bpf_map_get_info_by_fd(map_fd, &info, &len), "get_map_info"))
		return -1;
	return info.id;
}

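/*
 * Run the count_ref program in @skel and return the reference count it
 * recorded in num_of_refs, or -1 if the run failed.
 */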
static int read_refs(struct map_kptr_race *skel)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts);
	int ret;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.count_ref), &opts);
	if (!ASSERT_OK(ret, "count_ref run"))
		return -1;
	if (!ASSERT_OK(opts.retval, "count_ref retval"))
		return -1;
	return skel->bss->num_of_refs;
}

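/*
 * Plant a map kptr via the test_htab_leak program, then tear the map
 * down while a second "watcher" skeleton traces the kernel's map-put
 * and map-free paths, and verify the map is freed with the expected
 * reference count.
 */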
static void test_htab_leak(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct map_kptr_race *skel, *watcher;
	int ret, map_id;

	skel = map_kptr_race__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_htab_leak), &opts);
	if (!ASSERT_OK(ret, "test_htab_leak run"))
		goto out_skel;
	if (!ASSERT_OK(opts.retval, "test_htab_leak retval"))
		goto out_skel;

	map_id = get_map_id(bpf_map__fd(skel->maps.race_hash_map));
	if (!ASSERT_GE(map_id, 0, "map_id"))
		goto out_skel;

	watcher = map_kptr_race__open_and_load();
	if (!ASSERT_OK_PTR(watcher, "watcher open_and_load"))
		goto out_skel;

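	/* Point the watcher at the victim map and attach its probes. */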
	watcher->bss->target_map_id = map_id;
	watcher->links.map_put = bpf_program__attach(watcher->progs.map_put);
	if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry"))
		goto out_watcher;
	watcher->links.htab_map_free = bpf_program__attach(watcher->progs.htab_map_free);
	if (!ASSERT_OK_PTR(watcher->links.htab_map_free, "attach fexit"))
		goto out_watcher;

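	/*
	 * Drop the last user reference to the map, wait out an RCU grace
	 * period, then spin until the watcher's fexit program reports
	 * that the map has been freed.
	 */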
	map_kptr_race__destroy(skel);
	skel = NULL;

	kern_sync_rcu();

	while (!READ_ONCE(watcher->bss->map_freed))
		sched_yield();

	ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed");
	ASSERT_EQ(read_refs(watcher), 2, "htab refcount");

out_watcher:
	map_kptr_race__destroy(watcher);
out_skel:
	map_kptr_race__destroy(skel);
}

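/*
 * Same scenario as test_htab_leak, but with the kptr stored in a
 * per-CPU hash map, exercising the per-CPU element teardown path.
 */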
static void test_percpu_htab_leak(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct map_kptr_race *skel, *watcher;
	int ret, map_id;

	skel = map_kptr_race__open();
	if (!ASSERT_OK_PTR(skel, "open"))
		return;

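	/*
	 * Tell the BPF side how many CPUs to walk; the cap of 16 is
	 * assumed to match the program's fixed iteration bound.
	 */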
	skel->rodata->nr_cpus = libbpf_num_possible_cpus();
	if (skel->rodata->nr_cpus > 16)
		skel->rodata->nr_cpus = 16;

	ret = map_kptr_race__load(skel);
	if (!ASSERT_OK(ret, "load"))
		goto out_skel;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_percpu_htab_leak), &opts);
	if (!ASSERT_OK(ret, "test_percpu_htab_leak run"))
		goto out_skel;
	if (!ASSERT_OK(opts.retval, "test_percpu_htab_leak retval"))
		goto out_skel;

	map_id = get_map_id(bpf_map__fd(skel->maps.race_percpu_hash_map));
	if (!ASSERT_GE(map_id, 0, "map_id"))
		goto out_skel;

	watcher = map_kptr_race__open_and_load();
	if (!ASSERT_OK_PTR(watcher, "watcher open_and_load"))
		goto out_skel;

	watcher->bss->target_map_id = map_id;
	watcher->links.map_put = bpf_program__attach(watcher->progs.map_put);
	if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry"))
		goto out_watcher;
	watcher->links.htab_map_free = bpf_program__attach(watcher->progs.htab_map_free);
	if (!ASSERT_OK_PTR(watcher->links.htab_map_free, "attach fexit"))
		goto out_watcher;

	map_kptr_race__destroy(skel);
	skel = NULL;

	kern_sync_rcu();

	while (!READ_ONCE(watcher->bss->map_freed))
		sched_yield();

	ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed");
	ASSERT_EQ(read_refs(watcher), 2, "percpu_htab refcount");

out_watcher:
	map_kptr_race__destroy(watcher);
out_skel:
	map_kptr_race__destroy(skel);
}

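/*
 * Same scenario again, with the kptr stored in socket local storage:
 * a loopback TCP connection triggers the attached program, which
 * plants the kptr before the map is torn down under the watcher.
 */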
static void test_sk_ls_leak(void)
{
	struct map_kptr_race *skel, *watcher;
	int listen_fd = -1, client_fd = -1, map_id;

	skel = map_kptr_race__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	if (!ASSERT_OK(map_kptr_race__attach(skel), "attach"))
		goto out_skel;

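	/*
	 * Establish a loopback TCP connection so the attached program
	 * runs and populates race_sk_ls_map.
	 */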
	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	if (!ASSERT_GE(listen_fd, 0, "start_server"))
		goto out_skel;

	client_fd = connect_to_fd(listen_fd, 0);
	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
		goto out_skel;

	if (!ASSERT_EQ(skel->bss->sk_ls_leak_done, 1, "sk_ls_leak_done"))
		goto out_skel;

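	/* Close both sockets before tearing the map down. */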
	close(client_fd);
	client_fd = -1;
	close(listen_fd);
	listen_fd = -1;

	map_id = get_map_id(bpf_map__fd(skel->maps.race_sk_ls_map));
	if (!ASSERT_GE(map_id, 0, "map_id"))
		goto out_skel;

	watcher = map_kptr_race__open_and_load();
	if (!ASSERT_OK_PTR(watcher, "watcher open_and_load"))
		goto out_skel;

	watcher->bss->target_map_id = map_id;
	watcher->links.map_put = bpf_program__attach(watcher->progs.map_put);
	if (!ASSERT_OK_PTR(watcher->links.map_put, "attach fentry"))
		goto out_watcher;
	watcher->links.sk_map_free = bpf_program__attach(watcher->progs.sk_map_free);
	if (!ASSERT_OK_PTR(watcher->links.sk_map_free, "attach fexit"))
		goto out_watcher;

	map_kptr_race__destroy(skel);
	skel = NULL;

	kern_sync_rcu();

	while (!READ_ONCE(watcher->bss->map_freed))
		sched_yield();

	ASSERT_EQ(watcher->bss->map_freed, 1, "map_freed");
	ASSERT_EQ(read_refs(watcher), 2, "sk_ls refcount");

out_watcher:
	map_kptr_race__destroy(watcher);
out_skel:
	if (client_fd >= 0)
		close(client_fd);
	if (listen_fd >= 0)
		close(listen_fd);
	map_kptr_race__destroy(skel);
}

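/*
 * The watcher's probes observe system-wide map activity, hence the
 * serial_test_ prefix to avoid interference from concurrent tests.
 */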
void serial_test_map_kptr_race(void)
{
	if (test__start_subtest("htab_leak"))
		test_htab_leak();
	if (test__start_subtest("percpu_htab_leak"))
		test_percpu_htab_leak();
	if (test__start_subtest("sk_ls_leak"))
		test_sk_ls_leak();
}