xref: /linux/tools/testing/selftests/bpf/progs/verifier_arena_large.c (revision 317a5df78f24bd77fb770a26eb85bf39620592e0)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
3 
4 #define BPF_NO_KFUNC_PROTOTYPES
5 #include <vmlinux.h>
6 #include <bpf/bpf_helpers.h>
7 #include <bpf/bpf_tracing.h>
8 #include "bpf_misc.h"
9 #include "bpf_experimental.h"
10 #include "bpf_arena_common.h"
11 
12 #define ARENA_SIZE (1ull << 32)
13 
/* Arena map shared by every test below: a 4GB mmapable BPF arena. */
struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, ARENA_SIZE / PAGE_SIZE); /* whole arena is page-backed */
} arena SEC(".maps");
19 
SEC("syscall")
__success __retval(0)
int big_alloc1(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile char __arena *page1, *page2, *no_page, *page3;
	u64 base;

	base = (u64)arena_base(&arena);

	/* First allocation with no address hint must land at the arena base. */
	page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page1)
		return 1;

	if ((u64)page1 != base)
		return 15;

	*page1 = 1;
	/* Allocate the last usable page (the final page is a guard, see below). */
	page2 = bpf_arena_alloc_pages(&arena, (void __arena *)(ARENA_SIZE - 2 * PAGE_SIZE),
				      1, NUMA_NO_NODE, 0);
	if (!page2)
		return 2;
	*page2 = 2;

	/* Test for the guard region at the end of the arena. */
	no_page = bpf_arena_alloc_pages(&arena, (void __arena *)ARENA_SIZE - PAGE_SIZE,
					1, NUMA_NO_NODE, 0);
	if (no_page)
		return 16;

	/* An allocation starting exactly at the arena end must fail as well. */
	no_page = bpf_arena_alloc_pages(&arena, (void __arena *)ARENA_SIZE,
					1, NUMA_NO_NODE, 0);
	if (no_page)
		return 3;
	/* Earlier writes must have survived the failed allocations. */
	if (*page1 != 1)
		return 4;
	if (*page2 != 2)
		return 5;
	bpf_arena_free_pages(&arena, (void __arena *)page1, 1);
	if (*page2 != 2)
		return 6;
	if (*page1 != 0) /* use-after-free should return 0 */
		return 7;
	/* The freed lowest page must be handed out again by the next alloc. */
	page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page3)
		return 8;
	*page3 = 3;
	if (page1 != page3)
		return 9;
	if (*page2 != 2)
		return 10;
	/* Reads of never-allocated neighbouring pages must yield zero. */
	if (*(page1 + PAGE_SIZE) != 0)
		return 11;
	if (*(page1 - PAGE_SIZE) != 0)
		return 12;
	if (*(page2 + PAGE_SIZE) != 0)
		return 13;
	if (*(page2 - PAGE_SIZE) != 0)
		return 14;
#endif
	return 0;
}
82 
83 /* Try to access a reserved page. Behavior should be identical with accessing unallocated pages. */
84 SEC("syscall")
85 __success __retval(0)
86 int access_reserved(void *ctx)
87 {
88 #if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
89 	volatile char __arena *page;
90 	char __arena *base;
91 	const size_t len = 4;
92 	int ret, i;
93 
94 	/* Get a separate region of the arena. */
95 	page = base = arena_base(&arena) + 16384 * PAGE_SIZE;
96 
97 	ret = bpf_arena_reserve_pages(&arena, base, len);
98 	if (ret)
99 		return 1;
100 
101 	/* Try to dirty reserved memory. */
102 	for (i = 0; i < len && can_loop; i++)
103 		*page = 0x5a;
104 
105 	for (i = 0; i < len && can_loop; i++) {
106 		page = (volatile char __arena *)(base + i * PAGE_SIZE);
107 
108 		/*
109 		 * Error out in case either the write went through,
110 		 * or the address has random garbage.
111 		 */
112 		if (*page == 0x5a)
113 			return 2 + 2 * i;
114 
115 		if (*page)
116 			return 2 + 2 * i + 1;
117 	}
118 #endif
119 	return 0;
120 }
121 
122 /* Try to allocate a region overlapping with a reservation. */
123 SEC("syscall")
124 __success __retval(0)
125 int request_partially_reserved(void *ctx)
126 {
127 #if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
128 	volatile char __arena *page;
129 	char __arena *base;
130 	int ret;
131 
132 	/* Add an arbitrary page offset. */
133 	page = base = arena_base(&arena) + 4096 * __PAGE_SIZE;
134 
135 	ret = bpf_arena_reserve_pages(&arena, base + 3 * __PAGE_SIZE, 4);
136 	if (ret)
137 		return 1;
138 
139 	page = bpf_arena_alloc_pages(&arena, base, 5, NUMA_NO_NODE, 0);
140 	if ((u64)page != 0ULL)
141 		return 2;
142 #endif
143 	return 0;
144 }
145 
146 SEC("syscall")
147 __success __retval(0)
148 int free_reserved(void *ctx)
149 {
150 #if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
151 	char __arena *addr;
152 	char __arena *page;
153 	int ret;
154 
155 	/* Add an arbitrary page offset. */
156 	addr = arena_base(&arena) + 32768 * __PAGE_SIZE;
157 
158 	page = bpf_arena_alloc_pages(&arena, addr, 2, NUMA_NO_NODE, 0);
159 	if (!page)
160 		return 1;
161 
162 	ret = bpf_arena_reserve_pages(&arena, addr + 2 * __PAGE_SIZE, 2);
163 	if (ret)
164 		return 2;
165 
166 	/*
167 	 * Reserved and allocated pages should be interchangeable for
168 	 * bpf_arena_free_pages(). Free a reserved and an allocated
169 	 * page with a single call.
170 	 */
171 	bpf_arena_free_pages(&arena, addr + __PAGE_SIZE , 2);
172 
173 	/* The free call above should have succeeded, so this allocation should too. */
174 	page = bpf_arena_alloc_pages(&arena, addr + __PAGE_SIZE, 2, NUMA_NO_NODE, 0);
175 	if (!page)
176 		return 3;
177 #endif
178 	return 0;
179 }
180 
181 #if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
#define PAGE_CNT 100
__u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */
__u8 __arena *base; /* lowest free arena address, set by big_alloc2() */
185 
186 /*
187  * Check that arena's range_tree algorithm allocates pages sequentially
188  * on the first pass and then fills in all gaps on the second pass.
189  */
/*
 * Helper for big_alloc2(): @page_cnt allocations of @pages_atonce pages
 * each; @step seeds the error code so each call site reports distinct
 * failures.  Returns 0 on success.
 */
__noinline int alloc_pages(int page_cnt, int pages_atonce, bool first_pass,
		int max_idx, int step)
{
	__u8 __arena *pg;
	int i, pg_idx;

	for (i = 0; i < page_cnt; i++) {
		pg = bpf_arena_alloc_pages(&arena, NULL, pages_atonce,
					   NUMA_NO_NODE, 0);
		if (!pg)
			return step;
		/* Index of the returned page relative to the recorded base. */
		pg_idx = (unsigned long) (pg - base) / PAGE_SIZE;
		if (first_pass) {
			/* Pages must be allocated sequentially */
			if (pg_idx != i)
				return step + 100;
		} else {
			/*
			 * Allocator must fill into gaps: the caller freed
			 * only even-indexed pages below max_idx, so every
			 * refill must land on an even index in that range.
			 */
			if (pg_idx >= max_idx || (pg_idx & 1))
				return step + 200;
		}
		*pg = pg_idx;
		page[pg_idx] = pg;
		cond_break;
	}
	return 0;
}
217 
SEC("syscall")
__success __retval(0)
int big_alloc2(void *ctx)
{
	__u8 __arena *pg;
	int i, err;

	/*
	 * Probe for the lowest free arena address (the page[] global
	 * occupies the first page), record it as base, then free the
	 * probe page so the allocator starts from a clean state.
	 */
	base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!base)
		return 1;
	bpf_arena_free_pages(&arena, (void __arena *)base, 1);

	/* Pass 1: PAGE_CNT single pages, must come out sequentially. */
	err = alloc_pages(PAGE_CNT, 1, true, PAGE_CNT, 2);
	if (err)
		return err;

	/* Clear all even pages */
	for (i = 0; i < PAGE_CNT; i += 2) {
		pg = page[i];
		if (*pg != i)
			return 3;
		bpf_arena_free_pages(&arena, (void __arena *)pg, 1);
		page[i] = NULL;
		cond_break;
	}

	/* Allocate into freed gaps */
	err = alloc_pages(PAGE_CNT / 2, 1, false, PAGE_CNT, 4);
	if (err)
		return err;

	/* Free pairs of pages */
	for (i = 0; i < PAGE_CNT; i += 4) {
		pg = page[i];
		if (*pg != i)
			return 5;
		bpf_arena_free_pages(&arena, (void __arena *)pg, 2);
		page[i] = NULL;
		/* NOTE(review): presumably keeps the two NULL stores from being merged — confirm */
		barrier();
		page[i + 1] = NULL;
		cond_break;
	}

	/* Allocate 2 pages at a time into freed gaps */
	err = alloc_pages(PAGE_CNT / 4, 2, false, PAGE_CNT, 6);
	if (err)
		return err;

	/* Check pages without freeing */
	for (i = 0; i < PAGE_CNT; i += 2) {
		pg = page[i];
		if (*pg != i)
			return 7;
		cond_break;
	}

	pg = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);

	if (!pg)
		return 8;
	/*
	 * The first PAGE_CNT pages are occupied. The new page
	 * must be above.
	 */
	if ((pg - base) / PAGE_SIZE < PAGE_CNT)
		return 9;
	return 0;
}
286 
287 SEC("socket")
288 __success __retval(0)
289 int big_alloc3(void *ctx)
290 {
291 #if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
292 	char __arena *pages;
293 	u64 i;
294 
295 	/*
296 	 * Allocate 2051 pages in one go to check how kmalloc_nolock() handles large requests.
297 	 * Since kmalloc_nolock() can allocate up to 1024 struct page * at a time, this call should
298 	 * result in three batches: two batches of 1024 pages each, followed by a final batch of 3
299 	 * pages.
300 	 */
301 	pages = bpf_arena_alloc_pages(&arena, NULL, 2051, NUMA_NO_NODE, 0);
302 	if (!pages)
303 		return 0;
304 
305 	bpf_for(i, 0, 2051)
306 			pages[i * PAGE_SIZE] = 123;
307 	bpf_for(i, 0, 2051)
308 			if (pages[i * PAGE_SIZE] != 123)
309 				return i;
310 
311 	bpf_arena_free_pages(&arena, pages, 2051);
312 #endif
313 	return 0;
314 }
315 #endif
316 char _license[] SEC("license") = "GPL";
317