// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */

#define BPF_NO_KFUNC_PROTOTYPES
#include <vmlinux.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "bpf_experimental.h"
#include "bpf_arena_common.h"

#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))

/*
 * Two-page arena placed so the mmap() region ends right at a 32-bit
 * boundary, to exercise pointer arithmetic near that edge.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 2); /* arena of two pages close to 32-bit boundary */
#ifdef __TARGET_ARCH_arm64
	__ulong(map_extra, (1ull << 32) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
#else
	__ulong(map_extra, (1ull << 44) | (~0u - __PAGE_SIZE * 2 + 1)); /* start of mmap() region */
#endif
} arena SEC(".maps");

/*
 * Non-sleepable variant of basic_alloc1: allocate both arena pages, verify
 * the third allocation fails, free one page and re-check contents.  A
 * nonzero return identifies the failed check.  Unlike the sleepable
 * variant below, the check after free accepts either 0 or the old value
 * (the freed page may not have been reclaimed yet in this context).
 */
SEC("socket")
__success __retval(0)
int basic_alloc1_nosleep(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile int __arena *page1, *page2, *no_page;

	page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page1)
		return 1;
	*page1 = 1;
	page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page2)
		return 2;
	*page2 = 2;
	/* Arena has only two pages, so a third allocation must fail. */
	no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (no_page)
		return 3;
	if (*page1 != 1)
		return 4;
	if (*page2 != 2)
		return 5;
	bpf_arena_free_pages(&arena, (void __arena *)page2, 1);
	if (*page1 != 1)
		return 6;
	if (*page2 != 0 && *page2 != 2) /* use-after-free should return 0 or the stored value */
		return 7;
#endif
	return 0;
}

/*
 * Sleepable variant: same alloc/free sequence, but here a read of the
 * freed page must observe 0, and re-allocating is expected to hand back
 * the same (just freed) page.
 */
SEC("syscall")
__success __retval(0)
int basic_alloc1(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile int __arena *page1, *page2, *no_page, *page3;

	page1 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page1)
		return 1;
	*page1 = 1;
	page2 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page2)
		return 2;
	*page2 = 2;
	/* Arena has only two pages, so a third allocation must fail. */
	no_page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (no_page)
		return 3;
	if (*page1 != 1)
		return 4;
	if (*page2 != 2)
		return 5;
	bpf_arena_free_pages(&arena, (void __arena *)page2, 1);
	if (*page1 != 1)
		return 6;
	if (*page2 != 0) /* use-after-free should return 0 */
		return 7;
	/* Re-allocation should reuse the page that was just freed. */
	page3 = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page3)
		return 8;
	*page3 = 3;
	if (page2 != page3)
		return 9;
	if (*page1 != 1)
		return 10;
#endif
	return 0;
}

/*
 * Non-sleepable variant of basic_alloc2: allocate both pages in one call,
 * then access one page past the end (page3) and one page before the start
 * (page4).  Writes outside the allocated range are expected to be dropped
 * and reads to return 0.  After freeing, reads of the freed pages accept
 * either 0 or the previously stored value in this context.
 */
SEC("socket")
__success __retval(0)
int basic_alloc2_nosleep(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile char __arena *page1, *page2, *page3, *page4;

	page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
	if (!page1)
		return 1;
	page2 = page1 + __PAGE_SIZE;
	page3 = page1 + __PAGE_SIZE * 2; /* one page past the allocation */
	page4 = page1 - __PAGE_SIZE;     /* one page before the allocation */
	*page1 = 1;
	*page2 = 2;
	*page3 = 3;
	*page4 = 4;
	if (*page1 != 1)
		return 1;
	if (*page2 != 2)
		return 2;
	if (*page3 != 0)
		return 3;
	if (*page4 != 0)
		return 4;
	bpf_arena_free_pages(&arena, (void __arena *)page1, 2);
	if (*page1 != 0 && *page1 != 1)
		return 5;
	if (*page2 != 0 && *page2 != 2)
		return 6;
	if (*page3 != 0)
		return 7;
	if (*page4 != 0)
		return 8;
#endif
	return 0;
}

/*
 * Sleepable variant: same out-of-range access checks, but after the free
 * both pages must read back strictly 0.
 */
SEC("syscall")
__success __retval(0)
int basic_alloc2(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	volatile char __arena *page1, *page2, *page3, *page4;

	page1 = bpf_arena_alloc_pages(&arena, NULL, 2, NUMA_NO_NODE, 0);
	if (!page1)
		return 1;
	page2 = page1 + __PAGE_SIZE;
	page3 = page1 + __PAGE_SIZE * 2; /* one page past the allocation */
	page4 = page1 - __PAGE_SIZE;     /* one page before the allocation */
	*page1 = 1;
	*page2 = 2;
	*page3 = 3;
	*page4 = 4;
	if (*page1 != 1)
		return 1;
	if (*page2 != 2)
		return 2;
	if (*page3 != 0)
		return 3;
	if (*page4 != 0)
		return 4;
	bpf_arena_free_pages(&arena, (void __arena *)page1, 2);
	if (*page1 != 0)
		return 5;
	if (*page2 != 0)
		return 6;
	if (*page3 != 0)
		return 7;
	if (*page4 != 0)
		return 8;
#endif
	return 0;
}

/* Local flavor of the arena type, used to reach the embedded struct bpf_map
 * via CO-RE (preserve_access_index).
 */
struct bpf_arena___l {
	struct bpf_map map;
} __attribute__((preserve_access_index));

/*
 * Non-sleepable: pass a pointer to the map embedded inside the arena
 * struct (instead of &arena directly) and size the allocation from
 * map.max_entries read at runtime.
 */
SEC("socket")
__success __retval(0) __log_level(2)
int basic_alloc3_nosleep(void *ctx)
{
	struct bpf_arena___l *ar = (struct bpf_arena___l *)&arena;
	volatile char __arena *pages;

	pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0);
	if (!pages)
		return 1;
	return 0;
}

/* Sleepable counterpart of basic_alloc3_nosleep. */
SEC("syscall")
__success __retval(0) __log_level(2)
int basic_alloc3(void *ctx)
{
	struct bpf_arena___l *ar = (struct bpf_arena___l *)&arena;
	volatile char __arena *pages;

	pages = bpf_arena_alloc_pages(&ar->map, NULL, ar->map.max_entries, NUMA_NO_NODE, 0);
	if (!pages)
		return 1;
	return 0;
}

/*
 * Non-sleepable: reserve a page and verify that neither an explicit
 * allocation at that address nor an implicit allocation (which would have
 * to use it, since the arena only has two pages) can claim it.
 */
SEC("socket")
__success __retval(0)
int basic_reserve1_nosleep(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	char __arena *page;
	int ret;

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page)
		return 1;

	page += __PAGE_SIZE;

	/* Reserve the second page */
	ret = bpf_arena_reserve_pages(&arena, page, 1);
	if (ret)
		return 2;

	/* Try to explicitly allocate the reserved page. */
	page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);
	if (page)
		return 3;

	/* Try to implicitly allocate the page (since there's only 2 of them). */
	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (page)
		return 4;
#endif
	return 0;
}

/* Sleepable counterpart of basic_reserve1_nosleep. */
SEC("syscall")
__success __retval(0)
int basic_reserve1(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	char __arena *page;
	int ret;

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page)
		return 1;

	page += __PAGE_SIZE;

	/* Reserve the second page */
	ret = bpf_arena_reserve_pages(&arena, page, 1);
	if (ret)
		return 2;

	/* Try to explicitly allocate the reserved page. */
	page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);
	if (page)
		return 3;

	/* Try to implicitly allocate the page (since there's only 2 of them). */
	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (page)
		return 4;
#endif
	return 0;
}

/*
 * Non-sleepable: reserve the first arena page, then verify an explicit
 * allocation at the same address fails (returns NULL).
 */
SEC("socket")
__success __retval(0)
int basic_reserve2_nosleep(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	char __arena *page;
	int ret;

	page = arena_base(&arena);
	ret = bpf_arena_reserve_pages(&arena, page, 1);
	if (ret)
		return 1;

	page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);
	if ((u64)page)
		return 2;
#endif
	return 0;
}

/* Sleepable counterpart of basic_reserve2_nosleep. */
SEC("syscall")
__success __retval(0)
int basic_reserve2(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	char __arena *page;
	int ret;

	page = arena_base(&arena);
	ret = bpf_arena_reserve_pages(&arena, page, 1);
	if (ret)
		return 1;

	page = bpf_arena_alloc_pages(&arena, page, 1, NUMA_NO_NODE, 0);
	if ((u64)page)
		return 2;
#endif
	return 0;
}

/* Reserve the same page twice, should return -EBUSY. */
SEC("socket")
__success __retval(0)
int reserve_twice_nosleep(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	char __arena *page;
	int ret;

	page = arena_base(&arena);

	ret = bpf_arena_reserve_pages(&arena, page, 1);
	if (ret)
		return 1;

	ret = bpf_arena_reserve_pages(&arena, page, 1);
	if (ret != -EBUSY)
		return 2;
#endif
	return 0;
}

/* Sleepable counterpart of reserve_twice_nosleep. */
SEC("syscall")
__success __retval(0)
int reserve_twice(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	char __arena *page;
	int ret;

	page = arena_base(&arena);

	ret = bpf_arena_reserve_pages(&arena, page, 1);
	if (ret)
		return 1;

	ret = bpf_arena_reserve_pages(&arena, page, 1);
	if (ret != -EBUSY)
		return 2;
#endif
	return 0;
}

/* Try to reserve past the end of the arena. */
SEC("socket")
__success __retval(0)
int reserve_invalid_region_nosleep(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	char __arena *page;
	int ret;

	/* Try a NULL pointer. */
	ret = bpf_arena_reserve_pages(&arena, NULL, 3);
	if (ret != -EINVAL)
		return 1;

	page = arena_base(&arena);

	/* 3 pages > arena's 2 pages. */
	ret = bpf_arena_reserve_pages(&arena, page, 3);
	if (ret != -EINVAL)
		return 2;

	ret = bpf_arena_reserve_pages(&arena, page, 4096);
	if (ret != -EINVAL)
		return 3;

	/* Huge page count that would wrap the address space. */
	ret = bpf_arena_reserve_pages(&arena, page, (1ULL << 32) - 1);
	if (ret != -EINVAL)
		return 4;
#endif
	return 0;
}

/* Sleepable counterpart of reserve_invalid_region_nosleep. */
SEC("syscall")
__success __retval(0)
int reserve_invalid_region(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	char __arena *page;
	int ret;

	/* Try a NULL pointer. */
	ret = bpf_arena_reserve_pages(&arena, NULL, 3);
	if (ret != -EINVAL)
		return 1;

	page = arena_base(&arena);

	/* 3 pages > arena's 2 pages. */
	ret = bpf_arena_reserve_pages(&arena, page, 3);
	if (ret != -EINVAL)
		return 2;

	ret = bpf_arena_reserve_pages(&arena, page, 4096);
	if (ret != -EINVAL)
		return 3;

	/* Huge page count that would wrap the address space. */
	ret = bpf_arena_reserve_pages(&arena, page, (1ULL << 32) - 1);
	if (ret != -EINVAL)
		return 4;
#endif
	return 0;
}

/*
 * Iterate over maps and allocate from each one via the trusted ctx->map
 * pointer; expected to load successfully.
 */
SEC("iter.s/bpf_map")
__success __log_level(2)
int iter_maps1(struct bpf_iter__bpf_map *ctx)
{
	struct bpf_map *map = ctx->map;

	if (!map)
		return 0;
	bpf_arena_alloc_pages(map, NULL, map->max_entries, 0, 0);
	return 0;
}

/*
 * Negative test: pass a seq_file pointer where a bpf_map is required;
 * the verifier must reject the program with the quoted message.
 */
SEC("iter.s/bpf_map")
__failure __msg("expected pointer to STRUCT bpf_map")
int iter_maps2(struct bpf_iter__bpf_map *ctx)
{
	struct seq_file *seq = ctx->meta->seq;

	bpf_arena_alloc_pages((void *)seq, NULL, 1, 0, 0);
	return 0;
}

/*
 * Negative test: map->inner_map_meta is an untrusted map pointer; the
 * verifier must reject passing it to the arena kfunc.
 */
SEC("iter.s/bpf_map")
__failure __msg("untrusted_ptr_bpf_map")
int iter_maps3(struct bpf_iter__bpf_map *ctx)
{
	struct bpf_map *map = ctx->map;

	if (!map)
		return 0;
	bpf_arena_alloc_pages(map->inner_map_meta, NULL, map->max_entries, 0, 0);
	return 0;
}

private(ARENA_TESTS) struct bpf_spin_lock arena_bpf_test_lock;

/* Use the arena kfunc API while under a BPF lock. */
SEC("syscall")
__success __retval(0)
int arena_kfuncs_under_bpf_lock(void *ctx)
{
#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
	char __arena *page;
	int ret;

	bpf_spin_lock(&arena_bpf_test_lock);

	/* Get a separate region of the arena. */
	page = arena_base(&arena);
	ret = bpf_arena_reserve_pages(&arena, page, 1);
	if (ret) {
		bpf_spin_unlock(&arena_bpf_test_lock);
		return 1;
	}

	bpf_arena_free_pages(&arena, page, 1);

	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
	if (!page) {
		bpf_spin_unlock(&arena_bpf_test_lock);
		return 2;
	}

	bpf_arena_free_pages(&arena, page, 1);

	bpf_spin_unlock(&arena_bpf_test_lock);
#endif

	return 0;
}
char _license[] SEC("license") = "GPL";