/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#pragma once
#include "bpf_arena_common.h"

#ifndef __round_mask
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#endif
#ifndef round_up
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#endif

#ifdef __BPF__
#define NR_CPUS (sizeof(struct cpumask) * 8)

/* Per-CPU page_frag state: the current arena page and the bump offset into it. */
static void __arena * __arena page_frag_cur_page[NR_CPUS];
static int __arena page_frag_cur_offset[NR_CPUS];

/*
 * Simple page_frag allocator: objects are carved downwards from the top of a
 * per-CPU arena page, and the last 8 bytes of each page hold a count of the
 * objects still live on it.  A usage sketch follows the final #endif below.
 */
static inline void __arena* bpf_alloc(unsigned int size)
{
	__u64 __arena *obj_cnt;
	__u32 cpu = bpf_get_smp_processor_id();
	void __arena *page = page_frag_cur_page[cpu];
	int __arena *cur_offset = &page_frag_cur_offset[cpu];
	int offset;

	size = round_up(size, 8);
	if (size >= PAGE_SIZE - 8)
		return NULL;
	if (!page) {
refill:
		page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
		if (!page)
			return NULL;
		cast_kern(page);
		page_frag_cur_page[cpu] = page;
		*cur_offset = PAGE_SIZE - 8;
		obj_cnt = page + PAGE_SIZE - 8;
		*obj_cnt = 0;
	} else {
		cast_kern(page);
		obj_cnt = page + PAGE_SIZE - 8;
	}

	offset = *cur_offset - size;
	if (offset < 0)
		goto refill;

	(*obj_cnt)++;
	*cur_offset = offset;
	return page + offset;
}

static inline void bpf_free(void __arena *addr)
{
	__u64 __arena *obj_cnt;

	/* Round down to the page start; the object count lives in its last 8 bytes. */
	addr = (void __arena *)(((long)addr) & ~(PAGE_SIZE - 1));
	obj_cnt = addr + PAGE_SIZE - 8;
	if (--(*obj_cnt) == 0)
		bpf_arena_free_pages(&arena, addr, 1);
}
#else
static inline void __arena* bpf_alloc(unsigned int size) { return NULL; }
static inline void bpf_free(void __arena *addr) {}
#endif
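
/*
 * Usage sketch (illustrative only, kept in a comment so the header proper is
 * unchanged): a minimal BPF arena program that allocates one object and frees
 * it again.  bpf_alloc()/bpf_free() above reference an arena map that must be
 * named "arena" in the including BPF object; the map's max_entries value, the
 * struct, section and program names below are hypothetical.
 *
 *	#include <vmlinux.h>
 *	#include <bpf/bpf_helpers.h>
 *	#include "bpf_arena_alloc.h"
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARENA);
 *		__uint(map_flags, BPF_F_MMAPABLE);
 *		__uint(max_entries, 10);
 *	} arena SEC(".maps");
 *
 *	struct elem {
 *		__u64 key;
 *		__u64 val;
 *	};
 *
 *	SEC("syscall")
 *	int alloc_free_one(void *ctx)
 *	{
 *		struct elem __arena *e;
 *
 *		e = bpf_alloc(sizeof(*e));
 *		if (!e)
 *			return 1;
 *		e->key = 1;
 *		e->val = 2;
 *		bpf_free(e);
 *		return 0;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 *
 * bpf_alloc() rounds the request up to 8 bytes and bumps the current page's
 * object count; bpf_free() decrements that count and hands the arena page
 * back via bpf_arena_free_pages() once the last object on it is freed.
 */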