// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

struct generic_map_value {
	void *data;
};

char _license[] SEC("license") = "GPL";

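/*
 * Object sizes exercised by the tests below. The *_btf_ids arrays are
 * const volatile and zero-initialized; user space is expected to patch in
 * the BTF type IDs of the matching bin_data_<size>/percpu_bin_data_<size>
 * types before loading, since bpf_obj_new_impl() and
 * bpf_percpu_obj_new_impl() take a BTF type ID.
 */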
const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};

const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(percpu_data_sizes)] = {};

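/*
 * err records the first failure observed by the BPF side (0 means success);
 * pid is set by user space so only the test task triggers the programs below.
 */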
int err = 0;
u32 pid = 0;

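/*
 * Define a bin_data_<size> type and an array map whose value holds a kptr
 * to it. data[] is sized <size> - sizeof(void *) so that, together with the
 * 8-byte header the BPF memory allocator prepends to each object, the
 * allocation should land in the <size>-byte size class (an assumption about
 * the allocator's internal accounting).
 */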
#define DEFINE_ARRAY_WITH_KPTR(_size) \
	struct bin_data_##_size { \
		char data[_size - sizeof(void *)]; \
	}; \
	/* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */	\
	struct bin_data_##_size *__bin_data_##_size; \
	struct map_value_##_size { \
		struct bin_data_##_size __kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_##_size); \
		__uint(max_entries, 128); \
	} array_##_size SEC(".maps")

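/*
 * Per-cpu variant: percpu_bin_data_<size> uses the full <size> bytes,
 * presumably because per-cpu objects don't carry the extra 8-byte header
 * in their per-cpu storage (again an assumption about the allocator).
 */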
#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
	struct percpu_bin_data_##_size { \
		char data[_size]; \
	}; \
	struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
	struct map_value_percpu_##_size { \
		struct percpu_bin_data_##_size __percpu_kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_percpu_##_size); \
		__uint(max_entries, 128); \
	} array_percpu_##_size SEC(".maps")

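/*
 * Allocate 'batch' objects of the idx-th size and stash each one in the map
 * through bpf_kptr_xchg(); any failure is reported through the global err.
 */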
static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
		if (!new) {
			err = 2;
			return;
		}
		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_obj_drop(old);
			err = 3;
			return;
		}
	}
}

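/*
 * Take back every stashed object with bpf_kptr_xchg(..., NULL) and drop it;
 * a missing map element or an empty slot is reported through err.
 */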
static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 4;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old) {
			err = 5;
			return;
		}
		bpf_obj_drop(old);
	}
}

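/*
 * Per-cpu counterpart of batch_alloc(). Allocation failures are tolerated
 * because the per-cpu allocator may not be able to refill in time.
 */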
static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
					       unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		/* per-cpu allocator may not be able to refill in time */
		new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);
		if (!new)
			continue;

		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_percpu_obj_drop(old);
			err = 2;
			return;
		}
	}
}

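/*
 * Per-cpu counterpart of batch_free(); an empty slot is expected when the
 * matching allocation failed, so it is simply skipped.
 */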
static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
					      unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 3;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old)
			continue;
		bpf_percpu_obj_drop(old);
	}
}

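/* Wrappers that pick the array map matching a given object size. */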
#define CALL_BATCH_ALLOC(size, batch, idx) \
	batch_alloc((struct bpf_map *)(&array_##size), batch, idx)

#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
		batch_free((struct bpf_map *)(&array_##size), batch, idx); \
	} while (0)

#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
	batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)

#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
		batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
	} while (0)

/* kptr doesn't support bin_data_8, whose data[] member would be a
 * zero-sized array (8 - sizeof(void *) == 0).
 */
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
DEFINE_ARRAY_WITH_KPTR(96);
DEFINE_ARRAY_WITH_KPTR(128);
DEFINE_ARRAY_WITH_KPTR(192);
DEFINE_ARRAY_WITH_KPTR(256);
DEFINE_ARRAY_WITH_KPTR(512);
DEFINE_ARRAY_WITH_KPTR(1024);
DEFINE_ARRAY_WITH_KPTR(2048);
DEFINE_ARRAY_WITH_KPTR(4096);

DEFINE_ARRAY_WITH_PERCPU_KPTR(8);
DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
DEFINE_ARRAY_WITH_PERCPU_KPTR(512);

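/*
 * The '?' prefix in the section name marks these programs as non-autoload;
 * the user-space test enables one per subtest, sets pid, and triggers it
 * with a nanosleep() syscall.
 */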
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* For each size, alloc a batch of objects to trigger refilling,
	 * then free them in a batch to trigger batched freeing.
	 */
	CALL_BATCH_ALLOC_FREE(16, 128, 0);
	CALL_BATCH_ALLOC_FREE(32, 128, 1);
	CALL_BATCH_ALLOC_FREE(64, 128, 2);
	CALL_BATCH_ALLOC_FREE(96, 128, 3);
	CALL_BATCH_ALLOC_FREE(128, 128, 4);
	CALL_BATCH_ALLOC_FREE(192, 128, 5);
	CALL_BATCH_ALLOC_FREE(256, 128, 6);
	CALL_BATCH_ALLOC_FREE(512, 64, 7);
	CALL_BATCH_ALLOC_FREE(1024, 32, 8);
	CALL_BATCH_ALLOC_FREE(2048, 16, 9);
	CALL_BATCH_ALLOC_FREE(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* For each size, alloc a batch of objects to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_ALLOC(16, 128, 0);
	CALL_BATCH_ALLOC(32, 128, 1);
	CALL_BATCH_ALLOC(64, 128, 2);
	CALL_BATCH_ALLOC(96, 128, 3);
	CALL_BATCH_ALLOC(128, 128, 4);
	CALL_BATCH_ALLOC(192, 128, 5);
	CALL_BATCH_ALLOC(256, 128, 6);
	CALL_BATCH_ALLOC(512, 64, 7);
	CALL_BATCH_ALLOC(1024, 32, 8);
	CALL_BATCH_ALLOC(2048, 16, 9);
	CALL_BATCH_ALLOC(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_percpu_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* For each size, alloc a batch of per-cpu objects to trigger refilling,
	 * then free them in a batch to trigger batched freeing.
	 */
	CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0);
	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_percpu_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* For each size, alloc a batch of per-cpu objects to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_PERCPU_ALLOC(8, 128, 0);
	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);

	return 0;
}
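
/*
 * A minimal sketch, assuming the usual libbpf skeleton flow, of how the
 * user-space side might drive one of these programs (the skeleton and
 * helper names below are assumptions, not the actual prog_tests code):
 *
 *	struct test_bpf_ma *skel = test_bpf_ma__open();
 *
 *	bpf_program__set_autoload(skel->progs.test_batch_alloc_free, true);
 *	... fill skel->rodata->data_btf_ids[] from the object's BTF ...
 *	test_bpf_ma__load(skel);
 *	skel->bss->pid = getpid();
 *	test_bpf_ma__attach(skel);
 *	usleep(1);	... any nanosleep() syscall fires the fentry program ...
 *	ASSERT_EQ(skel->bss->err, 0, "err");
 *	test_bpf_ma__destroy(skel);
 */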