xref: /linux/tools/testing/selftests/bpf/progs/test_bpf_ma.c (revision 03c305861c70d6db898dd2379b882e7772a5c5d0)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif

struct generic_map_value {
	void *data;
};

char _license[] SEC("license") = "GPL";

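/* Size classes exercised by the tests below. data_btf_ids[] is const volatile
 * and zero-initialized here; the userspace side of the test is expected to
 * fill it with the BTF type IDs of the matching struct bin_data_<size> before
 * the skeleton is loaded, since bpf_obj_new_impl() needs a real BTF type ID.
 * err reports the first failure back to userspace and pid restricts the test
 * programs to the test task.
 */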
const unsigned int data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};

int err = 0;
int pid = 0;

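/* For each size class, define struct bin_data_<size>, a map value type that
 * holds a __kptr to it, and a 128-entry ARRAY map of such values. data[] is
 * sized as _size - sizeof(void *), presumably so that the object plus the
 * 8-byte per-object overhead of the BPF memory allocator lands in the
 * intended size class.
 */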
#define DEFINE_ARRAY_WITH_KPTR(_size) \
	struct bin_data_##_size { \
		char data[_size - sizeof(void *)]; \
	}; \
	struct map_value_##_size { \
		struct bin_data_##_size __kptr * data; \
		/* To emit BTF info for bin_data_xx */ \
		struct bin_data_##_size not_used; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_##_size); \
		__uint(max_entries, 128); \
	} array_##_size SEC(".maps")

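/* Same as above, but the map value holds a __percpu_kptr to the same
 * bin_data_<size> type, so the per-cpu variants reuse the BTF info emitted
 * by DEFINE_ARRAY_WITH_KPTR.
 */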
#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
	struct map_value_percpu_##_size { \
		struct bin_data_##_size __percpu_kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_percpu_##_size); \
		__uint(max_entries, 128); \
	} array_percpu_##_size SEC(".maps")

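/* Allocate @batch objects of the type identified by data_btf_ids[@idx] and
 * stash one in each of the first @batch map slots via bpf_kptr_xchg(). A
 * non-NULL old value means a stale object was left behind, which is treated
 * as an error (and dropped to avoid leaking it).
 */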
static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
		if (!new) {
			err = 2;
			return;
		}
		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_obj_drop(old);
			err = 3;
			return;
		}
	}
}

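/* Take back the objects stashed by batch_alloc() and free them with
 * bpf_obj_drop(). An empty slot is an error here because every slot is
 * expected to have been filled by the preceding batch_alloc().
 */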
static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 4;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old) {
			err = 5;
			return;
		}
		bpf_obj_drop(old);
	}
}

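/* Per-cpu variant of batch_alloc(): allocation failures are tolerated
 * because the per-cpu allocator may not be able to refill in time, so a
 * failed bpf_percpu_obj_new_impl() simply leaves the slot empty.
 */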
static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
					       unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		/* per-cpu allocator may not be able to refill in time */
		new = bpf_percpu_obj_new_impl(data_btf_ids[idx], NULL);
		if (!new)
			continue;

		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_percpu_obj_drop(old);
			err = 2;
			return;
		}
	}
}

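/* Per-cpu variant of batch_free(): an empty slot is not an error here,
 * since batch_percpu_alloc() may have skipped it on allocation failure.
 */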
static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
					      unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 3;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old)
			continue;
		bpf_percpu_obj_drop(old);
	}
}

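/* The helpers above operate on struct generic_map_value, so these macros
 * cast the size-specific array maps to struct bpf_map * to reuse the same
 * code for every size class.
 */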
#define CALL_BATCH_ALLOC(size, batch, idx) \
	batch_alloc((struct bpf_map *)(&array_##size), batch, idx)

#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
		batch_free((struct bpf_map *)(&array_##size), batch, idx); \
	} while (0)

#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
	batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)

#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
		batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
	} while (0)

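/* Instantiate one array map per size class in data_sizes[]. */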
DEFINE_ARRAY_WITH_KPTR(8);
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
DEFINE_ARRAY_WITH_KPTR(96);
DEFINE_ARRAY_WITH_KPTR(128);
DEFINE_ARRAY_WITH_KPTR(192);
DEFINE_ARRAY_WITH_KPTR(256);
DEFINE_ARRAY_WITH_KPTR(512);
DEFINE_ARRAY_WITH_KPTR(1024);
DEFINE_ARRAY_WITH_KPTR(2048);
DEFINE_ARRAY_WITH_KPTR(4096);

/* per-cpu kptr doesn't support bin_data_8, which is a zero-sized struct
 * (its data[] has length 0 when sizeof(void *) == 8)
 */
DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
DEFINE_ARRAY_WITH_PERCPU_KPTR(1024);
DEFINE_ARRAY_WITH_PERCPU_KPTR(2048);
DEFINE_ARRAY_WITH_PERCPU_KPTR(4096);

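/* Each test program attaches to fentry of the nanosleep syscall (SYS_PREFIX
 * expands to the arch-specific syscall symbol prefix) and only runs for the
 * test task selected by pid. The "?" in the section name marks the programs
 * as non-autoloaded, presumably so the userspace side enables only the one
 * under test before loading the skeleton.
 */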
195 SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
196 int test_batch_alloc_free(void *ctx)
197 {
198 	if ((u32)bpf_get_current_pid_tgid() != pid)
199 		return 0;
200 
	/* Alloc 128 8-byte objects in batch to trigger refilling,
	 * then free 128 8-byte objects in batch to trigger freeing.
	 */
	CALL_BATCH_ALLOC_FREE(8, 128, 0);
	CALL_BATCH_ALLOC_FREE(16, 128, 1);
	CALL_BATCH_ALLOC_FREE(32, 128, 2);
	CALL_BATCH_ALLOC_FREE(64, 128, 3);
	CALL_BATCH_ALLOC_FREE(96, 128, 4);
	CALL_BATCH_ALLOC_FREE(128, 128, 5);
	CALL_BATCH_ALLOC_FREE(192, 128, 6);
	CALL_BATCH_ALLOC_FREE(256, 128, 7);
	CALL_BATCH_ALLOC_FREE(512, 64, 8);
	CALL_BATCH_ALLOC_FREE(1024, 32, 9);
	CALL_BATCH_ALLOC_FREE(2048, 16, 10);
	CALL_BATCH_ALLOC_FREE(4096, 8, 11);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 8-byte objects in batch to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_ALLOC(8, 128, 0);
	CALL_BATCH_ALLOC(16, 128, 1);
	CALL_BATCH_ALLOC(32, 128, 2);
	CALL_BATCH_ALLOC(64, 128, 3);
	CALL_BATCH_ALLOC(96, 128, 4);
	CALL_BATCH_ALLOC(128, 128, 5);
	CALL_BATCH_ALLOC(192, 128, 6);
	CALL_BATCH_ALLOC(256, 128, 7);
	CALL_BATCH_ALLOC(512, 64, 8);
	CALL_BATCH_ALLOC(1024, 32, 9);
	CALL_BATCH_ALLOC(2048, 16, 10);
	CALL_BATCH_ALLOC(4096, 8, 11);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_percpu_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 16-byte per-cpu objects in batch to trigger refilling,
	 * then free 128 16-byte per-cpu objects in batch to trigger freeing.
	 */
	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9);
	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10);
	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_percpu_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 16-byte per-cpu objects in batch to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
	CALL_BATCH_PERCPU_ALLOC(1024, 32, 9);
	CALL_BATCH_PERCPU_ALLOC(2048, 16, 10);
	CALL_BATCH_PERCPU_ALLOC(4096, 8, 11);

	return 0;
}