xref: /linux/tools/testing/selftests/bpf/progs/test_bpf_ma.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

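/* Every map value type defined below keeps its kptr field at offset 0, so the
 * batch helpers can treat any of them as a struct generic_map_value.
 */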
struct generic_map_value {
	void *data;
};

char _license[] SEC("license") = "GPL";

const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};

const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(percpu_data_sizes)] = {};

int err = 0;
u32 pid = 0;

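/* For each object size, define a bin_data_<size> type, a map value that
 * stashes a kptr to it, and a 128-entry array map of those values. The data
 * array is _size - sizeof(void *) bytes: the BPF memory allocator reserves
 * 8 bytes per object for its freelist node, so the full unit should come out
 * to exactly _size bytes and land in the intended size bucket.
 */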
#define DEFINE_ARRAY_WITH_KPTR(_size) \
	struct bin_data_##_size { \
		char data[_size - sizeof(void *)]; \
	}; \
	/* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */	\
	struct bin_data_##_size *__bin_data_##_size; \
	struct map_value_##_size { \
		struct bin_data_##_size __kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_##_size); \
		__uint(max_entries, 128); \
	} array_##_size SEC(".maps")

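/* Per-cpu counterpart of the macro above: the map value holds a __percpu_kptr
 * and the data array is the full _size bytes, since the per-cpu data itself
 * does not carry the allocator's 8-byte header.
 */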
#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
	struct percpu_bin_data_##_size { \
		char data[_size]; \
	}; \
	struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
	struct map_value_percpu_##_size { \
		struct percpu_bin_data_##_size __percpu_kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_percpu_##_size); \
		__uint(max_entries, 128); \
	} array_percpu_##_size SEC(".maps")

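/* Allocate 'batch' objects of the BTF type data_btf_ids[idx] and stash each
 * one in the first 'batch' slots of the array map. Any failure is reported
 * through the global err so the userspace side of the test can check it.
 */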
static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
		if (!new) {
			err = 2;
			return;
		}
		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_obj_drop(old);
			err = 3;
			return;
		}
	}
}

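/* Undo batch_alloc(): swap each stashed kptr out of the map and drop it.
 * A missing element or a NULL kptr is reported as an error.
 */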
static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 4;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old) {
			err = 5;
			return;
		}
		bpf_obj_drop(old);
	}
}

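/* Per-cpu variant of batch_alloc(). An allocation failure is tolerated here:
 * as the inline comment notes, the per-cpu allocator may not refill in time,
 * so a NULL result simply skips that slot instead of failing the test.
 */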
static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
					       unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		/* per-cpu allocator may not be able to refill in time */
		new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);
		if (!new)
			continue;

		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_percpu_obj_drop(old);
			err = 2;
			return;
		}
	}
}

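/* Per-cpu variant of batch_free(). A NULL kptr is not an error here, since
 * the matching batch_percpu_alloc() may have skipped that slot.
 */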
static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
					      unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 3;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old)
			continue;
		bpf_percpu_obj_drop(old);
	}
}

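/* Thin wrappers that route a given object size to its array map. */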
#define CALL_BATCH_ALLOC(size, batch, idx) \
	batch_alloc((struct bpf_map *)(&array_##size), batch, idx)

#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
		batch_free((struct bpf_map *)(&array_##size), batch, idx); \
	} while (0)

#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
	batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)

#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
		batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
	} while (0)

/* kptr doesn't support bin_data_8, whose data array would be zero-sized */
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
DEFINE_ARRAY_WITH_KPTR(96);
DEFINE_ARRAY_WITH_KPTR(128);
DEFINE_ARRAY_WITH_KPTR(192);
DEFINE_ARRAY_WITH_KPTR(256);
DEFINE_ARRAY_WITH_KPTR(512);
DEFINE_ARRAY_WITH_KPTR(1024);
DEFINE_ARRAY_WITH_KPTR(2048);
DEFINE_ARRAY_WITH_KPTR(4096);

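/* One per-cpu kptr map per entry of percpu_data_sizes[] above. */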
DEFINE_ARRAY_WITH_PERCPU_KPTR(8);
DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
DEFINE_ARRAY_WITH_PERCPU_KPTR(512);

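/* Each test program below attaches to fentry of sys_nanosleep and bails out
 * unless the calling thread matches the 'pid' value set by the userspace side
 * of the selftest.
 */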
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc objects of each size in batch to trigger refilling,
	 * then free them in batch to trigger freeing.
	 */
	CALL_BATCH_ALLOC_FREE(16, 128, 0);
	CALL_BATCH_ALLOC_FREE(32, 128, 1);
	CALL_BATCH_ALLOC_FREE(64, 128, 2);
	CALL_BATCH_ALLOC_FREE(96, 128, 3);
	CALL_BATCH_ALLOC_FREE(128, 128, 4);
	CALL_BATCH_ALLOC_FREE(192, 128, 5);
	CALL_BATCH_ALLOC_FREE(256, 128, 6);
	CALL_BATCH_ALLOC_FREE(512, 64, 7);
	CALL_BATCH_ALLOC_FREE(1024, 32, 8);
	CALL_BATCH_ALLOC_FREE(2048, 16, 9);
	CALL_BATCH_ALLOC_FREE(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc objects of each size in batch to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_ALLOC(16, 128, 0);
	CALL_BATCH_ALLOC(32, 128, 1);
	CALL_BATCH_ALLOC(64, 128, 2);
	CALL_BATCH_ALLOC(96, 128, 3);
	CALL_BATCH_ALLOC(128, 128, 4);
	CALL_BATCH_ALLOC(192, 128, 5);
	CALL_BATCH_ALLOC(256, 128, 6);
	CALL_BATCH_ALLOC(512, 64, 7);
	CALL_BATCH_ALLOC(1024, 32, 8);
	CALL_BATCH_ALLOC(2048, 16, 9);
	CALL_BATCH_ALLOC(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_percpu_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc per-cpu objects of each size in batch to trigger refilling,
	 * then free them in batch to trigger freeing.
	 */
	CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0);
	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_percpu_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc per-cpu objects of each size in batch to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_PERCPU_ALLOC(8, 128, 0);
	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);

	return 0;
}