// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>

#include "bpf_experimental.h"
#include "bpf_misc.h"

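/* Layout-compatible view of every map_value_##_size defined below: the
 * batch helpers only touch the leading kptr field, so they can operate
 * on any of the per-size maps through this generic type.
 */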
struct generic_map_value {
	void *data;
};

char _license[] SEC("license") = "GPL";

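/* The BTF type IDs passed to bpf_obj_new_impl()/bpf_percpu_obj_new_impl()
 * are not known at compile time. The userspace part of the test is
 * expected to resolve them from the object's BTF and write them into
 * these rodata arrays before the skeleton is loaded.
 */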
const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};

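/* Per-cpu sizes start at 8 because a per-cpu object carries no hidden
 * header in its payload, and stop at 512, the largest unit size the
 * per-cpu allocator supports at the time of writing.
 */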
const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(percpu_data_sizes)] = {};

int err = 0;
u32 pid = 0;

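/* Each bin_data_##_size is sizeof(void *) smaller than _size: the BPF
 * memory allocator prepends a hidden 8-byte llist_node to every object,
 * so this keeps the total allocation exactly on the _size bucket under
 * test (an assumption based on the allocator's internals).
 */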
#define DEFINE_ARRAY_WITH_KPTR(_size) \
	struct bin_data_##_size { \
		char data[_size - sizeof(void *)]; \
	}; \
	/* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */	\
	struct bin_data_##_size *__bin_data_##_size; \
	struct map_value_##_size { \
		struct bin_data_##_size __kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_##_size); \
		__uint(max_entries, 128); \
	} array_##_size SEC(".maps")

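/* Unlike the plain kptr case above, the per-cpu payload uses the full
 * _size: the allocator's bookkeeping for per-cpu objects appears to live
 * outside the per-cpu data itself.
 */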
#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
	struct percpu_bin_data_##_size { \
		char data[_size]; \
	}; \
	struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
	struct map_value_percpu_##_size { \
		struct percpu_bin_data_##_size __percpu_kptr * data; \
	}; \
	struct { \
		__uint(type, BPF_MAP_TYPE_ARRAY); \
		__type(key, int); \
		__type(value, struct map_value_percpu_##_size); \
		__uint(max_entries, 128); \
	} array_percpu_##_size SEC(".maps")

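/* Fill the first 'batch' slots of 'map' with freshly allocated objects
 * of the type identified by data_btf_ids[idx]. Each distinct 'err' value
 * below pinpoints which step failed; a non-NULL old pointer from the
 * exchange means the slot unexpectedly already held an object.
 */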
static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		new = bpf_obj_new_impl(data_btf_ids[idx], NULL);
		if (!new) {
			err = 2;
			return;
		}
		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_obj_drop(old);
			err = 3;
			return;
		}
	}
}

static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 4;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old) {
			err = 5;
			return;
		}
		bpf_obj_drop(old);
	}
}

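/* Same pattern as batch_alloc()/batch_free(), but for per-cpu objects.
 * Allocation failures and empty slots are tolerated rather than treated
 * as errors, since the per-cpu allocator may legitimately run dry.
 */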
static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch,
					       unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old, *new;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 1;
			return;
		}
		/* per-cpu allocator may not be able to refill in time */
		new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);
		if (!new)
			continue;

		old = bpf_kptr_xchg(&value->data, new);
		if (old) {
			bpf_percpu_obj_drop(old);
			err = 2;
			return;
		}
	}
}

static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch,
					      unsigned int idx)
{
	struct generic_map_value *value;
	unsigned int i, key;
	void *old;

	for (i = 0; i < batch; i++) {
		key = i;
		value = bpf_map_lookup_elem(map, &key);
		if (!value) {
			err = 3;
			return;
		}
		old = bpf_kptr_xchg(&value->data, NULL);
		if (!old)
			continue;
		bpf_percpu_obj_drop(old);
	}
}

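/* Convenience wrappers that pick the per-size map matching 'size'; the
 * ALLOC_FREE variants exercise a full allocate-then-free cycle.
 */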
#define CALL_BATCH_ALLOC(size, batch, idx) \
	batch_alloc((struct bpf_map *)(&array_##size), batch, idx)

#define CALL_BATCH_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \
		batch_free((struct bpf_map *)(&array_##size), batch, idx); \
	} while (0)

#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \
	batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx)

#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \
	do { \
		batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \
		batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \
	} while (0)

/* kptr doesn't support bin_data_8, whose data member would be a zero-sized array */
DEFINE_ARRAY_WITH_KPTR(16);
DEFINE_ARRAY_WITH_KPTR(32);
DEFINE_ARRAY_WITH_KPTR(64);
DEFINE_ARRAY_WITH_KPTR(96);
DEFINE_ARRAY_WITH_KPTR(128);
DEFINE_ARRAY_WITH_KPTR(192);
DEFINE_ARRAY_WITH_KPTR(256);
DEFINE_ARRAY_WITH_KPTR(512);
DEFINE_ARRAY_WITH_KPTR(1024);
DEFINE_ARRAY_WITH_KPTR(2048);
DEFINE_ARRAY_WITH_KPTR(4096);

DEFINE_ARRAY_WITH_PERCPU_KPTR(8);
DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
DEFINE_ARRAY_WITH_PERCPU_KPTR(96);
DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
DEFINE_ARRAY_WITH_PERCPU_KPTR(512);

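/* All programs below use the '?' section prefix, so none of them is
 * loaded until the userspace runner enables exactly the one under test.
 * A rough sketch of that side, assuming the usual selftest skeleton API
 * (names below are illustrative):
 *
 *	skel = test_bpf_ma__open();
 *	bpf_program__set_autoload(skel->progs.test_batch_alloc_free, true);
 *	// fill skel->rodata->data_btf_ids[] from the skeleton's BTF
 *	test_bpf_ma__load(skel);
 *	skel->bss->pid = getpid();
 *	test_bpf_ma__attach(skel);
 *	usleep(1);	// the nanosleep syscall fires the fentry program
 *	ASSERT_EQ(skel->bss->err, 0, "err");
 */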
SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 16-byte objects in batch to trigger refilling,
	 * then free 128 16-byte objects in batch to trigger freeing.
	 */
	CALL_BATCH_ALLOC_FREE(16, 128, 0);
	CALL_BATCH_ALLOC_FREE(32, 128, 1);
	CALL_BATCH_ALLOC_FREE(64, 128, 2);
	CALL_BATCH_ALLOC_FREE(96, 128, 3);
	CALL_BATCH_ALLOC_FREE(128, 128, 4);
	CALL_BATCH_ALLOC_FREE(192, 128, 5);
	CALL_BATCH_ALLOC_FREE(256, 128, 6);
	CALL_BATCH_ALLOC_FREE(512, 64, 7);
	CALL_BATCH_ALLOC_FREE(1024, 32, 8);
	CALL_BATCH_ALLOC_FREE(2048, 16, 9);
	CALL_BATCH_ALLOC_FREE(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 16-byte objects in batch to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_ALLOC(16, 128, 0);
	CALL_BATCH_ALLOC(32, 128, 1);
	CALL_BATCH_ALLOC(64, 128, 2);
	CALL_BATCH_ALLOC(96, 128, 3);
	CALL_BATCH_ALLOC(128, 128, 4);
	CALL_BATCH_ALLOC(192, 128, 5);
	CALL_BATCH_ALLOC(256, 128, 6);
	CALL_BATCH_ALLOC(512, 64, 7);
	CALL_BATCH_ALLOC(1024, 32, 8);
	CALL_BATCH_ALLOC(2048, 16, 9);
	CALL_BATCH_ALLOC(4096, 8, 10);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_batch_percpu_alloc_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
	 * then free 128 8-byte per-cpu objects in batch to trigger freeing.
	 */
	CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0);
	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);

	return 0;
}

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
int test_percpu_free_through_map_free(void *ctx)
{
	if ((u32)bpf_get_current_pid_tgid() != pid)
		return 0;

	/* Alloc 128 8-byte per-cpu objects in batch to trigger refilling,
	 * then free these objects through map free.
	 */
	CALL_BATCH_PERCPU_ALLOC(8, 128, 0);
	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);

	return 0;
}