// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"

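/*
 * map_value holds both kptr flavors:
 *  - __kptr_untrusted fields may be loaded and stored directly; loads
 *    yield untrusted pointers that can only be dereferenced for reads.
 *  - __kptr fields hold a reference on the object; ownership can only
 *    be moved in and out of the map with bpf_kptr_xchg().
 */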
struct map_value {
	struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
	struct prog_test_ref_kfunc __kptr *ref_ptr;
};

struct array_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} array_map SEC(".maps");

struct pcpu_array_map {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} pcpu_array_map SEC(".maps");

struct hash_map {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} hash_map SEC(".maps");

struct pcpu_hash_map {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} pcpu_hash_map SEC(".maps");

struct hash_malloc_map {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_malloc_map SEC(".maps");

struct pcpu_hash_malloc_map {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} pcpu_hash_malloc_map SEC(".maps");

struct lru_hash_map {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} lru_hash_map SEC(".maps");

struct lru_pcpu_hash_map {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} lru_pcpu_hash_map SEC(".maps");

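/* Local storage maps must be created with BPF_F_NO_PREALLOC, since their
 * elements are allocated on demand per cgroup/task/inode/socket.
 */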
struct cgrp_ls_map {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} cgrp_ls_map SEC(".maps");

struct task_ls_map {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} task_ls_map SEC(".maps");

struct inode_ls_map {
	__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} inode_ls_map SEC(".maps");

struct sk_ls_map {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} sk_ls_map SEC(".maps");

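/* Define outer array-of-maps and hash-of-maps with a static inner map
 * template, so that kptr handling can also be exercised on values reached
 * through an outer map (the inner map's value BTF carries the kptr info).
 */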
#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name)       \
	struct {                                                \
		__uint(type, map_type);                         \
		__uint(max_entries, 1);                         \
		__uint(key_size, sizeof(int));                  \
		__uint(value_size, sizeof(int));                \
		__array(values, struct inner_map_type);         \
	} name SEC(".maps") = {                                 \
		.values = { [0] = &inner_map_type },            \
	}

DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_array_map, array_of_pcpu_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_hash_map, array_of_pcpu_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_array_map, hash_of_pcpu_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_hash_map, hash_of_pcpu_hash_maps);

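/* vmlinux.h does not provide WRITE_ONCE; define a volatile store locally
 * so direct writes to the untrusted kptr field compile to a single,
 * non-torn store instruction.
 */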
#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))

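/* Exercise loads and direct stores of the untrusted kptr: store back the
 * possibly-NULL pointer, dereference it after the NULL check, then store
 * the now non-NULL untrusted pointer and finally NULL.
 */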
static void test_kptr_unref(struct map_value *v)
{
	struct prog_test_ref_kfunc *p;

	p = v->unref_ptr;
	/* store untrusted_ptr_or_null_ */
	WRITE_ONCE(v->unref_ptr, p);
	if (!p)
		return;
	if (p->a + p->b > 100)
		return;
	/* store untrusted_ptr_ */
	WRITE_ONCE(v->unref_ptr, p);
	/* store NULL */
	WRITE_ONCE(v->unref_ptr, NULL);
}

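/* Exercise the referenced kptr: loads from v->ref_ptr yield RCU-protected
 * pointers, while bpf_kptr_xchg() moves the reference itself in and out of
 * the map, so every non-NULL pointer it returns must eventually be released.
 */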
static void test_kptr_ref(struct map_value *v)
{
	struct prog_test_ref_kfunc *p;

	p = v->ref_ptr;
	/* store ptr_or_null_ */
	WRITE_ONCE(v->unref_ptr, p);
	if (!p)
		return;
	/*
	 * p is rcu_ptr_prog_test_ref_kfunc,
	 * because bpf prog is non-sleepable and runs in RCU CS.
	 * p can be passed to kfunc that requires KF_RCU.
	 */
	bpf_kfunc_call_test_ref(p);
	if (p->a + p->b > 100)
		return;
	/* store NULL */
	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return;
	/*
	 * p is trusted_ptr_prog_test_ref_kfunc.
	 * p can be passed to kfunc that requires KF_RCU.
	 */
	bpf_kfunc_call_test_ref(p);
	if (p->a + p->b > 100) {
		bpf_kfunc_call_test_release(p);
		return;
	}
	/* store ptr_ */
	WRITE_ONCE(v->unref_ptr, p);
	bpf_kfunc_call_test_release(p);

	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return;
	/* store ptr_ */
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (!p)
		return;
	if (p->a + p->b > 100) {
		bpf_kfunc_call_test_release(p);
		return;
	}
	bpf_kfunc_call_test_release(p);
}

static void test_kptr(struct map_value *v)
{
	test_kptr_unref(v);
	test_kptr_ref(v);
}

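/* Run both kptr tests against one element of each regular map type. */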
SEC("tc")
int test_map_kptr(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

#define TEST(map)					\
	v = bpf_map_lookup_elem(&map, &key);		\
	if (!v)						\
		return 0;				\
	test_kptr(v)

	TEST(array_map);
	TEST(hash_map);
	TEST(hash_malloc_map);
	TEST(lru_hash_map);
	TEST(pcpu_array_map);
	TEST(pcpu_hash_map);

#undef TEST
	return 0;
}

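/* The next four programs repeat the same kptr tests on values obtained
 * from each local storage map type, created on demand with
 * BPF_LOCAL_STORAGE_GET_F_CREATE.
 */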
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_map_kptr, struct cgroup *cgrp, const char *path)
{
	struct map_value *v;

	v = bpf_cgrp_storage_get(&cgrp_ls_map, cgrp, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

SEC("lsm/inode_unlink")
int BPF_PROG(test_task_map_kptr, struct inode *inode, struct dentry *victim)
{
	struct task_struct *task;
	struct map_value *v;

	task = bpf_get_current_task_btf();
	if (!task)
		return 0;
	v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

SEC("lsm/inode_unlink")
int BPF_PROG(test_inode_map_kptr, struct inode *inode, struct dentry *victim)
{
	struct map_value *v;

	v = bpf_inode_storage_get(&inode_ls_map, inode, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

SEC("tc")
int test_sk_map_kptr(struct __sk_buff *ctx)
{
	struct map_value *v;
	struct bpf_sock *sk;

	sk = ctx->sk;
	if (!sk)
		return 0;
	v = bpf_sk_storage_get(&sk_ls_map, sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

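/* Same tests through an extra level of indirection: look up the inner map
 * in the outer map first, then the value in the inner map.
 */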
SEC("tc")
int test_map_in_map_kptr(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;
	void *map;

#define TEST(map_in_map)                                \
	map = bpf_map_lookup_elem(&map_in_map, &key);   \
	if (!map)                                       \
		return 0;                               \
	v = bpf_map_lookup_elem(map, &key);		\
	if (!v)						\
		return 0;				\
	test_kptr(v)

	TEST(array_of_array_maps);
	TEST(array_of_hash_maps);
	TEST(array_of_hash_malloc_maps);
	TEST(array_of_lru_hash_maps);
	TEST(array_of_pcpu_array_maps);
	TEST(array_of_pcpu_hash_maps);
	TEST(hash_of_array_maps);
	TEST(hash_of_hash_maps);
	TEST(hash_of_hash_malloc_maps);
	TEST(hash_of_lru_hash_maps);
	TEST(hash_of_pcpu_array_maps);
	TEST(hash_of_pcpu_hash_maps);

#undef TEST
	return 0;
}

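/* Expected refcount of the shared prog_test_ref_kfunc object from
 * bpf_testmod, mirrored here so the tests below can check the kernel's
 * counter after every acquire/release/xchg; it presumably starts at 1
 * for the object's own reference.
 */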
int ref = 1;

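/* First half of the refcount test: acquire a reference, move it into the
 * map with bpf_kptr_xchg(), and verify the object's refcount after each
 * step via p->next (the shared test object links to itself, so the
 * counter stays readable after p is released). A reference is
 * deliberately left in the map for the _post half.
 */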
static __always_inline
int test_map_kptr_ref_pre(struct map_value *v)
{
	struct prog_test_ref_kfunc *p, *p_st;
	unsigned long arg = 0;
	int ret;

	p = bpf_kfunc_call_test_acquire(&arg);
	if (!p)
		return 1;
	ref++;

	p_st = p->next;
	if (p_st->cnt.refs.counter != ref) {
		ret = 2;
		goto end;
	}

	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		ret = 3;
		goto end;
	}
	if (p_st->cnt.refs.counter != ref)
		return 4;

	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return 5;
	bpf_kfunc_call_test_release(p);
	ref--;
	if (p_st->cnt.refs.counter != ref)
		return 6;

	p = bpf_kfunc_call_test_acquire(&arg);
	if (!p)
		return 7;
	ref++;
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		ret = 8;
		goto end;
	}
	if (p_st->cnt.refs.counter != ref)
		return 9;
	/* Leave in map */

	return 0;
end:
	ref--;
	bpf_kfunc_call_test_release(p);
	return ret;
}

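/* Second half: the reference left in the map by the _pre half must still
 * be there with the expected refcount; swap it out and back in to check
 * that bpf_kptr_xchg() itself does not change the count.
 */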
static __always_inline
int test_map_kptr_ref_post(struct map_value *v)
{
	struct prog_test_ref_kfunc *p, *p_st;

	p_st = v->ref_ptr;
	if (!p_st || p_st->cnt.refs.counter != ref)
		return 1;

	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return 2;
	if (p_st->cnt.refs.counter != ref) {
		bpf_kfunc_call_test_release(p);
		return 3;
	}

	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		bpf_kfunc_call_test_release(p);
		return 4;
	}
	if (p_st->cnt.refs.counter != ref)
		return 5;

	return 0;
}

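/* Look up one element per map (CPU 0 for per-cpu maps) and run the _pre
 * half, propagating any non-zero error code to userspace.
 */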
#define TEST(map)                            \
	v = bpf_map_lookup_elem(&map, &key); \
	if (!v)                              \
		return -1;                   \
	ret = test_map_kptr_ref_pre(v);      \
	if (ret)                             \
		return ret;

#define TEST_PCPU(map)                                 \
	v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
	if (!v)                                        \
		return -1;                             \
	ret = test_map_kptr_ref_pre(v);                \
	if (ret)                                       \
		return ret;

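/* Populate the hash-based maps first (array map elements exist from
 * creation), then run the _pre half against one element of each map type.
 * Meant to be followed by test_map_kptr_ref2 in the same run.
 */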
SEC("tc")
int test_map_kptr_ref1(struct __sk_buff *ctx)
{
	struct map_value *v, val = {};
	int key = 0, ret;

	bpf_map_update_elem(&hash_map, &key, &val, 0);
	bpf_map_update_elem(&hash_malloc_map, &key, &val, 0);
	bpf_map_update_elem(&lru_hash_map, &key, &val, 0);

	bpf_map_update_elem(&pcpu_hash_map, &key, &val, 0);
	bpf_map_update_elem(&pcpu_hash_malloc_map, &key, &val, 0);
	bpf_map_update_elem(&lru_pcpu_hash_map, &key, &val, 0);

	TEST(array_map);
	TEST(hash_map);
	TEST(hash_malloc_map);
	TEST(lru_hash_map);

	TEST_PCPU(pcpu_array_map);
	TEST_PCPU(pcpu_hash_map);
	TEST_PCPU(pcpu_hash_malloc_map);
	TEST_PCPU(lru_pcpu_hash_map);

	return 0;
}

#undef TEST
#undef TEST_PCPU

#define TEST(map)                            \
	v = bpf_map_lookup_elem(&map, &key); \
	if (!v)                              \
		return -1;                   \
	ret = test_map_kptr_ref_post(v);     \
	if (ret)                             \
		return ret;

#define TEST_PCPU(map)                                 \
	v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
	if (!v)                                        \
		return -1;                             \
	ret = test_map_kptr_ref_post(v);               \
	if (ret)                                       \
		return ret;

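/* Re-check the references that test_map_kptr_ref1 left behind in every
 * map; must run after it in the same test.
 */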
SEC("tc")
int test_map_kptr_ref2(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0, ret;

	TEST(array_map);
	TEST(hash_map);
	TEST(hash_malloc_map);
	TEST(lru_hash_map);

	TEST_PCPU(pcpu_array_map);
	TEST_PCPU(pcpu_hash_map);
	TEST_PCPU(pcpu_hash_malloc_map);
	TEST_PCPU(lru_pcpu_hash_map);

	return 0;
}

#undef TEST
#undef TEST_PCPU

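/* Plain acquire/release without touching any map, to verify the shared
 * refcount tracking on its own.
 */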
SEC("tc")
int test_map_kptr_ref3(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	unsigned long sp = 0;

	p = bpf_kfunc_call_test_acquire(&sp);
	if (!p)
		return 1;
	ref++;
	if (p->cnt.refs.counter != ref) {
		bpf_kfunc_call_test_release(p);
		return 2;
	}
	bpf_kfunc_call_test_release(p);
	ref--;
	return 0;
}

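/* The syscall programs below repeat the _pre/_post refcount checks on
 * task local storage, and finally delete the storage while it still holds
 * a reference, exercising kptr release on element destruction.
 */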
SEC("syscall")
int test_ls_map_kptr_ref1(void *ctx)
{
	struct task_struct *current;
	struct map_value *v;

	current = bpf_get_current_task_btf();
	if (!current)
		return 100;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
	if (v)
		return 150;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!v)
		return 200;
	return test_map_kptr_ref_pre(v);
}

SEC("syscall")
int test_ls_map_kptr_ref2(void *ctx)
{
	struct task_struct *current;
	struct map_value *v;

	current = bpf_get_current_task_btf();
	if (!current)
		return 100;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
	if (!v)
		return 200;
	return test_map_kptr_ref_post(v);
}

SEC("syscall")
int test_ls_map_kptr_ref_del(void *ctx)
{
	struct task_struct *current;
	struct map_value *v;

	current = bpf_get_current_task_btf();
	if (!current)
		return 100;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
	if (!v)
		return 200;
	if (!v->ref_ptr)
		return 300;
	return bpf_task_storage_delete(&task_ls_map, current);
}

char _license[] SEC("license") = "GPL";