/* xref: /linux/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c (revision db6b35cffe59c619ea3772b21d7c7c8a7b885dc1) */
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
3 
4 #include <test_progs.h>
5 #include <network_helpers.h>
6 
7 #include "refcounted_kptr.skel.h"
8 #include "refcounted_kptr_fail.skel.h"
9 
/* Run the success-case BPF programs from refcounted_kptr.bpf.c.
 * RUN_TESTS presumably loads the skeleton and executes each annotated
 * program, asserting the expected load/run outcome — see the selftest
 * harness macro definition to confirm.
 */
void test_refcounted_kptr(void)
{
	RUN_TESTS(refcounted_kptr);
}
14 
/* Run the failure-case BPF programs from refcounted_kptr_fail.bpf.c,
 * i.e. programs that are expected to be rejected by the verifier
 * (same RUN_TESTS harness macro as the success variant above).
 */
void test_refcounted_kptr_fail(void)
{
	RUN_TESTS(refcounted_kptr_fail);
}
19 
20 void test_refcounted_kptr_wrong_owner(void)
21 {
22 	LIBBPF_OPTS(bpf_test_run_opts, opts,
23 		    .data_in = &pkt_v4,
24 		    .data_size_in = sizeof(pkt_v4),
25 		    .repeat = 1,
26 	);
27 	struct refcounted_kptr *skel;
28 	int ret;
29 
30 	skel = refcounted_kptr__open_and_load();
31 	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
32 		return;
33 
34 	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a1), &opts);
35 	ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a1");
36 	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a1 retval");
37 
38 	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_b), &opts);
39 	ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_b");
40 	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_b retval");
41 
42 	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_wrong_owner_remove_fail_a2), &opts);
43 	ASSERT_OK(ret, "rbtree_wrong_owner_remove_fail_a2");
44 	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
45 	refcounted_kptr__destroy(skel);
46 }
47 
48 void test_percpu_hash_refcounted_kptr_refcount_leak(void)
49 {
50 	struct refcounted_kptr *skel;
51 	int cpu_nr, fd, err, key = 0;
52 	struct bpf_map *map;
53 	size_t values_sz;
54 	u64 *values;
55 	LIBBPF_OPTS(bpf_test_run_opts, opts,
56 		    .data_in = &pkt_v4,
57 		    .data_size_in = sizeof(pkt_v4),
58 		    .repeat = 1,
59 	);
60 
61 	cpu_nr = libbpf_num_possible_cpus();
62 	if (!ASSERT_GT(cpu_nr, 0, "libbpf_num_possible_cpus"))
63 		return;
64 
65 	values = calloc(cpu_nr, sizeof(u64));
66 	if (!ASSERT_OK_PTR(values, "calloc values"))
67 		return;
68 
69 	skel = refcounted_kptr__open_and_load();
70 	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load")) {
71 		free(values);
72 		return;
73 	}
74 
75 	values_sz = cpu_nr * sizeof(u64);
76 	memset(values, 0, values_sz);
77 
78 	map = skel->maps.percpu_hash;
79 	err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, 0);
80 	if (!ASSERT_OK(err, "bpf_map__update_elem"))
81 		goto out;
82 
83 	fd = bpf_program__fd(skel->progs.percpu_hash_refcount_leak);
84 	err = bpf_prog_test_run_opts(fd, &opts);
85 	if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
86 		goto out;
87 	if (!ASSERT_EQ(opts.retval, 2, "opts.retval"))
88 		goto out;
89 
90 	err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, 0);
91 	if (!ASSERT_OK(err, "bpf_map__update_elem"))
92 		goto out;
93 
94 	fd = bpf_program__fd(skel->progs.check_percpu_hash_refcount);
95 	err = bpf_prog_test_run_opts(fd, &opts);
96 	ASSERT_OK(err, "bpf_prog_test_run_opts");
97 	ASSERT_EQ(opts.retval, 1, "opts.retval");
98 
99 out:
100 	refcounted_kptr__destroy(skel);
101 	free(values);
102 }
103