xref: /linux/tools/testing/selftests/bpf/prog_tests/map_btf.c (revision 06d07429858317ded2db7986113a9e0129cd599b)
1*e58aac1aSHou Tao // SPDX-License-Identifier: GPL-2.0
2*e58aac1aSHou Tao /* Copyright (C) 2023. Huawei Technologies Co., Ltd */
3*e58aac1aSHou Tao #include <test_progs.h>
4*e58aac1aSHou Tao 
5*e58aac1aSHou Tao #include "normal_map_btf.skel.h"
6*e58aac1aSHou Tao #include "map_in_map_btf.skel.h"
7*e58aac1aSHou Tao 
do_test_normal_map_btf(void)8*e58aac1aSHou Tao static void do_test_normal_map_btf(void)
9*e58aac1aSHou Tao {
10*e58aac1aSHou Tao 	struct normal_map_btf *skel;
11*e58aac1aSHou Tao 	int i, err, new_fd = -1;
12*e58aac1aSHou Tao 	int map_fd_arr[64];
13*e58aac1aSHou Tao 
14*e58aac1aSHou Tao 	skel = normal_map_btf__open_and_load();
15*e58aac1aSHou Tao 	if (!ASSERT_OK_PTR(skel, "open_load"))
16*e58aac1aSHou Tao 		return;
17*e58aac1aSHou Tao 
18*e58aac1aSHou Tao 	err = normal_map_btf__attach(skel);
19*e58aac1aSHou Tao 	if (!ASSERT_OK(err, "attach"))
20*e58aac1aSHou Tao 		goto out;
21*e58aac1aSHou Tao 
22*e58aac1aSHou Tao 	skel->bss->pid = getpid();
23*e58aac1aSHou Tao 	usleep(1);
24*e58aac1aSHou Tao 	ASSERT_TRUE(skel->bss->done, "done");
25*e58aac1aSHou Tao 
26*e58aac1aSHou Tao 	/* Use percpu_array to slow bpf_map_free_deferred() down.
27*e58aac1aSHou Tao 	 * The memory allocation may fail, so doesn't check the returned fd.
28*e58aac1aSHou Tao 	 */
29*e58aac1aSHou Tao 	for (i = 0; i < ARRAY_SIZE(map_fd_arr); i++)
30*e58aac1aSHou Tao 		map_fd_arr[i] = bpf_map_create(BPF_MAP_TYPE_PERCPU_ARRAY, NULL, 4, 4, 256, NULL);
31*e58aac1aSHou Tao 
32*e58aac1aSHou Tao 	/* Close array fd later */
33*e58aac1aSHou Tao 	new_fd = dup(bpf_map__fd(skel->maps.array));
34*e58aac1aSHou Tao out:
35*e58aac1aSHou Tao 	normal_map_btf__destroy(skel);
36*e58aac1aSHou Tao 	if (new_fd < 0)
37*e58aac1aSHou Tao 		return;
38*e58aac1aSHou Tao 	/* Use kern_sync_rcu() to wait for the start of the free of the bpf
39*e58aac1aSHou Tao 	 * program and use an assumed delay to wait for the release of the map
40*e58aac1aSHou Tao 	 * btf which is held by other maps (e.g, bss). After that, array map
41*e58aac1aSHou Tao 	 * holds the last reference of map btf.
42*e58aac1aSHou Tao 	 */
43*e58aac1aSHou Tao 	kern_sync_rcu();
44*e58aac1aSHou Tao 	usleep(4000);
45*e58aac1aSHou Tao 	/* Spawn multiple kworkers to delay the invocation of
46*e58aac1aSHou Tao 	 * bpf_map_free_deferred() for array map.
47*e58aac1aSHou Tao 	 */
48*e58aac1aSHou Tao 	for (i = 0; i < ARRAY_SIZE(map_fd_arr); i++) {
49*e58aac1aSHou Tao 		if (map_fd_arr[i] < 0)
50*e58aac1aSHou Tao 			continue;
51*e58aac1aSHou Tao 		close(map_fd_arr[i]);
52*e58aac1aSHou Tao 	}
53*e58aac1aSHou Tao 	close(new_fd);
54*e58aac1aSHou Tao }
55*e58aac1aSHou Tao 
do_test_map_in_map_btf(void)56*e58aac1aSHou Tao static void do_test_map_in_map_btf(void)
57*e58aac1aSHou Tao {
58*e58aac1aSHou Tao 	int err, zero = 0, new_fd = -1;
59*e58aac1aSHou Tao 	struct map_in_map_btf *skel;
60*e58aac1aSHou Tao 
61*e58aac1aSHou Tao 	skel = map_in_map_btf__open_and_load();
62*e58aac1aSHou Tao 	if (!ASSERT_OK_PTR(skel, "open_load"))
63*e58aac1aSHou Tao 		return;
64*e58aac1aSHou Tao 
65*e58aac1aSHou Tao 	err = map_in_map_btf__attach(skel);
66*e58aac1aSHou Tao 	if (!ASSERT_OK(err, "attach"))
67*e58aac1aSHou Tao 		goto out;
68*e58aac1aSHou Tao 
69*e58aac1aSHou Tao 	skel->bss->pid = getpid();
70*e58aac1aSHou Tao 	usleep(1);
71*e58aac1aSHou Tao 	ASSERT_TRUE(skel->bss->done, "done");
72*e58aac1aSHou Tao 
73*e58aac1aSHou Tao 	/* Close inner_array fd later */
74*e58aac1aSHou Tao 	new_fd = dup(bpf_map__fd(skel->maps.inner_array));
75*e58aac1aSHou Tao 	/* Defer the free of inner_array */
76*e58aac1aSHou Tao 	err = bpf_map__delete_elem(skel->maps.outer_array, &zero, sizeof(zero), 0);
77*e58aac1aSHou Tao 	ASSERT_OK(err, "delete inner map");
78*e58aac1aSHou Tao out:
79*e58aac1aSHou Tao 	map_in_map_btf__destroy(skel);
80*e58aac1aSHou Tao 	if (new_fd < 0)
81*e58aac1aSHou Tao 		return;
82*e58aac1aSHou Tao 	/* Use kern_sync_rcu() to wait for the start of the free of the bpf
83*e58aac1aSHou Tao 	 * program and use an assumed delay to wait for the free of the outer
84*e58aac1aSHou Tao 	 * map and the release of map btf. After that, inner map holds the last
85*e58aac1aSHou Tao 	 * reference of map btf.
86*e58aac1aSHou Tao 	 */
87*e58aac1aSHou Tao 	kern_sync_rcu();
88*e58aac1aSHou Tao 	usleep(10000);
89*e58aac1aSHou Tao 	close(new_fd);
90*e58aac1aSHou Tao }
91*e58aac1aSHou Tao 
/* Entry point: register each map-btf scenario as an independent subtest. */
void test_map_btf(void)
{
	if (test__start_subtest("array_btf"))
		do_test_normal_map_btf();

	if (test__start_subtest("inner_array_btf"))
		do_test_map_in_map_btf();
}
99