xref: /linux/tools/testing/selftests/bpf/prog_tests/cgroup_iter.c (revision 0ea5c948cb64bab5bc7a5516774eb8536f05aa0d)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Google */

#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "iters_css_task.skel.h"
#include "cgroup_iter.skel.h"
#include "cgroup_helpers.h"

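/*
 * The tests walk a small cgroup hierarchy created under the selftest
 * cgroup root:
 *
 *   /                      (ROOT)
 *   `-- parent             (PARENT)
 *       |-- child1         (CHILD1)
 *       `-- child2         (CHILD2)
 *
 * ROOT, PARENT, CHILD1 and CHILD2 index into cg_path[], cg_fd[] and
 * cg_id[] below.
 */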
#define ROOT           0
#define PARENT         1
#define CHILD1         2
#define CHILD2         3
#define NUM_CGROUPS    4

#define PROLOGUE       "prologue\n"
#define EPILOGUE       "epilogue\n"

static const char *cg_path[] = {
	"/", "/parent", "/parent/child1", "/parent/child2"
};

static int cg_fd[] = {-1, -1, -1, -1};
static unsigned long long cg_id[] = {0, 0, 0, 0};
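/*
 * Each test fills expected_output with the text that the cgroup_id_printer
 * program is expected to emit and compares it against the iterator output.
 * For example, a preorder walk over /parent produces output of the form
 * (cgroup ids shown are illustrative only):
 *
 *   prologue
 *       1234
 *       1235
 *       1236
 *   epilogue
 */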
static char expected_output[64];

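/* Create the cgroups listed in cg_path[] and record their fds and ids. */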
static int setup_cgroups(void)
{
	int fd, i = 0;

	for (i = 0; i < NUM_CGROUPS; i++) {
		fd = create_and_get_cgroup(cg_path[i]);
		if (fd < 0)
			return fd;

		cg_fd[i] = fd;
		cg_id[i] = get_cgroup_id(cg_path[i]);
	}
	return 0;
}

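/* Close the cgroup fds opened by setup_cgroups(). */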
static void cleanup_cgroups(void)
{
	int i;

	for (i = 0; i < NUM_CGROUPS; i++)
		close(cg_fd[i]);
}

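/* Attach prog as a cgroup iterator over cgroup_fd with the given walk
 * order, read the full iterator output and compare it against
 * expected_output.
 */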
static void read_from_cgroup_iter(struct bpf_program *prog, int cgroup_fd,
				  int order, const char *testname)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	int len, iter_fd;
	static char buf[128];
	size_t left;
	char *p;

	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = cgroup_fd;
	linfo.cgroup.order = order;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0)
		goto free_link;

	memset(buf, 0, sizeof(buf));
	left = ARRAY_SIZE(buf);
	p = buf;
	while ((len = read(iter_fd, p, left)) > 0) {
		p += len;
		left -= len;
	}

	ASSERT_STREQ(buf, expected_output, testname);

	/* read() after iter finishes should be ok. */
	if (len == 0)
		ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");

	close(iter_fd);
free_link:
	bpf_link__destroy(link);
}

/* Invalid cgroup. */
static void test_invalid_cgroup(struct cgroup_iter *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = (__u32)-1;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts);
	ASSERT_ERR_PTR(link, "attach_iter");
	bpf_link__destroy(link);
}

/* Specifying both cgroup_fd and cgroup_id is invalid. */
static void test_invalid_cgroup_spec(struct cgroup_iter *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = (__u32)cg_fd[PARENT];
	linfo.cgroup.cgroup_id = (__u64)cg_id[PARENT];
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts);
	ASSERT_ERR_PTR(link, "attach_iter");
	bpf_link__destroy(link);
}

/* Preorder walk prints parent and child in order. */
static void test_walk_preorder(struct cgroup_iter *skel)
{
	snprintf(expected_output, sizeof(expected_output),
		 PROLOGUE "%8llu\n%8llu\n%8llu\n" EPILOGUE,
		 cg_id[PARENT], cg_id[CHILD1], cg_id[CHILD2]);

	read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
			      BPF_CGROUP_ITER_DESCENDANTS_PRE, "preorder");
}

/* Postorder walk prints child and parent in order. */
static void test_walk_postorder(struct cgroup_iter *skel)
{
	snprintf(expected_output, sizeof(expected_output),
		 PROLOGUE "%8llu\n%8llu\n%8llu\n" EPILOGUE,
		 cg_id[CHILD1], cg_id[CHILD2], cg_id[PARENT]);

	read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
			      BPF_CGROUP_ITER_DESCENDANTS_POST, "postorder");
}

/* Walking parents prints parent and then root. */
static void test_walk_ancestors_up(struct cgroup_iter *skel)
{
	/* terminate the walk when ROOT is met. */
	skel->bss->terminal_cgroup = cg_id[ROOT];

	snprintf(expected_output, sizeof(expected_output),
		 PROLOGUE "%8llu\n%8llu\n" EPILOGUE,
		 cg_id[PARENT], cg_id[ROOT]);

	read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
			      BPF_CGROUP_ITER_ANCESTORS_UP, "ancestors_up");

	skel->bss->terminal_cgroup = 0;
}

/* Early termination prints parent only. */
static void test_early_termination(struct cgroup_iter *skel)
{
	/* terminate the walk after the first element is processed. */
	skel->bss->terminate_early = 1;

	snprintf(expected_output, sizeof(expected_output),
		 PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]);

	read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
			      BPF_CGROUP_ITER_DESCENDANTS_PRE, "early_termination");

	skel->bss->terminate_early = 0;
}

/* Walking self prints self only. */
static void test_walk_self_only(struct cgroup_iter *skel)
{
	snprintf(expected_output, sizeof(expected_output),
		 PROLOGUE "%8llu\n" EPILOGUE, cg_id[PARENT]);

	read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[PARENT],
			      BPF_CGROUP_ITER_SELF_ONLY, "self_only");
}

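/* Self-only walk over a dead cgroup prints only the epilogue. */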
static void test_walk_dead_self_only(struct cgroup_iter *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	char expected_output[128], buf[128];
	const char *cgrp_name = "/dead";
	union bpf_iter_link_info linfo;
	int len, cgrp_fd, iter_fd;
	struct bpf_link *link;
	size_t left;
	char *p;

	cgrp_fd = create_and_get_cgroup(cgrp_name);
	if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
		return;

	/* The cgroup will be dead during the read() iteration, so the output
	 * contains only the epilogue.
	 */
	snprintf(expected_output, sizeof(expected_output), EPILOGUE);

	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = cgrp_fd;
	linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(skel->progs.cgroup_id_printer, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto close_cgrp;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
		goto free_link;

	/* Close link fd and cgroup fd */
	bpf_link__destroy(link);
	close(cgrp_fd);

	/* Remove cgroup to mark it as dead */
	remove_cgroup(cgrp_name);

	/* The first two kern_sync_rcu() and usleep() pairs wait for the
	 * release of the cgroup's css, and the last pair waits for the
	 * cgroup itself to be freed.
	 */
	kern_sync_rcu();
	usleep(8000);
	kern_sync_rcu();
	usleep(8000);
	kern_sync_rcu();
	usleep(1000);

	memset(buf, 0, sizeof(buf));
	left = ARRAY_SIZE(buf);
	p = buf;
	while ((len = read(iter_fd, p, left)) > 0) {
		p += len;
		left -= len;
	}

	ASSERT_STREQ(buf, expected_output, "dead cgroup output");

	/* read() after iter finishes should be ok. */
	if (len == 0)
		ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");

	close(iter_fd);
	return;
free_link:
	bpf_link__destroy(link);
close_cgrp:
	close(cgrp_fd);
}

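/* Self-only walk combined with a css_task iterator; the calling task,
 * which joins CHILD2, is expected to be counted exactly once.
 */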
static void test_walk_self_only_css_task(void)
{
	struct iters_css_task *skel;
	int err;

	skel = iters_css_task__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	bpf_program__set_autoload(skel->progs.cgroup_id_printer, true);

	err = iters_css_task__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	err = join_cgroup(cg_path[CHILD2]);
	if (!ASSERT_OK(err, "join_cgroup"))
		goto cleanup;

	skel->bss->target_pid = getpid();
	snprintf(expected_output, sizeof(expected_output),
		PROLOGUE "%8llu\n" EPILOGUE, cg_id[CHILD2]);
	read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[CHILD2],
		BPF_CGROUP_ITER_SELF_ONLY, "test_walk_self_only_css_task");
	ASSERT_EQ(skel->bss->css_task_cnt, 1, "css_task_cnt");
cleanup:
	iters_css_task__destroy(skel);
}

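/* Entry point: set up the cgroup environment and hierarchy, load the
 * cgroup_iter skeleton and run each subtest.
 */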
void test_cgroup_iter(void)
{
	struct cgroup_iter *skel = NULL;

	if (setup_cgroup_environment())
		return;

	if (setup_cgroups())
		goto out;

	skel = cgroup_iter__open_and_load();
	if (!ASSERT_OK_PTR(skel, "cgroup_iter__open_and_load"))
		goto out;

	if (test__start_subtest("cgroup_iter__invalid_cgroup"))
		test_invalid_cgroup(skel);
	if (test__start_subtest("cgroup_iter__invalid_cgroup_spec"))
		test_invalid_cgroup_spec(skel);
	if (test__start_subtest("cgroup_iter__preorder"))
		test_walk_preorder(skel);
	if (test__start_subtest("cgroup_iter__postorder"))
		test_walk_postorder(skel);
	if (test__start_subtest("cgroup_iter__ancestors_up_walk"))
		test_walk_ancestors_up(skel);
	if (test__start_subtest("cgroup_iter__early_termination"))
		test_early_termination(skel);
	if (test__start_subtest("cgroup_iter__self_only"))
		test_walk_self_only(skel);
	if (test__start_subtest("cgroup_iter__dead_self_only"))
		test_walk_dead_self_only(skel);
	if (test__start_subtest("cgroup_iter__self_only_css_task"))
		test_walk_self_only_css_task();

out:
	cgroup_iter__destroy(skel);
	cleanup_cgroups();
	cleanup_cgroup_environment();
}