xref: /linux/tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
// SPDX-License-Identifier: GPL-2.0

#include <sys/types.h>
#include <sys/socket.h>
#include <test_progs.h>
#include <bpf/btf.h>

#include "lsm_cgroup.skel.h"
#include "lsm_cgroup_nonvoid.skel.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"

static struct btf *btf;

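/* Return the number of BPF_LSM_CGROUP programs attached to @cgroup_fd.
 * When @attach_func is non-NULL, only programs whose attach BTF id matches
 * that kernel function are counted; otherwise the total reported by
 * BPF_PROG_QUERY is returned. The vmlinux BTF is loaded lazily and cached
 * in the file-scope @btf, which is freed in test_lsm_cgroup().
 */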
static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func)
{
	LIBBPF_OPTS(bpf_prog_query_opts, p);
	int cnt = 0;
	int i;

	ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");

	if (!attach_func)
		return p.prog_cnt;

	/* When attach_func is provided, count the number of progs that
	 * attach to the given symbol.
	 */

	if (!btf)
		btf = btf__load_vmlinux_btf();
	if (!ASSERT_OK(libbpf_get_error(btf), "btf_vmlinux"))
		return -1;

	p.prog_ids = malloc(sizeof(u32) * p.prog_cnt);
	p.prog_attach_flags = malloc(sizeof(u32) * p.prog_cnt);
	ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");

	for (i = 0; i < p.prog_cnt; i++) {
		struct bpf_prog_info info = {};
		__u32 info_len = sizeof(info);
		int fd;

		fd = bpf_prog_get_fd_by_id(p.prog_ids[i]);
		ASSERT_GE(fd, 0, "prog_get_fd_by_id");
		ASSERT_OK(bpf_prog_get_info_by_fd(fd, &info, &info_len),
			  "prog_info_by_fd");
		close(fd);

		if (info.attach_btf_id ==
		    btf__find_by_name_kind(btf, attach_func, BTF_KIND_FUNC))
			cnt++;
	}

	free(p.prog_ids);
	free(p.prog_attach_flags);

	return cnt;
}

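/* End-to-end test of the BPF_LSM_CGROUP attach path: programs are attached
 * and replaced through both bpf_prog_attach() and the link API, per-hook and
 * total program counts are verified via query_prog_cnt(), and the policy's
 * observable effects (denied socket families, sk_priority values, hook call
 * counters) are checked across several cgroups.
 */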
static void test_lsm_cgroup_functional(void)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts);
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
	int cgroup_fd = -1, cgroup_fd2 = -1, cgroup_fd3 = -1;
	int listen_fd, client_fd, accepted_fd;
	struct lsm_cgroup *skel = NULL;
	int post_create_prog_fd2 = -1;
	int post_create_prog_fd = -1;
	int bind_link_fd2 = -1;
	int bind_prog_fd2 = -1;
	int alloc_prog_fd = -1;
	int bind_prog_fd = -1;
	int bind_link_fd = -1;
	int clone_prog_fd = -1;
	int err, fd, prio;
	socklen_t socklen;

	cgroup_fd3 = test__join_cgroup("/sock_policy_empty");
	if (!ASSERT_GE(cgroup_fd3, 0, "create empty cgroup"))
		goto close_cgroup;

	cgroup_fd2 = test__join_cgroup("/sock_policy_reuse");
	if (!ASSERT_GE(cgroup_fd2, 0, "create cgroup for reuse"))
		goto close_cgroup;

	cgroup_fd = test__join_cgroup("/sock_policy");
	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
		goto close_cgroup;

	skel = lsm_cgroup__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		goto close_cgroup;

	post_create_prog_fd = bpf_program__fd(skel->progs.socket_post_create);
	post_create_prog_fd2 = bpf_program__fd(skel->progs.socket_post_create2);
	bind_prog_fd = bpf_program__fd(skel->progs.socket_bind);
	bind_prog_fd2 = bpf_program__fd(skel->progs.socket_bind2);
	alloc_prog_fd = bpf_program__fd(skel->progs.socket_alloc);
	clone_prog_fd = bpf_program__fd(skel->progs.socket_clone);

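	/* Baseline: nothing is attached yet. Attach the sk_alloc_security and
	 * inet_csk_clone programs with bpf_prog_attach() and check that the
	 * per-hook and total program counts grow accordingly.
	 */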
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 0, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 0, "total prog count");
	err = bpf_prog_attach(alloc_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
	if (err == -ENOTSUPP) {
		test__skip();
		goto close_cgroup;
	}
	if (!ASSERT_OK(err, "attach alloc_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 1, "total prog count");

	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 0, "prog count");
	err = bpf_prog_attach(clone_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
	if (!ASSERT_OK(err, "attach clone_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 2, "total prog count");

	/* Make sure replacing works. */

	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 0, "prog count");
	err = bpf_prog_attach(post_create_prog_fd, cgroup_fd,
			      BPF_LSM_CGROUP, 0);
	if (!ASSERT_OK(err, "attach post_create_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");

	attach_opts.replace_prog_fd = post_create_prog_fd;
	err = bpf_prog_attach_opts(post_create_prog_fd2, cgroup_fd,
				   BPF_LSM_CGROUP, &attach_opts);
	if (!ASSERT_OK(err, "prog replace post_create_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");

	/* Try the same attach/replace via link API. */

	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 0, "prog count");
	bind_link_fd = bpf_link_create(bind_prog_fd, cgroup_fd,
				       BPF_LSM_CGROUP, NULL);
	if (!ASSERT_GE(bind_link_fd, 0, "link create bind_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");

	update_opts.old_prog_fd = bind_prog_fd;
	update_opts.flags = BPF_F_REPLACE;

	err = bpf_link_update(bind_link_fd, bind_prog_fd2, &update_opts);
	if (!ASSERT_OK(err, "link update bind_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");

	/* Attach another instance of bind program to another cgroup.
	 * This should trigger the reuse of the trampoline shim (two
	 * programs attaching to the same btf_id).
	 */

	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 0, "prog count");
	bind_link_fd2 = bpf_link_create(bind_prog_fd2, cgroup_fd2,
					BPF_LSM_CGROUP, NULL);
	if (!ASSERT_GE(bind_link_fd2, 0, "link create bind_prog_fd2"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count");

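	/* The policy denies AF_UNIX socket creation, but only when no other
	 * LSM (AppArmor, SELinux, Smack) is configured, hence the assertion
	 * is gated on the kernel config.
	 */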
	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (!(skel->kconfig->CONFIG_SECURITY_APPARMOR
	    || skel->kconfig->CONFIG_SECURITY_SELINUX
	    || skel->kconfig->CONFIG_SECURITY_SMACK))
		/* AF_UNIX is prohibited. */
		ASSERT_LT(fd, 0, "socket(AF_UNIX)");
	close(fd);

	/* AF_INET6 gets default policy (sk_priority). */

	fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
		goto detach_cgroup;

	prio = 0;
	socklen = sizeof(prio);
	ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
		  "getsockopt");
	ASSERT_EQ(prio, 123, "sk_priority");

	close(fd);

	/* TX-only AF_PACKET is allowed. */

	ASSERT_LT(socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)), 0,
		  "socket(AF_PACKET, ..., ETH_P_ALL)");

	fd = socket(AF_PACKET, SOCK_RAW, 0);
	ASSERT_GE(fd, 0, "socket(AF_PACKET, ..., 0)");

	/* TX-only AF_PACKET cannot be rebound. */

	struct sockaddr_ll sa = {
		.sll_family = AF_PACKET,
		.sll_protocol = htons(ETH_P_ALL),
	};
	ASSERT_LT(bind(fd, (struct sockaddr *)&sa, sizeof(sa)), 0,
		  "bind(ETH_P_ALL)");

	close(fd);

	/* Trigger a passive open; the accepted socket should get its
	 * sk_priority (234) from the inet_csk_clone hook instead of the
	 * create-time default (123).
	 */
	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	ASSERT_GE(listen_fd, 0, "start_server");
	client_fd = connect_to_fd(listen_fd, 0);
	ASSERT_GE(client_fd, 0, "connect_to_fd");
	accepted_fd = accept(listen_fd, NULL, NULL);
	ASSERT_GE(accepted_fd, 0, "accept");

	prio = 0;
	socklen = sizeof(prio);
	ASSERT_GE(getsockopt(accepted_fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
		  "getsockopt");
	ASSERT_EQ(prio, 234, "sk_priority");

	/* These are replaced and never called. */
	ASSERT_EQ(skel->bss->called_socket_post_create, 0, "called_create");
	ASSERT_EQ(skel->bss->called_socket_bind, 0, "called_bind");

	/* AF_INET6+SOCK_STREAM
	 * AF_PACKET+SOCK_RAW
	 * AF_UNIX+SOCK_STREAM (only when non-BPF LSMs are installed)
	 * listen_fd
	 * client_fd
	 * accepted_fd
	 */
	if (skel->kconfig->CONFIG_SECURITY_APPARMOR
	    || skel->kconfig->CONFIG_SECURITY_SELINUX
	    || skel->kconfig->CONFIG_SECURITY_SMACK)
		/* AF_UNIX+SOCK_STREAM adds one extra call when non-BPF LSMs are installed. */
		ASSERT_EQ(skel->bss->called_socket_post_create2, 6, "called_create2");
	else
		ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");

	/* start_server
	 * bind(ETH_P_ALL)
	 */
	ASSERT_EQ(skel->bss->called_socket_bind2, 2, "called_bind2");
	/* Single accept(). */
	ASSERT_EQ(skel->bss->called_socket_clone, 1, "called_clone");

	/* AF_UNIX+SOCK_STREAM (failed)
	 * AF_INET6+SOCK_STREAM
	 * AF_PACKET+SOCK_RAW (failed)
	 * AF_PACKET+SOCK_RAW
	 * listen_fd
	 * client_fd
	 * accepted_fd
	 */
	ASSERT_EQ(skel->bss->called_socket_alloc, 7, "called_alloc");

	close(listen_fd);
	close(client_fd);
	close(accepted_fd);

	/* Make sure a cgroup with no programs attached doesn't trigger the policy. */

	if (!ASSERT_OK(join_cgroup("/sock_policy_empty"), "join empty cgroup"))
		goto detach_cgroup;

	fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
		goto detach_cgroup;

	prio = 0;
	socklen = sizeof(prio);
	ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
		  "getsockopt");
	ASSERT_EQ(prio, 0, "sk_priority");

	close(fd);

detach_cgroup:
	ASSERT_GE(bpf_prog_detach2(post_create_prog_fd2, cgroup_fd,
				   BPF_LSM_CGROUP), 0, "detach_create");
	close(bind_link_fd);
	/* Don't close bind_link_fd2, exercise cgroup release cleanup. */
	ASSERT_GE(bpf_prog_detach2(alloc_prog_fd, cgroup_fd,
				   BPF_LSM_CGROUP), 0, "detach_alloc");
	ASSERT_GE(bpf_prog_detach2(clone_prog_fd, cgroup_fd,
				   BPF_LSM_CGROUP), 0, "detach_clone");

close_cgroup:
	close(cgroup_fd);
	close(cgroup_fd2);
	close(cgroup_fd3);
	lsm_cgroup__destroy(skel);
}

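/* Negative test: the lsm_cgroup_nonvoid skeleton is expected to be rejected
 * at load time (presumably because its program returns a value where the
 * attached LSM hook expects none), so open_and_load() must return NULL.
 */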
static void test_lsm_cgroup_nonvoid(void)
{
	struct lsm_cgroup_nonvoid *skel = NULL;

	skel = lsm_cgroup_nonvoid__open_and_load();
	ASSERT_NULL(skel, "open succeeds");
	lsm_cgroup_nonvoid__destroy(skel);
}

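/* Subtest entry point; also frees the vmlinux BTF cached by query_prog_cnt(). */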
void test_lsm_cgroup(void)
{
	if (test__start_subtest("functional"))
		test_lsm_cgroup_functional();
	if (test__start_subtest("nonvoid"))
		test_lsm_cgroup_nonvoid();
	btf__free(btf);
}