xref: /linux/tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c (revision 5e3fee34f626a8cb8715f5b5409416c481714ebf)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <test_progs.h>
3 #include <network_helpers.h>
4 #include "test_xdp_context_test_run.skel.h"
5 #include "test_xdp_meta.skel.h"
6 
/* Interface and netns names used by the veth, tap, and dummy-device tests */
#define RX_NAME "veth0"
#define TX_NAME "veth1"
#define TX_NETNS "xdp_context_tx"
#define RX_NETNS "xdp_context_rx"
#define TAP_NAME "tap0"
#define DUMMY_NAME "dum0"
#define TAP_NETNS "xdp_context_tuntap"

/* Fixed payload sent through the data path; the BPF side is expected to
 * copy it into the result map so the user side can compare against it.
 */
#define TEST_PAYLOAD_LEN 32
static const __u8 test_payload[TEST_PAYLOAD_LEN] = {
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
	0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
	0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
};
22 
23 void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts,
24 			    __u32 data_meta, __u32 data, __u32 data_end,
25 			    __u32 ingress_ifindex, __u32 rx_queue_index,
26 			    __u32 egress_ifindex)
27 {
28 	struct xdp_md ctx = {
29 		.data = data,
30 		.data_end = data_end,
31 		.data_meta = data_meta,
32 		.ingress_ifindex = ingress_ifindex,
33 		.rx_queue_index = rx_queue_index,
34 		.egress_ifindex = egress_ifindex,
35 	};
36 	int err;
37 
38 	opts.ctx_in = &ctx;
39 	opts.ctx_size_in = sizeof(ctx);
40 	err = bpf_prog_test_run_opts(prog_fd, &opts);
41 	ASSERT_EQ(errno, EINVAL, "errno-EINVAL");
42 	ASSERT_ERR(err, "bpf_prog_test_run");
43 }
44 
/* Exercise xdp_md context validation in BPF_PROG_TEST_RUN: one valid run
 * whose context must round-trip correctly, followed by a series of invalid
 * contexts that the kernel must reject (see test_xdp_context_error).
 */
void test_xdp_context_test_run(void)
{
	struct test_xdp_context_test_run *skel = NULL;
	/* Leading __u32 is consumed as metadata; the rest is the IPv4 packet */
	char data[sizeof(pkt_v4) + sizeof(__u32)];
	char bad_ctx[sizeof(struct xdp_md) + 1];
	struct xdp_md ctx_in, ctx_out;
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			    .data_in = &data,
			    .data_size_in = sizeof(data),
			    .ctx_out = &ctx_out,
			    .ctx_size_out = sizeof(ctx_out),
			    .repeat = 1,
		);
	int err, prog_fd;

	skel = test_xdp_context_test_run__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel"))
		return;
	prog_fd = bpf_program__fd(skel->progs.xdp_context);

	/* Data past the end of the kernel's struct xdp_md must be 0 */
	bad_ctx[sizeof(bad_ctx) - 1] = 1;
	/* NOTE(review): only the last byte of bad_ctx is initialized;
	 * presumably the oversized-context check fires before the leading
	 * bytes are interpreted, but zeroing bad_ctx first would be more
	 * robust — confirm.
	 */
	opts.ctx_in = bad_ctx;
	opts.ctx_size_in = sizeof(bad_ctx);
	err = bpf_prog_test_run_opts(prog_fd, &opts);
	ASSERT_EQ(errno, E2BIG, "extradata-errno");
	ASSERT_ERR(err, "bpf_prog_test_run(extradata)");

	/* Valid run: 4 bytes of metadata holding XDP_PASS, then the packet */
	*(__u32 *)data = XDP_PASS;
	*(struct ipv4_packet *)(data + sizeof(__u32)) = pkt_v4;
	opts.ctx_in = &ctx_in;
	opts.ctx_size_in = sizeof(ctx_in);
	memset(&ctx_in, 0, sizeof(ctx_in));
	ctx_in.data_meta = 0;
	ctx_in.data = sizeof(__u32);
	ctx_in.data_end = ctx_in.data + sizeof(pkt_v4);
	err = bpf_prog_test_run_opts(prog_fd, &opts);
	ASSERT_OK(err, "bpf_prog_test_run(valid)");
	ASSERT_EQ(opts.retval, XDP_PASS, "valid-retval");
	ASSERT_EQ(opts.data_size_out, sizeof(pkt_v4), "valid-datasize");
	ASSERT_EQ(opts.ctx_size_out, opts.ctx_size_in, "valid-ctxsize");
	ASSERT_EQ(ctx_out.data_meta, 0, "valid-datameta");
	ASSERT_EQ(ctx_out.data, 0, "valid-data");
	ASSERT_EQ(ctx_out.data_end, sizeof(pkt_v4), "valid-dataend");

	/* Meta data's size must be a multiple of 4 */
	test_xdp_context_error(prog_fd, opts, 0, 1, sizeof(data), 0, 0, 0);

	/* data_meta must reference the start of data */
	test_xdp_context_error(prog_fd, opts, 4, sizeof(__u32), sizeof(data),
			       0, 0, 0);

	/* Meta data must be 255 bytes or smaller */
	test_xdp_context_error(prog_fd, opts, 0, 256, sizeof(data), 0, 0, 0);

	/* Total size of data must be data_end - data_meta or larger */
	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
			       sizeof(data) + 1, 0, 0, 0);

	/* RX queue cannot be specified without specifying an ingress */
	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
			       0, 1, 0);

	/* Interface 1 is always the loopback interface which always has only
	 * one RX queue (index 0). This makes index 1 an invalid rx queue index
	 * for interface 1.
	 */
	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
			       1, 1, 0);

	/* The egress cannot be specified */
	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
			       0, 0, 1);

	test_xdp_context_test_run__destroy(skel);
}
121 
122 static int send_test_packet(int ifindex)
123 {
124 	int n, sock = -1;
125 	__u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];
126 
127 	/* The ethernet header is not relevant for this test and doesn't need to
128 	 * be meaningful.
129 	 */
130 	struct ethhdr eth = { 0 };
131 
132 	memcpy(packet, &eth, sizeof(eth));
133 	memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN);
134 
135 	sock = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
136 	if (!ASSERT_GE(sock, 0, "socket"))
137 		goto err;
138 
139 	struct sockaddr_ll saddr = {
140 		.sll_family = PF_PACKET,
141 		.sll_ifindex = ifindex,
142 		.sll_halen = ETH_ALEN
143 	};
144 	n = sendto(sock, packet, sizeof(packet), 0, (struct sockaddr *)&saddr,
145 		   sizeof(saddr));
146 	if (!ASSERT_EQ(n, sizeof(packet), "sendto"))
147 		goto err;
148 
149 	close(sock);
150 	return 0;
151 
152 err:
153 	if (sock >= 0)
154 		close(sock);
155 	return -1;
156 }
157 
158 static int write_test_packet(int tap_fd)
159 {
160 	__u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];
161 	int n;
162 
163 	/* The ethernet header doesn't need to be valid for this test */
164 	memset(packet, 0, sizeof(struct ethhdr));
165 	memcpy(packet + sizeof(struct ethhdr), test_payload, TEST_PAYLOAD_LEN);
166 
167 	n = write(tap_fd, packet, sizeof(packet));
168 	if (!ASSERT_EQ(n, sizeof(packet), "write packet"))
169 		return -1;
170 
171 	return 0;
172 }
173 
174 static void assert_test_result(const struct bpf_map *result_map)
175 {
176 	int err;
177 	__u32 map_key = 0;
178 	__u8 map_value[TEST_PAYLOAD_LEN];
179 
180 	err = bpf_map__lookup_elem(result_map, &map_key, sizeof(map_key),
181 				   &map_value, TEST_PAYLOAD_LEN, BPF_ANY);
182 	if (!ASSERT_OK(err, "lookup test_result"))
183 		return;
184 
185 	ASSERT_MEMEQ(&map_value, &test_payload, TEST_PAYLOAD_LEN,
186 		     "test_result map contains test payload");
187 }
188 
189 static bool clear_test_result(struct bpf_map *result_map)
190 {
191 	const __u8 v[sizeof(test_payload)] = {};
192 	const __u32 k = 0;
193 	int err;
194 
195 	err = bpf_map__update_elem(result_map, &k, sizeof(k), v, sizeof(v), BPF_ANY);
196 	ASSERT_OK(err, "update test_result");
197 
198 	return err == 0;
199 }
200 
/* End-to-end metadata test over a veth pair split across two namespaces:
 * the RX side attaches the ing_xdp XDP program and the ing_cls TC ingress
 * classifier, the TX side sends one test packet, and the result map is
 * checked for the payload.
 */
void test_xdp_context_veth(void)
{
	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
	struct netns_obj *rx_ns = NULL, *tx_ns = NULL;
	struct bpf_program *tc_prog, *xdp_prog;
	struct test_xdp_meta *skel = NULL;
	struct nstoken *nstoken = NULL;
	int rx_ifindex, tx_ifindex;
	int ret;

	tx_ns = netns_new(TX_NETNS, false);
	if (!ASSERT_OK_PTR(tx_ns, "create tx_ns"))
		return;

	rx_ns = netns_new(RX_NETNS, false);
	if (!ASSERT_OK_PTR(rx_ns, "create rx_ns"))
		goto close;

	/* One veth pair with an end in each namespace */
	SYS(close, "ip link add " RX_NAME " netns " RX_NETNS
	    " type veth peer name " TX_NAME " netns " TX_NETNS);

	nstoken = open_netns(RX_NETNS);
	if (!ASSERT_OK_PTR(nstoken, "setns rx_ns"))
		goto close;

	SYS(close, "ip link set dev " RX_NAME " up");

	skel = test_xdp_meta__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
		goto close;

	rx_ifindex = if_nametoindex(RX_NAME);
	/* NOTE(review): if_nametoindex() returns 0 (not a negative value) on
	 * failure, so ASSERT_GE(..., 0) can never trip; ASSERT_NEQ(..., 0)
	 * would be the effective check.
	 */
	if (!ASSERT_GE(rx_ifindex, 0, "if_nametoindex rx"))
		goto close;

	tc_hook.ifindex = rx_ifindex;
	ret = bpf_tc_hook_create(&tc_hook);
	if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
		goto close;

	tc_prog = bpf_object__find_program_by_name(skel->obj, "ing_cls");
	if (!ASSERT_OK_PTR(tc_prog, "open ing_cls prog"))
		goto close;

	tc_opts.prog_fd = bpf_program__fd(tc_prog);
	ret = bpf_tc_attach(&tc_hook, &tc_opts);
	if (!ASSERT_OK(ret, "bpf_tc_attach"))
		goto close;

	xdp_prog = bpf_object__find_program_by_name(skel->obj, "ing_xdp");
	if (!ASSERT_OK_PTR(xdp_prog, "open ing_xdp prog"))
		goto close;

	ret = bpf_xdp_attach(rx_ifindex,
			     bpf_program__fd(xdp_prog),
			     0, NULL);
	if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
		goto close;

	/* Switch to the TX namespace to send the packet */
	close_netns(nstoken);

	nstoken = open_netns(TX_NETNS);
	if (!ASSERT_OK_PTR(nstoken, "setns tx_ns"))
		goto close;

	SYS(close, "ip link set dev " TX_NAME " up");

	tx_ifindex = if_nametoindex(TX_NAME);
	if (!ASSERT_GE(tx_ifindex, 0, "if_nametoindex tx"))
		goto close;

	ret = send_test_packet(tx_ifindex);
	if (!ASSERT_OK(ret, "send_test_packet"))
		goto close;

	assert_test_result(skel->maps.test_result);

close:
	close_netns(nstoken);
	test_xdp_meta__destroy(skel);
	netns_free(rx_ns);
	netns_free(tx_ns);
}
285 
/* Run one metadata subtest over a tap device in its own namespace.
 *
 * xdp_prog is attached to the tap interface; tc_prio_1_prog (and, if
 * non-NULL, tc_prio_2_prog at priority 2) are attached to TC ingress.
 * A test packet is written into the tap fd and result_map is checked
 * for the expected payload afterwards.
 */
static void test_tuntap(struct bpf_program *xdp_prog,
			struct bpf_program *tc_prio_1_prog,
			struct bpf_program *tc_prio_2_prog,
			struct bpf_map *result_map)
{
	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
	struct netns_obj *ns = NULL;
	int tap_fd = -1;
	int tap_ifindex;
	int ret;

	/* Wipe any result left behind by a previous subtest */
	if (!clear_test_result(result_map))
		return;

	ns = netns_new(TAP_NETNS, true);
	if (!ASSERT_OK_PTR(ns, "create and open ns"))
		return;

	tap_fd = open_tuntap(TAP_NAME, true);
	if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
		goto close;

	SYS(close, "ip link set dev " TAP_NAME " up");

	tap_ifindex = if_nametoindex(TAP_NAME);
	/* NOTE(review): if_nametoindex() returns 0 on failure, so
	 * ASSERT_GE(..., 0) can never trip here.
	 */
	if (!ASSERT_GE(tap_ifindex, 0, "if_nametoindex"))
		goto close;

	tc_hook.ifindex = tap_ifindex;
	ret = bpf_tc_hook_create(&tc_hook);
	if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
		goto close;

	tc_opts.prog_fd = bpf_program__fd(tc_prio_1_prog);
	ret = bpf_tc_attach(&tc_hook, &tc_opts);
	if (!ASSERT_OK(ret, "bpf_tc_attach"))
		goto close;

	if (tc_prio_2_prog) {
		/* Shadowed tc_opts on purpose: same handle, priority 2 */
		LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 2,
			    .prog_fd = bpf_program__fd(tc_prio_2_prog));

		ret = bpf_tc_attach(&tc_hook, &tc_opts);
		if (!ASSERT_OK(ret, "bpf_tc_attach"))
			goto close;
	}

	ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(xdp_prog),
			     0, NULL);
	if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
		goto close;

	ret = write_test_packet(tap_fd);
	if (!ASSERT_OK(ret, "write_test_packet"))
		goto close;

	assert_test_result(result_map);

close:
	if (tap_fd >= 0)
		close(tap_fd);
	netns_free(ns);
}
350 
/* Write a packet to a tap dev and copy it to ingress of a dummy dev.
 *
 * xdp_prog is attached to the tap device and tc_prog to the dummy
 * device's TC ingress; a mirred action mirrors every tap ingress packet
 * to the dummy device. The BPF side reports its verdict through
 * *test_pass, which this function clears up front and asserts at the end.
 */
static void test_tuntap_mirred(struct bpf_program *xdp_prog,
			       struct bpf_program *tc_prog,
			       bool *test_pass)
{
	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
	struct netns_obj *ns = NULL;
	int dummy_ifindex;
	int tap_fd = -1;
	int tap_ifindex;
	int ret;

	*test_pass = false;

	ns = netns_new(TAP_NETNS, true);
	if (!ASSERT_OK_PTR(ns, "netns_new"))
		return;

	/* Setup dummy interface */
	SYS(close, "ip link add name " DUMMY_NAME " type dummy");
	SYS(close, "ip link set dev " DUMMY_NAME " up");

	dummy_ifindex = if_nametoindex(DUMMY_NAME);
	/* NOTE(review): if_nametoindex() returns 0 on failure, so
	 * ASSERT_GE(..., 0) can never trip here.
	 */
	if (!ASSERT_GE(dummy_ifindex, 0, "if_nametoindex"))
		goto close;

	tc_hook.ifindex = dummy_ifindex;
	ret = bpf_tc_hook_create(&tc_hook);
	if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
		goto close;

	tc_opts.prog_fd = bpf_program__fd(tc_prog);
	ret = bpf_tc_attach(&tc_hook, &tc_opts);
	if (!ASSERT_OK(ret, "bpf_tc_attach"))
		goto close;

	/* Setup TAP interface */
	tap_fd = open_tuntap(TAP_NAME, true);
	if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
		goto close;

	SYS(close, "ip link set dev " TAP_NAME " up");

	tap_ifindex = if_nametoindex(TAP_NAME);
	if (!ASSERT_GE(tap_ifindex, 0, "if_nametoindex"))
		goto close;

	ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(xdp_prog), 0, NULL);
	if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
		goto close;

	/* Copy all packets received from TAP to dummy ingress */
	SYS(close, "tc qdisc add dev " TAP_NAME " clsact");
	SYS(close, "tc filter add dev " TAP_NAME " ingress "
		   "protocol all matchall "
		   "action mirred ingress mirror dev " DUMMY_NAME);

	/* Receive a packet on TAP */
	ret = write_test_packet(tap_fd);
	if (!ASSERT_OK(ret, "write_test_packet"))
		goto close;

	ASSERT_TRUE(*test_pass, "test_pass");

close:
	if (tap_fd >= 0)
		close(tap_fd);
	netns_free(ns);
}
421 
/* Subtest dispatcher: loads the test_xdp_meta skeleton once and runs each
 * metadata scenario — plain and dynptr-based reads/writes via test_tuntap,
 * and clone/metadata-invalidation cases via test_tuntap_mirred.
 */
void test_xdp_context_tuntap(void)
{
	struct test_xdp_meta *skel = NULL;

	skel = test_xdp_meta__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
		return;

	if (test__start_subtest("data_meta"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.ing_cls,
			    NULL, /* tc prio 2 */
			    skel->maps.test_result);
	if (test__start_subtest("dynptr_read"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.ing_cls_dynptr_read,
			    NULL, /* tc prio 2 */
			    skel->maps.test_result);
	if (test__start_subtest("dynptr_slice"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.ing_cls_dynptr_slice,
			    NULL, /* tc prio 2 */
			    skel->maps.test_result);
	if (test__start_subtest("dynptr_write"))
		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
			    skel->progs.ing_cls_dynptr_write,
			    skel->progs.ing_cls_dynptr_read,
			    skel->maps.test_result);
	if (test__start_subtest("dynptr_slice_rdwr"))
		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
			    skel->progs.ing_cls_dynptr_slice_rdwr,
			    skel->progs.ing_cls_dynptr_slice,
			    skel->maps.test_result);
	if (test__start_subtest("dynptr_offset"))
		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
			    skel->progs.ing_cls_dynptr_offset_wr,
			    skel->progs.ing_cls_dynptr_offset_rd,
			    skel->maps.test_result);
	if (test__start_subtest("dynptr_offset_oob"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.ing_cls_dynptr_offset_oob,
			    skel->progs.ing_cls,
			    skel->maps.test_result);
	if (test__start_subtest("clone_data_meta_empty_on_data_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_data_meta_empty_on_data_write,
				   &skel->bss->test_pass);
	if (test__start_subtest("clone_data_meta_empty_on_meta_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_data_meta_empty_on_meta_write,
				   &skel->bss->test_pass);
	if (test__start_subtest("clone_dynptr_empty_on_data_slice_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_dynptr_empty_on_data_slice_write,
				   &skel->bss->test_pass);
	if (test__start_subtest("clone_dynptr_empty_on_meta_slice_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_dynptr_empty_on_meta_slice_write,
				   &skel->bss->test_pass);
	if (test__start_subtest("clone_dynptr_rdonly_before_data_dynptr_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_dynptr_rdonly_before_data_dynptr_write,
				   &skel->bss->test_pass);
	if (test__start_subtest("clone_dynptr_rdonly_before_meta_dynptr_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_dynptr_rdonly_before_meta_dynptr_write,
				   &skel->bss->test_pass);

	test_xdp_meta__destroy(skel);
}
492