// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
#include "test_xdp_context_test_run.skel.h"
#include "test_xdp_meta.skel.h"

#define RX_NAME "veth0"
#define TX_NAME "veth1"
#define TX_NETNS "xdp_context_tx"
#define RX_NETNS "xdp_context_rx"
#define TAP_NAME "tap0"
#define DUMMY_NAME "dum0"
#define TAP_NETNS "xdp_context_tuntap"

#define TEST_PAYLOAD_LEN 32
static const __u8 test_payload[TEST_PAYLOAD_LEN] = {
	0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
	0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
	0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
	0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38,
};

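/* Run the test program with an xdp_md context built from the given field
 * values and check that the kernel rejects it with -EINVAL. opts is passed
 * by value so every caller starts from the same baseline options.
 */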
void test_xdp_context_error(int prog_fd, struct bpf_test_run_opts opts,
			    __u32 data_meta, __u32 data, __u32 data_end,
			    __u32 ingress_ifindex, __u32 rx_queue_index,
			    __u32 egress_ifindex)
{
	struct xdp_md ctx = {
		.data = data,
		.data_end = data_end,
		.data_meta = data_meta,
		.ingress_ifindex = ingress_ifindex,
		.rx_queue_index = rx_queue_index,
		.egress_ifindex = egress_ifindex,
	};
	int err;

	opts.ctx_in = &ctx;
	opts.ctx_size_in = sizeof(ctx);
	err = bpf_prog_test_run_opts(prog_fd, &opts);
	ASSERT_EQ(errno, EINVAL, "errno-EINVAL");
	ASSERT_ERR(err, "bpf_prog_test_run");
}

void test_xdp_context_test_run(void)
{
	struct test_xdp_context_test_run *skel = NULL;
	char data[sizeof(pkt_v4) + sizeof(__u32)];
	char bad_ctx[sizeof(struct xdp_md) + 1];
	char large_data[256];
	struct xdp_md ctx_in, ctx_out;
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			    .data_in = &data,
			    .data_size_in = sizeof(data),
			    .ctx_out = &ctx_out,
			    .ctx_size_out = sizeof(ctx_out),
			    .repeat = 1,
			    );
	int err, prog_fd;

	skel = test_xdp_context_test_run__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel"))
		return;
	prog_fd = bpf_program__fd(skel->progs.xdp_context);

	/* Data past the end of the kernel's struct xdp_md must be 0 */
	bad_ctx[sizeof(bad_ctx) - 1] = 1;
	opts.ctx_in = bad_ctx;
	opts.ctx_size_in = sizeof(bad_ctx);
	err = bpf_prog_test_run_opts(prog_fd, &opts);
	ASSERT_EQ(errno, E2BIG, "extradata-errno");
	ASSERT_ERR(err, "bpf_prog_test_run(extradata)");

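	/* Valid run: the leading __u32 of data is presented to the program as
	 * metadata (ctx_in.data = sizeof(__u32)), followed by pkt_v4 as the
	 * packet. The checks below expect the program to consume the metadata
	 * word and hand it back as the XDP verdict.
	 */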
	*(__u32 *)data = XDP_PASS;
	*(struct ipv4_packet *)(data + sizeof(__u32)) = pkt_v4;
	opts.ctx_in = &ctx_in;
	opts.ctx_size_in = sizeof(ctx_in);
	memset(&ctx_in, 0, sizeof(ctx_in));
	ctx_in.data_meta = 0;
	ctx_in.data = sizeof(__u32);
	ctx_in.data_end = ctx_in.data + sizeof(pkt_v4);
	err = bpf_prog_test_run_opts(prog_fd, &opts);
	ASSERT_OK(err, "bpf_prog_test_run(valid)");
	ASSERT_EQ(opts.retval, XDP_PASS, "valid-retval");
	ASSERT_EQ(opts.data_size_out, sizeof(pkt_v4), "valid-datasize");
	ASSERT_EQ(opts.ctx_size_out, opts.ctx_size_in, "valid-ctxsize");
	ASSERT_EQ(ctx_out.data_meta, 0, "valid-datameta");
	ASSERT_EQ(ctx_out.data, 0, "valid-data");
	ASSERT_EQ(ctx_out.data_end, sizeof(pkt_v4), "valid-dataend");

	/* Meta data's size must be a multiple of 4 */
	test_xdp_context_error(prog_fd, opts, 0, 1, sizeof(data), 0, 0, 0);

	/* data_meta must reference the start of data */
	test_xdp_context_error(prog_fd, opts, 4, sizeof(__u32), sizeof(data),
			       0, 0, 0);

	/* Total size of data must be data_end - data_meta or larger */
	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32),
			       sizeof(data) + 1, 0, 0, 0);

	/* RX queue cannot be specified without specifying an ingress */
	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
			       0, 1, 0);

	/* Interface 1 is always the loopback interface which always has only
	 * one RX queue (index 0). This makes index 1 an invalid rx queue index
	 * for interface 1.
	 */
	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
			       1, 1, 0);

	/* The egress cannot be specified */
	test_xdp_context_error(prog_fd, opts, 0, sizeof(__u32), sizeof(data),
			       0, 0, 1);

	/* Meta data must be 216 bytes or smaller (256 - sizeof(struct
	 * xdp_frame)). Test both nearest invalid size and nearest invalid
	 * 4-byte-aligned size, and make sure data_in is large enough that we
	 * actually hit the check on metadata length
	 */
	opts.data_in = large_data;
	opts.data_size_in = sizeof(large_data);
	test_xdp_context_error(prog_fd, opts, 0, 217, sizeof(large_data), 0, 0, 0);
	test_xdp_context_error(prog_fd, opts, 0, 220, sizeof(large_data), 0, 0, 0);

	test_xdp_context_test_run__destroy(skel);
}

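/* Build a minimal Ethernet frame carrying the test payload and send it raw
 * through an AF_PACKET socket on the given interface.
 */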
static int send_test_packet(int ifindex)
{
	int n, sock = -1;
	__u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];

	/* We use the Ethernet header only to identify the test packet */
	struct ethhdr eth = {
		.h_source = { 0x12, 0x34, 0xDE, 0xAD, 0xBE, 0xEF },
	};

	memcpy(packet, &eth, sizeof(eth));
	memcpy(packet + sizeof(eth), test_payload, TEST_PAYLOAD_LEN);

	sock = socket(AF_PACKET, SOCK_RAW, IPPROTO_RAW);
	if (!ASSERT_GE(sock, 0, "socket"))
		goto err;

	struct sockaddr_ll saddr = {
		.sll_family = PF_PACKET,
		.sll_ifindex = ifindex,
		.sll_halen = ETH_ALEN
	};
	n = sendto(sock, packet, sizeof(packet), 0, (struct sockaddr *)&saddr,
		   sizeof(saddr));
	if (!ASSERT_EQ(n, sizeof(packet), "sendto"))
		goto err;

	close(sock);
	return 0;

err:
	if (sock >= 0)
		close(sock);
	return -1;
}

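/* Inject a test frame directly into the TAP device. Frames written to the
 * tap fd show up as ingress traffic on the interface.
 */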
static int write_test_packet(int tap_fd)
{
	__u8 packet[sizeof(struct ethhdr) + TEST_PAYLOAD_LEN];
	int n;

	/* The Ethernet header is mostly not relevant. We use it to identify
	 * the test packet, and some BPF helpers we exercise expect to operate
	 * on Ethernet frames carrying IP packets. Pretend that's the case.
	 */
	struct ethhdr eth = {
		.h_source = { 0x12, 0x34, 0xDE, 0xAD, 0xBE, 0xEF },
		.h_proto = htons(ETH_P_IP),
	};

	memcpy(packet, &eth, sizeof(eth));
	memcpy(packet + sizeof(struct ethhdr), test_payload, TEST_PAYLOAD_LEN);

	n = write(tap_fd, packet, sizeof(packet));
	if (!ASSERT_EQ(n, sizeof(packet), "write packet"))
		return -1;

	return 0;
}

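/* Drain the BPF program's stderr stream and forward it to our stderr so a
 * failing subtest surfaces any diagnostics the program printed.
 */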
static void dump_err_stream(const struct bpf_program *prog)
{
	char buf[512];
	int ret;

	ret = 0;
	do {
		ret = bpf_prog_stream_read(bpf_program__fd(prog),
					   BPF_STREAM_STDERR, buf, sizeof(buf),
					   NULL);
		if (ret > 0)
			fwrite(buf, sizeof(buf[0]), ret, stderr);
	} while (ret > 0);
}

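/* Exercise the XDP metadata path over a veth pair: the RX namespace runs
 * the XDP program and a TC ingress classifier, the TX namespace sends the
 * test packet, and the BPF side reports the outcome through test_pass.
 */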
void test_xdp_context_veth(void)
{
	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
	struct netns_obj *rx_ns = NULL, *tx_ns = NULL;
	struct bpf_program *tc_prog, *xdp_prog;
	struct test_xdp_meta *skel = NULL;
	struct nstoken *nstoken = NULL;
	int rx_ifindex, tx_ifindex;
	int ret;

	tx_ns = netns_new(TX_NETNS, false);
	if (!ASSERT_OK_PTR(tx_ns, "create tx_ns"))
		return;

	rx_ns = netns_new(RX_NETNS, false);
	if (!ASSERT_OK_PTR(rx_ns, "create rx_ns"))
		goto close;

	SYS(close, "ip link add " RX_NAME " netns " RX_NETNS
	    " type veth peer name " TX_NAME " netns " TX_NETNS);

	nstoken = open_netns(RX_NETNS);
	if (!ASSERT_OK_PTR(nstoken, "setns rx_ns"))
		goto close;

	SYS(close, "ip link set dev " RX_NAME " up");

	skel = test_xdp_meta__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
		goto close;

	rx_ifindex = if_nametoindex(RX_NAME);
	if (!ASSERT_GE(rx_ifindex, 0, "if_nametoindex rx"))
		goto close;

	tc_hook.ifindex = rx_ifindex;
	ret = bpf_tc_hook_create(&tc_hook);
	if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
		goto close;

	tc_prog = bpf_object__find_program_by_name(skel->obj, "ing_cls");
	if (!ASSERT_OK_PTR(tc_prog, "open ing_cls prog"))
		goto close;

	tc_opts.prog_fd = bpf_program__fd(tc_prog);
	ret = bpf_tc_attach(&tc_hook, &tc_opts);
	if (!ASSERT_OK(ret, "bpf_tc_attach"))
		goto close;

	xdp_prog = bpf_object__find_program_by_name(skel->obj, "ing_xdp");
	if (!ASSERT_OK_PTR(xdp_prog, "open ing_xdp prog"))
		goto close;

	ret = bpf_xdp_attach(rx_ifindex,
			     bpf_program__fd(xdp_prog),
			     0, NULL);
	if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
		goto close;

	close_netns(nstoken);

	nstoken = open_netns(TX_NETNS);
	if (!ASSERT_OK_PTR(nstoken, "setns tx_ns"))
		goto close;

	SYS(close, "ip link set dev " TX_NAME " up");

	tx_ifindex = if_nametoindex(TX_NAME);
	if (!ASSERT_GE(tx_ifindex, 0, "if_nametoindex tx"))
		goto close;

	skel->bss->test_pass = false;

	ret = send_test_packet(tx_ifindex);
	if (!ASSERT_OK(ret, "send_test_packet"))
		goto close;

	if (!ASSERT_TRUE(skel->bss->test_pass, "test_pass"))
		dump_err_stream(tc_prog);

close:
	close_netns(nstoken);
	test_xdp_meta__destroy(skel);
	netns_free(rx_ns);
	netns_free(tx_ns);
}

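/* Run one tuntap subtest: create a fresh netns with a TAP device, attach
 * xdp_prog via XDP and tc_prio_1_prog (plus an optional tc_prio_2_prog) on
 * TC ingress, write a test packet into the tap fd and check that the BPF
 * side set *test_pass.
 */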
static void test_tuntap(struct bpf_program *xdp_prog,
			struct bpf_program *tc_prio_1_prog,
			struct bpf_program *tc_prio_2_prog,
			bool *test_pass)
{
	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
	struct netns_obj *ns = NULL;
	int tap_fd = -1;
	int tap_ifindex;
	int ret;

	*test_pass = false;

	ns = netns_new(TAP_NETNS, true);
	if (!ASSERT_OK_PTR(ns, "create and open ns"))
		return;

	tap_fd = open_tuntap(TAP_NAME, true);
	if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
		goto close;

	SYS(close, "ip link set dev " TAP_NAME " up");

	tap_ifindex = if_nametoindex(TAP_NAME);
	if (!ASSERT_GE(tap_ifindex, 0, "if_nametoindex"))
		goto close;

	tc_hook.ifindex = tap_ifindex;
	ret = bpf_tc_hook_create(&tc_hook);
	if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
		goto close;

	tc_opts.prog_fd = bpf_program__fd(tc_prio_1_prog);
	ret = bpf_tc_attach(&tc_hook, &tc_opts);
	if (!ASSERT_OK(ret, "bpf_tc_attach"))
		goto close;

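	/* Optionally attach a second TC program at priority 2 so that it runs
	 * after the prio 1 program (provided the first lets the packet
	 * continue); the subtests below use this to pair a writer with a
	 * reader that verifies the written data.
	 */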
	if (tc_prio_2_prog) {
		LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 2,
			    .prog_fd = bpf_program__fd(tc_prio_2_prog));

		ret = bpf_tc_attach(&tc_hook, &tc_opts);
		if (!ASSERT_OK(ret, "bpf_tc_attach"))
			goto close;
	}

	ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(xdp_prog),
			     0, NULL);
	if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
		goto close;

	ret = write_test_packet(tap_fd);
	if (!ASSERT_OK(ret, "write_test_packet"))
		goto close;

	if (!ASSERT_TRUE(*test_pass, "test_pass"))
		dump_err_stream(tc_prio_2_prog ? : tc_prio_1_prog);

close:
	if (tap_fd >= 0)
		close(tap_fd);
	netns_free(ns);
}

/* Write a packet to a tap dev and copy it to ingress of a dummy dev */
static void test_tuntap_mirred(struct bpf_program *xdp_prog,
			       struct bpf_program *tc_prog,
			       bool *test_pass)
{
	LIBBPF_OPTS(bpf_tc_hook, tc_hook, .attach_point = BPF_TC_INGRESS);
	LIBBPF_OPTS(bpf_tc_opts, tc_opts, .handle = 1, .priority = 1);
	struct netns_obj *ns = NULL;
	int dummy_ifindex;
	int tap_fd = -1;
	int tap_ifindex;
	int ret;

	*test_pass = false;

	ns = netns_new(TAP_NETNS, true);
	if (!ASSERT_OK_PTR(ns, "netns_new"))
		return;

	/* Setup dummy interface */
	SYS(close, "ip link add name " DUMMY_NAME " type dummy");
	SYS(close, "ip link set dev " DUMMY_NAME " up");

	dummy_ifindex = if_nametoindex(DUMMY_NAME);
	if (!ASSERT_GE(dummy_ifindex, 0, "if_nametoindex"))
		goto close;

	tc_hook.ifindex = dummy_ifindex;
	ret = bpf_tc_hook_create(&tc_hook);
	if (!ASSERT_OK(ret, "bpf_tc_hook_create"))
		goto close;

	tc_opts.prog_fd = bpf_program__fd(tc_prog);
	ret = bpf_tc_attach(&tc_hook, &tc_opts);
	if (!ASSERT_OK(ret, "bpf_tc_attach"))
		goto close;

	/* Setup TAP interface */
	tap_fd = open_tuntap(TAP_NAME, true);
	if (!ASSERT_GE(tap_fd, 0, "open_tuntap"))
		goto close;

	SYS(close, "ip link set dev " TAP_NAME " up");

	tap_ifindex = if_nametoindex(TAP_NAME);
	if (!ASSERT_GE(tap_ifindex, 0, "if_nametoindex"))
		goto close;

	ret = bpf_xdp_attach(tap_ifindex, bpf_program__fd(xdp_prog), 0, NULL);
	if (!ASSERT_GE(ret, 0, "bpf_xdp_attach"))
		goto close;

	/* Copy all packets received from TAP to dummy ingress */
	SYS(close, "tc qdisc add dev " TAP_NAME " clsact");
	SYS(close, "tc filter add dev " TAP_NAME " ingress "
	    "protocol all matchall "
	    "action mirred ingress mirror dev " DUMMY_NAME);

	/* Receive a packet on TAP */
	ret = write_test_packet(tap_fd);
	if (!ASSERT_OK(ret, "write_test_packet"))
		goto close;

	if (!ASSERT_TRUE(*test_pass, "test_pass"))
		dump_err_stream(tc_prog);

close:
	if (tap_fd >= 0)
		close(tap_fd);
	netns_free(ns);
}

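/* Entry point for the tuntap subtests. Each subtest pairs an XDP program
 * with one or two TC ingress programs from test_xdp_meta and checks a
 * different aspect of packet metadata handling (dynptr access, skb clones,
 * helpers that touch the headroom).
 */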
void test_xdp_context_tuntap(void)
{
	struct test_xdp_meta *skel = NULL;

	skel = test_xdp_meta__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open and load skeleton"))
		return;

	if (test__start_subtest("data_meta"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.ing_cls,
			    NULL, /* tc prio 2 */
			    &skel->bss->test_pass);
	if (test__start_subtest("dynptr_read"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.ing_cls_dynptr_read,
			    NULL, /* tc prio 2 */
			    &skel->bss->test_pass);
	if (test__start_subtest("dynptr_slice"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.ing_cls_dynptr_slice,
			    NULL, /* tc prio 2 */
			    &skel->bss->test_pass);
	if (test__start_subtest("dynptr_write"))
		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
			    skel->progs.ing_cls_dynptr_write,
			    skel->progs.ing_cls_dynptr_read,
			    &skel->bss->test_pass);
	if (test__start_subtest("dynptr_slice_rdwr"))
		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
			    skel->progs.ing_cls_dynptr_slice_rdwr,
			    skel->progs.ing_cls_dynptr_slice,
			    &skel->bss->test_pass);
	if (test__start_subtest("dynptr_offset"))
		test_tuntap(skel->progs.ing_xdp_zalloc_meta,
			    skel->progs.ing_cls_dynptr_offset_wr,
			    skel->progs.ing_cls_dynptr_offset_rd,
			    &skel->bss->test_pass);
	if (test__start_subtest("dynptr_offset_oob"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.ing_cls_dynptr_offset_oob,
			    skel->progs.ing_cls,
			    &skel->bss->test_pass);
	if (test__start_subtest("clone_data_meta_survives_data_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_data_meta_survives_data_write,
				   &skel->bss->test_pass);
	if (test__start_subtest("clone_data_meta_survives_meta_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_data_meta_survives_meta_write,
				   &skel->bss->test_pass);
	if (test__start_subtest("clone_meta_dynptr_survives_data_slice_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_meta_dynptr_survives_data_slice_write,
				   &skel->bss->test_pass);
	if (test__start_subtest("clone_meta_dynptr_survives_meta_slice_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_meta_dynptr_survives_meta_slice_write,
				   &skel->bss->test_pass);
	if (test__start_subtest("clone_meta_dynptr_rw_before_data_dynptr_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_meta_dynptr_rw_before_data_dynptr_write,
				   &skel->bss->test_pass);
	if (test__start_subtest("clone_meta_dynptr_rw_before_meta_dynptr_write"))
		test_tuntap_mirred(skel->progs.ing_xdp,
				   skel->progs.clone_meta_dynptr_rw_before_meta_dynptr_write,
				   &skel->bss->test_pass);
	/* Tests for BPF helpers which touch headroom */
	if (test__start_subtest("helper_skb_vlan_push_pop"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.helper_skb_vlan_push_pop,
			    NULL, /* tc prio 2 */
			    &skel->bss->test_pass);
	if (test__start_subtest("helper_skb_adjust_room"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.helper_skb_adjust_room,
			    NULL, /* tc prio 2 */
			    &skel->bss->test_pass);
	if (test__start_subtest("helper_skb_change_head_tail"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.helper_skb_change_head_tail,
			    NULL, /* tc prio 2 */
			    &skel->bss->test_pass);
	if (test__start_subtest("helper_skb_change_proto"))
		test_tuntap(skel->progs.ing_xdp,
			    skel->progs.helper_skb_change_proto,
			    NULL, /* tc prio 2 */
			    &skel->bss->test_pass);

	test_xdp_meta__destroy(skel);
}