1 // SPDX-License-Identifier: GPL-2.0
2 #define _GNU_SOURCE
3 #include <test_progs.h>
4 #include <network_helpers.h>
5 #include <linux/if_tun.h>
6 #include <sys/uio.h>
7
8 #include "bpf_flow.skel.h"
9
10 #define TEST_NS "flow_dissector_ns"
11 #define FLOW_CONTINUE_SADDR 0x7f00007f /* 127.0.0.127 */
12 #define TEST_NAME_MAX_LEN 64
13
14 #ifndef IP_MF
15 #define IP_MF 0x2000
16 #endif
17
/* Plain IPv4/TCP frame; __packed so on-wire layout has no padding. */
struct ipv4_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;
23
/* IPIP-encapsulated frame: outer IPv4 carrying inner IPv4/TCP. */
struct ipip_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct iphdr iph_inner;
	struct tcphdr tcp;
} __packed;
30
/* Single-tagged (802.1Q) VLAN frame carrying IPv4/TCP.
 * vlan_tci/vlan_proto follow the Ethernet header as on the wire.
 */
struct svlan_ipv4_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;
38
/* Plain IPv6/TCP frame. */
struct ipv6_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;
44
/* IPv6 frame with a Fragment extension header between the IPv6 header
 * and TCP.  frag_hdr mirrors the wire format of the fragment header.
 */
struct ipv6_frag_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct frag_hdr {
		__u8 nexthdr;
		__u8 reserved;
		__be16 frag_off;
		__be32 identification;
	} ipf;
	struct tcphdr tcp;
} __packed;
56
/* Double-tagged (802.1ad QinQ) VLAN frame carrying IPv6/TCP. */
struct dvlan_ipv6_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;
66
/* Minimal GRE header (RFC 2784): flags/version word plus the
 * encapsulated protocol.  Only the type is needed; the stray
 * file-scope variable that used to be declared here was unused
 * and has been dropped.
 */
struct gre_base_hdr {
	__be16 flags;
	__be16 protocol;
};
71
/* GRE-encapsulated frame: outer IPv4, minimal GRE header (no optional
 * fields), inner IPv4/TCP.
 */
struct gre_minimal_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct gre_base_hdr gre_hdr;
	struct iphdr iph_inner;
	struct tcphdr tcp;
} __packed;
79
/* One flow dissector test case: an input packet, the flow keys the
 * dissector is expected to produce, optional input flags, and the
 * expected program return value.
 */
struct test {
	const char *name;	/* base name; suffix added per test mode */
	union {			/* input frame, variant chosen per case */
		struct ipv4_pkt ipv4;
		struct svlan_ipv4_pkt svlan_ipv4;
		struct ipip_pkt ipip;
		struct ipv6_pkt ipv6;
		struct ipv6_frag_pkt ipv6_frag;
		struct dvlan_ipv6_pkt dvlan_ipv6;
		struct gre_minimal_pkt gre_minimal;
	} pkt;
	struct bpf_flow_keys keys;	/* expected dissection result */
	__u32 flags;		/* BPF_FLOW_DISSECTOR_F_* input flags */
	__u32 retval;		/* expected program return value */
};
95
96 #define VLAN_HLEN 4
97
/* Table of dissector test cases.  Conventions:
 * - .keys.flags mirrors the input .flags because the dissector copies
 *   the input flags into the resulting flow keys;
 * - tot_len/payload_len use MAGIC_BYTES (from network_helpers.h,
 *   presumably the canonical test payload size — confirm there);
 * - cases without sport/dport in .keys expect dissection to stop
 *   before the transport header.
 */
struct test tests[] = {
	/* Baseline IPv4/TCP */
	{
		.name = "ipv4",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	/* Baseline IPv6/TCP */
	{
		.name = "ipv6",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	/* Single 802.1Q tag: offsets shift by one VLAN_HLEN */
	{
		.name = "802.1q-ipv4",
		.pkt.svlan_ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN,
			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	/* QinQ (802.1ad outer + 802.1Q inner): offsets shift by two tags */
	{
		.name = "802.1ad-ipv6",
		.pkt.dvlan_ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
				sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	/* First IPv4 fragment with PARSE_1ST_FRAG: ports are dissected */
	{
		.name = "ipv4-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
		.retval = BPF_OK,
	},
	/* Same fragment without the flag: no ports in the result */
	{
		.name = "ipv4-no-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
		},
		.retval = BPF_OK,
	},
	/* First IPv6 fragment with PARSE_1ST_FRAG */
	{
		.name = "ipv6-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
		.retval = BPF_OK,
	},
	/* IPv6 fragment without the flag: no ports */
	{
		.name = "ipv6-no-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
		},
		.retval = BPF_OK,
	},
	/* Non-zero flow label is reported in the keys */
	{
		.name = "ipv6-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
		.retval = BPF_OK,
	},
	/* STOP_AT_FLOW_LABEL with a label present: stop before ports */
	{
		.name = "ipv6-no-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
		.retval = BPF_OK,
	},
	/* STOP_AT_FLOW_LABEL with a zero label: dissection continues */
	{
		.name = "ipv6-empty-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0x00, 0x00, 0x00 },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
		.retval = BPF_OK,
	},
	/* IPIP encapsulation is followed into the inner header */
	{
		.name = "ipip-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES -
				sizeof(struct iphdr)),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr) +
				sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	/* STOP_AT_ENCAP: stop at the outer IPIP header */
	{
		.name = "ipip-no-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES -
				sizeof(struct iphdr)),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_IPIP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
		.retval = BPF_OK,
	},
	/* Source address FLOW_CONTINUE_SADDR makes the program bail out
	 * with BPF_FLOW_DISSECTOR_CONTINUE; no keys are expected.
	 */
	{
		.name = "ipip-encap-dissector-continue",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.saddr = __bpf_constant_htonl(FLOW_CONTINUE_SADDR),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES -
				sizeof(struct iphdr)),
			.tcp.doff = 5,
			.tcp.source = 99,
			.tcp.dest = 9090,
		},
		.retval = BPF_FLOW_DISSECTOR_CONTINUE,
	},
	/* Minimal GRE (no optional fields) is followed into the inner IP */
	{
		.name = "ip-gre",
		.pkt.gre_minimal = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_GRE,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.gre_hdr = {
				.flags = 0,
				.protocol = __bpf_constant_htons(ETH_P_IP),
			},
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES -
				sizeof(struct iphdr)),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr) * 2 +
				sizeof(struct gre_base_hdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
			.sport = 80,
			.dport = 8080,
		},
		.retval = BPF_OK,
	},
	/* GRE with STOP_AT_ENCAP: stop after the GRE header */
	{
		.name = "ip-gre-no-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_GRE,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES -
				sizeof(struct iphdr)),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr)
				 + sizeof(struct gre_base_hdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_GRE,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
		.retval = BPF_OK,
	},
};
498
/* Verify flow dissector attachment rules across network namespaces:
 * only one dissector may be active system-wide — a program attached in
 * the root namespace blocks attachment in non-root namespaces, and
 * vice versa.  Serial because it mutates global attach state.
 */
void serial_test_flow_dissector_namespace(void)
{
	struct bpf_flow *skel;
	struct nstoken *ns;
	int err, prog_fd;

	skel = bpf_flow__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
		return;

	prog_fd = bpf_program__fd(skel->progs._dissect);
	if (!ASSERT_OK_FD(prog_fd, "get dissector fd"))
		goto out_destroy_skel;

	/* We must be able to attach a flow dissector to root namespace */
	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	if (!ASSERT_OK(err, "attach on root namespace ok"))
		goto out_destroy_skel;

	err = make_netns(TEST_NS);
	if (!ASSERT_OK(err, "create non-root net namespace"))
		goto out_destroy_skel;

	/* We must not be able to additionally attach a flow dissector to a
	 * non-root net namespace
	 */
	ns = open_netns(TEST_NS);
	if (!ASSERT_OK_PTR(ns, "enter non-root net namespace"))
		goto out_clean_ns;
	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	if (!ASSERT_ERR(err,
			"refuse new flow dissector in non-root net namespace"))
		/* attach unexpectedly succeeded: undo it */
		bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
	else
		ASSERT_EQ(errno, EEXIST,
			  "refused because of already attached prog");
	close_netns(ns);

	/* If no flow dissector is attached to the root namespace, we must
	 * be able to attach one to a non-root net namespace
	 */
	bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
	ns = open_netns(TEST_NS);
	ASSERT_OK_PTR(ns, "enter non-root net namespace");
	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	close_netns(ns);
	ASSERT_OK(err, "accept new flow dissector in non-root net namespace");

	/* If a flow dissector is attached to non-root net namespace, attaching
	 * a flow dissector to root namespace must fail
	 */
	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	if (!ASSERT_ERR(err, "refuse new flow dissector on root namespace"))
		bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
	else
		ASSERT_EQ(errno, EEXIST,
			  "refused because of already attached prog");

	/* detach must happen from inside the namespace that owns the prog */
	ns = open_netns(TEST_NS);
	bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
	close_netns(ns);
out_clean_ns:
	remove_netns(TEST_NS);
out_destroy_skel:
	bpf_flow__destroy(skel);
}
565
create_tap(const char * ifname)566 static int create_tap(const char *ifname)
567 {
568 struct ifreq ifr = {
569 .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
570 };
571 int fd, ret;
572
573 strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
574
575 fd = open("/dev/net/tun", O_RDWR);
576 if (fd < 0)
577 return -1;
578
579 ret = ioctl(fd, TUNSETIFF, &ifr);
580 if (ret)
581 return -1;
582
583 return fd;
584 }
585
/* Write one packet of @len bytes to the tap fd in a single writev()
 * call.  Returns the number of bytes written, or -1 on error.
 */
static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov = {
		.iov_base = pkt,
		.iov_len = len,
	};

	return writev(fd, &iov, 1);
}
596
ifup(const char * ifname)597 static int ifup(const char *ifname)
598 {
599 struct ifreq ifr = {};
600 int sk, ret;
601
602 strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
603
604 sk = socket(PF_INET, SOCK_DGRAM, 0);
605 if (sk < 0)
606 return -1;
607
608 ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
609 if (ret) {
610 close(sk);
611 return -1;
612 }
613
614 ifr.ifr_flags |= IFF_UP;
615 ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
616 if (ret) {
617 close(sk);
618 return -1;
619 }
620
621 close(sk);
622 return 0;
623 }
624
init_prog_array(struct bpf_object * obj,struct bpf_map * prog_array)625 static int init_prog_array(struct bpf_object *obj, struct bpf_map *prog_array)
626 {
627 int i, err, map_fd, prog_fd;
628 struct bpf_program *prog;
629 char prog_name[32];
630
631 map_fd = bpf_map__fd(prog_array);
632 if (map_fd < 0)
633 return -1;
634
635 for (i = 0; i < bpf_map__max_entries(prog_array); i++) {
636 snprintf(prog_name, sizeof(prog_name), "flow_dissector_%d", i);
637
638 prog = bpf_object__find_program_by_name(obj, prog_name);
639 if (!prog)
640 return -1;
641
642 prog_fd = bpf_program__fd(prog);
643 if (prog_fd < 0)
644 return -1;
645
646 err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
647 if (err)
648 return -1;
649 }
650 return 0;
651 }
652
/* Drive the skb-less dissection path: transmit each test packet through
 * the tap device (the dissector must already be attached in this netns)
 * and compare the flow keys the program stored in the @keys map against
 * the expected ones.  Map entries are keyed by (sport << 16 | dport)
 * from the expected keys.  @test_suffix distinguishes subtest names per
 * attach mode.
 */
static void run_tests_skb_less(int tap_fd, struct bpf_map *keys,
			       char *test_suffix)
{
	char test_name[TEST_NAME_MAX_LEN];
	int i, err, keys_fd;

	keys_fd = bpf_map__fd(keys);
	if (!ASSERT_OK_FD(keys_fd, "bpf_map__fd"))
		return;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		/* Keep in sync with 'flags' from eth_get_headlen. */
		__u32 eth_get_headlen_flags =
			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
		struct bpf_flow_keys flow_keys = {};
		__u32 key = (__u32)(tests[i].keys.sport) << 16 |
			    tests[i].keys.dport;
		snprintf(test_name, TEST_NAME_MAX_LEN, "%s-%s", tests[i].name,
			 test_suffix);
		if (!test__start_subtest(test_name))
			continue;

		/* For skb-less case we can't pass input flags; run
		 * only the tests that have a matching set of flags.
		 */

		if (tests[i].flags != eth_get_headlen_flags)
			continue;

		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
		if (!ASSERT_EQ(err, sizeof(tests[i].pkt), "tx_tap"))
			continue;

		/* check the stored flow_keys only if BPF_OK expected */
		if (tests[i].retval != BPF_OK)
			continue;

		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
		if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
			continue;

		ASSERT_MEMEQ(&flow_keys, &tests[i].keys,
			     sizeof(struct bpf_flow_keys),
			     "returned flow keys");

		/* drop the entry so a later test can't see stale keys */
		err = bpf_map_delete_elem(keys_fd, &key);
		ASSERT_OK(err, "bpf_map_delete_elem");
	}
}
702
test_flow_dissector_skb_less_direct_attach(void)703 void test_flow_dissector_skb_less_direct_attach(void)
704 {
705 int err, prog_fd, tap_fd;
706 struct bpf_flow *skel;
707 struct netns_obj *ns;
708
709 ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true);
710 if (!ASSERT_OK_PTR(ns, "create and open netns"))
711 return;
712
713 skel = bpf_flow__open_and_load();
714 if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
715 goto out_clean_ns;
716
717 err = init_prog_array(skel->obj, skel->maps.jmp_table);
718 if (!ASSERT_OK(err, "init_prog_array"))
719 goto out_destroy_skel;
720
721 prog_fd = bpf_program__fd(skel->progs._dissect);
722 if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
723 goto out_destroy_skel;
724
725 err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
726 if (!ASSERT_OK(err, "bpf_prog_attach"))
727 goto out_destroy_skel;
728
729 tap_fd = create_tap("tap0");
730 if (!ASSERT_OK_FD(tap_fd, "create_tap"))
731 goto out_destroy_skel;
732 err = ifup("tap0");
733 if (!ASSERT_OK(err, "ifup"))
734 goto out_close_tap;
735
736 run_tests_skb_less(tap_fd, skel->maps.last_dissection,
737 "non-skb-direct-attach");
738
739 err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
740 ASSERT_OK(err, "bpf_prog_detach2");
741
742 out_close_tap:
743 close(tap_fd);
744 out_destroy_skel:
745 bpf_flow__destroy(skel);
746 out_clean_ns:
747 netns_free(ns);
748 }
749
test_flow_dissector_skb_less_indirect_attach(void)750 void test_flow_dissector_skb_less_indirect_attach(void)
751 {
752 int err, net_fd, tap_fd;
753 struct bpf_flow *skel;
754 struct bpf_link *link;
755 struct netns_obj *ns;
756
757 ns = netns_new("flow_dissector_skb_less_indirect_attach_ns", true);
758 if (!ASSERT_OK_PTR(ns, "create and open netns"))
759 return;
760
761 skel = bpf_flow__open_and_load();
762 if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
763 goto out_clean_ns;
764
765 net_fd = open("/proc/self/ns/net", O_RDONLY);
766 if (!ASSERT_OK_FD(net_fd, "open(/proc/self/ns/net"))
767 goto out_destroy_skel;
768
769 err = init_prog_array(skel->obj, skel->maps.jmp_table);
770 if (!ASSERT_OK(err, "init_prog_array"))
771 goto out_destroy_skel;
772
773 tap_fd = create_tap("tap0");
774 if (!ASSERT_OK_FD(tap_fd, "create_tap"))
775 goto out_close_ns;
776 err = ifup("tap0");
777 if (!ASSERT_OK(err, "ifup"))
778 goto out_close_tap;
779
780 link = bpf_program__attach_netns(skel->progs._dissect, net_fd);
781 if (!ASSERT_OK_PTR(link, "attach_netns"))
782 goto out_close_tap;
783
784 run_tests_skb_less(tap_fd, skel->maps.last_dissection,
785 "non-skb-indirect-attach");
786
787 err = bpf_link__destroy(link);
788 ASSERT_OK(err, "bpf_link__destroy");
789
790 out_close_tap:
791 close(tap_fd);
792 out_close_ns:
793 close(net_fd);
794 out_destroy_skel:
795 bpf_flow__destroy(skel);
796 out_clean_ns:
797 netns_free(ns);
798 }
799
/* Run every test case through the dissector with BPF_PROG_TEST_RUN
 * (skb path): the packet goes in as data_in, the resulting flow keys
 * come back in data_out, and input flags are passed via ctx_in.
 */
void test_flow_dissector_skb(void)
{
	char test_name[TEST_NAME_MAX_LEN];
	struct bpf_flow *skel;
	int i, err, prog_fd;

	skel = bpf_flow__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open/load skeleton"))
		return;

	err = init_prog_array(skel->obj, skel->maps.jmp_table);
	if (!ASSERT_OK(err, "init_prog_array"))
		goto out_destroy_skel;

	prog_fd = bpf_program__fd(skel->progs._dissect);
	if (!ASSERT_OK_FD(prog_fd, "bpf_program__fd"))
		goto out_destroy_skel;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys;
		LIBBPF_OPTS(bpf_test_run_opts, topts,
			    .data_in = &tests[i].pkt,
			    .data_size_in = sizeof(tests[i].pkt),
			    .data_out = &flow_keys,
		);
		/* static: persists across iterations, but topts is
		 * re-initialized each pass so ctx_in is only set (and ctx
		 * only read) for cases that actually carry flags
		 */
		static struct bpf_flow_keys ctx = {};

		snprintf(test_name, TEST_NAME_MAX_LEN, "%s-skb", tests[i].name);
		if (!test__start_subtest(test_name))
			continue;

		if (tests[i].flags) {
			topts.ctx_in = &ctx;
			topts.ctx_size_in = sizeof(ctx);
			ctx.flags = tests[i].flags;
		}

		err = bpf_prog_test_run_opts(prog_fd, &topts);
		ASSERT_OK(err, "test_run");
		ASSERT_EQ(topts.retval, tests[i].retval, "test_run retval");

		/* check the resulting flow_keys only if BPF_OK returned */
		if (topts.retval != BPF_OK)
			continue;
		ASSERT_EQ(topts.data_size_out, sizeof(flow_keys),
			  "test_run data_size_out");
		ASSERT_MEMEQ(&flow_keys, &tests[i].keys,
			     sizeof(struct bpf_flow_keys),
			     "returned flow keys");
	}

out_destroy_skel:
	bpf_flow__destroy(skel);
}
854
855