// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <error.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <sys/uio.h>

#ifndef IP_MF
#define IP_MF 0x2000
#endif

#define CHECK_FLOW_KEYS(desc, got, expected)				\
	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0,		\
	      desc,							\
	      "nhoff=%u/%u "						\
	      "thoff=%u/%u "						\
	      "addr_proto=0x%x/0x%x "					\
	      "is_frag=%u/%u "						\
	      "is_first_frag=%u/%u "					\
	      "is_encap=%u/%u "						\
	      "ip_proto=0x%x/0x%x "					\
	      "n_proto=0x%x/0x%x "					\
	      "flow_label=0x%x/0x%x "					\
	      "sport=%u/%u "						\
	      "dport=%u/%u\n",						\
	      got.nhoff, expected.nhoff,				\
	      got.thoff, expected.thoff,				\
	      got.addr_proto, expected.addr_proto,			\
	      got.is_frag, expected.is_frag,				\
	      got.is_first_frag, expected.is_first_frag,		\
	      got.is_encap, expected.is_encap,				\
	      got.ip_proto, expected.ip_proto,				\
	      got.n_proto, expected.n_proto,				\
	      got.flow_label, expected.flow_label,			\
	      got.sport, expected.sport,				\
	      got.dport, expected.dport)

struct ipv4_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipip_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct iphdr iph_inner;
	struct tcphdr tcp;
} __packed;

struct svlan_ipv4_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_frag_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct frag_hdr {
		__u8 nexthdr;
		__u8 reserved;
		__be16 frag_off;
		__be32 identification;
	} ipf;
	struct tcphdr tcp;
} __packed;

struct dvlan_ipv6_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

struct test {
	const char *name;
	union {
		struct ipv4_pkt ipv4;
		struct svlan_ipv4_pkt svlan_ipv4;
		struct ipip_pkt ipip;
		struct ipv6_pkt ipv6;
		struct ipv6_frag_pkt ipv6_frag;
		struct dvlan_ipv6_pkt dvlan_ipv6;
	} pkt;
	struct bpf_flow_keys keys;
	__u32 flags;
};

#define VLAN_HLEN	4

struct test tests[] = {
	{
		.name = "ipv4",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
		},
	},
	{
		.name = "ipv6",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
		},
	},
	{
		.name = "802.1q-ipv4",
		.pkt.svlan_ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN,
			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
		},
	},
	{
		.name = "802.1ad-ipv6",
		.pkt.dvlan_ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
				 sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
		},
	},
	{
		.name = "ipv4-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
	},
	{
		.name = "ipv4-no-frag",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.frag_off = __bpf_constant_htons(IP_MF),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_frag = true,
			.is_first_frag = true,
		},
	},
	{
		.name = "ipv6-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				 sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
			.sport = 80,
			.dport = 8080,
		},
		.flags = BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG,
	},
	{
		.name = "ipv6-no-frag",
		.pkt.ipv6_frag = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_FRAGMENT,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.ipf.nexthdr = IPPROTO_TCP,
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr) +
				 sizeof(struct frag_hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.is_frag = true,
			.is_first_frag = true,
		},
	},
	{
		.name = "ipv6-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.sport = 80,
			.dport = 8080,
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
	},
	{
		.name = "ipv6-no-flow-label",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph.flow_lbl = { 0xb, 0xee, 0xef },
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
			.flow_label = __bpf_constant_htonl(0xbeeef),
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL,
	},
	{
		.name = "ipip-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr) +
				 sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
			.sport = 80,
			.dport = 8080,
		},
	},
	{
		.name = "ipip-no-encap",
		.pkt.ipip = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_IPIP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.iph_inner.ihl = 5,
			.iph_inner.protocol = IPPROTO_TCP,
			.iph_inner.tot_len =
				__bpf_constant_htons(MAGIC_BYTES) -
				sizeof(struct iphdr),
			.tcp.doff = 5,
			.tcp.source = 80,
			.tcp.dest = 8080,
		},
		.keys = {
			.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_IPIP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
			.is_encap = true,
		},
		.flags = BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP,
	},
};

/* Create a tap interface with IFF_NAPI_FRAGS so that packets written to it
 * take the net/tun receive path that calls eth_get_headlen, i.e. the
 * skb-less flow dissector exercised by the second loop in
 * test_flow_dissector().
 */
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		close(fd);
		return -1;
	}

	return fd;
}

static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov[] = {
		{
			.iov_len = len,
			.iov_base = pkt,
		},
	};
	return writev(fd, iov, ARRAY_SIZE(iov));
}

static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int sk, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	ifr.ifr_flags |= IFF_UP;
	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	close(sk);
	return 0;
}
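
/* Run each test case twice: first through BPF_PROG_TEST_RUN, where the
 * dissected flow keys come back via data_out, then through the skb-less
 * path by attaching the program as the flow dissector and writing the
 * same packets to a tap device, reading the keys back from the
 * "last_dissection" map.
 */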
void test_flow_dissector(void)
{
	int i, err, prog_fd, keys_fd = -1, tap_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
			    "jmp_table", "last_dissection", &prog_fd, &keys_fd);
	if (err) {
		error_cnt++;
		return;
	}

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys;
		struct bpf_prog_test_run_attr tattr = {
			.prog_fd = prog_fd,
			.data_in = &tests[i].pkt,
			.data_size_in = sizeof(tests[i].pkt),
			.data_out = &flow_keys,
		};
		static struct bpf_flow_keys ctx = {};

		if (tests[i].flags) {
			tattr.ctx_in = &ctx;
			tattr.ctx_size_in = sizeof(ctx);
			ctx.flags = tests[i].flags;
		}

		err = bpf_prog_test_run_xattr(&tattr);
		CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
			   err || tattr.retval != 1,
			   tests[i].name,
			   "err %d errno %d retval %d duration %d size %u/%lu\n",
			   err, errno, tattr.retval, tattr.duration,
			   tattr.data_size_out, sizeof(flow_keys));
		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}

	/* Do the same tests but for skb-less flow dissector.
	 * We use a known path in the net/tun driver that calls
	 * eth_get_headlen and we manually export bpf_flow_keys
	 * via BPF map in this case.
	 */

	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno);

	tap_fd = create_tap("tap0");
	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
	err = ifup("tap0");
	CHECK(err, "ifup", "err %d errno %d\n", err, errno);

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		/* Keep in sync with 'flags' from eth_get_headlen. */
		__u32 eth_get_headlen_flags =
			BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
		struct bpf_prog_test_run_attr tattr = {};
		struct bpf_flow_keys flow_keys = {};
		__u32 key = 0;

		/* For the skb-less case we can't pass input flags; run
		 * only the tests that have a matching set of flags.
		 */
		if (tests[i].flags != eth_get_headlen_flags)
			continue;

		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);

		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);

		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}

	bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
	bpf_object__close(obj);
}