// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <error.h>
#include <linux/if.h>
#include <linux/if_tun.h>

#define CHECK_FLOW_KEYS(desc, got, expected)				\
	CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0,		\
		   desc,						\
		   "nhoff=%u/%u "					\
		   "thoff=%u/%u "					\
		   "addr_proto=0x%x/0x%x "				\
		   "is_frag=%u/%u "					\
		   "is_first_frag=%u/%u "				\
		   "is_encap=%u/%u "					\
		   "ip_proto=0x%x/0x%x "				\
		   "n_proto=0x%x/0x%x "					\
		   "sport=%u/%u "					\
		   "dport=%u/%u\n",					\
		   got.nhoff, expected.nhoff,				\
		   got.thoff, expected.thoff,				\
		   got.addr_proto, expected.addr_proto,			\
		   got.is_frag, expected.is_frag,			\
		   got.is_first_frag, expected.is_first_frag,		\
		   got.is_encap, expected.is_encap,			\
		   got.ip_proto, expected.ip_proto,			\
		   got.n_proto, expected.n_proto,			\
		   got.sport, expected.sport,				\
		   got.dport, expected.dport)

struct ipv4_pkt {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct svlan_ipv4_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed;

struct ipv6_pkt {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

struct dvlan_ipv6_pkt {
	struct ethhdr eth;
	__u16 vlan_tci;
	__u16 vlan_proto;
	__u16 vlan_tci2;
	__u16 vlan_proto2;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed;

struct test {
	const char *name;
	union {
		struct ipv4_pkt ipv4;
		struct svlan_ipv4_pkt svlan_ipv4;
		struct ipv6_pkt ipv6;
		struct dvlan_ipv6_pkt dvlan_ipv6;
	} pkt;
	struct bpf_flow_keys keys;
};

#define VLAN_HLEN	4

struct test tests[] = {
	{
		.name = "ipv4",
		.pkt.ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
		},
	},
	{
		.name = "ipv6",
		.pkt.ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN,
			.thoff = ETH_HLEN + sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
		},
	},
	{
		.name = "802.1q-ipv4",
		.pkt.svlan_ipv4 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto = __bpf_constant_htons(ETH_P_IP),
			.iph.ihl = 5,
			.iph.protocol = IPPROTO_TCP,
			.iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN,
			.thoff = ETH_HLEN + VLAN_HLEN + sizeof(struct iphdr),
			.addr_proto = ETH_P_IP,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IP),
		},
	},
	{
		.name = "802.1ad-ipv6",
		.pkt.dvlan_ipv6 = {
			.eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
			.vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
			.vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
			.iph.nexthdr = IPPROTO_TCP,
			.iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
			.tcp.doff = 5,
		},
		.keys = {
			.nhoff = ETH_HLEN + VLAN_HLEN * 2,
			.thoff = ETH_HLEN + VLAN_HLEN * 2 +
				 sizeof(struct ipv6hdr),
			.addr_proto = ETH_P_IPV6,
			.ip_proto = IPPROTO_TCP,
			.n_proto = __bpf_constant_htons(ETH_P_IPV6),
		},
	},
};
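
/* Descriptive note (not in the original source): create_tap() opens
 * /dev/net/tun with IFF_NAPI | IFF_NAPI_FRAGS, so packets written to the
 * returned fd by tx_tap() are fed through the tun driver's NAPI frags
 * path. That path calls eth_get_headlen(), which runs the flow dissector
 * without an skb; the skb-less test loop below relies on this.
 */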
static int create_tap(const char *ifname)
{
	struct ifreq ifr = {
		.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS,
	};
	int fd, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, TUNSETIFF, &ifr);
	if (ret) {
		close(fd);
		return -1;
	}

	return fd;
}

static int tx_tap(int fd, void *pkt, size_t len)
{
	struct iovec iov[] = {
		{
			.iov_len = len,
			.iov_base = pkt,
		},
	};
	return writev(fd, iov, ARRAY_SIZE(iov));
}

static int ifup(const char *ifname)
{
	struct ifreq ifr = {};
	int sk, ret;

	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));

	sk = socket(PF_INET, SOCK_DGRAM, 0);
	if (sk < 0)
		return -1;

	ret = ioctl(sk, SIOCGIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	ifr.ifr_flags |= IFF_UP;
	ret = ioctl(sk, SIOCSIFFLAGS, &ifr);
	if (ret) {
		close(sk);
		return -1;
	}

	close(sk);
	return 0;
}

void test_flow_dissector(void)
{
	int i, err, prog_fd, keys_fd = -1, tap_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector",
			    "jmp_table", "last_dissection", &prog_fd, &keys_fd);
	if (err) {
		error_cnt++;
		return;
	}

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys;
		struct bpf_prog_test_run_attr tattr = {
			.prog_fd = prog_fd,
			.data_in = &tests[i].pkt,
			.data_size_in = sizeof(tests[i].pkt),
			.data_out = &flow_keys,
		};

		err = bpf_prog_test_run_xattr(&tattr);
		CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) ||
			   err || tattr.retval != 1,
			   tests[i].name,
			   "err %d errno %d retval %d duration %d size %u/%lu\n",
			   err, errno, tattr.retval, tattr.duration,
			   tattr.data_size_out, sizeof(flow_keys));
		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}

	/* Run the same tests against the skb-less flow dissector.
	 * We use a known path in the net/tun driver that calls
	 * eth_get_headlen, and the BPF program exports bpf_flow_keys
	 * via a BPF map in this case.
	 */

	err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0);
	CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno);

	tap_fd = create_tap("tap0");
	CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno);
	err = ifup("tap0");
	CHECK(err, "ifup", "err %d errno %d\n", err, errno);

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_flow_keys flow_keys = {};
		__u32 key = 0;

		err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt));
		CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno);

		err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys);
		CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err);

		CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys);
	}

	close(tap_fd);
	bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
	bpf_object__close(obj);
}