// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include "bpf_helpers.h"

static __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

/* copy paste of jhash from kernel sources to make sure llvm
 * can compile it into valid sequence of bpf instructions
 */
#define __jhash_mix(a, b, c)			\
{						\
	a -= c;  a ^= rol32(c, 4);  c += b;	\
	b -= a;  b ^= rol32(a, 6);  a += c;	\
	c -= b;  c ^= rol32(b, 8);  b += a;	\
	a -= c;  a ^= rol32(c, 16); c += b;	\
	b -= a;  b ^= rol32(a, 19); a += c;	\
	c -= b;  c ^= rol32(b, 4);  b += a;	\
}

#define __jhash_final(a, b, c)			\
{						\
	c ^= b; c -= rol32(b, 14);		\
	a ^= c; a -= rol32(c, 11);		\
	b ^= a; b -= rol32(a, 25);		\
	c ^= b; c -= rol32(b, 16);		\
	a ^= c; a -= rol32(c, 4);		\
	b ^= a; b -= rol32(a, 14);		\
	c ^= b; c -= rol32(b, 24);		\
}

#define JHASH_INITVAL		0xdeadbeef

typedef unsigned int u32;

static __attribute__ ((noinline))
u32 jhash(const void *key, u32 length, u32 initval)
{
	u32 a, b, c;
	const unsigned char *k = key;

	a = b = c = JHASH_INITVAL + length + initval;

	while (length > 12) {
		a += *(u32 *)(k);
		b += *(u32 *)(k + 4);
		c += *(u32 *)(k + 8);
		__jhash_mix(a, b, c);
		length -= 12;
		k += 12;
	}
	switch (length) {
	case 12: c += (u32)k[11]<<24;
	case 11: c += (u32)k[10]<<16;
	case 10: c += (u32)k[9]<<8;
	case 9: c += k[8];
	case 8: b += (u32)k[7]<<24;
	case 7: b += (u32)k[6]<<16;
	case 6: b += (u32)k[5]<<8;
	case 5: b += k[4];
	case 4: a += (u32)k[3]<<24;
	case 3: a += (u32)k[2]<<16;
	case 2: a += (u32)k[1]<<8;
	case 1: a += k[0];
		__jhash_final(a, b, c);
	case 0: /* Nothing left to add */
		break;
	}

	return c;
}

static __attribute__ ((noinline))
u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
	a += initval;
	b += initval;
	c += initval;
	__jhash_final(a, b, c);
	return c;
}

static __attribute__ ((noinline))
u32 jhash_2words(u32 a, u32 b, u32 initval)
{
	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}

struct flow_key {
	union {
		__be32 src;
		__be32 srcv6[4];
	};
	union {
		__be32 dst;
		__be32 dstv6[4];
	};
	union {
		__u32 ports;
		__u16 port16[2];
	};
	__u8 proto;
};

struct packet_description {
	struct flow_key flow;
	__u8 flags;
};

struct ctl_value {
	union {
		__u64 value;
		__u32 ifindex;
		__u8 mac[6];
	};
};

struct vip_definition {
	union {
		__be32 vip;
		__be32 vipv6[4];
	};
	__u16 port;
	__u16 family;
	__u8 proto;
};

struct vip_meta {
	__u32 flags;
	__u32 vip_num;
};

struct real_pos_lru {
	__u32 pos;
	__u64 atime;
};

struct real_definition {
	union {
		__be32 dst;
		__be32 dstv6[4];
	};
	__u8 flags;
};

struct lb_stats {
	__u64 v2;
	__u64 v1;
};

struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(struct vip_definition),
	.value_size = sizeof(struct vip_meta),
	.max_entries = 512,
	.map_flags = 0,
};
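
/* Per-flow LRU cache of chosen backends; map_flags = 1U << 1 corresponds
 * to BPF_F_NO_COMMON_LRU (per-CPU LRU lists).
 */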
struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = {
	.type = BPF_MAP_TYPE_LRU_HASH,
	.key_size = sizeof(struct flow_key),
	.value_size = sizeof(struct real_pos_lru),
	.max_entries = 300,
	.map_flags = 1U << 1,
};

struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),
	.max_entries = 12 * 655,
	.map_flags = 0,
};

struct bpf_map_def __attribute__ ((section("maps"), used)) reals = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(struct real_definition),
	.max_entries = 40,
	.map_flags = 0,
};

struct bpf_map_def __attribute__ ((section("maps"), used)) stats = {
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(struct lb_stats),
	.max_entries = 515,
	.map_flags = 0,
};

struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(struct ctl_value),
	.max_entries = 16,
	.map_flags = 0,
};

struct eth_hdr {
	unsigned char eth_dest[6];
	unsigned char eth_source[6];
	unsigned short eth_proto;
};

static inline __u64 calc_offset(bool is_ipv6, bool is_icmp)
{
	__u64 off = sizeof(struct eth_hdr);
	if (is_ipv6) {
		off += sizeof(struct ipv6hdr);
		if (is_icmp)
			off += sizeof(struct icmp6hdr) + sizeof(struct ipv6hdr);
	} else {
		off += sizeof(struct iphdr);
		if (is_icmp)
			off += sizeof(struct icmphdr) + sizeof(struct iphdr);
	}
	return off;
}

static __attribute__ ((noinline))
bool parse_udp(void *data, void *data_end,
	       bool is_ipv6, struct packet_description *pckt)
{

	bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
	__u64 off = calc_offset(is_ipv6, is_icmp);
	struct udphdr *udp;
	udp = data + off;

	if (udp + 1 > data_end)
		return 0;
	if (!is_icmp) {
		pckt->flow.port16[0] = udp->source;
		pckt->flow.port16[1] = udp->dest;
	} else {
		pckt->flow.port16[0] = udp->dest;
		pckt->flow.port16[1] = udp->source;
	}
	return 1;
}

static __attribute__ ((noinline))
bool parse_tcp(void *data, void *data_end,
	       bool is_ipv6, struct packet_description *pckt)
{

	bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
	__u64 off = calc_offset(is_ipv6, is_icmp);
	struct tcphdr *tcp;

	tcp = data + off;
	if (tcp + 1 > data_end)
		return 0;
	if (tcp->syn)
		pckt->flags |= (1 << 1);
	if (!is_icmp) {
		pckt->flow.port16[0] = tcp->source;
		pckt->flow.port16[1] = tcp->dest;
	} else {
		pckt->flow.port16[0] = tcp->dest;
		pckt->flow.port16[1] = tcp->source;
	}
	return 1;
}
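
/* encap_v6/encap_v4 grow the headroom with bpf_xdp_adjust_head(), write a
 * new outer ethernet + IP header in front of the original frame and point
 * it at the chosen real (IP-in-IP style forwarding).
 */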
static __attribute__ ((noinline))
bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
	      struct packet_description *pckt,
	      struct real_definition *dst, __u32 pkt_bytes)
{
	struct eth_hdr *new_eth;
	struct eth_hdr *old_eth;
	struct ipv6hdr *ip6h;
	__u32 ip_suffix;
	void *data_end;
	void *data;

	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
		return 0;
	data = (void *)(long)xdp->data;
	data_end = (void *)(long)xdp->data_end;
	new_eth = data;
	ip6h = data + sizeof(struct eth_hdr);
	old_eth = data + sizeof(struct ipv6hdr);
	if (new_eth + 1 > data_end ||
	    old_eth + 1 > data_end || ip6h + 1 > data_end)
		return 0;
	memcpy(new_eth->eth_dest, cval->mac, 6);
	memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
	new_eth->eth_proto = 56710;
	ip6h->version = 6;
	ip6h->priority = 0;
	memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));

	ip6h->nexthdr = IPPROTO_IPV6;
	ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];
	ip6h->payload_len =
	    __builtin_bswap16(pkt_bytes + sizeof(struct ipv6hdr));
	ip6h->hop_limit = 4;

	ip6h->saddr.in6_u.u6_addr32[0] = 1;
	ip6h->saddr.in6_u.u6_addr32[1] = 2;
	ip6h->saddr.in6_u.u6_addr32[2] = 3;
	ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix;
	memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16);
	return 1;
}

static __attribute__ ((noinline))
bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
	      struct packet_description *pckt,
	      struct real_definition *dst, __u32 pkt_bytes)
{

	__u32 ip_suffix = __builtin_bswap16(pckt->flow.port16[0]);
	struct eth_hdr *new_eth;
	struct eth_hdr *old_eth;
	__u16 *next_iph_u16;
	struct iphdr *iph;
	__u32 csum = 0;
	void *data_end;
	void *data;

	ip_suffix <<= 15;
	ip_suffix ^= pckt->flow.src;
	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
		return 0;
	data = (void *)(long)xdp->data;
	data_end = (void *)(long)xdp->data_end;
	new_eth = data;
	iph = data + sizeof(struct eth_hdr);
	old_eth = data + sizeof(struct iphdr);
	if (new_eth + 1 > data_end ||
	    old_eth + 1 > data_end || iph + 1 > data_end)
		return 0;
	memcpy(new_eth->eth_dest, cval->mac, 6);
	memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
	new_eth->eth_proto = 8;
	iph->version = 4;
	iph->ihl = 5;
	iph->frag_off = 0;
	iph->protocol = IPPROTO_IPIP;
	iph->check = 0;
	iph->tos = 1;
	iph->tot_len = __builtin_bswap16(pkt_bytes + sizeof(struct iphdr));
	/* don't update iph->daddr, since it will overwrite old eth_proto
	 * and multiple iterations of bpf_prog_run() will fail
	 */

	iph->saddr = ((0xFFFF0000 & ip_suffix) | 4268) ^ dst->dst;
	iph->ttl = 4;

	next_iph_u16 = (__u16 *) iph;
#pragma clang loop unroll(full)
	for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
		csum += *next_iph_u16++;
	iph->check = ~((csum & 0xffff) + (csum >> 16));
	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
		return 0;
	return 1;
}
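
/* decap_v6/decap_v4 drop the outer IP header: the ethernet header is copied
 * forward over the encap header, the frame is shrunk with
 * bpf_xdp_adjust_head() and data/data_end are reloaded.
 */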
static __attribute__ ((noinline))
bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4)
{
	struct eth_hdr *new_eth;
	struct eth_hdr *old_eth;

	old_eth = *data;
	new_eth = *data + sizeof(struct ipv6hdr);
	memcpy(new_eth->eth_source, old_eth->eth_source, 6);
	memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
	if (inner_v4)
		new_eth->eth_proto = 8;
	else
		new_eth->eth_proto = 56710;
	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr)))
		return 0;
	*data = (void *)(long)xdp->data;
	*data_end = (void *)(long)xdp->data_end;
	return 1;
}

static __attribute__ ((noinline))
bool decap_v4(struct xdp_md *xdp, void **data, void **data_end)
{
	struct eth_hdr *new_eth;
	struct eth_hdr *old_eth;

	old_eth = *data;
	new_eth = *data + sizeof(struct iphdr);
	memcpy(new_eth->eth_source, old_eth->eth_source, 6);
	memcpy(new_eth->eth_dest, old_eth->eth_dest, 6);
	new_eth->eth_proto = 8;
	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
		return 0;
	*data = (void *)(long)xdp->data;
	*data_end = (void *)(long)xdp->data_end;
	return 1;
}

static __attribute__ ((noinline))
int swap_mac_and_send(void *data, void *data_end)
{
	unsigned char tmp_mac[6];
	struct eth_hdr *eth;

	eth = data;
	memcpy(tmp_mac, eth->eth_source, 6);
	memcpy(eth->eth_source, eth->eth_dest, 6);
	memcpy(eth->eth_dest, tmp_mac, 6);
	return XDP_TX;
}

static __attribute__ ((noinline))
int send_icmp_reply(void *data, void *data_end)
{
	struct icmphdr *icmp_hdr;
	__u16 *next_iph_u16;
	__u32 tmp_addr = 0;
	struct iphdr *iph;
	__u32 csum1 = 0;
	__u32 csum = 0;
	__u64 off = 0;

	if (data + sizeof(struct eth_hdr)
	     + sizeof(struct iphdr) + sizeof(struct icmphdr) > data_end)
		return XDP_DROP;
	off += sizeof(struct eth_hdr);
	iph = data + off;
	off += sizeof(struct iphdr);
	icmp_hdr = data + off;
	icmp_hdr->type = 0;
	icmp_hdr->checksum += 0x0007;
	iph->ttl = 4;
	tmp_addr = iph->daddr;
	iph->daddr = iph->saddr;
	iph->saddr = tmp_addr;
	iph->check = 0;
	next_iph_u16 = (__u16 *) iph;
#pragma clang loop unroll(full)
	for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
		csum += *next_iph_u16++;
	iph->check = ~((csum & 0xffff) + (csum >> 16));
	return swap_mac_and_send(data, data_end);
}

static __attribute__ ((noinline))
int send_icmp6_reply(void *data, void *data_end)
{
	struct icmp6hdr *icmp_hdr;
	struct ipv6hdr *ip6h;
	__be32 tmp_addr[4];
	__u64 off = 0;

	if (data + sizeof(struct eth_hdr)
	     + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) > data_end)
		return XDP_DROP;
	off += sizeof(struct eth_hdr);
	ip6h = data + off;
	off += sizeof(struct ipv6hdr);
	icmp_hdr = data + off;
	icmp_hdr->icmp6_type = 129;
	icmp_hdr->icmp6_cksum -= 0x0001;
	ip6h->hop_limit = 4;
	memcpy(tmp_addr, ip6h->saddr.in6_u.u6_addr32, 16);
	memcpy(ip6h->saddr.in6_u.u6_addr32, ip6h->daddr.in6_u.u6_addr32, 16);
	memcpy(ip6h->daddr.in6_u.u6_addr32, tmp_addr, 16);
	return swap_mac_and_send(data, data_end);
}

static __attribute__ ((noinline))
int parse_icmpv6(void *data, void *data_end, __u64 off,
		 struct packet_description *pckt)
{
	struct icmp6hdr *icmp_hdr;
	struct ipv6hdr *ip6h;

	icmp_hdr = data + off;
	if (icmp_hdr + 1 > data_end)
		return XDP_DROP;
	if (icmp_hdr->icmp6_type == 128)
		return send_icmp6_reply(data, data_end);
	if (icmp_hdr->icmp6_type != 3)
		return XDP_PASS;
	off += sizeof(struct icmp6hdr);
	ip6h = data + off;
	if (ip6h + 1 > data_end)
		return XDP_DROP;
	pckt->flow.proto = ip6h->nexthdr;
	pckt->flags |= (1 << 0);
	memcpy(pckt->flow.srcv6, ip6h->daddr.in6_u.u6_addr32, 16);
	memcpy(pckt->flow.dstv6, ip6h->saddr.in6_u.u6_addr32, 16);
	return -1;
}
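
/* ICMPv4 handling: echo requests (type 8) are answered in place; for
 * "fragmentation needed" errors (type 3, code 4) the flow is rebuilt from
 * the inner IPv4 header with src/dst swapped and -1 is returned so the
 * caller keeps processing the packet.
 */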
static __attribute__ ((noinline))
int parse_icmp(void *data, void *data_end, __u64 off,
	       struct packet_description *pckt)
{
	struct icmphdr *icmp_hdr;
	struct iphdr *iph;

	icmp_hdr = data + off;
	if (icmp_hdr + 1 > data_end)
		return XDP_DROP;
	if (icmp_hdr->type == 8)
		return send_icmp_reply(data, data_end);
	if ((icmp_hdr->type != 3) || (icmp_hdr->code != 4))
		return XDP_PASS;
	off += sizeof(struct icmphdr);
	iph = data + off;
	if (iph + 1 > data_end)
		return XDP_DROP;
	if (iph->ihl != 5)
		return XDP_DROP;
	pckt->flow.proto = iph->protocol;
	pckt->flags |= (1 << 0);
	pckt->flow.src = iph->daddr;
	pckt->flow.dst = iph->saddr;
	return -1;
}

static __attribute__ ((noinline))
__u32 get_packet_hash(struct packet_description *pckt,
		      bool hash_16bytes)
{
	if (hash_16bytes)
		return jhash_2words(jhash(pckt->flow.srcv6, 16, 12),
				    pckt->flow.ports, 24);
	else
		return jhash_2words(pckt->flow.src, pckt->flow.ports,
				    24);
}

__attribute__ ((noinline))
static bool get_packet_dst(struct real_definition **real,
			   struct packet_description *pckt,
			   struct vip_meta *vip_info,
			   bool is_ipv6, void *lru_map)
{
	struct real_pos_lru new_dst_lru = { };
	bool hash_16bytes = is_ipv6;
	__u32 *real_pos, hash, key;
	__u64 cur_time;

	if (vip_info->flags & (1 << 2))
		hash_16bytes = 1;
	if (vip_info->flags & (1 << 3)) {
		pckt->flow.port16[0] = pckt->flow.port16[1];
		memset(pckt->flow.srcv6, 0, 16);
	}
	hash = get_packet_hash(pckt, hash_16bytes);
	if (hash != 0x358459b7 /* jhash of ipv4 packet */ &&
	    hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
		return 0;
	key = 2 * vip_info->vip_num + hash % 2;
	real_pos = bpf_map_lookup_elem(&ch_rings, &key);
	if (!real_pos)
		return 0;
	key = *real_pos;
	*real = bpf_map_lookup_elem(&reals, &key);
	if (!(*real))
		return 0;
	if (!(vip_info->flags & (1 << 1))) {
		__u32 conn_rate_key = 512 + 2;
		struct lb_stats *conn_rate_stats =
		    bpf_map_lookup_elem(&stats, &conn_rate_key);

		if (!conn_rate_stats)
			return 1;
		cur_time = bpf_ktime_get_ns();
		if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) {
			conn_rate_stats->v1 = 1;
			conn_rate_stats->v2 = cur_time;
		} else {
			conn_rate_stats->v1 += 1;
			if (conn_rate_stats->v1 >= 1)
				return 1;
		}
		if (pckt->flow.proto == IPPROTO_UDP)
			new_dst_lru.atime = cur_time;
		new_dst_lru.pos = key;
		bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
	}
	return 1;
}
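
/* Look up an existing flow in the LRU map. For UDP, entries whose atime is
 * more than 300000 ns old are ignored; otherwise atime is refreshed before
 * the cached real is returned through *real.
 */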
__attribute__ ((noinline))
static void connection_table_lookup(struct real_definition **real,
				    struct packet_description *pckt,
				    void *lru_map)
{

	struct real_pos_lru *dst_lru;
	__u64 cur_time;
	__u32 key;

	dst_lru = bpf_map_lookup_elem(lru_map, &pckt->flow);
	if (!dst_lru)
		return;
	if (pckt->flow.proto == IPPROTO_UDP) {
		cur_time = bpf_ktime_get_ns();
		if (cur_time - dst_lru->atime > 300000)
			return;
		dst_lru->atime = cur_time;
	}
	key = dst_lru->pos;
	*real = bpf_map_lookup_elem(&reals, &key);
}

/* don't believe your eyes!
 * below function has 6 arguments whereas bpf and llvm allow maximum of 5
 * but since it's _static_ llvm can optimize one argument away
 */
__attribute__ ((noinline))
static int process_l3_headers_v6(struct packet_description *pckt,
				 __u8 *protocol, __u64 off,
				 __u16 *pkt_bytes, void *data,
				 void *data_end)
{
	struct ipv6hdr *ip6h;
	__u64 iph_len;
	int action;

	ip6h = data + off;
	if (ip6h + 1 > data_end)
		return XDP_DROP;
	iph_len = sizeof(struct ipv6hdr);
	*protocol = ip6h->nexthdr;
	pckt->flow.proto = *protocol;
	*pkt_bytes = __builtin_bswap16(ip6h->payload_len);
	off += iph_len;
	if (*protocol == 45) {
		return XDP_DROP;
	} else if (*protocol == 59) {
		action = parse_icmpv6(data, data_end, off, pckt);
		if (action >= 0)
			return action;
	} else {
		memcpy(pckt->flow.srcv6, ip6h->saddr.in6_u.u6_addr32, 16);
		memcpy(pckt->flow.dstv6, ip6h->daddr.in6_u.u6_addr32, 16);
	}
	return -1;
}

__attribute__ ((noinline))
static int process_l3_headers_v4(struct packet_description *pckt,
				 __u8 *protocol, __u64 off,
				 __u16 *pkt_bytes, void *data,
				 void *data_end)
{
	struct iphdr *iph;
	__u64 iph_len;
	int action;

	iph = data + off;
	if (iph + 1 > data_end)
		return XDP_DROP;
	if (iph->ihl != 5)
		return XDP_DROP;
	*protocol = iph->protocol;
	pckt->flow.proto = *protocol;
	*pkt_bytes = __builtin_bswap16(iph->tot_len);
	off += 20;
	if (iph->frag_off & 65343)
		return XDP_DROP;
	if (*protocol == IPPROTO_ICMP) {
		action = parse_icmp(data, data_end, off, pckt);
		if (action >= 0)
			return action;
	} else {
		pckt->flow.src = iph->saddr;
		pckt->flow.dst = iph->daddr;
	}
	return -1;
}
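
/* Main forwarding path: parse L3/L4 headers, look up the VIP, reuse a
 * cached real from the LRU table or pick one via the ch_rings/reals maps,
 * encapsulate toward the chosen real and update the per-VIP stats. The
 * final write of dst->dst into the packet before returning XDP_DROP is
 * presumably there so the test harness can observe which real was picked.
 */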
__attribute__ ((noinline))
static int process_packet(void *data, __u64 off, void *data_end,
			  bool is_ipv6, struct xdp_md *xdp)
{

	struct real_definition *dst = NULL;
	struct packet_description pckt = { };
	struct vip_definition vip = { };
	struct lb_stats *data_stats;
	struct eth_hdr *eth = data;
	void *lru_map = &lru_cache;
	struct vip_meta *vip_info;
	__u32 lru_stats_key = 513;
	__u32 mac_addr_pos = 0;
	__u32 stats_key = 512;
	struct ctl_value *cval;
	__u16 pkt_bytes;
	__u64 iph_len;
	__u8 protocol;
	__u32 vip_num;
	int action;

	if (is_ipv6)
		action = process_l3_headers_v6(&pckt, &protocol, off,
					       &pkt_bytes, data, data_end);
	else
		action = process_l3_headers_v4(&pckt, &protocol, off,
					       &pkt_bytes, data, data_end);
	if (action >= 0)
		return action;
	protocol = pckt.flow.proto;
	if (protocol == IPPROTO_TCP) {
		if (!parse_tcp(data, data_end, is_ipv6, &pckt))
			return XDP_DROP;
	} else if (protocol == IPPROTO_UDP) {
		if (!parse_udp(data, data_end, is_ipv6, &pckt))
			return XDP_DROP;
	} else {
		return XDP_TX;
	}

	if (is_ipv6)
		memcpy(vip.vipv6, pckt.flow.dstv6, 16);
	else
		vip.vip = pckt.flow.dst;
	vip.port = pckt.flow.port16[1];
	vip.proto = pckt.flow.proto;
	vip_info = bpf_map_lookup_elem(&vip_map, &vip);
	if (!vip_info) {
		vip.port = 0;
		vip_info = bpf_map_lookup_elem(&vip_map, &vip);
		if (!vip_info)
			return XDP_PASS;
		if (!(vip_info->flags & (1 << 4)))
			pckt.flow.port16[1] = 0;
	}
	if (data_end - data > 1400)
		return XDP_DROP;
	data_stats = bpf_map_lookup_elem(&stats, &stats_key);
	if (!data_stats)
		return XDP_DROP;
	data_stats->v1 += 1;
	if (!dst) {
		if (vip_info->flags & (1 << 0))
			pckt.flow.port16[0] = 0;
		if (!(pckt.flags & (1 << 1)) && !(vip_info->flags & (1 << 1)))
			connection_table_lookup(&dst, &pckt, lru_map);
		if (dst)
			goto out;
		if (pckt.flow.proto == IPPROTO_TCP) {
			struct lb_stats *lru_stats =
			    bpf_map_lookup_elem(&stats, &lru_stats_key);

			if (!lru_stats)
				return XDP_DROP;
			if (pckt.flags & (1 << 1))
				lru_stats->v1 += 1;
			else
				lru_stats->v2 += 1;
		}
		if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6, lru_map))
			return XDP_DROP;
		data_stats->v2 += 1;
	}
out:
	cval = bpf_map_lookup_elem(&ctl_array, &mac_addr_pos);
	if (!cval)
		return XDP_DROP;
	if (dst->flags & (1 << 0)) {
		if (!encap_v6(xdp, cval, &pckt, dst, pkt_bytes))
			return XDP_DROP;
	} else {
		if (!encap_v4(xdp, cval, &pckt, dst, pkt_bytes))
			return XDP_DROP;
	}
	vip_num = vip_info->vip_num;
	data_stats = bpf_map_lookup_elem(&stats, &vip_num);
	if (!data_stats)
		return XDP_DROP;
	data_stats->v1 += 1;
	data_stats->v2 += pkt_bytes;

	data = (void *)(long)xdp->data;
	data_end = (void *)(long)xdp->data_end;
	if (data + 4 > data_end)
		return XDP_DROP;
	*(u32 *)data = dst->dst;
	return XDP_DROP;
}

__attribute__ ((section("xdp-test"), used))
int balancer_ingress(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct eth_hdr *eth = data;
	__u32 eth_proto;
	__u32 nh_off;

	nh_off = sizeof(struct eth_hdr);
	if (data + nh_off > data_end)
		return XDP_DROP;
	eth_proto = eth->eth_proto;
	if (eth_proto == 8)
		return process_packet(data, nh_off, data_end, 0, ctx);
	else if (eth_proto == 56710)
		return process_packet(data, nh_off, data_end, 1, ctx);
	else
		return XDP_DROP;
}

char _license[] __attribute__ ((section("license"), used)) = "GPL";
int _version __attribute__ ((section("version"), used)) = 1;
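
/* A minimal sketch of building and attaching this object by hand
 * (hypothetical file and interface names; assumes a clang with the bpf
 * target and an iproute2 with XDP object support):
 *
 *   clang -O2 -target bpf -c xdp_balancer.c -o xdp_balancer.o
 *   ip -force link set dev eth0 xdp obj xdp_balancer.o sec xdp-test
 */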