xref: /linux/tools/testing/selftests/bpf/progs/test_xdp_noinline.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Facebook
#include <stddef.h>
#include <stdbool.h>
#include <string.h>
#include <linux/pkt_cls.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_compiler.h"

static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << shift) | (word >> ((-shift) & 31));
}

/* copy-paste of jhash from kernel sources, to make sure llvm
 * can compile it into a valid sequence of BPF instructions
 */
#define __jhash_mix(a, b, c)			\
{						\
	a -= c;  a ^= rol32(c, 4);  c += b;	\
	b -= a;  b ^= rol32(a, 6);  a += c;	\
	c -= b;  c ^= rol32(b, 8);  b += a;	\
	a -= c;  a ^= rol32(c, 16); c += b;	\
	b -= a;  b ^= rol32(a, 19); a += c;	\
	c -= b;  c ^= rol32(b, 4);  b += a;	\
}

#define __jhash_final(a, b, c)			\
{						\
	c ^= b; c -= rol32(b, 14);		\
	a ^= c; a -= rol32(c, 11);		\
	b ^= a; b -= rol32(a, 25);		\
	c ^= b; c -= rol32(b, 16);		\
	a ^= c; a -= rol32(c, 4);		\
	b ^= a; b -= rol32(a, 14);		\
	c ^= b; c -= rol32(b, 24);		\
}

#define JHASH_INITVAL		0xdeadbeef

typedef unsigned int u32;

static __noinline
u32 jhash(const void *key, u32 length, u32 initval)
{
	u32 a, b, c;
	const unsigned char *k = key;

	a = b = c = JHASH_INITVAL + length + initval;

	while (length > 12) {
		a += *(u32 *)(k);
		b += *(u32 *)(k + 4);
		c += *(u32 *)(k + 8);
		__jhash_mix(a, b, c);
		length -= 12;
		k += 12;
	}
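	/* handle the last (at most 12) bytes; all case statements
	 * deliberately fall through
	 */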
	switch (length) {
	case 12: c += (u32)k[11]<<24;
	case 11: c += (u32)k[10]<<16;
	case 10: c += (u32)k[9]<<8;
	case 9:  c += k[8];
	case 8:  b += (u32)k[7]<<24;
	case 7:  b += (u32)k[6]<<16;
	case 6:  b += (u32)k[5]<<8;
	case 5:  b += k[4];
	case 4:  a += (u32)k[3]<<24;
	case 3:  a += (u32)k[2]<<16;
	case 2:  a += (u32)k[1]<<8;
	case 1:  a += k[0];
		 __jhash_final(a, b, c);
	case 0: /* Nothing left to add */
		break;
	}

	return c;
}

__noinline
u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
	a += initval;
	b += initval;
	c += initval;
	__jhash_final(a, b, c);
	return c;
}

__noinline
u32 jhash_2words(u32 a, u32 b, u32 initval)
{
	return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}

struct flow_key {
	union {
		__be32 src;
		__be32 srcv6[4];
	};
	union {
		__be32 dst;
		__be32 dstv6[4];
	};
	union {
		__u32 ports;
		__u16 port16[2];
	};
	__u8 proto;
};

struct packet_description {
	struct flow_key flow;
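	/* bit 0: flow was taken from an ICMP error's embedded (inner)
	 * header; bit 1: TCP SYN seen
	 */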
	__u8 flags;
};

struct ctl_value {
	union {
		__u64 value;
		__u32 ifindex;
		__u8 mac[6];
	};
};

struct vip_definition {
	union {
		__be32 vip;
		__be32 vipv6[4];
	};
	__u16 port;
	__u16 family;
	__u8 proto;
};

struct vip_meta {
	__u32 flags;
	__u32 vip_num;
};

struct real_pos_lru {
	__u32 pos;
	__u64 atime;
};

struct real_definition {
	union {
		__be32 dst;
		__be32 dstv6[4];
	};
	__u8 flags;
};

struct lb_stats {
	__u64 v2;
	__u64 v1;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 512);
	__type(key, struct vip_definition);
	__type(value, struct vip_meta);
} vip_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 300);
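	/* 1U << 1 == BPF_F_NO_COMMON_LRU: per-CPU LRU lists instead of
	 * one shared list
	 */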
	__uint(map_flags, 1U << 1);
	__type(key, struct flow_key);
	__type(value, struct real_pos_lru);
} lru_cache SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 12 * 655);
	__type(key, __u32);
	__type(value, __u32);
} ch_rings SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 40);
	__type(key, __u32);
	__type(value, struct real_definition);
} reals SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 515);
	__type(key, __u32);
	__type(value, struct lb_stats);
} stats SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 16);
	__type(key, __u32);
	__type(value, struct ctl_value);
} ctl_array SEC(".maps");

struct eth_hdr {
	unsigned char eth_dest[6];
	unsigned char eth_source[6];
	unsigned short eth_proto;
};

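/* offset from the start of the packet to the L4 header; for flows taken
 * from an ICMP error, skip the embedded inner IP header as well
 */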
static __noinline __u64 calc_offset(bool is_ipv6, bool is_icmp)
{
	__u64 off = sizeof(struct eth_hdr);
	if (is_ipv6) {
		off += sizeof(struct ipv6hdr);
		if (is_icmp)
			off += sizeof(struct icmp6hdr) + sizeof(struct ipv6hdr);
	} else {
		off += sizeof(struct iphdr);
		if (is_icmp)
			off += sizeof(struct icmphdr) + sizeof(struct iphdr);
	}
	return off;
}

static __attribute__ ((noinline))
bool parse_udp(void *data, void *data_end,
	       bool is_ipv6, struct packet_description *pckt)
{

	bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
	__u64 off = calc_offset(is_ipv6, is_icmp);
	struct udphdr *udp;
	udp = data + off;

	if (udp + 1 > data_end)
		return false;
	if (!is_icmp) {
		pckt->flow.port16[0] = udp->source;
		pckt->flow.port16[1] = udp->dest;
	} else {
		pckt->flow.port16[0] = udp->dest;
		pckt->flow.port16[1] = udp->source;
	}
	return true;
}

static __attribute__ ((noinline))
bool parse_tcp(void *data, void *data_end,
	       bool is_ipv6, struct packet_description *pckt)
{

	bool is_icmp = !((pckt->flags & (1 << 0)) == 0);
	__u64 off = calc_offset(is_ipv6, is_icmp);
	struct tcphdr *tcp;

	tcp = data + off;
	if (tcp + 1 > data_end)
		return false;
	if (tcp->syn)
		pckt->flags |= (1 << 1);
	if (!is_icmp) {
		pckt->flow.port16[0] = tcp->source;
		pckt->flow.port16[1] = tcp->dest;
	} else {
		pckt->flow.port16[0] = tcp->dest;
		pckt->flow.port16[1] = tcp->source;
	}
	return true;
}

static __attribute__ ((noinline))
bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
	      struct packet_description *pckt,
	      struct real_definition *dst, __u32 pkt_bytes)
{
	struct eth_hdr *new_eth;
	struct eth_hdr *old_eth;
	struct ipv6hdr *ip6h;
	__u32 ip_suffix;
	void *data_end;
	void *data;

	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
		return false;
	data = (void *)(long)xdp->data;
	data_end = (void *)(long)xdp->data_end;
	new_eth = data;
	ip6h = data + sizeof(struct eth_hdr);
	old_eth = data + sizeof(struct ipv6hdr);
	if (new_eth + 1 > data_end ||
	    old_eth + 1 > data_end || ip6h + 1 > data_end)
		return false;
	memcpy(new_eth->eth_dest, cval->mac, 6);
	memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
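	/* 56710 == bpf_htons(ETH_P_IPV6), i.e. 0x86DD in network byte order */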
	new_eth->eth_proto = 56710;
	ip6h->version = 6;
	ip6h->priority = 0;
	memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));

	ip6h->nexthdr = IPPROTO_IPV6;
	ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];
	ip6h->payload_len =
	    bpf_htons(pkt_bytes + sizeof(struct ipv6hdr));
	ip6h->hop_limit = 4;

	ip6h->saddr.in6_u.u6_addr32[0] = 1;
	ip6h->saddr.in6_u.u6_addr32[1] = 2;
	ip6h->saddr.in6_u.u6_addr32[2] = 3;
	ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix;
	memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16);
	return true;
}

#ifndef __clang__
#pragma GCC push_options
/* GCC optimization collapses functions and increases the number of arguments
 * beyond the maximum that the BPF calling convention supports.
 */
#pragma GCC optimize("-fno-ipa-sra")
#endif

static __attribute__ ((noinline))
bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
	      struct packet_description *pckt,
	      struct real_definition *dst, __u32 pkt_bytes)
{

	__u32 ip_suffix = bpf_ntohs(pckt->flow.port16[0]);
	struct eth_hdr *new_eth;
	struct eth_hdr *old_eth;
	__u16 *next_iph_u16;
	struct iphdr *iph;
	__u32 csum = 0;
	void *data_end;
	void *data;

	ip_suffix <<= 15;
	ip_suffix ^= pckt->flow.src;
	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
		return false;
	data = (void *)(long)xdp->data;
	data_end = (void *)(long)xdp->data_end;
	new_eth = data;
	iph = data + sizeof(struct eth_hdr);
	old_eth = data + sizeof(struct iphdr);
	if (new_eth + 1 > data_end ||
	    old_eth + 1 > data_end || iph + 1 > data_end)
		return false;
	memcpy(new_eth->eth_dest, cval->mac, 6);
	memcpy(new_eth->eth_source, old_eth->eth_dest, 6);
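	/* 8 == bpf_htons(ETH_P_IP), i.e. 0x0800 in network byte order */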
	new_eth->eth_proto = 8;
	iph->version = 4;
	iph->ihl = 5;
	iph->frag_off = 0;
	iph->protocol = IPPROTO_IPIP;
	iph->check = 0;
	iph->tos = 1;
	iph->tot_len = bpf_htons(pkt_bytes + sizeof(struct iphdr));
	/* don't update iph->daddr, since it will overwrite old eth_proto
	 * and multiple iterations of bpf_prog_run() will fail
	 */

	iph->saddr = ((0xFFFF0000 & ip_suffix) | 4268) ^ dst->dst;
	iph->ttl = 4;

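	/* IPv4 header checksum: sum the header as 16-bit words and fold
	 * the carry into the low 16 bits
	 */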
	next_iph_u16 = (__u16 *) iph;
	__pragma_loop_unroll_full
	for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
		csum += *next_iph_u16++;
	iph->check = ~((csum & 0xffff) + (csum >> 16));
	if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr)))
		return false;
	return true;
}

#ifndef __clang__
#pragma GCC pop_options
#endif

static __attribute__ ((noinline))
int swap_mac_and_send(void *data, void *data_end)
{
	unsigned char tmp_mac[6];
	struct eth_hdr *eth;

	eth = data;
	memcpy(tmp_mac, eth->eth_source, 6);
	memcpy(eth->eth_source, eth->eth_dest, 6);
	memcpy(eth->eth_dest, tmp_mac, 6);
	return XDP_TX;
}

static __attribute__ ((noinline))
int send_icmp_reply(void *data, void *data_end)
{
	struct icmphdr *icmp_hdr;
	__u16 *next_iph_u16;
	__u32 tmp_addr = 0;
	struct iphdr *iph;
	__u32 csum = 0;
	__u64 off = 0;

	if (data + sizeof(struct eth_hdr)
	     + sizeof(struct iphdr) + sizeof(struct icmphdr) > data_end)
		return XDP_DROP;
	off += sizeof(struct eth_hdr);
	iph = data + off;
	off += sizeof(struct iphdr);
	icmp_hdr = data + off;
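	/* turn the echo request into an echo reply (ICMP type 0) and
	 * swap the IP addresses
	 */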
	icmp_hdr->type = 0;
	icmp_hdr->checksum += 0x0007;
	iph->ttl = 4;
	tmp_addr = iph->daddr;
	iph->daddr = iph->saddr;
	iph->saddr = tmp_addr;
	iph->check = 0;
	next_iph_u16 = (__u16 *) iph;
	__pragma_loop_unroll_full
	for (int i = 0; i < sizeof(struct iphdr) >> 1; i++)
		csum += *next_iph_u16++;
	iph->check = ~((csum & 0xffff) + (csum >> 16));
	return swap_mac_and_send(data, data_end);
}

static __attribute__ ((noinline))
int send_icmp6_reply(void *data, void *data_end)
{
	struct icmp6hdr *icmp_hdr;
	struct ipv6hdr *ip6h;
	__be32 tmp_addr[4];
	__u64 off = 0;

	if (data + sizeof(struct eth_hdr)
	     + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) > data_end)
		return XDP_DROP;
	off += sizeof(struct eth_hdr);
	ip6h = data + off;
	off += sizeof(struct ipv6hdr);
	icmp_hdr = data + off;
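	/* 129 == ICMPv6 echo reply */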
	icmp_hdr->icmp6_type = 129;
	icmp_hdr->icmp6_cksum -= 0x0001;
	ip6h->hop_limit = 4;
	memcpy(tmp_addr, ip6h->saddr.in6_u.u6_addr32, 16);
	memcpy(ip6h->saddr.in6_u.u6_addr32, ip6h->daddr.in6_u.u6_addr32, 16);
	memcpy(ip6h->daddr.in6_u.u6_addr32, tmp_addr, 16);
	return swap_mac_and_send(data, data_end);
}

static __attribute__ ((noinline))
int parse_icmpv6(void *data, void *data_end, __u64 off,
		 struct packet_description *pckt)
{
	struct icmp6hdr *icmp_hdr;
	struct ipv6hdr *ip6h;

	icmp_hdr = data + off;
	if (icmp_hdr + 1 > data_end)
		return XDP_DROP;
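	/* 128 == ICMPv6 echo request: answer it directly */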
	if (icmp_hdr->icmp6_type == 128)
		return send_icmp6_reply(data, data_end);
	if (icmp_hdr->icmp6_type != 3)
		return XDP_PASS;
	off += sizeof(struct icmp6hdr);
	ip6h = data + off;
	if (ip6h + 1 > data_end)
		return XDP_DROP;
	pckt->flow.proto = ip6h->nexthdr;
	pckt->flags |= (1 << 0);
	memcpy(pckt->flow.srcv6, ip6h->daddr.in6_u.u6_addr32, 16);
	memcpy(pckt->flow.dstv6, ip6h->saddr.in6_u.u6_addr32, 16);
	return -1;
}

static __attribute__ ((noinline))
int parse_icmp(void *data, void *data_end, __u64 off,
	       struct packet_description *pckt)
{
	struct icmphdr *icmp_hdr;
	struct iphdr *iph;

	icmp_hdr = data + off;
	if (icmp_hdr + 1 > data_end)
		return XDP_DROP;
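	/* type 8 == echo request: reply in place; type 3/code 4 ==
	 * destination unreachable, fragmentation needed: keep processing
	 * the embedded inner header
	 */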
	if (icmp_hdr->type == 8)
		return send_icmp_reply(data, data_end);
	if ((icmp_hdr->type != 3) || (icmp_hdr->code != 4))
		return XDP_PASS;
	off += sizeof(struct icmphdr);
	iph = data + off;
	if (iph + 1 > data_end)
		return XDP_DROP;
	if (iph->ihl != 5)
		return XDP_DROP;
	pckt->flow.proto = iph->protocol;
	pckt->flags |= (1 << 0);
	pckt->flow.src = iph->daddr;
	pckt->flow.dst = iph->saddr;
	return -1;
}

static __attribute__ ((noinline))
__u32 get_packet_hash(struct packet_description *pckt,
		      bool hash_16bytes)
{
	if (hash_16bytes)
		return jhash_2words(jhash(pckt->flow.srcv6, 16, 12),
				    pckt->flow.ports, 24);
	else
		return jhash_2words(pckt->flow.src, pckt->flow.ports,
				    24);
}

__attribute__ ((noinline))
static bool get_packet_dst(struct real_definition **real,
			   struct packet_description *pckt,
			   struct vip_meta *vip_info,
			   bool is_ipv6, void *lru_map)
{
	struct real_pos_lru new_dst_lru = { };
	bool hash_16bytes = is_ipv6;
	__u32 *real_pos, hash, key;
	__u64 cur_time;

	if (vip_info->flags & (1 << 2))
		hash_16bytes = 1;
	if (vip_info->flags & (1 << 3)) {
		pckt->flow.port16[0] = pckt->flow.port16[1];
		memset(pckt->flow.srcv6, 0, 16);
	}
	hash = get_packet_hash(pckt, hash_16bytes);
	if (hash != 0x358459b7 /* jhash of ipv4 packet */  &&
	    hash != 0x2f4bc6bb /* jhash of ipv6 packet */)
		return false;
	key = 2 * vip_info->vip_num + hash % 2;
	real_pos = bpf_map_lookup_elem(&ch_rings, &key);
	if (!real_pos)
		return false;
	key = *real_pos;
	*real = bpf_map_lookup_elem(&reals, &key);
	if (!(*real))
		return false;
	if (!(vip_info->flags & (1 << 1))) {
		__u32 conn_rate_key = 512 + 2;
		struct lb_stats *conn_rate_stats =
		    bpf_map_lookup_elem(&stats, &conn_rate_key);

		if (!conn_rate_stats)
			return true;
		cur_time = bpf_ktime_get_ns();
		if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) {
			conn_rate_stats->v1 = 1;
			conn_rate_stats->v2 = cur_time;
		} else {
			conn_rate_stats->v1 += 1;
			if (conn_rate_stats->v1 >= 1)
				return true;
		}
		if (pckt->flow.proto == IPPROTO_UDP)
			new_dst_lru.atime = cur_time;
		new_dst_lru.pos = key;
		bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0);
	}
	return true;
}

__attribute__ ((noinline))
static void connection_table_lookup(struct real_definition **real,
				    struct packet_description *pckt,
				    void *lru_map)
{

	struct real_pos_lru *dst_lru;
	__u64 cur_time;
	__u32 key;

	dst_lru = bpf_map_lookup_elem(lru_map, &pckt->flow);
	if (!dst_lru)
		return;
	if (pckt->flow.proto == IPPROTO_UDP) {
		cur_time = bpf_ktime_get_ns();
		if (cur_time - dst_lru->atime > 300000)
			return;
		dst_lru->atime = cur_time;
	}
	key = dst_lru->pos;
	*real = bpf_map_lookup_elem(&reals, &key);
}

/* don't believe your eyes!
 * the function below takes 6 arguments, whereas bpf and llvm allow a
 * maximum of 5, but since it is _static_, llvm can optimize one
 * argument away
 */
__attribute__ ((noinline))
static int process_l3_headers_v6(struct packet_description *pckt,
				 __u8 *protocol, __u64 off,
				 __u16 *pkt_bytes, void *extra_args[2])
{
	struct ipv6hdr *ip6h;
	__u64 iph_len;
	int action;
	void *data = extra_args[0];
	void *data_end = extra_args[1];

	ip6h = data + off;
	if (ip6h + 1 > data_end)
		return XDP_DROP;
	iph_len = sizeof(struct ipv6hdr);
	*protocol = ip6h->nexthdr;
	pckt->flow.proto = *protocol;
	*pkt_bytes = bpf_ntohs(ip6h->payload_len);
	off += iph_len;
	if (*protocol == 45) {
		return XDP_DROP;
	} else if (*protocol == 59) {
		action = parse_icmpv6(data, data_end, off, pckt);
		if (action >= 0)
			return action;
	} else {
		memcpy(pckt->flow.srcv6, ip6h->saddr.in6_u.u6_addr32, 16);
		memcpy(pckt->flow.dstv6, ip6h->daddr.in6_u.u6_addr32, 16);
	}
	return -1;
}

__attribute__ ((noinline))
static int process_l3_headers_v4(struct packet_description *pckt,
				 __u8 *protocol, __u64 off,
				 __u16 *pkt_bytes, void *extra_args[2])
{
	struct iphdr *iph;
	int action;
	void *data = extra_args[0];
	void *data_end = extra_args[1];

	iph = data + off;
	if (iph + 1 > data_end)
		return XDP_DROP;
	if (iph->ihl != 5)
		return XDP_DROP;
	*protocol = iph->protocol;
	pckt->flow.proto = *protocol;
	*pkt_bytes = bpf_ntohs(iph->tot_len);
	off += 20;
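	/* 65343 == bpf_htons(0x3fff): drop packets with the MF flag set
	 * or a non-zero fragment offset
	 */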
	if (iph->frag_off & 65343)
		return XDP_DROP;
	if (*protocol == IPPROTO_ICMP) {
		action = parse_icmp(data, data_end, off, pckt);
		if (action >= 0)
			return action;
	} else {
		pckt->flow.src = iph->saddr;
		pckt->flow.dst = iph->daddr;
	}
	return -1;
}

__attribute__ ((noinline))
static int process_packet(void *data, __u64 off, void *data_end,
			  bool is_ipv6, struct xdp_md *xdp)
{

	struct real_definition *dst = NULL;
	struct packet_description pckt = { };
	struct vip_definition vip = { };
	struct lb_stats *data_stats;
	void *lru_map = &lru_cache;
	struct vip_meta *vip_info;
	__u32 lru_stats_key = 513;
	__u32 mac_addr_pos = 0;
	__u32 stats_key = 512;
	struct ctl_value *cval;
	__u16 pkt_bytes;
	__u8 protocol;
	__u32 vip_num;
	int action;
	void *extra_args[2] = { data, data_end };

	if (is_ipv6)
		action = process_l3_headers_v6(&pckt, &protocol, off,
					       &pkt_bytes, extra_args);
	else
		action = process_l3_headers_v4(&pckt, &protocol, off,
					       &pkt_bytes, extra_args);
	if (action >= 0)
		return action;
	protocol = pckt.flow.proto;
	if (protocol == IPPROTO_TCP) {
		if (!parse_tcp(data, data_end, is_ipv6, &pckt))
			return XDP_DROP;
	} else if (protocol == IPPROTO_UDP) {
		if (!parse_udp(data, data_end, is_ipv6, &pckt))
			return XDP_DROP;
	} else {
		return XDP_TX;
	}

	if (is_ipv6)
		memcpy(vip.vipv6, pckt.flow.dstv6, 16);
	else
		vip.vip = pckt.flow.dst;
	vip.port = pckt.flow.port16[1];
	vip.proto = pckt.flow.proto;
	vip_info = bpf_map_lookup_elem(&vip_map, &vip);
	if (!vip_info) {
		vip.port = 0;
		vip_info = bpf_map_lookup_elem(&vip_map, &vip);
		if (!vip_info)
			return XDP_PASS;
		if (!(vip_info->flags & (1 << 4)))
			pckt.flow.port16[1] = 0;
	}
	if (data_end - data > 1400)
		return XDP_DROP;
	data_stats = bpf_map_lookup_elem(&stats, &stats_key);
	if (!data_stats)
		return XDP_DROP;
	data_stats->v1 += 1;
	if (!dst) {
		if (vip_info->flags & (1 << 0))
			pckt.flow.port16[0] = 0;
		if (!(pckt.flags & (1 << 1)) && !(vip_info->flags & (1 << 1)))
			connection_table_lookup(&dst, &pckt, lru_map);
		if (dst)
			goto out;
		if (pckt.flow.proto == IPPROTO_TCP) {
			struct lb_stats *lru_stats =
			    bpf_map_lookup_elem(&stats, &lru_stats_key);

			if (!lru_stats)
				return XDP_DROP;
			if (pckt.flags & (1 << 1))
				lru_stats->v1 += 1;
			else
				lru_stats->v2 += 1;
		}
		if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6, lru_map))
			return XDP_DROP;
		data_stats->v2 += 1;
	}
out:
	cval = bpf_map_lookup_elem(&ctl_array, &mac_addr_pos);
	if (!cval)
		return XDP_DROP;
	if (dst->flags & (1 << 0)) {
		if (!encap_v6(xdp, cval, &pckt, dst, pkt_bytes))
			return XDP_DROP;
	} else {
		if (!encap_v4(xdp, cval, &pckt, dst, pkt_bytes))
			return XDP_DROP;
	}
	vip_num = vip_info->vip_num;
	data_stats = bpf_map_lookup_elem(&stats, &vip_num);
	if (!data_stats)
		return XDP_DROP;
	data_stats->v1 += 1;
	data_stats->v2 += pkt_bytes;

	data = (void *)(long)xdp->data;
	data_end = (void *)(long)xdp->data_end;
	if (data + 4 > data_end)
		return XDP_DROP;
	*(u32 *)data = dst->dst;
	return XDP_DROP;
}

SEC("xdp")
int balancer_ingress_v4(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct eth_hdr *eth = data;
	__u32 eth_proto;
	__u32 nh_off;

	nh_off = sizeof(struct eth_hdr);
	if (data + nh_off > data_end)
		return XDP_DROP;
	eth_proto = bpf_ntohs(eth->eth_proto);
	if (eth_proto == ETH_P_IP)
		return process_packet(data, nh_off, data_end, 0, ctx);
	else
		return XDP_DROP;
}

SEC("xdp")
int balancer_ingress_v6(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct eth_hdr *eth = data;
	__u32 eth_proto;
	__u32 nh_off;

	nh_off = sizeof(struct eth_hdr);
	if (data + nh_off > data_end)
		return XDP_DROP;
	eth_proto = bpf_ntohs(eth->eth_proto);
	if (eth_proto == ETH_P_IPV6)
		return process_packet(data, nh_off, data_end, 1, ctx);
	else
		return XDP_DROP;
}

char _license[] SEC("license") = "GPL";