// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Isovalent */
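/* Exercises bpf_sk_assign() at tc ingress in combination with SO_REUSEPORT:
 * tc_main steers matching TCP/UDP packets to the socket stored in sk_map,
 * and the sk_reuseport programs below record whether socket selection ran
 * and which socket was picked. The userspace test harness (not part of this
 * file) is assumed to populate sk_map and dest_port and attach the programs.
 */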
#include <stdbool.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>
#include <linux/pkt_cls.h>

char LICENSE[] SEC("license") = "GPL";

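/* State shared with userspace: reuseport_executed counts how often a
 * reuseport program ran, sk_cookie_seen records the cookie of the socket
 * accepted by reuse_accept(), and headers holds the L4 header snapshot
 * taken at tc ingress for later comparison.
 */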
__u64 sk_cookie_seen;
__u64 reuseport_executed;
union {
	struct tcphdr tcp;
	struct udphdr udp;
} headers;

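/* Destination port to steer; const volatile so the value can be filled in by
 * the loader before verification and is then treated as read-only.
 */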
const volatile __u16 dest_port;

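/* Single-slot sockmap; slot 0 is expected to hold the socket that tc_main
 * assigns incoming packets to (populated by the test harness).
 */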
struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} sk_map SEC(".maps");

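/* Reuseport program meant to accept the assigned socket: it checks that the
 * packet's L4 header matches the snapshot taken by the tc program, records
 * the cookie of the selected socket, and passes.
 */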
SEC("sk_reuseport")
int reuse_accept(struct sk_reuseport_md *ctx)
{
	reuseport_executed++;

	if (ctx->ip_protocol == IPPROTO_TCP) {
		if (ctx->data + sizeof(headers.tcp) > ctx->data_end)
			return SK_DROP;

		if (__builtin_memcmp(&headers.tcp, ctx->data, sizeof(headers.tcp)) != 0)
			return SK_DROP;
	} else if (ctx->ip_protocol == IPPROTO_UDP) {
		if (ctx->data + sizeof(headers.udp) > ctx->data_end)
			return SK_DROP;

		if (__builtin_memcmp(&headers.udp, ctx->data, sizeof(headers.udp)) != 0)
			return SK_DROP;
	} else {
		return SK_DROP;
	}

	sk_cookie_seen = bpf_get_socket_cookie(ctx->sk);
	return SK_PASS;
}

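/* Reuseport program that unconditionally rejects socket selection and clears
 * the recorded cookie, while still bumping the execution counter.
 */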
SEC("sk_reuseport")
int reuse_drop(struct sk_reuseport_md *ctx)
{
	reuseport_executed++;
	sk_cookie_seen = 0;
	return SK_DROP;
}

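/* Assign the skb to the socket in slot 0 of sk_map; shoot the packet if the
 * lookup or bpf_sk_assign() fails.
 */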
static int
assign_sk(struct __sk_buff *skb)
{
	int zero = 0, ret = 0;
	struct bpf_sock *sk;

	sk = bpf_map_lookup_elem(&sk_map, &zero);
	if (!sk)
		return TC_ACT_SHOT;
	ret = bpf_sk_assign(skb, sk, 0);
	bpf_sk_release(sk);
	return ret ? TC_ACT_SHOT : TC_ACT_OK;
}

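/* Steer only initial SYNs destined to dest_port: snapshot the TCP header so
 * reuse_accept() can compare it later, then assign the skb.
 */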
static int
maybe_assign_tcp(struct __sk_buff *skb, struct tcphdr *th)
{
	if (th + 1 > (void *)(long)(skb->data_end))
		return TC_ACT_SHOT;

	if (!th->syn || th->ack || th->dest != bpf_htons(dest_port))
		return TC_ACT_OK;

	__builtin_memcpy(&headers.tcp, th, sizeof(headers.tcp));
	return assign_sk(skb);
}

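/* UDP counterpart: match on destination port only, snapshot the header, then
 * assign the skb.
 */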
static int
maybe_assign_udp(struct __sk_buff *skb, struct udphdr *uh)
{
	if (uh + 1 > (void *)(long)(skb->data_end))
		return TC_ACT_SHOT;

	if (uh->dest != bpf_htons(dest_port))
		return TC_ACT_OK;

	__builtin_memcpy(&headers.udp, uh, sizeof(headers.udp));
	return assign_sk(skb);
}

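/* TC ingress entry point: parse Ethernet plus IPv4/IPv6 and hand TCP/UDP
 * packets to the helpers above. Any non-IPv4 EtherType is treated as IPv6;
 * unsupported L4 protocols are dropped.
 */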
SEC("tc")
int tc_main(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	struct ethhdr *eth;

	eth = (struct ethhdr *)(data);
	if (eth + 1 > data_end)
		return TC_ACT_SHOT;

	if (eth->h_proto == bpf_htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)(data + sizeof(*eth));

		if (iph + 1 > data_end)
			return TC_ACT_SHOT;

		if (iph->protocol == IPPROTO_TCP)
			return maybe_assign_tcp(skb, (struct tcphdr *)(iph + 1));
		else if (iph->protocol == IPPROTO_UDP)
			return maybe_assign_udp(skb, (struct udphdr *)(iph + 1));
		else
			return TC_ACT_SHOT;
	} else {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(data + sizeof(*eth));

		if (ip6h + 1 > data_end)
			return TC_ACT_SHOT;

		if (ip6h->nexthdr == IPPROTO_TCP)
			return maybe_assign_tcp(skb, (struct tcphdr *)(ip6h + 1));
		else if (ip6h->nexthdr == IPPROTO_UDP)
			return maybe_assign_udp(skb, (struct udphdr *)(ip6h + 1));
		else
			return TC_ACT_SHOT;
	}
}