xref: /linux/net/ipv4/udp_bpf.c (revision 87c9c16317882dd6dbbc07e349bc3223e14f3244)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Cloudflare Ltd https://cloudflare.com */

#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/udp.h>
#include <net/inet_common.h>

#include "udp_impl.h"

static struct proto *udpv6_prot_saved __read_mostly;

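/* Fall back to the original (pre-BPF) UDP recvmsg, using the saved
 * IPv6 proto for AF_INET6 sockets and udp_prot otherwise.
 */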
static int sk_udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			  int noblock, int flags, int *addr_len)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return udpv6_prot_saved->recvmsg(sk, msg, len, noblock, flags,
						 addr_len);
#endif
	return udp_prot.recvmsg(sk, msg, len, noblock, flags, addr_len);
}

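/* recvmsg for UDP sockets in a sockmap: consume data queued on the
 * psock ingress queue first, falling back to the plain UDP receive
 * path when no redirected data is pending.
 */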
static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			   int nonblock, int flags, int *addr_len)
{
	struct sk_psock *psock;
	int copied, ret;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);

	/* No psock means the socket is not (or no longer) in a sockmap:
	 * use the regular UDP receive path.
	 */
	psock = sk_psock_get(sk);
	if (unlikely(!psock))
		return sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);

	lock_sock(sk);
	if (sk_psock_queue_empty(psock)) {
		ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
		goto out;
	}

msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	if (!copied) {
		int data, err = 0;
		long timeo;

		/* Nothing on the ingress queue yet: wait for either
		 * redirected data or regular UDP traffic to arrive.
		 */
		timeo = sock_rcvtimeo(sk, nonblock);
		data = sk_msg_wait_data(sk, psock, flags, timeo, &err);
		if (data) {
			if (!sk_psock_queue_empty(psock))
				goto msg_bytes_ready;
			ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
			goto out;
		}
		if (err) {
			ret = err;
			goto out;
		}
		copied = -EAGAIN;
	}
	ret = copied;
out:
	release_sock(sk);
	sk_psock_put(sk, psock);
	return ret;
}

enum {
	UDP_BPF_IPV4,
	UDP_BPF_IPV6,
	UDP_BPF_NUM_PROTS,
};

static DEFINE_SPINLOCK(udpv6_prot_lock);
static struct proto udp_bpf_prots[UDP_BPF_NUM_PROTS];

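/* Clone the base proto and override the callbacks sockmap needs to
 * intercept: unhash, close and recvmsg.
 */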
static void udp_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
{
	*prot        = *base;
	prot->unhash = sock_map_unhash;
	prot->close  = sock_map_close;
	prot->recvmsg = udp_bpf_recvmsg;
}

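/* Lazily build the IPv6 BPF proto the first time an IPv6 UDP socket is
 * added to a sockmap; subsequent calls skip the lock once the saved
 * proto pointer matches.
 */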
static void udp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&udpv6_prot_saved))) {
		spin_lock_bh(&udpv6_prot_lock);
		if (likely(ops != udpv6_prot_saved)) {
			udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV6], ops);
			smp_store_release(&udpv6_prot_saved, ops);
		}
		spin_unlock_bh(&udpv6_prot_lock);
	}
}

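/* The IPv4 BPF proto can be built unconditionally at boot time. */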
static int __init udp_bpf_v4_build_proto(void)
{
	udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot);
	return 0;
}
core_initcall(udp_bpf_v4_build_proto);

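/* Swap sk->sk_prot to the sockmap-aware variant, or restore the
 * original proto (and sk_write_space callback) when the socket is
 * removed from the map.
 */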
int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
{
	int family = sk->sk_family == AF_INET ? UDP_BPF_IPV4 : UDP_BPF_IPV6;

	if (restore) {
		sk->sk_write_space = psock->saved_write_space;
		WRITE_ONCE(sk->sk_prot, psock->sk_proto);
		return 0;
	}

	if (sk->sk_family == AF_INET6)
		udp_bpf_check_v6_needs_rebuild(psock->sk_proto);

	WRITE_ONCE(sk->sk_prot, &udp_bpf_prots[family]);
	return 0;
}
EXPORT_SYMBOL_GPL(udp_bpf_update_proto);