xref: /linux/include/linux/udp.h (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Definitions for the UDP protocol.
8  *
9  * Version:	@(#)udp.h	1.0.2	04/28/93
10  *
11  * Author:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  */
13 #ifndef _LINUX_UDP_H
14 #define _LINUX_UDP_H
15 
16 #include <net/inet_sock.h>
17 #include <linux/skbuff.h>
18 #include <net/netns/hash.h>
19 #include <uapi/linux/udp.h>
20 
/* Return the UDP header of @skb; the transport header must already be set. */
static inline struct udphdr *udp_hdr(const struct sk_buff *skb)
{
	unsigned char *th = skb_transport_header(skb);

	return (struct udphdr *)th;
}
25 
/* Lower bound on a per-netns UDP hash table size. */
#define UDP_HTABLE_SIZE_MIN_PERNET	128
/* Global minimum table size; halved when CONFIG_BASE_SMALL trims footprint. */
#define UDP_HTABLE_SIZE_MIN		(IS_ENABLED(CONFIG_BASE_SMALL) ? 128 : 256)
/* Upper bound on the UDP hash table size. */
#define UDP_HTABLE_SIZE_MAX		65536
29 
30 static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
31 {
32 	return (num + net_hash_mix(net)) & mask;
33 }
34 
/* Bit numbers for the udp_sock->udp_flags word (see udp_*_bit() helpers). */
enum {
	UDP_FLAGS_CORK,		/* Cork is required */
	UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */
	UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */
	UDP_FLAGS_GRO_ENABLED,	/* Request GRO aggregation */
	UDP_FLAGS_ACCEPT_FRAGLIST, /* Accept SKB_GSO_FRAGLIST packets */
	UDP_FLAGS_ACCEPT_L4,	/* Accept SKB_GSO_UDP_L4 packets */
	UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
	UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */
	UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
};
46 
/* per NUMA structure for lockless producer usage. */
struct udp_prod_queue {
	/* lock-free list of produced skbs; cacheline-aligned to avoid
	 * false sharing between producers on different nodes.
	 */
	struct llist_head	ll_root ____cacheline_aligned_in_smp;
	/* NOTE(review): presumably receive-memory bytes charged to this
	 * queue's pending skbs — confirm against the producer/consumer code.
	 */
	atomic_t		rmem_alloc;
};
52 
struct udp_sock {
	/* inet_sock has to be the first member */
	struct inet_sock inet;
#define udp_port_hash		inet.sk.__sk_common.skc_u16hashes[0]
#define udp_portaddr_hash	inet.sk.__sk_common.skc_u16hashes[1]
#define udp_portaddr_node	inet.sk.__sk_common.skc_portaddr_node

	/* UDP_FLAGS_* bits, manipulated with the udp_*_bit() helpers */
	unsigned long	 udp_flags;

	int		 pending;	/* Any pending frames ? */
	__u8		 encap_type;	/* Is this an Encapsulation socket? */

#if !IS_ENABLED(CONFIG_BASE_SMALL)
	/* For UDP 4-tuple hash */
	__u16 udp_lrpa_hash;
	struct hlist_nulls_node udp_lrpa_node;
#endif

	/*
	 * Following member retains the information to create a UDP header
	 * when the socket is uncorked.
	 */
	__u16		 len;		/* total length of pending frames */
	/* GSO segment size; NOTE(review): presumably set via the
	 * UDP_SEGMENT sockopt — confirm at the setsockopt site.
	 */
	__u16		 gso_size;
	/*
	 * Fields specific to UDP-Lite.
	 */
	__u16		 pcslen;	/* send checksum coverage — TODO confirm */
	__u16		 pcrlen;	/* receive checksum coverage — TODO confirm */
	/*
	 * For encapsulation sockets.
	 */
	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
	void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, int err,
			      __be16 port, u32 info, u8 *payload);
	int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
	void (*encap_destroy)(struct sock *sk);

	/* GRO functions for UDP socket */
	struct sk_buff *	(*gro_receive)(struct sock *sk,
					       struct list_head *head,
					       struct sk_buff *skb);
	int			(*gro_complete)(struct sock *sk,
						struct sk_buff *skb,
						int nhoff);

	/* per-NUMA producer queues, see struct udp_prod_queue above */
	struct udp_prod_queue *udp_prod_queue;

	/* udp_recvmsg try to use this before splicing sk_receive_queue */
	struct sk_buff_head	reader_queue ____cacheline_aligned_in_smp;

	/* This field is dirtied by udp_recvmsg() */
	int		forward_deficit;

	/* This fields follows rcvbuf value, and is touched by udp_recvmsg */
	int		forward_threshold;

	/* Cache friendly copy of sk->sk_peek_off >= 0 */
	bool		peeking_with_offset;

	/*
	 * Accounting for the tunnel GRO fastpath.
	 * Unprotected by compilers guard, as it uses space available in
	 * the last UDP socket cacheline.
	 */
	struct hlist_node	tunnel_list;
	struct numa_drop_counters drop_counters;
};
121 
/*
 * Accessors for the UDP_FLAGS_* bits in udp_sock->udp_flags.  @nr is the
 * flag name without the UDP_FLAGS_ prefix (token-pasted); @sk is a
 * struct sock pointer.  All use atomic bitops.
 */
#define udp_test_bit(nr, sk)			\
	test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_set_bit(nr, sk)			\
	set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_test_and_set_bit(nr, sk)		\
	test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_clear_bit(nr, sk)			\
	clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
#define udp_assign_bit(nr, sk, val)		\
	assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)

/* Maximum number of GSO segments per UDP packet: 128. */
#define UDP_MAX_SEGMENTS	(1 << 7UL)

/* Convert a struct sock pointer to its containing udp_sock. */
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
136 
/*
 * Set the socket peek offset and refresh the cached peeking_with_offset
 * flag that the receive path reads locklessly.  Always returns 0;
 * NOTE(review): presumably to match a set_peek_off hook signature — confirm.
 */
static inline int udp_set_peek_off(struct sock *sk, int val)
{
	sk_set_peek_off(sk, val);
	/* Keep the cache in sync: sk_peek_off >= 0 means peeking is active. */
	WRITE_ONCE(udp_sk(sk)->peeking_with_offset, val >= 0);
	return 0;
}
143 
/* Enable/disable sending zero UDPv6 checksums on TX for @sk. */
static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
	udp_assign_bit(NO_CHECK6_TX, sk, val);
}
148 
/* Allow/disallow receiving zero UDPv6 checksums on RX for @sk. */
static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
{
	udp_assign_bit(NO_CHECK6_RX, sk, val);
}
153 
154 static inline bool udp_get_no_check6_tx(const struct sock *sk)
155 {
156 	return udp_test_bit(NO_CHECK6_TX, sk);
157 }
158 
159 static inline bool udp_get_no_check6_rx(const struct sock *sk)
160 {
161 	return udp_test_bit(NO_CHECK6_RX, sk);
162 }
163 
164 static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
165 				 struct sk_buff *skb)
166 {
167 	int gso_size;
168 
169 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
170 		gso_size = skb_shinfo(skb)->gso_size;
171 		put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size);
172 	}
173 }
174 
/* Static keys consulted by udp_encap_needed(); default off. */
DECLARE_STATIC_KEY_FALSE(udp_encap_needed_key);
#if IS_ENABLED(CONFIG_IPV6)
DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
#endif
179 
180 static inline bool udp_encap_needed(void)
181 {
182 	if (static_branch_unlikely(&udp_encap_needed_key))
183 		return true;
184 
185 #if IS_ENABLED(CONFIG_IPV6)
186 	if (static_branch_unlikely(&udpv6_encap_needed_key))
187 		return true;
188 #endif
189 
190 	return false;
191 }
192 
193 static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
194 {
195 	if (!skb_is_gso(skb))
196 		return false;
197 
198 	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
199 	    !udp_test_bit(ACCEPT_L4, sk))
200 		return true;
201 
202 	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
203 	    !udp_test_bit(ACCEPT_FRAGLIST, sk))
204 		return true;
205 
206 	/* GSO packets lacking the SKB_GSO_UDP_TUNNEL/_CSUM bits might still
207 	 * land in a tunnel as the socket check in udp_gro_receive cannot be
208 	 * foolproof.
209 	 */
210 	if (udp_encap_needed() &&
211 	    READ_ONCE(udp_sk(sk)->encap_rcv) &&
212 	    !(skb_shinfo(skb)->gso_type &
213 	      (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM)))
214 		return true;
215 
216 	return false;
217 }
218 
/* Opt @sk in to both UDP GSO delivery modes (L4 and fraglist). */
static inline void udp_allow_gso(struct sock *sk)
{
	udp_set_bit(ACCEPT_L4, sk);
	udp_set_bit(ACCEPT_FRAGLIST, sk);
}
224 
/* Iterate over sockets on a (port, address) hash chain. */
#define udp_portaddr_for_each_entry(__sk, list) \
	hlist_for_each_entry(__sk, list, __sk_common.skc_portaddr_node)

/* Continue a (port, address) chain walk from the current @__sk. */
#define udp_portaddr_for_each_entry_from(__sk) \
	hlist_for_each_entry_from(__sk, __sk_common.skc_portaddr_node)

/* RCU-safe walk of a (port, address) hash chain. */
#define udp_portaddr_for_each_entry_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)

#if !IS_ENABLED(CONFIG_BASE_SMALL)
/* RCU-safe walk of a 4-tuple hash chain (nulls list of udp_sock). */
#define udp_lrpa_for_each_entry_rcu(__up, node, list) \
	hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node)
#endif
238 
/*
 * True when @__sk is a UDP-Lite socket (IPPROTO_UDPLITE) rather than plain
 * UDP.  The argument is parenthesized so the macro expands correctly even
 * for non-trivial expressions (e.g. a conditional or a cast).
 */
#define IS_UDPLITE(__sk) ((__sk)->sk_protocol == IPPROTO_UDPLITE)
240 
/*
 * Return the per-netns UDP tunnel socket for the given address family
 * (@is_ipv6 selects the IPv6 slot), or NULL when UDP tunnel support is
 * not built in.  Must be called inside an RCU read-side critical section
 * (rcu_dereference).
 */
static inline struct sock *udp_tunnel_sk(const struct net *net, bool is_ipv6)
{
#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
	return rcu_dereference(net->ipv4.udp_tunnel_gro[is_ipv6].sk);
#else
	return NULL;
#endif
}
249 
250 #endif	/* _LINUX_UDP_H */
251