// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 */

#include <linux/bpf-cgroup.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/indirect_call_wrapper.h>
#include <trace/events/udp.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/seg6.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/ip6_tunnel.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>
#include <net/gro.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"

static void udpv6_destruct_sock(struct sock *sk)
{
	udp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

int udpv6_init_sock(struct sock *sk)
{
	udp_lib_init_sock(sk);
	sk->sk_destruct = udpv6_destruct_sock;
	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	return 0;
}

INDIRECT_CALLABLE_SCOPE
u32 udp6_ehashfn(const struct net *net,
		 const struct in6_addr *laddr,
		 const u16 lport,
		 const struct in6_addr *faddr,
		 const __be16 fport)
{
	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp6_ehash_secret + net_hash_mix(net));
}
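
/* Example (illustrative): for a socket bound to [2001:db8::1]:53 talking to
 * [2001:db8::2]:9000, only the low 32 bits of the local address feed lhash,
 * while the foreign address is fully mixed via __ipv6_addr_jhash(). Adding
 * net_hash_mix() to the per-boot secret keeps the hash unpredictable to
 * off-path attackers and distinct across network namespaces.
 */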

int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}

void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);
	u16 new_hash4;

	if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
		new_hash4 = udp_ehashfn(sock_net(sk),
					sk->sk_rcv_saddr, sk->sk_num,
					sk->sk_daddr, sk->sk_dport);
	} else {
		new_hash4 = udp6_ehashfn(sock_net(sk),
					 &sk->sk_v6_rcv_saddr, sk->sk_num,
					 &sk->sk_v6_daddr, sk->sk_dport);
	}

	udp_lib_rehash(sk, new_hash, new_hash4);
}

static int compute_score(struct sock *sk, const struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif)
{
	int bound_dev_if, score;
	struct inet_sock *inet;
	bool dev_match;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
	if (!dev_match)
		return -1;
	if (bound_dev_if)
		score++;

	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
		score++;

	return score;
}
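
/* Scoring example (illustrative): after the mandatory netns/port/family/
 * local-address checks, each matching optional attribute adds one point:
 * connected peer port, connected peer address, bound device, and arrival on
 * the socket's incoming CPU. A fully connected socket can thus score up to
 * 4 and always beats an unconnected, wildcard-peer socket on the same port.
 */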

/**
 * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
 * @net:	Network namespace
 * @saddr:	Source address, network order
 * @sport:	Source port, network order
 * @daddr:	Destination address, network order
 * @hnum:	Destination port, host order
 * @dif:	Destination interface index
 * @sdif:	Destination bridge port index, if relevant
 * @udptable:	Set of UDP hash tables
 *
 * Simplified lookup to be used as fallback if no sockets are found due to a
 * potential race between a (receive) address change and a lookup happening
 * before the rehash operation. This function ignores SO_REUSEPORT groups while
 * scoring result sockets, because if we have one, we don't need the fallback
 * at all.
 *
 * Called under rcu_read_lock().
 *
 * Return: socket with highest matching score if any, NULL if none
 */
static struct sock *udp6_lib_lookup1(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum, int dif, int sdif,
				     const struct udp_table *udptable)
{
	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot = &udptable->hash[slot];
	struct sock *sk, *result = NULL;
	int score, badness = 0;

	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net,
				      saddr, sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			result = sk;
			badness = score;
		}
	}

	return result;
}

/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(const struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, struct udp_hslot *hslot2,
		struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness;
	bool need_rescore;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		need_rescore = false;
rescore:
		score = compute_score(need_rescore ? result : sk, net, saddr,
				      sport, daddr, hnum, dif, sdif);
		if (score > badness) {
			badness = score;

			if (need_rescore)
				continue;

			if (sk->sk_state == TCP_ESTABLISHED) {
				result = sk;
				continue;
			}

			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
							saddr, sport, daddr, hnum, udp6_ehashfn);
			if (!result) {
				result = sk;
				continue;
			}

			/* Fall back to scoring if group has connections */
			if (!reuseport_has_conns(sk))
				return result;

			/* Reuseport logic returned an error, keep original score. */
			if (IS_ERR(result))
				continue;

			/* compute_score is too long a function to be
			 * inlined, and calling it again here yields
			 * measurable overhead for some
			 * workloads. Work around it by jumping
			 * backwards to rescore 'result'.
			 */
			need_rescore = true;
			goto rescore;
		}
	}
	return result;
}
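
/* Note on the reuseport path above (illustrative): for an SO_REUSEPORT group
 * of N unconnected sockets, inet6_lookup_reuseport() selects one member by
 * flow hash (udp6_ehashfn), so a given 4-tuple consistently lands on the
 * same socket. Only when the group also contains connected sockets do we
 * fall back to per-socket scoring, hence the rescore dance above.
 */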

#if IS_ENABLED(CONFIG_BASE_SMALL)
static struct sock *udp6_lib_lookup4(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum, int dif, int sdif,
				     struct udp_table *udptable)
{
	return NULL;
}

static void udp6_hash4(struct sock *sk)
{
}
#else /* !CONFIG_BASE_SMALL */
static struct sock *udp6_lib_lookup4(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr,
				     unsigned int hnum, int dif, int sdif,
				     struct udp_table *udptable)
{
	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
	const struct hlist_nulls_node *node;
	struct udp_hslot *hslot4;
	unsigned int hash4, slot;
	struct udp_sock *up;
	struct sock *sk;

	hash4 = udp6_ehashfn(net, daddr, hnum, saddr, sport);
	slot = hash4 & udptable->mask;
	hslot4 = &udptable->hash4[slot];

begin:
	udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
		sk = (struct sock *)up;
		if (inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
			return sk;
	}

	/* if the nulls value we got at the end of this lookup is not the
	 * expected one, we must restart the lookup. We probably met an item
	 * that was moved to another chain due to a rehash.
	 */
	if (get_nulls_value(node) != slot)
		goto begin;

	return NULL;
}

static void udp6_hash4(struct sock *sk)
{
	struct net *net = sock_net(sk);
	unsigned int hash;

	if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
		udp4_hash4(sk);
		return;
	}

	if (sk_unhashed(sk) || ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		return;

	hash = udp6_ehashfn(net, &sk->sk_v6_rcv_saddr, sk->sk_num,
			    &sk->sk_v6_daddr, sk->sk_dport);

	udp_lib_hash4(sk, hash);
}
#endif /* CONFIG_BASE_SMALL */
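
/* The hash4 path gives connected sockets an exact 4-tuple lookup. A minimal
 * sketch of the nulls-based retry idiom used above (illustrative only, not
 * additional kernel API):
 *
 *	do {
 *		restart = false;
 *		// walk the RCU hlist_nulls chain, return on a match
 *		if (get_nulls_value(node) != slot)
 *			restart = true;	// entry moved mid-walk by a rehash
 *	} while (restart);
 *
 * Comparing the nulls end marker against the slot detects entries that were
 * rehashed onto another chain while we walked without any locks held.
 */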

/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(const struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	unsigned short hnum = ntohs(dport);
	struct udp_hslot *hslot2;
	struct sock *result, *sk;
	unsigned int hash2;

	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);

	if (udp_has_hash4(hslot2)) {
		result = udp6_lib_lookup4(net, saddr, sport, daddr, hnum,
					  dif, sdif, udptable);
		if (result) /* udp6_lib_lookup4 returns sk or NULL */
			return result;
	}

	/* Lookup connected or non-wildcard sockets */
	result = udp6_lib_lookup2(net, saddr, sport,
				  daddr, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
		goto done;

	/* Lookup redirect from BPF */
	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
	    udptable == net->ipv4.udp_table) {
		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
						saddr, sport, daddr, hnum, dif,
						udp6_ehashfn);
		if (sk) {
			result = sk;
			goto done;
		}
	}

	/* Got non-wildcard socket or error on first lookup */
	if (result)
		goto done;

	/* Lookup wildcard sockets */
	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);

	result = udp6_lib_lookup2(net, saddr, sport,
				  &in6addr_any, hnum, dif, sdif,
				  hslot2, skb);
	if (!IS_ERR_OR_NULL(result))
		goto done;

	/* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
	result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
				  udptable);

done:
	if (IS_ERR(result))
		return NULL;
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
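
/* Lookup order recap (descriptive): 1) exact 4-tuple hash for connected
 * sockets (unless CONFIG_BASE_SMALL), 2) port+address hash for connected or
 * non-wildcard binds, 3) optional BPF sk_lookup redirect, 4) port+any-address
 * hash for wildcard binds, 5) primary port-only hash as a fallback against
 * rehash races. A connected (TCP_ESTABLISHED) hit from stage 2 short-circuits
 * the BPF redirect; otherwise the first stage yielding a socket wins.
 */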

static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct net *net = dev_net(skb->dev);
	int iif, sdif;

	inet6_get_iif_sdif(skb, &iif, &sdif);

	return __udp6_lib_lookup(net, &iph->saddr, sport,
				 &iph->daddr, dport, iif,
				 sdif, net->ipv4.udp_table, NULL);
}

/* Must be called under rcu_read_lock().
 * Unlike __udp6_lib_lookup(), this increments the socket refcount:
 * on success the caller owns a reference and must drop it with sock_put().
 */
#if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, net->ipv4.udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif
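
/* Usage sketch (illustrative, hypothetical caller): a TPROXY-style netfilter
 * hook would do something like
 *
 *	sk = udp6_lib_lookup(net, &iph->saddr, uh->source,
 *			     &iph->daddr, uh->dest, skb->dev->ifindex);
 *	if (sk) {
 *		// steer skb to sk ...
 *		sock_put(sk);	// lookup took a reference
 *	}
 *
 * under rcu_read_lock(); the reference taken here may outlive the RCU
 * critical section.
 */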

/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags field is still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}

/*
 *	This should be easy: if there is something there, we
 *	return it; otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int off, err, peeking = flags & MSG_PEEK;
	int is_udplite = IS_UDPLITE(sk);
	struct udp_mib __percpu *mib;
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));
	mib = __UDPX_MIB(sk, is_udp4);

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeking) {
			atomic_inc(&sk->sk_drops);
			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeking)
		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);

	sock_recv_cmsgs(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);

		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
						      (struct sockaddr *)sin6,
						      addr_len);
	}

	if (udp_test_bit(GRO_ENABLED, sk))
		udp_cmsg_recv(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet_cmsg_flags(inet))
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
	}
	kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
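
/* Userspace view (illustrative): recvmsg() on an 8-byte datagram with a
 * 4-byte buffer copies 4 bytes, sets MSG_TRUNC in msg_flags and returns 4;
 * passing MSG_TRUNC in the input flags makes it return the full length (8)
 * instead. MSG_PEEK leaves the datagram queued and only advances the
 * per-socket peek offset.
 */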

DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
void udpv6_encap_enable(void)
{
	static_branch_inc(&udpv6_encap_needed_key);
}
EXPORT_SYMBOL(udpv6_encap_enable);

/* Handler for tunnels with arbitrary destination ports: no socket lookup, go
 * through error handlers in encapsulations looking for a match.
 */
static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
				      struct inet6_skb_parm *opt,
				      u8 type, u8 code, int offset, __be32 info)
{
	int i;

	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			       u8 type, u8 code, int offset, __be32 info);
		const struct ip6_tnl_encap_ops *encap;

		encap = rcu_dereference(ip6tun_encaps[i]);
		if (!encap)
			continue;
		handler = encap->err_handler;
		if (handler && !handler(skb, opt, type, code, offset, info))
			return 0;
	}

	return -ENOENT;
}

/* Try to match ICMP errors to UDP tunnels by looking up a socket without
 * reversing source and destination port: this will match tunnels that force the
 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
 * lwtunnels might actually break this assumption by being configured with
 * different destination ports on the endpoints; in that case we won't be able
 * to trace ICMP messages back to them.
 *
 * If this doesn't match any socket, probe tunnels with arbitrary destination
 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
 * we've sent packets to won't necessarily match the local destination port.
 *
 * Then ask the tunnel implementation to match the error against a valid
 * association.
 *
 * Return an error if we can't find a match, the socket if we need further
 * processing, zero otherwise.
 */
static struct sock *__udp6_lib_err_encap(struct net *net,
					 const struct ipv6hdr *hdr, int offset,
					 struct udphdr *uh,
					 struct udp_table *udptable,
					 struct sock *sk,
					 struct sk_buff *skb,
					 struct inet6_skb_parm *opt,
					 u8 type, u8 code, __be32 info)
{
	int (*lookup)(struct sock *sk, struct sk_buff *skb);
	int network_offset, transport_offset;
	struct udp_sock *up;

	network_offset = skb_network_offset(skb);
	transport_offset = skb_transport_offset(skb);

	/* Network header needs to point to the outer IPv6 header inside ICMP */
	skb_reset_network_header(skb);

	/* Transport header needs to point to the UDP header */
	skb_set_transport_header(skb, offset);

	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (lookup && lookup(sk, skb))
			sk = NULL;

		goto out;
	}

	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
			       &hdr->saddr, uh->dest,
			       inet6_iif(skb), 0, udptable, skb);
	if (sk) {
		up = udp_sk(sk);

		lookup = READ_ONCE(up->encap_err_lookup);
		if (!lookup || lookup(sk, skb))
			sk = NULL;
	}

out:
	if (!sk) {
		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
							offset, info));
	}

	skb_set_transport_header(skb, transport_offset);
	skb_set_network_header(skb, network_offset);

	return sk;
}

int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info,
		   struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	bool tunnel = false;
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);

	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
		/* No socket for error: try tunnels before discarding */
		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
						  udptable, sk, skb,
						  opt, type, code, info);
			if (!sk)
				return 0;
		} else
			sk = ERR_PTR(-ENOENT);

		if (IS_ERR(sk)) {
			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
					  ICMP6_MIB_INERRORS);
			return PTR_ERR(sk);
		}

		tunnel = true;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		if (tunnel) {
			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
				     READ_ONCE(sk->sk_mark), sk->sk_uid);
		} else {
			ip6_sk_redirect(skb, sk);
		}
		goto out;
	}

	/* Tunnels don't have an application socket: don't pass errors back */
	if (tunnel) {
		if (udp_sk(sk)->encap_err_rcv)
			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
						  ntohl(info), (u8 *)(uh+1));
		goto out;
	}

	if (!inet6_test_bit(RECVERR6, sk)) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk_error_report(sk);
out:
	return 0;
}

static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);
		enum skb_drop_reason drop_reason;

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM) {
			UDP6_INC_STATS(sock_net(sk),
					 UDP_MIB_RCVBUFERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_MEMERRORS, is_udplite);
			drop_reason = SKB_DROP_REASON_PROTO_MEM;
		}
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		trace_udp_fail_queue_rcv_skb(rc, sk, skb);
		sk_skb_reason_drop(sk, skb, drop_reason);
		return -1;
	}

	return 0;
}

static __inline__ int udpv6_err(struct sk_buff *skb,
				struct inet6_skb_parm *opt, u8 type,
				u8 code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info,
			      dev_net(skb->dev)->ipv4.udp_table);
}

static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
{
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
		goto drop;
	}
	nf_reset_ct(skb);

	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
	    READ_ONCE(up->encap_type)) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = READ_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP6_INC_STATS(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
		u16 pcrlen = READ_ONCE(up->pcrlen);

		if (pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	udp_csum_pull_header(skb);

	skb_dst_drop(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	drop_reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	sk_skb_reason_drop(sk, skb, drop_reason);
	return -1;
}

static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *next, *segs;
	int ret;

	if (likely(!udp_unexpected_gso(sk, skb)))
		return udpv6_queue_rcv_one_skb(sk, skb);

	__skb_push(skb, -skb_mac_offset(skb));
	segs = udp_rcv_segment(sk, skb, false);
	skb_list_walk_safe(segs, skb, next) {
		__skb_pull(skb, skb_transport_offset(skb));

		udp_post_segment_fix_csum(skb);
		ret = udpv6_queue_rcv_one_skb(sk, skb);
		if (ret > 0)
			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
						 true);
	}
	return 0;
}
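
/* Illustrative: if a GRO-coalesced skb (say, 10 x 1200-byte datagrams)
 * reaches a socket that cannot accept it as-is, udp_rcv_segment() splits it
 * back into individual datagrams and each one is queued separately; a
 * positive return from the encap path resubmits that segment as another
 * protocol via ip6_protocol_deliver_rcu().
 */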

static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, int sdif, unsigned short hnum)
{
	const struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}

static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	int sdif = inet6_sdif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2].hslot;
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, sdif,
					    hnum))
			continue;
		/* If the checksum is zero and zero-checksum reception
		 * (no_check6_rx) is not enabled on the socket, skip it.
		 */
		if (!uh->check && !udp_get_no_check6_rx(sk))
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}
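
/* Delivery strategy above (descriptive): the first matching socket is
 * remembered and receives the original skb at the end; every further match
 * gets an skb_clone(). With a single listener this avoids cloning entirely.
 * Once a primary slot holds more than 10 sockets, the walk switches to the
 * secondary (port+address) hash to shorten the chain.
 */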

static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
	if (udp_sk_rx_dst_set(sk, dst))
		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
}

/* Wrapper for udp_queue_rcv_skb(), taking care of checksum conversion and
 * return-code conversion for IP-layer consumption.
 */
static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
				struct udphdr *uh)
{
	int ret;

	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);

	ret = udpv6_queue_rcv_skb(sk, skb);

	/* a return value > 0 means to resubmit the input */
	if (ret > 0)
		return ret;
	return 0;
}

int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct sock *sk = NULL;
	struct udphdr *uh;
	bool refcounted;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
			      &refcounted, udp6_ehashfn);
	if (IS_ERR(sk))
		goto no_sk;

	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
			udp6_sk_rx_dst_set(sk, dst);

		if (!uh->check && !udp_get_no_check6_rx(sk)) {
			if (refcounted)
				sock_put(sk);
			goto report_csum_error;
		}

		ret = udp6_unicast_rcv_skb(sk, skb, uh);
		if (refcounted)
			sock_put(sk);
		return ret;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		if (!uh->check && !udp_get_no_check6_rx(sk))
			goto report_csum_error;
		return udp6_unicast_rcv_skb(sk, skb, uh);
	}
no_sk:
	reason = SKB_DROP_REASON_NO_SOCKET;

	if (!uh->check)
		goto report_csum_error;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;
	nf_reset_ct(skb);

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	sk_skb_reason_drop(sk, skb, reason);
	return 0;

short_packet:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

report_csum_error:
	udp6_csum_zero_error(skb);
csum_error:
	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
		reason = SKB_DROP_REASON_UDP_CSUM;
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	sk_skb_reason_drop(sk, skb, reason);
	return 0;
}

static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	struct udp_table *udptable = net->ipv4.udp_table;
	unsigned short hnum = ntohs(loc_port);
	struct udp_hslot *hslot2;
	unsigned int hash2;
	__portpair ports;
	struct sock *sk;

	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
	hslot2 = udp_hashslot2(udptable, hash2);
	ports = INET_COMBINED_PORTS(rmt_port, hnum);

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}

void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk)
		return;

	skb->sk = sk;
	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
	skb->destructor = sock_pfree;
	dst = rcu_dereference(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, sk->sk_rx_dst_cookie);
	if (dst) {
		/* Set noref for now.
		 * Any place which wants to hold the dst has to call
		 * dst_hold_safe().
		 */
		skb_dst_set_noref(skb, dst);
	}
}

INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
}

/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		WRITE_ONCE(up->pending, 0);
		ip6_flush_pending_frames(sk);
	}
}

static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			     int addr_len)
{
	if (addr_len < offsetofend(struct sockaddr, sa_family))
		return -EINVAL;
	/* The following checks are replicated from __ip6_datagram_connect()
	 * and intended to prevent BPF program called below from accessing
	 * bytes that are out of the bound specified by user in addr_len.
	 */
	if (uaddr->sa_family == AF_INET) {
		if (ipv6_only_sock(sk))
			return -EAFNOSUPPORT;
		return udp_pre_connect(sk, uaddr, addr_len);
	}

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
}

static int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	int res;

	lock_sock(sk);
	res = __ip6_datagram_connect(sk, uaddr, addr_len);
	if (!res)
		udp6_hash4(sk);
	release_sock(sk);
	return res;
}

/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@saddr: source address
 *	@daddr: destination address
 *	@len:	length of packet
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket.  */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW checksum won't work as there are two or more
		 * fragments on the socket, so the csums of all the
		 * sk_buffs have to be folded together by hand
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
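
/* Pseudo-header reminder (descriptive): csum_ipv6_magic() folds in the
 * source address, destination address, upper-layer length and next-header
 * value, as RFC 8200 requires for the UDP checksum. In the single-fragment
 * case above, ~csum_ipv6_magic(..., 0) seeds the checksum offload with the
 * pseudo-header sum so hardware only has to add the payload.
 */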

/*
 *	Sending
 */

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
			   struct inet_cork *cork)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;
	int datalen = len - sizeof(*uh);

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (cork->gso_size) {
		const int hlen = skb_network_header_len(skb) +
				 sizeof(struct udphdr);

		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
			kfree_skb(skb);
			return -EMSGSIZE;
		}
		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (udp_get_no_check6_tx(sk)) {
			kfree_skb(skb);
			return -EINVAL;
		}
		if (is_udplite || dst_xfrm(skb_dst(skb))) {
			kfree_skb(skb);
			return -EIO;
		}

		if (datalen > cork->gso_size) {
			skb_shinfo(skb)->gso_size = cork->gso_size;
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
								 cork->gso_size);

			/* Don't checksum the payload, skb will get segmented */
			goto csum_partial;
		}
	}

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_get_no_check6_tx(sk)) {   /* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
csum_partial:
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}
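
/* GSO arithmetic example (illustrative): with cork->gso_size = 1400 and a
 * 14000-byte payload, gso_segs = DIV_ROUND_UP(14000, 1400) = 10 segments.
 * A payload larger than gso_size * UDP_MAX_SEGMENTS, a disabled TX checksum,
 * UDP-Lite, or an xfrm-transformed dst all reject the GSO request instead.
 */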

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
			      &inet_sk(sk)->cork.base);
out:
	up->len = 0;
	WRITE_ONCE(up->pending, 0);
	return err;
}

int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct inet_cork_full cork;
	struct flowi6 *fl6 = &cork.fl.u.ip6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	bool connected = false;
	int ulen = len;
	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	ipcm6_init_sk(&ipc6, sk);
	ipc6.gso_size = READ_ONCE(up->gso_size);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!READ_ONCE(up->pending)) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;

			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			err = ipv6_only_sock(sk) ?
				-ENETUNREACH : udp_sendmsg(sk, msg, len);
			msg->msg_name = sin6;
			msg->msg_namelen = addr_len;
			return err;
		}
	}

	/* Rough check on arithmetic overflow;
	   a better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (READ_ONCE(up->pending)) {
		if (READ_ONCE(up->pending) == AF_INET)
			return udp_sendmsg(sk, msg, len);
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(fl6, 0, sizeof(*fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6->fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (inet6_test_bit(SNDFLOW, sk)) {
			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
				if (IS_ERR(flowlabel))
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6->flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6->fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6->flowlabel = np->flow_label;
		connected = true;
	}

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);

	if (!fl6->flowi6_oif)
		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6->flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
		if (err > 0) {
			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
						    &ipc6);
			connected = false;
		}
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6->flowi6_proto = sk->sk_protocol;
	fl6->flowi6_mark = ipc6.sockc.mark;
	fl6->daddr = *daddr;
	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
		fl6->saddr = np->saddr;
	fl6->fl6_sport = inet->inet_sport;

	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6,
					   &addr_len,
					   &fl6->saddr);
		if (err)
			goto out_no_dst;
		if (sin6) {
			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
				/* The BPF program rewrote an IPv6 address
				 * with an IPv4-mapped IPv6 address, which is
				 * currently unsupported.
				 */
				err = -ENOTSUPP;
				goto out_no_dst;
			}
			if (sin6->sin6_port == 0) {
				/* BPF program set invalid port. Reject it. */
				err = -EINVAL;
				goto out_no_dst;
			}
			fl6->fl6_dport = sin6->sin6_port;
			fl6->daddr = sin6->sin6_addr;
		}
	}

	if (ipv6_addr_any(&fl6->daddr))
		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */

	final_p = fl6_update_dst(fl6, opt, &final);
	if (final_p)
		connected = false;

	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
		fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
		connected = false;
	} else if (!fl6->flowi6_oif)
		fl6->flowi6_oif = READ_ONCE(np->ucast_oif);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));

	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   dst_rt6_info(dst),
				   msg->msg_flags, &cork);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, fl6, &cork.base);
		/* ip6_make_skb steals the dst reference */
		goto out_no_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	WRITE_ONCE(up->pending, AF_INET6);

do_append_data:
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, fl6, dst_rt6_info(dst),
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		WRITE_ONCE(up->pending, 0);

	if (err > 0)
		err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
	release_sock(sk);

out:
	dst_release(dst);
out_no_dst:
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6->daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
EXPORT_SYMBOL(udpv6_sendmsg);
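
/* Flow recap (descriptive): a v4-mapped or AF_INET destination is handed to
 * udp_sendmsg() with a temporary sockaddr_in (the do_udp_sendmsg label
 * above); otherwise the non-corked fast path builds one skb via
 * ip6_make_skb() and sends it locklessly, while MSG_MORE/UDP_CORK falls back
 * to the locked ip6_append_data() path.
 */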

static void udpv6_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct udp_sock *up = udp_sk(sk);

	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
		return;

	lock_sock(sk);
	if (up->pending && !udp_test_bit(CORK, sk))
		udp_v6_push_pending_frames(sk);
	release_sock(sk);
}

void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	lock_sock(sk);

	/* protects from races with udp_abort() */
	sock_set_flag(sk, SOCK_DEAD);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
		if (up->encap_type) {
			void (*encap_destroy)(struct sock *sk);

			encap_destroy = READ_ONCE(up->encap_destroy);
			if (encap_destroy)
				encap_destroy(sk);
		}
		if (udp_test_bit(ENCAP_ENABLED, sk)) {
			static_branch_dec(&udpv6_encap_needed_key);
			udp_encap_disable();
			udp_tunnel_cleanup_gro(sk);
		}
	}
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		     unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
		return udp_lib_setsockopt(sk, level, optname,
					  optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		const struct inet_sock *inet = inet_sk((const struct sock *)v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);
		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
					  udp_rqueue_get(v), bucket);
	}
	return 0;
}

const struct seq_operations udp6_seq_ops = {
	.start		= udp_seq_start,
	.next		= udp_seq_next,
	.stop		= udp_seq_stop,
	.show		= udp6_seq_show,
};
EXPORT_SYMBOL(udp6_seq_ops);

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.family		= AF_INET6,
	.udp_table	= NULL,
};

int __net_init udp6_proc_init(struct net *net)
{
	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
		return -ENOMEM;
	return 0;
}

void udp6_proc_exit(struct net *net)
{
	remove_proc_entry("udp6", net->proc_net);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.pre_connect		= udpv6_pre_connect,
	.connect		= udpv6_connect,
	.disconnect		= udp_disconnect,
	.ioctl			= udp_ioctl,
	.init			= udpv6_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.splice_eof		= udpv6_splice_eof,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.put_port		= udp_lib_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= udp_bpf_update_proto,
#endif

	.memory_allocated	= &udp_memory_allocated,
	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,

	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
	.obj_size		= sizeof(struct udp6_sock),
	.ipv6_pinfo_offset	= offsetof(struct udp6_sock, inet6),
	.h.udp_table		= NULL,
	.diag_destroy		= udp_abort,
};

static struct inet_protosw udpv6_protosw = {
	.type =      SOCK_DGRAM,
	.protocol =  IPPROTO_UDP,
	.prot =      &udpv6_prot,
	.ops =       &inet6_dgram_ops,
	.flags =     INET_PROTOSW_PERMANENT,
};

int __init udpv6_init(void)
{
	int ret;

	net_hotdata.udpv6_protocol = (struct inet6_protocol) {
		.handler     = udpv6_rcv,
		.err_handler = udpv6_err,
		.flags	     = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
	};
	ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
}