xref: /linux/net/ipv6/udp.c (revision b0249c0d41b306ddd79de58ca7fea543ab5e7a2e)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	UDP over IPv6
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Based on linux/ipv4/udp.c
10  *
11  *	Fixes:
12  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
13  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
14  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
15  *					to a single port at the same time.
16  *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
17  *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
18  */
19 
20 #include <linux/bpf-cgroup.h>
21 #include <linux/errno.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/in6.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_arp.h>
29 #include <linux/ipv6.h>
30 #include <linux/icmpv6.h>
31 #include <linux/init.h>
32 #include <linux/module.h>
33 #include <linux/skbuff.h>
34 #include <linux/slab.h>
35 #include <linux/uaccess.h>
36 #include <linux/indirect_call_wrapper.h>
37 #include <trace/events/udp.h>
38 
39 #include <net/addrconf.h>
40 #include <net/ndisc.h>
41 #include <net/protocol.h>
42 #include <net/transp_v6.h>
43 #include <net/ip6_route.h>
44 #include <net/raw.h>
45 #include <net/seg6.h>
46 #include <net/tcp_states.h>
47 #include <net/ip6_checksum.h>
48 #include <net/ip6_tunnel.h>
49 #include <net/udp_tunnel.h>
50 #include <net/xfrm.h>
51 #include <net/inet_hashtables.h>
52 #include <net/inet6_hashtables.h>
53 #include <net/busy_poll.h>
54 #include <net/sock_reuseport.h>
55 #include <net/gro.h>
56 
57 #include <linux/proc_fs.h>
58 #include <linux/seq_file.h>
59 #include <trace/events/skb.h>
60 #include "udp_impl.h"
61 
62 static void udpv6_destruct_sock(struct sock *sk)
63 {
64 	udp_destruct_common(sk);
65 	inet6_sock_destruct(sk);
66 }
67 
68 int udpv6_init_sock(struct sock *sk)
69 {
70 	int res = udp_lib_init_sock(sk);
71 
72 	sk->sk_destruct = udpv6_destruct_sock;
73 	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
74 	return res;
75 }
76 
77 INDIRECT_CALLABLE_SCOPE
78 u32 udp6_ehashfn(const struct net *net,
79 		 const struct in6_addr *laddr,
80 		 const u16 lport,
81 		 const struct in6_addr *faddr,
82 		 const __be16 fport)
83 {
84 	u32 lhash, fhash;
85 
86 	net_get_random_once(&udp6_ehash_secret,
87 			    sizeof(udp6_ehash_secret));
88 	net_get_random_once(&udp_ipv6_hash_secret,
89 			    sizeof(udp_ipv6_hash_secret));
90 
91 	lhash = (__force u32)laddr->s6_addr32[3];
92 	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
93 
94 	return __inet6_ehashfn(lhash, lport, fhash, fport,
95 			       udp6_ehash_secret + net_hash_mix(net));
96 }
97 
98 int udp_v6_get_port(struct sock *sk, unsigned short snum)
99 {
100 	unsigned int hash2_nulladdr =
101 		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
102 	unsigned int hash2_partial =
103 		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
104 
105 	/* precompute partial secondary hash */
106 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
107 	return udp_lib_get_port(sk, snum, hash2_nulladdr);
108 }
109 
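/* Recompute the secondary (port + address) hash and, for connected sockets,
 * the 4-tuple hash, after the bound address has changed. v4-mapped sockets
 * take the IPv4 4-tuple hash so that both address families hash consistently.
 */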
110 void udp_v6_rehash(struct sock *sk)
111 {
112 	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
113 					  &sk->sk_v6_rcv_saddr,
114 					  inet_sk(sk)->inet_num);
115 	u16 new_hash4;
116 
117 	if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
118 		new_hash4 = udp_ehashfn(sock_net(sk),
119 					sk->sk_rcv_saddr, sk->sk_num,
120 					sk->sk_daddr, sk->sk_dport);
121 	} else {
122 		new_hash4 = udp6_ehashfn(sock_net(sk),
123 					 &sk->sk_v6_rcv_saddr, sk->sk_num,
124 					 &sk->sk_v6_daddr, sk->sk_dport);
125 	}
126 
127 	udp_lib_rehash(sk, new_hash, new_hash4);
128 }
129 
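/* Score how well @sk matches the incoming packet: -1 means no match at all
 * (wrong namespace, port, family or bound address); otherwise one point is
 * added for each exact match on the connected remote port and address, the
 * bound device and the incoming CPU. The highest score wins in the lookup
 * helpers below.
 */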
130 static int compute_score(struct sock *sk, const struct net *net,
131 			 const struct in6_addr *saddr, __be16 sport,
132 			 const struct in6_addr *daddr, unsigned short hnum,
133 			 int dif, int sdif)
134 {
135 	int bound_dev_if, score;
136 	struct inet_sock *inet;
137 	bool dev_match;
138 
139 	if (!net_eq(sock_net(sk), net) ||
140 	    udp_sk(sk)->udp_port_hash != hnum ||
141 	    sk->sk_family != PF_INET6)
142 		return -1;
143 
144 	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
145 		return -1;
146 
147 	score = 0;
148 	inet = inet_sk(sk);
149 
150 	if (inet->inet_dport) {
151 		if (inet->inet_dport != sport)
152 			return -1;
153 		score++;
154 	}
155 
156 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
157 		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
158 			return -1;
159 		score++;
160 	}
161 
162 	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
163 	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
164 	if (!dev_match)
165 		return -1;
166 	if (bound_dev_if)
167 		score++;
168 
169 	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
170 		score++;
171 
172 	return score;
173 }
174 
175 /**
176  * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
177  * @net:	Network namespace
178  * @saddr:	Source address, network order
179  * @sport:	Source port, network order
180  * @daddr:	Destination address, network order
181  * @hnum:	Destination port, host order
182  * @dif:	Destination interface index
183  * @sdif:	Destination bridge port index, if relevant
184  * @udptable:	Set of UDP hash tables
185  *
186  * Simplified lookup to be used as a fallback if no sockets are found due to a
187  * potential race between a (receive) address change and a lookup happening
188  * before the rehash operation. This function ignores SO_REUSEPORT groups while
189  * scoring result sockets, because if we have one, we don't need the fallback.
190  *
191  * Called under rcu_read_lock().
192  *
193  * Return: socket with highest matching score if any, NULL if none
194  */
195 static struct sock *udp6_lib_lookup1(const struct net *net,
196 				     const struct in6_addr *saddr, __be16 sport,
197 				     const struct in6_addr *daddr,
198 				     unsigned int hnum, int dif, int sdif,
199 				     const struct udp_table *udptable)
200 {
201 	unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
202 	struct udp_hslot *hslot = &udptable->hash[slot];
203 	struct sock *sk, *result = NULL;
204 	int score, badness = 0;
205 
206 	sk_for_each_rcu(sk, &hslot->head) {
207 		score = compute_score(sk, net,
208 				      saddr, sport, daddr, hnum, dif, sdif);
209 		if (score > badness) {
210 			result = sk;
211 			badness = score;
212 		}
213 	}
214 
215 	return result;
216 }
217 
218 /* Lookup in the secondary hash (port + address); called with rcu_read_lock() */
219 static struct sock *udp6_lib_lookup2(const struct net *net,
220 		const struct in6_addr *saddr, __be16 sport,
221 		const struct in6_addr *daddr, unsigned int hnum,
222 		int dif, int sdif, struct udp_hslot *hslot2,
223 		struct sk_buff *skb)
224 {
225 	struct sock *sk, *result;
226 	int score, badness;
227 	bool need_rescore;
228 
229 	result = NULL;
230 	badness = -1;
231 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
232 		need_rescore = false;
233 rescore:
234 		score = compute_score(need_rescore ? result : sk, net, saddr,
235 				      sport, daddr, hnum, dif, sdif);
236 		if (score > badness) {
237 			badness = score;
238 
239 			if (need_rescore)
240 				continue;
241 
242 			if (sk->sk_state == TCP_ESTABLISHED) {
243 				result = sk;
244 				continue;
245 			}
246 
247 			result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
248 							saddr, sport, daddr, hnum, udp6_ehashfn);
249 			if (!result) {
250 				result = sk;
251 				continue;
252 			}
253 
254 			/* Fall back to scoring if group has connections */
255 			if (!reuseport_has_conns(sk))
256 				return result;
257 
258 			/* Reuseport logic returned an error, keep original score. */
259 			if (IS_ERR(result))
260 				continue;
261 
262 			/* compute_score is too long a function to be
263 			 * inlined, and calling it again here yields a
264 			 * measurable overhead for some workloads. Work
265 			 * around that by jumping backwards to rescore
266 			 * 'result'.
267 			 */
268 			need_rescore = true;
269 			goto rescore;
270 		}
271 	}
272 	return result;
273 }
274 
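/* The inet6_lookup_reuseport() call above selects one member of a
 * SO_REUSEPORT group. A minimal userspace sketch of how such a group is
 * created (illustrative only; the port is arbitrary and error handling is
 * omitted):
 *
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_addr   = in6addr_any,
 *				  .sin6_port   = htons(7777) };
 *	int one = 1;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * Every socket bound this way to the same address and port joins the group
 * and receives a share of the incoming datagrams.
 */
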
275 #if IS_ENABLED(CONFIG_BASE_SMALL)
276 static struct sock *udp6_lib_lookup4(const struct net *net,
277 				     const struct in6_addr *saddr, __be16 sport,
278 				     const struct in6_addr *daddr,
279 				     unsigned int hnum, int dif, int sdif,
280 				     struct udp_table *udptable)
281 {
282 	return NULL;
283 }
284 
285 static void udp6_hash4(struct sock *sk)
286 {
287 }
288 #else /* !CONFIG_BASE_SMALL */
289 static struct sock *udp6_lib_lookup4(const struct net *net,
290 				     const struct in6_addr *saddr, __be16 sport,
291 				     const struct in6_addr *daddr,
292 				     unsigned int hnum, int dif, int sdif,
293 				     struct udp_table *udptable)
294 {
295 	const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
296 	const struct hlist_nulls_node *node;
297 	struct udp_hslot *hslot4;
298 	unsigned int hash4, slot;
299 	struct udp_sock *up;
300 	struct sock *sk;
301 
302 	hash4 = udp6_ehashfn(net, daddr, hnum, saddr, sport);
303 	slot = hash4 & udptable->mask;
304 	hslot4 = &udptable->hash4[slot];
305 
306 begin:
307 	udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
308 		sk = (struct sock *)up;
309 		if (inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
310 			return sk;
311 	}
312 
313 	/* If the nulls value we got at the end of this lookup is not the
314 	 * expected one, we must restart the lookup. We probably met an item
315 	 * that was moved to another chain due to a rehash.
316 	 */
317 	if (get_nulls_value(node) != slot)
318 		goto begin;
319 
320 	return NULL;
321 }
322 
323 static void udp6_hash4(struct sock *sk)
324 {
325 	struct net *net = sock_net(sk);
326 	unsigned int hash;
327 
328 	if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
329 		udp4_hash4(sk);
330 		return;
331 	}
332 
333 	if (sk_unhashed(sk) || ipv6_addr_any(&sk->sk_v6_rcv_saddr))
334 		return;
335 
336 	hash = udp6_ehashfn(net, &sk->sk_v6_rcv_saddr, sk->sk_num,
337 			    &sk->sk_v6_daddr, sk->sk_dport);
338 
339 	udp_lib_hash4(sk, hash);
340 }
341 #endif /* CONFIG_BASE_SMALL */
342 
343 /* rcu_read_lock() must be held */
344 struct sock *__udp6_lib_lookup(const struct net *net,
345 			       const struct in6_addr *saddr, __be16 sport,
346 			       const struct in6_addr *daddr, __be16 dport,
347 			       int dif, int sdif, struct udp_table *udptable,
348 			       struct sk_buff *skb)
349 {
350 	unsigned short hnum = ntohs(dport);
351 	struct udp_hslot *hslot2;
352 	struct sock *result, *sk;
353 	unsigned int hash2;
354 
355 	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
356 	hslot2 = udp_hashslot2(udptable, hash2);
357 
358 	if (udp_has_hash4(hslot2)) {
359 		result = udp6_lib_lookup4(net, saddr, sport, daddr, hnum,
360 					  dif, sdif, udptable);
361 		if (result) /* udp6_lib_lookup4 returns sk or NULL */
362 			return result;
363 	}
364 
365 	/* Lookup connected or non-wildcard sockets */
366 	result = udp6_lib_lookup2(net, saddr, sport,
367 				  daddr, hnum, dif, sdif,
368 				  hslot2, skb);
369 	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
370 		goto done;
371 
372 	/* Lookup redirect from BPF */
373 	if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
374 	    udptable == net->ipv4.udp_table) {
375 		sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
376 						saddr, sport, daddr, hnum, dif,
377 						udp6_ehashfn);
378 		if (sk) {
379 			result = sk;
380 			goto done;
381 		}
382 	}
383 
384 	/* Got non-wildcard socket or error on first lookup */
385 	if (result)
386 		goto done;
387 
388 	/* Lookup wildcard sockets */
389 	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
390 	hslot2 = udp_hashslot2(udptable, hash2);
391 
392 	result = udp6_lib_lookup2(net, saddr, sport,
393 				  &in6addr_any, hnum, dif, sdif,
394 				  hslot2, skb);
395 	if (!IS_ERR_OR_NULL(result))
396 		goto done;
397 
398 	/* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
399 	result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
400 				  udptable);
401 
402 done:
403 	if (IS_ERR(result))
404 		return NULL;
405 	return result;
406 }
407 EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
408 
409 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
410 					  __be16 sport, __be16 dport,
411 					  struct udp_table *udptable)
412 {
413 	const struct ipv6hdr *iph = ipv6_hdr(skb);
414 
415 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
416 				 &iph->daddr, dport, inet6_iif(skb),
417 				 inet6_sdif(skb), udptable, skb);
418 }
419 
420 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
421 				 __be16 sport, __be16 dport)
422 {
423 	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
424 	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
425 	struct net *net = dev_net(skb->dev);
426 	int iif, sdif;
427 
428 	inet6_get_iif_sdif(skb, &iif, &sdif);
429 
430 	return __udp6_lib_lookup(net, &iph->saddr, sport,
431 				 &iph->daddr, dport, iif,
432 				 sdif, net->ipv4.udp_table, NULL);
433 }
434 
435 /* Must be called under rcu_read_lock().
436  * Increments the socket refcount when a socket is returned.
437  */
438 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
439 struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr, __be16 sport,
440 			     const struct in6_addr *daddr, __be16 dport, int dif)
441 {
442 	struct sock *sk;
443 
444 	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
445 				dif, 0, net->ipv4.udp_table, NULL);
446 	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
447 		sk = NULL;
448 	return sk;
449 }
450 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
451 #endif
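
/* A typical caller pattern for udp6_lib_lookup() (a generic sketch, not
 * lifted from a specific caller): the lookup must run under rcu_read_lock()
 * and the reference it took must be dropped with sock_put() when done:
 *
 *	rcu_read_lock();
 *	sk = udp6_lib_lookup(net, &saddr, sport, &daddr, dport, dif);
 *	rcu_read_unlock();
 *	if (sk) {
 *		use_socket(sk);		// hypothetical consumer
 *		sock_put(sk);
 *	}
 */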
452 
453 /* Do not use the scratch area len for jumbograms: their length exceeds the
454  * scratch area space. Note that the IP6CB flags are still in the first
455  * cacheline, so checking for jumbograms is cheap.
456  */
457 static int udp6_skb_len(struct sk_buff *skb)
458 {
459 	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
460 }
461 
462 /*
463  *	This should be easy: if there is something there, we
464  *	return it; otherwise we block.
465  */
466 
467 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
468 		  int flags, int *addr_len)
469 {
470 	struct ipv6_pinfo *np = inet6_sk(sk);
471 	struct inet_sock *inet = inet_sk(sk);
472 	struct sk_buff *skb;
473 	unsigned int ulen, copied;
474 	int off, err, peeking = flags & MSG_PEEK;
475 	int is_udplite = IS_UDPLITE(sk);
476 	struct udp_mib __percpu *mib;
477 	bool checksum_valid = false;
478 	int is_udp4;
479 
480 	if (flags & MSG_ERRQUEUE)
481 		return ipv6_recv_error(sk, msg, len, addr_len);
482 
483 	if (np->rxopt.bits.rxpmtu && READ_ONCE(np->rxpmtu))
484 		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
485 
486 try_again:
487 	off = sk_peek_offset(sk, flags);
488 	skb = __skb_recv_udp(sk, flags, &off, &err);
489 	if (!skb)
490 		return err;
491 
492 	ulen = udp6_skb_len(skb);
493 	copied = len;
494 	if (copied > ulen - off)
495 		copied = ulen - off;
496 	else if (copied < ulen)
497 		msg->msg_flags |= MSG_TRUNC;
498 
499 	is_udp4 = (skb->protocol == htons(ETH_P_IP));
500 	mib = __UDPX_MIB(sk, is_udp4);
501 
502 	/*
503 	 * If checksum is needed at all, try to do it while copying the
504 	 * data.  If the data is truncated, or if we only want a partial
505 	 * coverage checksum (UDP-Lite), do it before the copy.
506 	 */
507 
508 	if (copied < ulen || peeking ||
509 	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
510 		checksum_valid = udp_skb_csum_unnecessary(skb) ||
511 				!__udp_lib_checksum_complete(skb);
512 		if (!checksum_valid)
513 			goto csum_copy_err;
514 	}
515 
516 	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
517 		if (udp_skb_is_linear(skb))
518 			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
519 		else
520 			err = skb_copy_datagram_msg(skb, off, msg, copied);
521 	} else {
522 		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
523 		if (err == -EINVAL)
524 			goto csum_copy_err;
525 	}
526 	if (unlikely(err)) {
527 		if (!peeking) {
528 			udp_drops_inc(sk);
529 			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
530 		}
531 		kfree_skb(skb);
532 		return err;
533 	}
534 	if (!peeking)
535 		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
536 
537 	sock_recv_cmsgs(msg, sk, skb);
538 
539 	/* Copy the address. */
540 	if (msg->msg_name) {
541 		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
542 		sin6->sin6_family = AF_INET6;
543 		sin6->sin6_port = udp_hdr(skb)->source;
544 		sin6->sin6_flowinfo = 0;
545 
546 		if (is_udp4) {
547 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
548 					       &sin6->sin6_addr);
549 			sin6->sin6_scope_id = 0;
550 		} else {
551 			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
552 			sin6->sin6_scope_id =
553 				ipv6_iface_scope_id(&sin6->sin6_addr,
554 						    inet6_iif(skb));
555 		}
556 		*addr_len = sizeof(*sin6);
557 
558 		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
559 						      (struct sockaddr *)sin6,
560 						      addr_len);
561 	}
562 
563 	if (udp_test_bit(GRO_ENABLED, sk))
564 		udp_cmsg_recv(msg, sk, skb);
565 
566 	if (np->rxopt.all)
567 		ip6_datagram_recv_common_ctl(sk, msg, skb);
568 
569 	if (is_udp4) {
570 		if (inet_cmsg_flags(inet))
571 			ip_cmsg_recv_offset(msg, sk, skb,
572 					    sizeof(struct udphdr), off);
573 	} else {
574 		if (np->rxopt.all)
575 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
576 	}
577 
578 	err = copied;
579 	if (flags & MSG_TRUNC)
580 		err = ulen;
581 
582 	skb_consume_udp(sk, skb, peeking ? -err : err);
583 	return err;
584 
585 csum_copy_err:
586 	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
587 				 udp_skb_destructor)) {
588 		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
589 		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
590 	}
591 	kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
592 
593 	/* starting over for a new packet, but check if we need to yield */
594 	cond_resched();
595 	msg->msg_flags &= ~MSG_TRUNC;
596 	goto try_again;
597 }
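
/* Userspace-visible effect of the MSG_TRUNC handling above (a sketch; the
 * sizes are arbitrary): with a 100-byte datagram queued and a 16-byte
 * buffer,
 *
 *	n = recv(fd, buf, 16, 0);		// n == 16; recvmsg() would
 *						// also set MSG_TRUNC in
 *						// msg_flags
 *	n = recv(fd, buf, 16, MSG_TRUNC);	// n == 100, the full length
 *
 * and in both cases the rest of that datagram is discarded.
 */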
598 
599 DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
600 void udpv6_encap_enable(void)
601 {
602 	static_branch_inc(&udpv6_encap_needed_key);
603 }
604 EXPORT_SYMBOL(udpv6_encap_enable);
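
/* Tunnel drivers normally flip this key through the UDP tunnel helpers
 * rather than calling udpv6_encap_enable() directly. A kernel-side sketch
 * (the callbacks and private data are hypothetical; the encap type is just
 * an example):
 *
 *	struct udp_tunnel_sock_cfg cfg = {
 *		.sk_user_data	= priv,
 *		.encap_type	= UDP_ENCAP_L2TPINUDP,
 *		.encap_rcv	= my_encap_rcv,
 *		.encap_destroy	= my_encap_destroy,
 *	};
 *
 *	setup_udp_tunnel_sock(net, sock, &cfg);
 */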
605 
606 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go
607  * through error handlers in encapsulations looking for a match.
608  */
609 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
610 				      struct inet6_skb_parm *opt,
611 				      u8 type, u8 code, int offset, __be32 info)
612 {
613 	int i;
614 
615 	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
616 		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
617 			       u8 type, u8 code, int offset, __be32 info);
618 		const struct ip6_tnl_encap_ops *encap;
619 
620 		encap = rcu_dereference(ip6tun_encaps[i]);
621 		if (!encap)
622 			continue;
623 		handler = encap->err_handler;
624 		if (handler && !handler(skb, opt, type, code, offset, info))
625 			return 0;
626 	}
627 
628 	return -ENOENT;
629 }
630 
631 /* Try to match ICMP errors to UDP tunnels by looking up a socket without
632  * reversing source and destination port: this will match tunnels that force the
633  * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
634  * lwtunnels might actually break this assumption by being configured with
635  * different destination ports on endpoints; in that case we won't be able to
636  * trace ICMP messages back to them.
637  *
638  * If this doesn't match any socket, probe tunnels with arbitrary destination
639  * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
640  * we've sent packets to won't necessarily match the local destination port.
641  *
642  * Then ask the tunnel implementation to match the error against a valid
643  * association.
644  *
645  * Return an error pointer if we can't find a match, the socket if it needs
646  * further processing, NULL otherwise.
647  */
648 static struct sock *__udp6_lib_err_encap(struct net *net,
649 					 const struct ipv6hdr *hdr, int offset,
650 					 struct udphdr *uh,
651 					 struct udp_table *udptable,
652 					 struct sock *sk,
653 					 struct sk_buff *skb,
654 					 struct inet6_skb_parm *opt,
655 					 u8 type, u8 code, __be32 info)
656 {
657 	int (*lookup)(struct sock *sk, struct sk_buff *skb);
658 	int network_offset, transport_offset;
659 	struct udp_sock *up;
660 
661 	network_offset = skb_network_offset(skb);
662 	transport_offset = skb_transport_offset(skb);
663 
664 	/* Network header needs to point to the outer IPv6 header inside ICMP */
665 	skb_reset_network_header(skb);
666 
667 	/* Transport header needs to point to the UDP header */
668 	skb_set_transport_header(skb, offset);
669 
670 	if (sk) {
671 		up = udp_sk(sk);
672 
673 		lookup = READ_ONCE(up->encap_err_lookup);
674 		if (lookup && lookup(sk, skb))
675 			sk = NULL;
676 
677 		goto out;
678 	}
679 
680 	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
681 			       &hdr->saddr, uh->dest,
682 			       inet6_iif(skb), 0, udptable, skb);
683 	if (sk) {
684 		up = udp_sk(sk);
685 
686 		lookup = READ_ONCE(up->encap_err_lookup);
687 		if (!lookup || lookup(sk, skb))
688 			sk = NULL;
689 	}
690 
691 out:
692 	if (!sk) {
693 		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
694 							offset, info));
695 	}
696 
697 	skb_set_transport_header(skb, transport_offset);
698 	skb_set_network_header(skb, network_offset);
699 
700 	return sk;
701 }
702 
703 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
704 		   u8 type, u8 code, int offset, __be32 info,
705 		   struct udp_table *udptable)
706 {
707 	struct ipv6_pinfo *np;
708 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
709 	const struct in6_addr *saddr = &hdr->saddr;
710 	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
711 	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
712 	bool tunnel = false;
713 	struct sock *sk;
714 	int harderr;
715 	int err;
716 	struct net *net = dev_net(skb->dev);
717 
718 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
719 			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
720 
721 	if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
722 		/* No socket for error: try tunnels before discarding */
723 		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
724 			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
725 						  udptable, sk, skb,
726 						  opt, type, code, info);
727 			if (!sk)
728 				return 0;
729 		} else
730 			sk = ERR_PTR(-ENOENT);
731 
732 		if (IS_ERR(sk)) {
733 			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
734 					  ICMP6_MIB_INERRORS);
735 			return PTR_ERR(sk);
736 		}
737 
738 		tunnel = true;
739 	}
740 
741 	harderr = icmpv6_err_convert(type, code, &err);
742 	np = inet6_sk(sk);
743 
744 	if (type == ICMPV6_PKT_TOOBIG) {
745 		if (!ip6_sk_accept_pmtu(sk))
746 			goto out;
747 		ip6_sk_update_pmtu(skb, sk, info);
748 		if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
749 			harderr = 1;
750 	}
751 	if (type == NDISC_REDIRECT) {
752 		if (tunnel) {
753 			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
754 				     READ_ONCE(sk->sk_mark),
755 				     sk_uid(sk));
756 		} else {
757 			ip6_sk_redirect(skb, sk);
758 		}
759 		goto out;
760 	}
761 
762 	/* Tunnels don't have an application socket: don't pass errors back */
763 	if (tunnel) {
764 		if (udp_sk(sk)->encap_err_rcv)
765 			udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
766 						  ntohl(info), (u8 *)(uh+1));
767 		goto out;
768 	}
769 
770 	if (!inet6_test_bit(RECVERR6, sk)) {
771 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
772 			goto out;
773 	} else {
774 		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
775 	}
776 
777 	sk->sk_err = err;
778 	sk_error_report(sk);
779 out:
780 	return 0;
781 }
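
/* Errors queued by ipv6_icmp_error() above are delivered to applications
 * that opted in with IPV6_RECVERR; a minimal userspace sketch of draining
 * them (the extended error arrives as an IPV6_RECVERR control message):
 *
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));
 *	n = recvmsg(fd, &msg, MSG_ERRQUEUE);
 */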
782 
783 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
784 {
785 	int rc;
786 
787 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
788 		sock_rps_save_rxhash(sk, skb);
789 		sk_mark_napi_id(sk, skb);
790 		sk_incoming_cpu_update(sk);
791 	} else {
792 		sk_mark_napi_id_once(sk, skb);
793 	}
794 
795 	rc = __udp_enqueue_schedule_skb(sk, skb);
796 	if (rc < 0) {
797 		int is_udplite = IS_UDPLITE(sk);
798 		enum skb_drop_reason drop_reason;
799 
800 		/* Note that an ENOMEM error is charged twice */
801 		if (rc == -ENOMEM) {
802 			UDP6_INC_STATS(sock_net(sk),
803 					 UDP_MIB_RCVBUFERRORS, is_udplite);
804 			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
805 		} else {
806 			UDP6_INC_STATS(sock_net(sk),
807 				       UDP_MIB_MEMERRORS, is_udplite);
808 			drop_reason = SKB_DROP_REASON_PROTO_MEM;
809 		}
810 		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
811 		trace_udp_fail_queue_rcv_skb(rc, sk, skb);
812 		sk_skb_reason_drop(sk, skb, drop_reason);
813 		return -1;
814 	}
815 
816 	return 0;
817 }
818 
819 static __inline__ int udpv6_err(struct sk_buff *skb,
820 				struct inet6_skb_parm *opt, u8 type,
821 				u8 code, int offset, __be32 info)
822 {
823 	return __udp6_lib_err(skb, opt, type, code, offset, info,
824 			      dev_net(skb->dev)->ipv4.udp_table);
825 }
826 
827 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
828 {
829 	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
830 	struct udp_sock *up = udp_sk(sk);
831 	int is_udplite = IS_UDPLITE(sk);
832 
833 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
834 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
835 		goto drop;
836 	}
837 	nf_reset_ct(skb);
838 
839 	if (static_branch_unlikely(&udpv6_encap_needed_key) &&
840 	    READ_ONCE(up->encap_type)) {
841 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
842 
843 		/*
844 		 * This is an encapsulation socket, so pass the skb to
845 		 * the socket's udp_encap_rcv() hook. Otherwise, just
846 		 * fall through and pass it up to the UDP socket.
847 		 * up->encap_rcv() returns the following values:
848 		 * =0 if skb was successfully passed to the encap
849 		 *    handler or was discarded by it.
850 		 * >0 if skb should be passed on to UDP.
851 		 * <0 if skb should be resubmitted as proto -N.
852 		 */
853 
854 		/* if we're overly short, let UDP handle it */
855 		encap_rcv = READ_ONCE(up->encap_rcv);
856 		if (encap_rcv) {
857 			int ret;
858 
859 			/* Verify checksum before giving to encap */
860 			if (udp_lib_checksum_complete(skb))
861 				goto csum_error;
862 
863 			ret = encap_rcv(sk, skb);
864 			if (ret <= 0) {
865 				__UDP6_INC_STATS(sock_net(sk),
866 						 UDP_MIB_INDATAGRAMS,
867 						 is_udplite);
868 				return -ret;
869 			}
870 		}
871 
872 		/* FALLTHROUGH -- it's a UDP Packet */
873 	}
874 
875 	/*
876 	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
877 	 */
878 	if (unlikely(udp_test_bit(UDPLITE_RECV_CC, sk) &&
879 		     UDP_SKB_CB(skb)->partial_cov)) {
880 		u16 pcrlen = READ_ONCE(up->pcrlen);
881 
882 		if (pcrlen == 0) {          /* full coverage was set  */
883 			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
884 					    UDP_SKB_CB(skb)->cscov, skb->len);
885 			goto drop;
886 		}
887 		if (UDP_SKB_CB(skb)->cscov < pcrlen) {
888 			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
889 					    UDP_SKB_CB(skb)->cscov, pcrlen);
890 			goto drop;
891 		}
892 	}
893 
894 	prefetch(&sk->sk_rmem_alloc);
895 	if (rcu_access_pointer(sk->sk_filter) &&
896 	    udp_lib_checksum_complete(skb))
897 		goto csum_error;
898 
899 	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
900 		goto drop;
901 
902 	udp_csum_pull_header(skb);
903 
904 	skb_dst_drop(skb);
905 
906 	return __udpv6_queue_rcv_skb(sk, skb);
907 
908 csum_error:
909 	drop_reason = SKB_DROP_REASON_UDP_CSUM;
910 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
911 drop:
912 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
913 	udp_drops_inc(sk);
914 	sk_skb_reason_drop(sk, skb, drop_reason);
915 	return -1;
916 }
917 
918 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
919 {
920 	struct sk_buff *next, *segs;
921 	int ret;
922 
923 	if (likely(!udp_unexpected_gso(sk, skb)))
924 		return udpv6_queue_rcv_one_skb(sk, skb);
925 
926 	__skb_push(skb, -skb_mac_offset(skb));
927 	segs = udp_rcv_segment(sk, skb, false);
928 	skb_list_walk_safe(segs, skb, next) {
929 		__skb_pull(skb, skb_transport_offset(skb));
930 
931 		udp_post_segment_fix_csum(skb);
932 		ret = udpv6_queue_rcv_one_skb(sk, skb);
933 		if (ret > 0)
934 			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
935 						 true);
936 	}
937 	return 0;
938 }
939 
940 static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
941 				   __be16 loc_port, const struct in6_addr *loc_addr,
942 				   __be16 rmt_port, const struct in6_addr *rmt_addr,
943 				   int dif, int sdif, unsigned short hnum)
944 {
945 	const struct inet_sock *inet = inet_sk(sk);
946 
947 	if (!net_eq(sock_net(sk), net))
948 		return false;
949 
950 	if (udp_sk(sk)->udp_port_hash != hnum ||
951 	    sk->sk_family != PF_INET6 ||
952 	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
953 	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
954 		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
955 	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
956 	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
957 		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
958 		return false;
959 	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
960 		return false;
961 	return true;
962 }
963 
964 static void udp6_csum_zero_error(struct sk_buff *skb)
965 {
966 	/* RFC 2460 section 8.1 says that we SHOULD log
967 	 * this error. Well, it is reasonable.
968 	 */
969 	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
970 			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
971 			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
972 }
973 
974 /*
975  * Note: called only from the BH handler context,
976  * so we don't need to lock the hashes.
977  */
978 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
979 		const struct in6_addr *saddr, const struct in6_addr *daddr,
980 		struct udp_table *udptable, int proto)
981 {
982 	struct sock *sk, *first = NULL;
983 	const struct udphdr *uh = udp_hdr(skb);
984 	unsigned short hnum = ntohs(uh->dest);
985 	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
986 	unsigned int offset = offsetof(typeof(*sk), sk_node);
987 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
988 	int dif = inet6_iif(skb);
989 	int sdif = inet6_sdif(skb);
990 	struct hlist_node *node;
991 	struct sk_buff *nskb;
992 
993 	if (use_hash2) {
994 		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
995 			    udptable->mask;
996 		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
997 start_lookup:
998 		hslot = &udptable->hash2[hash2].hslot;
999 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
1000 	}
1001 
1002 	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
1003 		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
1004 					    uh->source, saddr, dif, sdif,
1005 					    hnum))
1006 			continue;
1007 		/* If the checksum is zero and no_check is not
1008 		 * enabled on the socket, skip it.
1009 		 */
1010 		if (!uh->check && !udp_get_no_check6_rx(sk))
1011 			continue;
1012 		if (!first) {
1013 			first = sk;
1014 			continue;
1015 		}
1016 		nskb = skb_clone(skb, GFP_ATOMIC);
1017 		if (unlikely(!nskb)) {
1018 			udp_drops_inc(sk);
1019 			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
1020 					 IS_UDPLITE(sk));
1021 			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
1022 					 IS_UDPLITE(sk));
1023 			continue;
1024 		}
1025 
1026 		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
1027 			consume_skb(nskb);
1028 	}
1029 
1030 	/* Also look up *:port if we are using hash2 and haven't done so yet. */
1031 	if (use_hash2 && hash2 != hash2_any) {
1032 		hash2 = hash2_any;
1033 		goto start_lookup;
1034 	}
1035 
1036 	if (first) {
1037 		if (udpv6_queue_rcv_skb(first, skb) > 0)
1038 			consume_skb(skb);
1039 	} else {
1040 		kfree_skb(skb);
1041 		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
1042 				 proto == IPPROTO_UDPLITE);
1043 	}
1044 	return 0;
1045 }
1046 
1047 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
1048 {
1049 	if (udp_sk_rx_dst_set(sk, dst))
1050 		sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
1051 }
1052 
1053 /* Wrapper for udpv6_queue_rcv_skb() taking care of csum conversion and
1054  * return code conversion for IP layer consumption.
1055  */
1056 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
1057 				struct udphdr *uh)
1058 {
1059 	int ret;
1060 
1061 	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
1062 		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
1063 
1064 	ret = udpv6_queue_rcv_skb(sk, skb);
1065 
1066 	/* a return value > 0 means to resubmit the input */
1067 	if (ret > 0)
1068 		return ret;
1069 	return 0;
1070 }
1071 
1072 static int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
1073 {
1074 	int err;
1075 
1076 	UDP_SKB_CB(skb)->partial_cov = 0;
1077 	UDP_SKB_CB(skb)->cscov = skb->len;
1078 
1079 	if (proto == IPPROTO_UDPLITE) {
1080 		err = udplite_checksum_init(skb, uh);
1081 		if (err)
1082 			return err;
1083 
1084 		if (UDP_SKB_CB(skb)->partial_cov) {
1085 			skb->csum = ip6_compute_pseudo(skb, proto);
1086 			return 0;
1087 		}
1088 	}
1089 
1090 	/* To support RFC 6936 (allow zero checksum in UDP/IPv6 for tunnels)
1091 	 * we accept a checksum of zero here. When we find the socket
1092 	 * for the UDP packet we'll check if that socket allows zero checksum
1093 	 * for IPv6 (set by socket option).
1094 	 *
1095 	 * Note, we are only interested in != 0 or == 0, thus the
1096 	 * force to int.
1097 	 */
1098 	err = (__force int)skb_checksum_init_zero_check(skb, proto, uh->check,
1099 							ip6_compute_pseudo);
1100 	if (err)
1101 		return err;
1102 
1103 	if (skb->ip_summed == CHECKSUM_COMPLETE && !skb->csum_valid) {
1104 		/* If SW calculated the value, we know it's bad */
1105 		if (skb->csum_complete_sw)
1106 			return 1;
1107 
1108 		/* HW says the value is bad. Let's validate that.
1109 		 * skb->csum is no longer the full packet checksum,
1110 		 * so don't treat it as such.
1111 		 */
1112 		skb_checksum_complete_unset(skb);
1113 	}
1114 
1115 	return 0;
1116 }
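
/* The zero-checksum acceptance deferred above is decided per socket via
 * udp_get_no_check6_rx(); a tunnel-style receiver opts in with (sketch):
 *
 *	int on = 1;
 *
 *	setsockopt(fd, SOL_UDP, UDP_NO_CHECK6_RX, &on, sizeof(on));
 */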
1117 
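/* Common receive path for UDP and UDP-Lite over IPv6: validate length and
 * checksum, then deliver to an early-demuxed socket, a multicast group or a
 * looked-up unicast socket; answer with an ICMPv6 port unreachable when
 * nothing matches.
 */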
1118 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1119 		   int proto)
1120 {
1121 	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
1122 	const struct in6_addr *saddr, *daddr;
1123 	struct net *net = dev_net(skb->dev);
1124 	struct sock *sk = NULL;
1125 	struct udphdr *uh;
1126 	bool refcounted;
1127 	u32 ulen = 0;
1128 
1129 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
1130 		goto discard;
1131 
1132 	saddr = &ipv6_hdr(skb)->saddr;
1133 	daddr = &ipv6_hdr(skb)->daddr;
1134 	uh = udp_hdr(skb);
1135 
1136 	ulen = ntohs(uh->len);
1137 	if (ulen > skb->len)
1138 		goto short_packet;
1139 
1140 	if (proto == IPPROTO_UDP) {
1141 		/* UDP validates ulen. */
1142 
1143 		/* Check for jumbo payload */
1144 		if (ulen == 0)
1145 			ulen = skb->len;
1146 
1147 		if (ulen < sizeof(*uh))
1148 			goto short_packet;
1149 
1150 		if (ulen < skb->len) {
1151 			if (pskb_trim_rcsum(skb, ulen))
1152 				goto short_packet;
1153 			saddr = &ipv6_hdr(skb)->saddr;
1154 			daddr = &ipv6_hdr(skb)->daddr;
1155 			uh = udp_hdr(skb);
1156 		}
1157 	}
1158 
1159 	if (udp6_csum_init(skb, uh, proto))
1160 		goto csum_error;
1161 
1162 	/* Check if the socket is already available, e.g. due to early demux */
1163 	sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
1164 			      &refcounted, udp6_ehashfn);
1165 	if (IS_ERR(sk))
1166 		goto no_sk;
1167 
1168 	if (sk) {
1169 		struct dst_entry *dst = skb_dst(skb);
1170 		int ret;
1171 
1172 		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
1173 			udp6_sk_rx_dst_set(sk, dst);
1174 
1175 		if (!uh->check && !udp_get_no_check6_rx(sk)) {
1176 			if (refcounted)
1177 				sock_put(sk);
1178 			goto report_csum_error;
1179 		}
1180 
1181 		ret = udp6_unicast_rcv_skb(sk, skb, uh);
1182 		if (refcounted)
1183 			sock_put(sk);
1184 		return ret;
1185 	}
1186 
1187 	/*
1188 	 *	Multicast receive code
1189 	 */
1190 	if (ipv6_addr_is_multicast(daddr))
1191 		return __udp6_lib_mcast_deliver(net, skb,
1192 				saddr, daddr, udptable, proto);
1193 
1194 	/* Unicast */
1195 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1196 	if (sk) {
1197 		if (!uh->check && !udp_get_no_check6_rx(sk))
1198 			goto report_csum_error;
1199 		return udp6_unicast_rcv_skb(sk, skb, uh);
1200 	}
1201 no_sk:
1202 	reason = SKB_DROP_REASON_NO_SOCKET;
1203 
1204 	if (!uh->check)
1205 		goto report_csum_error;
1206 
1207 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1208 		goto discard;
1209 	nf_reset_ct(skb);
1210 
1211 	if (udp_lib_checksum_complete(skb))
1212 		goto csum_error;
1213 
1214 	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1215 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1216 
1217 	sk_skb_reason_drop(sk, skb, reason);
1218 	return 0;
1219 
1220 short_packet:
1221 	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1222 		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1223 	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1224 			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
1225 			    saddr, ntohs(uh->source),
1226 			    ulen, skb->len,
1227 			    daddr, ntohs(uh->dest));
1228 	goto discard;
1229 
1230 report_csum_error:
1231 	udp6_csum_zero_error(skb);
1232 csum_error:
1233 	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1234 		reason = SKB_DROP_REASON_UDP_CSUM;
1235 	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1236 discard:
1237 	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1238 	sk_skb_reason_drop(sk, skb, reason);
1239 	return 0;
1240 }
1241 
1242 
1243 static struct sock *__udp6_lib_demux_lookup(struct net *net,
1244 			__be16 loc_port, const struct in6_addr *loc_addr,
1245 			__be16 rmt_port, const struct in6_addr *rmt_addr,
1246 			int dif, int sdif)
1247 {
1248 	struct udp_table *udptable = net->ipv4.udp_table;
1249 	unsigned short hnum = ntohs(loc_port);
1250 	struct udp_hslot *hslot2;
1251 	unsigned int hash2;
1252 	__portpair ports;
1253 	struct sock *sk;
1254 
1255 	hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1256 	hslot2 = udp_hashslot2(udptable, hash2);
1257 	ports = INET_COMBINED_PORTS(rmt_port, hnum);
1258 
1259 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1260 		if (sk->sk_state == TCP_ESTABLISHED &&
1261 		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
1262 			return sk;
1263 		/* Only check the first socket in the chain */
1264 		break;
1265 	}
1266 	return NULL;
1267 }
1268 
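/* Early demux: attach an established socket and its cached route to the skb
 * before the full input path runs, so the regular lookup and routing work
 * can be skipped for connected flows.
 */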
1269 void udp_v6_early_demux(struct sk_buff *skb)
1270 {
1271 	struct net *net = dev_net(skb->dev);
1272 	const struct udphdr *uh;
1273 	struct sock *sk;
1274 	struct dst_entry *dst;
1275 	int dif = skb->dev->ifindex;
1276 	int sdif = inet6_sdif(skb);
1277 
1278 	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1279 	    sizeof(struct udphdr)))
1280 		return;
1281 
1282 	uh = udp_hdr(skb);
1283 
1284 	if (skb->pkt_type == PACKET_HOST)
1285 		sk = __udp6_lib_demux_lookup(net, uh->dest,
1286 					     &ipv6_hdr(skb)->daddr,
1287 					     uh->source, &ipv6_hdr(skb)->saddr,
1288 					     dif, sdif);
1289 	else
1290 		return;
1291 
1292 	if (!sk)
1293 		return;
1294 
1295 	skb->sk = sk;
1296 	DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
1297 	skb->destructor = sock_pfree;
1298 	dst = rcu_dereference(sk->sk_rx_dst);
1299 
1300 	if (dst)
1301 		dst = dst_check(dst, sk->sk_rx_dst_cookie);
1302 	if (dst) {
1303 		/* Set noref for now;
1304 		 * any place that wants to hold the dst has to call
1305 		 * dst_hold_safe().
1306 		 */
1307 		skb_dst_set_noref(skb, dst);
1308 	}
1309 }
1310 
1311 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1312 {
1313 	return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
1314 }
1315 
1316 /*
1317  * Throw away all pending data and cancel the corking. Socket is locked.
1318  */
1319 static void udp_v6_flush_pending_frames(struct sock *sk)
1320 {
1321 	struct udp_sock *up = udp_sk(sk);
1322 
1323 	if (up->pending == AF_INET)
1324 		udp_flush_pending_frames(sk);
1325 	else if (up->pending) {
1326 		up->len = 0;
1327 		WRITE_ONCE(up->pending, 0);
1328 		ip6_flush_pending_frames(sk);
1329 	}
1330 }
1331 
1332 static int udpv6_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
1333 			     int addr_len)
1334 {
1335 	if (addr_len < offsetofend(struct sockaddr, sa_family))
1336 		return -EINVAL;
1337 	/* The following checks are replicated from __ip6_datagram_connect()
1338 	 * and intended to prevent the BPF program called below from accessing
1339 	 * bytes that are out of the bounds specified by the user in addr_len.
1340 	 */
1341 	if (uaddr->sa_family == AF_INET) {
1342 		if (ipv6_only_sock(sk))
1343 			return -EAFNOSUPPORT;
1344 		return udp_pre_connect(sk, uaddr, addr_len);
1345 	}
1346 
1347 	if (addr_len < SIN6_LEN_RFC2133)
1348 		return -EINVAL;
1349 
1350 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
1351 }
1352 
1353 static int udpv6_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
1354 			 int addr_len)
1355 {
1356 	int res;
1357 
1358 	lock_sock(sk);
1359 	res = __ip6_datagram_connect(sk, uaddr, addr_len);
1360 	if (!res)
1361 		udp6_hash4(sk);
1362 	release_sock(sk);
1363 	return res;
1364 }
1365 
1366 /**
1367  *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1368  *	@sk:	socket we are sending on
1369  *	@skb:	sk_buff containing the filled-in UDP header
1370  *		(checksum field must be zeroed out)
1371  *	@saddr: source address
1372  *	@daddr: destination address
1373  *	@len:	length of packet
1374  */
1375 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1376 				 const struct in6_addr *saddr,
1377 				 const struct in6_addr *daddr, int len)
1378 {
1379 	unsigned int offset;
1380 	struct udphdr *uh = udp_hdr(skb);
1381 	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1382 	__wsum csum = 0;
1383 
1384 	if (!frags) {
1385 		/* Only one fragment on the socket.  */
1386 		skb->csum_start = skb_transport_header(skb) - skb->head;
1387 		skb->csum_offset = offsetof(struct udphdr, check);
1388 		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1389 	} else {
1390 		/*
1391 		 * HW checksumming won't work, as there are two or more
1392 		 * fragments on the socket, so the csums of all sk_buffs
1393 		 * must be combined.
1394 		 */
1395 		offset = skb_transport_offset(skb);
1396 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1397 		csum = skb->csum;
1398 
1399 		skb->ip_summed = CHECKSUM_NONE;
1400 
1401 		do {
1402 			csum = csum_add(csum, frags->csum);
1403 		} while ((frags = frags->next));
1404 
1405 		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1406 					    csum);
1407 		if (uh->check == 0)
1408 			uh->check = CSUM_MANGLED_0;
1409 	}
1410 }
1411 
1412 /*
1413  *	Sending
1414  */
1415 
1416 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1417 			   struct inet_cork *cork)
1418 {
1419 	struct sock *sk = skb->sk;
1420 	struct udphdr *uh;
1421 	int err = 0;
1422 	int is_udplite = IS_UDPLITE(sk);
1423 	__wsum csum = 0;
1424 	int offset = skb_transport_offset(skb);
1425 	int len = skb->len - offset;
1426 	int datalen = len - sizeof(*uh);
1427 
1428 	/*
1429 	 * Create a UDP header
1430 	 */
1431 	uh = udp_hdr(skb);
1432 	uh->source = fl6->fl6_sport;
1433 	uh->dest = fl6->fl6_dport;
1434 	uh->len = htons(len);
1435 	uh->check = 0;
1436 
1437 	if (cork->gso_size) {
1438 		const int hlen = skb_network_header_len(skb) +
1439 				 sizeof(struct udphdr);
1440 
1441 		if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
1442 			kfree_skb(skb);
1443 			return -EMSGSIZE;
1444 		}
1445 		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1446 			kfree_skb(skb);
1447 			return -EINVAL;
1448 		}
1449 		if (udp_get_no_check6_tx(sk)) {
1450 			kfree_skb(skb);
1451 			return -EINVAL;
1452 		}
1453 		if (is_udplite || dst_xfrm(skb_dst(skb))) {
1454 			kfree_skb(skb);
1455 			return -EIO;
1456 		}
1457 
1458 		if (datalen > cork->gso_size) {
1459 			skb_shinfo(skb)->gso_size = cork->gso_size;
1460 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1461 			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1462 								 cork->gso_size);
1463 
1464 			/* Don't checksum the payload; the skb will get segmented */
1465 			goto csum_partial;
1466 		}
1467 	}
1468 
1469 	if (is_udplite)
1470 		csum = udplite_csum(skb);
1471 	else if (udp_get_no_check6_tx(sk)) {   /* UDP csum disabled */
1472 		skb->ip_summed = CHECKSUM_NONE;
1473 		goto send;
1474 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1475 csum_partial:
1476 		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1477 		goto send;
1478 	} else
1479 		csum = udp_csum(skb);
1480 
1481 	/* add protocol-dependent pseudo-header */
1482 	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1483 				    len, fl6->flowi6_proto, csum);
1484 	if (uh->check == 0)
1485 		uh->check = CSUM_MANGLED_0;
1486 
1487 send:
1488 	err = ip6_send_skb(skb);
1489 	if (unlikely(err)) {
1490 		if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
1491 			UDP6_INC_STATS(sock_net(sk),
1492 				       UDP_MIB_SNDBUFERRORS, is_udplite);
1493 			err = 0;
1494 		}
1495 	} else {
1496 		UDP6_INC_STATS(sock_net(sk),
1497 			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1498 	}
1499 	return err;
1500 }
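
/* The cork->gso_size path above is driven by UDP segmentation offload; a
 * minimal userspace sketch (the sizes are arbitrary):
 *
 *	int gso = 1200;
 *
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *	send(fd, buf, 6000, 0);		// split into 1200-byte segments
 */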
1501 
1502 static int udp_v6_push_pending_frames(struct sock *sk)
1503 {
1504 	struct sk_buff *skb;
1505 	struct udp_sock  *up = udp_sk(sk);
1506 	int err = 0;
1507 
1508 	if (up->pending == AF_INET)
1509 		return udp_push_pending_frames(sk);
1510 
1511 	skb = ip6_finish_skb(sk);
1512 	if (!skb)
1513 		goto out;
1514 
1515 	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
1516 			      &inet_sk(sk)->cork.base);
1517 out:
1518 	up->len = 0;
1519 	WRITE_ONCE(up->pending, 0);
1520 	return err;
1521 }
1522 
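/* Transmit path: falls back to udp_sendmsg() for v4-mapped destinations,
 * honours corking (UDP_CORK/MSG_MORE), assembles the flow and options from
 * cmsgs, and hands the data to ip6_make_skb() or ip6_append_data().
 */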
1523 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1524 {
1525 	struct ipv6_txoptions opt_space;
1526 	struct udp_sock *up = udp_sk(sk);
1527 	struct inet_sock *inet = inet_sk(sk);
1528 	struct ipv6_pinfo *np = inet6_sk(sk);
1529 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1530 	struct in6_addr *daddr, *final_p, final;
1531 	struct ipv6_txoptions *opt = NULL;
1532 	struct ipv6_txoptions *opt_to_free = NULL;
1533 	struct ip6_flowlabel *flowlabel = NULL;
1534 	struct inet_cork_full cork;
1535 	struct flowi6 *fl6 = &cork.fl.u.ip6;
1536 	struct dst_entry *dst;
1537 	struct ipcm6_cookie ipc6;
1538 	int addr_len = msg->msg_namelen;
1539 	bool connected = false;
1540 	int ulen = len;
1541 	int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
1542 	int err;
1543 	int is_udplite = IS_UDPLITE(sk);
1544 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1545 
1546 	ipcm6_init_sk(&ipc6, sk);
1547 	ipc6.gso_size = READ_ONCE(up->gso_size);
1548 
1549 	/* destination address check */
1550 	if (sin6) {
1551 		if (addr_len < offsetof(struct sockaddr, sa_data))
1552 			return -EINVAL;
1553 
1554 		switch (sin6->sin6_family) {
1555 		case AF_INET6:
1556 			if (addr_len < SIN6_LEN_RFC2133)
1557 				return -EINVAL;
1558 			daddr = &sin6->sin6_addr;
1559 			if (ipv6_addr_any(daddr) &&
1560 			    ipv6_addr_v4mapped(&np->saddr))
1561 				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1562 						       daddr);
1563 			break;
1564 		case AF_INET:
1565 			goto do_udp_sendmsg;
1566 		case AF_UNSPEC:
1567 			msg->msg_name = sin6 = NULL;
1568 			msg->msg_namelen = addr_len = 0;
1569 			daddr = NULL;
1570 			break;
1571 		default:
1572 			return -EINVAL;
1573 		}
1574 	} else if (!READ_ONCE(up->pending)) {
1575 		if (sk->sk_state != TCP_ESTABLISHED)
1576 			return -EDESTADDRREQ;
1577 		daddr = &sk->sk_v6_daddr;
1578 	} else
1579 		daddr = NULL;
1580 
1581 	if (daddr) {
1582 		if (ipv6_addr_v4mapped(daddr)) {
1583 			struct sockaddr_in sin;
1584 			sin.sin_family = AF_INET;
1585 			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1586 			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1587 			msg->msg_name = &sin;
1588 			msg->msg_namelen = sizeof(sin);
1589 do_udp_sendmsg:
1590 			err = ipv6_only_sock(sk) ?
1591 				-ENETUNREACH : udp_sendmsg(sk, msg, len);
1592 			msg->msg_name = sin6;
1593 			msg->msg_namelen = addr_len;
1594 			return err;
1595 		}
1596 	}
1597 
1598 	/* Rough check on arithmetic overflow;
1599 	   a better check is made in ip6_append_data().
1600 	   */
1601 	if (len > INT_MAX - sizeof(struct udphdr))
1602 		return -EMSGSIZE;
1603 
1604 	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1605 	if (READ_ONCE(up->pending)) {
1606 		if (READ_ONCE(up->pending) == AF_INET)
1607 			return udp_sendmsg(sk, msg, len);
1608 		/*
1609 		 * There are pending frames.
1610 		 * The socket lock must be held while it's corked.
1611 		 */
1612 		lock_sock(sk);
1613 		if (likely(up->pending)) {
1614 			if (unlikely(up->pending != AF_INET6)) {
1615 				release_sock(sk);
1616 				return -EAFNOSUPPORT;
1617 			}
1618 			dst = NULL;
1619 			goto do_append_data;
1620 		}
1621 		release_sock(sk);
1622 	}
1623 	ulen += sizeof(struct udphdr);
1624 
1625 	memset(fl6, 0, sizeof(*fl6));
1626 
1627 	if (sin6) {
1628 		if (sin6->sin6_port == 0)
1629 			return -EINVAL;
1630 
1631 		fl6->fl6_dport = sin6->sin6_port;
1632 		daddr = &sin6->sin6_addr;
1633 
1634 		if (inet6_test_bit(SNDFLOW, sk)) {
1635 			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1636 			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
1637 				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1638 				if (IS_ERR(flowlabel))
1639 					return -EINVAL;
1640 			}
1641 		}
1642 
1643 		/*
1644 		 * Otherwise it will be difficult to maintain
1645 		 * sk->sk_dst_cache.
1646 		 */
1647 		if (sk->sk_state == TCP_ESTABLISHED &&
1648 		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1649 			daddr = &sk->sk_v6_daddr;
1650 
1651 		if (addr_len >= sizeof(struct sockaddr_in6) &&
1652 		    sin6->sin6_scope_id &&
1653 		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1654 			fl6->flowi6_oif = sin6->sin6_scope_id;
1655 	} else {
1656 		if (sk->sk_state != TCP_ESTABLISHED)
1657 			return -EDESTADDRREQ;
1658 
1659 		fl6->fl6_dport = inet->inet_dport;
1660 		daddr = &sk->sk_v6_daddr;
1661 		fl6->flowlabel = np->flow_label;
1662 		connected = true;
1663 	}
1664 
1665 	if (!fl6->flowi6_oif)
1666 		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
1667 
1668 	if (!fl6->flowi6_oif)
1669 		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1670 
1671 	fl6->flowi6_uid = sk_uid(sk);
1672 
1673 	if (msg->msg_controllen) {
1674 		opt = &opt_space;
1675 		memset(opt, 0, sizeof(struct ipv6_txoptions));
1676 		opt->tot_len = sizeof(*opt);
1677 		ipc6.opt = opt;
1678 
1679 		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1680 		if (err > 0) {
1681 			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
1682 						    &ipc6);
1683 			connected = false;
1684 		}
1685 		if (err < 0) {
1686 			fl6_sock_release(flowlabel);
1687 			return err;
1688 		}
1689 		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1690 			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1691 			if (IS_ERR(flowlabel))
1692 				return -EINVAL;
1693 		}
1694 		if (!(opt->opt_nflen|opt->opt_flen))
1695 			opt = NULL;
1696 	}
1697 	if (!opt) {
1698 		opt = txopt_get(np);
1699 		opt_to_free = opt;
1700 	}
1701 	if (flowlabel)
1702 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1703 	opt = ipv6_fixup_options(&opt_space, opt);
1704 	ipc6.opt = opt;
1705 
1706 	fl6->flowi6_proto = sk->sk_protocol;
1707 	fl6->flowi6_mark = ipc6.sockc.mark;
1708 	fl6->daddr = *daddr;
1709 	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
1710 		fl6->saddr = np->saddr;
1711 	fl6->fl6_sport = inet->inet_sport;
1712 
1713 	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1714 		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1715 					   (struct sockaddr *)sin6,
1716 					   &addr_len,
1717 					   &fl6->saddr);
1718 		if (err)
1719 			goto out_no_dst;
1720 		if (sin6) {
1721 			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1722 				/* BPF program rewrote IPv6-only by IPv4-mapped
1723 				 * IPv6. It's currently unsupported.
1724 				 */
1725 				err = -ENOTSUPP;
1726 				goto out_no_dst;
1727 			}
1728 			if (sin6->sin6_port == 0) {
1729 				/* BPF program set invalid port. Reject it. */
1730 				err = -EINVAL;
1731 				goto out_no_dst;
1732 			}
1733 			fl6->fl6_dport = sin6->sin6_port;
1734 			fl6->daddr = sin6->sin6_addr;
1735 		}
1736 	}
1737 
1738 	if (ipv6_addr_any(&fl6->daddr))
1739 		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1740 
1741 	final_p = fl6_update_dst(fl6, opt, &final);
1742 	if (final_p)
1743 		connected = false;
1744 
1745 	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
1746 		fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
1747 		connected = false;
1748 	} else if (!fl6->flowi6_oif)
1749 		fl6->flowi6_oif = READ_ONCE(np->ucast_oif);
1750 
1751 	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
1752 
1753 	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
1754 
1755 	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
1756 	if (IS_ERR(dst)) {
1757 		err = PTR_ERR(dst);
1758 		dst = NULL;
1759 		goto out;
1760 	}
1761 
1762 	if (ipc6.hlimit < 0)
1763 		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);
1764 
1765 	if (msg->msg_flags&MSG_CONFIRM)
1766 		goto do_confirm;
1767 back_from_confirm:
1768 
1769 	/* Lockless fast path for the non-corking case */
1770 	if (!corkreq) {
1771 		struct sk_buff *skb;
1772 
1773 		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1774 				   sizeof(struct udphdr), &ipc6,
1775 				   dst_rt6_info(dst),
1776 				   msg->msg_flags, &cork);
1777 		err = PTR_ERR(skb);
1778 		if (!IS_ERR_OR_NULL(skb))
1779 			err = udp_v6_send_skb(skb, fl6, &cork.base);
1780 		/* ip6_make_skb steals dst reference */
1781 		goto out_no_dst;
1782 	}
1783 
1784 	lock_sock(sk);
1785 	if (unlikely(up->pending)) {
1786 		/* The socket is already corked while preparing it. */
1787 		/* ... which is an evident application bug. --ANK */
1788 		release_sock(sk);
1789 
1790 		net_dbg_ratelimited("udp cork app bug 2\n");
1791 		err = -EINVAL;
1792 		goto out;
1793 	}
1794 
1795 	WRITE_ONCE(up->pending, AF_INET6);
1796 
1797 do_append_data:
1798 	up->len += ulen;
1799 	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1800 			      &ipc6, fl6, dst_rt6_info(dst),
1801 			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1802 	if (err)
1803 		udp_v6_flush_pending_frames(sk);
1804 	else if (!corkreq)
1805 		err = udp_v6_push_pending_frames(sk);
1806 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1807 		WRITE_ONCE(up->pending, 0);
1808 
1809 	if (err > 0)
1810 		err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
1811 	release_sock(sk);
1812 
1813 out:
1814 	dst_release(dst);
1815 out_no_dst:
1816 	fl6_sock_release(flowlabel);
1817 	txopt_put(opt_to_free);
1818 	if (!err)
1819 		return len;
1820 	/*
1821 	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1822 	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1823 	 * we don't have a good statistic (IpOutDiscards but it can be too many
1824 	 * things).  We could add another new stat but at least for now that
1825 	 * seems like overkill.
1826 	 */
1827 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1828 		UDP6_INC_STATS(sock_net(sk),
1829 			       UDP_MIB_SNDBUFERRORS, is_udplite);
1830 	}
1831 	return err;
1832 
1833 do_confirm:
1834 	if (msg->msg_flags & MSG_PROBE)
1835 		dst_confirm_neigh(dst, &fl6->daddr);
1836 	if (!(msg->msg_flags&MSG_PROBE) || len)
1837 		goto back_from_confirm;
1838 	err = 0;
1839 	goto out;
1840 }
1841 EXPORT_SYMBOL(udpv6_sendmsg);
1842 
1843 static void udpv6_splice_eof(struct socket *sock)
1844 {
1845 	struct sock *sk = sock->sk;
1846 	struct udp_sock *up = udp_sk(sk);
1847 
1848 	if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
1849 		return;
1850 
1851 	lock_sock(sk);
1852 	if (up->pending && !udp_test_bit(CORK, sk))
1853 		udp_v6_push_pending_frames(sk);
1854 	release_sock(sk);
1855 }
1856 
1857 void udpv6_destroy_sock(struct sock *sk)
1858 {
1859 	struct udp_sock *up = udp_sk(sk);
1860 	lock_sock(sk);
1861 
1862 	/* protects from races with udp_abort() */
1863 	sock_set_flag(sk, SOCK_DEAD);
1864 	udp_v6_flush_pending_frames(sk);
1865 	release_sock(sk);
1866 
1867 	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1868 		if (up->encap_type) {
1869 			void (*encap_destroy)(struct sock *sk);
1870 			encap_destroy = READ_ONCE(up->encap_destroy);
1871 			if (encap_destroy)
1872 				encap_destroy(sk);
1873 		}
1874 		if (udp_test_bit(ENCAP_ENABLED, sk)) {
1875 			static_branch_dec(&udpv6_encap_needed_key);
1876 			udp_encap_disable();
1877 			udp_tunnel_cleanup_gro(sk);
1878 		}
1879 	}
1880 }
1881 
1882 /*
1883  *	Socket option code for UDP
1884  */
1885 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1886 		     unsigned int optlen)
1887 {
1888 	if (level == SOL_UDP  ||  level == SOL_UDPLITE || level == SOL_SOCKET)
1889 		return udp_lib_setsockopt(sk, level, optname,
1890 					  optval, optlen,
1891 					  udp_v6_push_pending_frames);
1892 	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1893 }
1894 
1895 int udpv6_getsockopt(struct sock *sk, int level, int optname,
1896 		     char __user *optval, int __user *optlen)
1897 {
1898 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1899 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1900 	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1901 }
1902 
1903 
1904 /* ------------------------------------------------------------------------ */
1905 #ifdef CONFIG_PROC_FS
1906 int udp6_seq_show(struct seq_file *seq, void *v)
1907 {
1908 	if (v == SEQ_START_TOKEN) {
1909 		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1910 	} else {
1911 		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1912 		const struct inet_sock *inet = inet_sk((const struct sock *)v);
1913 		__u16 srcp = ntohs(inet->inet_sport);
1914 		__u16 destp = ntohs(inet->inet_dport);
1915 		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1916 					  udp_rqueue_get(v), bucket);
1917 	}
1918 	return 0;
1919 }
1920 
1921 const struct seq_operations udp6_seq_ops = {
1922 	.start		= udp_seq_start,
1923 	.next		= udp_seq_next,
1924 	.stop		= udp_seq_stop,
1925 	.show		= udp6_seq_show,
1926 };
1927 EXPORT_SYMBOL(udp6_seq_ops);
1928 
1929 static struct udp_seq_afinfo udp6_seq_afinfo = {
1930 	.family		= AF_INET6,
1931 	.udp_table	= NULL,
1932 };
1933 
1934 int __net_init udp6_proc_init(struct net *net)
1935 {
1936 	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1937 			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1938 		return -ENOMEM;
1939 	return 0;
1940 }
1941 
1942 void udp6_proc_exit(struct net *net)
1943 {
1944 	remove_proc_entry("udp6", net->proc_net);
1945 }
1946 #endif /* CONFIG_PROC_FS */
1947 
1948 /* ------------------------------------------------------------------------ */
1949 
1950 struct proto udpv6_prot = {
1951 	.name			= "UDPv6",
1952 	.owner			= THIS_MODULE,
1953 	.close			= udp_lib_close,
1954 	.pre_connect		= udpv6_pre_connect,
1955 	.connect		= udpv6_connect,
1956 	.disconnect		= udp_disconnect,
1957 	.ioctl			= udp_ioctl,
1958 	.init			= udpv6_init_sock,
1959 	.destroy		= udpv6_destroy_sock,
1960 	.setsockopt		= udpv6_setsockopt,
1961 	.getsockopt		= udpv6_getsockopt,
1962 	.sendmsg		= udpv6_sendmsg,
1963 	.recvmsg		= udpv6_recvmsg,
1964 	.splice_eof		= udpv6_splice_eof,
1965 	.release_cb		= ip6_datagram_release_cb,
1966 	.hash			= udp_lib_hash,
1967 	.unhash			= udp_lib_unhash,
1968 	.rehash			= udp_v6_rehash,
1969 	.get_port		= udp_v6_get_port,
1970 	.put_port		= udp_lib_unhash,
1971 #ifdef CONFIG_BPF_SYSCALL
1972 	.psock_update_sk_prot	= udp_bpf_update_proto,
1973 #endif
1974 
1975 	.memory_allocated	= &net_aligned_data.udp_memory_allocated,
1976 	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,
1977 
1978 	.sysctl_mem		= sysctl_udp_mem,
1979 	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1980 	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1981 	.obj_size		= sizeof(struct udp6_sock),
1982 	.ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6),
1983 	.h.udp_table		= NULL,
1984 	.diag_destroy		= udp_abort,
1985 };
1986 
1987 static struct inet_protosw udpv6_protosw = {
1988 	.type =      SOCK_DGRAM,
1989 	.protocol =  IPPROTO_UDP,
1990 	.prot =      &udpv6_prot,
1991 	.ops =       &inet6_dgram_ops,
1992 	.flags =     INET_PROTOSW_PERMANENT,
1993 };
1994 
1995 int __init udpv6_init(void)
1996 {
1997 	int ret;
1998 
1999 	net_hotdata.udpv6_protocol = (struct inet6_protocol) {
2000 		.handler     = udpv6_rcv,
2001 		.err_handler = udpv6_err,
2002 		.flags	     = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
2003 	};
2004 	ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
2005 	if (ret)
2006 		goto out;
2007 
2008 	ret = inet6_register_protosw(&udpv6_protosw);
2009 	if (ret)
2010 		goto out_udpv6_protocol;
2011 out:
2012 	return ret;
2013 
2014 out_udpv6_protocol:
2015 	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
2016 	goto out;
2017 }
2018 
2019 void udpv6_exit(void)
2020 {
2021 	inet6_unregister_protosw(&udpv6_protosw);
2022 	inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
2023 }
2024