xref: /linux/net/ipv6/udp.c (revision 90e0d94d369d342e735a75174439482119b6c393)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	UDP over IPv6
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Based on linux/ipv4/udp.c
10  *
11  *	Fixes:
12  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
13  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 14  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
15  *					a single port at the same time.
16  *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
17  *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
18  */
19 
20 #include <linux/bpf-cgroup.h>
21 #include <linux/errno.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/in6.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_arp.h>
29 #include <linux/ipv6.h>
30 #include <linux/icmpv6.h>
31 #include <linux/init.h>
32 #include <linux/module.h>
33 #include <linux/skbuff.h>
34 #include <linux/slab.h>
35 #include <linux/uaccess.h>
36 #include <linux/indirect_call_wrapper.h>
37 
38 #include <net/addrconf.h>
39 #include <net/ndisc.h>
40 #include <net/protocol.h>
41 #include <net/transp_v6.h>
42 #include <net/ip6_route.h>
43 #include <net/raw.h>
44 #include <net/seg6.h>
45 #include <net/tcp_states.h>
46 #include <net/ip6_checksum.h>
47 #include <net/ip6_tunnel.h>
48 #include <net/xfrm.h>
49 #include <net/inet_hashtables.h>
50 #include <net/inet6_hashtables.h>
51 #include <net/busy_poll.h>
52 #include <net/sock_reuseport.h>
53 
54 #include <linux/proc_fs.h>
55 #include <linux/seq_file.h>
56 #include <trace/events/skb.h>
57 #include "udp_impl.h"
58 
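/* Flow hash used for SO_REUSEPORT socket selection. Note that only the
 * low 32 bits of the local address are mixed in, as __inet6_ehashfn()
 * expects; the foreign address is jhashed in full, and udp6_ehash_secret
 * plus a per-netns salt keeps the result unpredictable across namespaces.
 */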
59 static u32 udp6_ehashfn(const struct net *net,
60 			const struct in6_addr *laddr,
61 			const u16 lport,
62 			const struct in6_addr *faddr,
63 			const __be16 fport)
64 {
65 	static u32 udp6_ehash_secret __read_mostly;
66 	static u32 udp_ipv6_hash_secret __read_mostly;
67 
68 	u32 lhash, fhash;
69 
70 	net_get_random_once(&udp6_ehash_secret,
71 			    sizeof(udp6_ehash_secret));
72 	net_get_random_once(&udp_ipv6_hash_secret,
73 			    sizeof(udp_ipv6_hash_secret));
74 
75 	lhash = (__force u32)laddr->s6_addr32[3];
76 	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
77 
78 	return __inet6_ehashfn(lhash, lport, fhash, fport,
 79 			       udp6_ehash_secret + net_hash_mix(net));
80 }
81 
82 int udp_v6_get_port(struct sock *sk, unsigned short snum)
83 {
84 	unsigned int hash2_nulladdr =
85 		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
86 	unsigned int hash2_partial =
87 		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
88 
89 	/* precompute partial secondary hash */
90 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
91 	return udp_lib_get_port(sk, snum, hash2_nulladdr);
92 }
93 
94 void udp_v6_rehash(struct sock *sk)
95 {
96 	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
97 					  &sk->sk_v6_rcv_saddr,
98 					  inet_sk(sk)->inet_num);
99 
100 	udp_lib_rehash(sk, new_hash);
101 }
102 
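/* Score how well @sk matches the incoming packet: -1 means no match;
 * otherwise one point is added for each matching specific attribute
 * (connected destination port and address, bound device, incoming CPU).
 * udp6_lib_lookup2() keeps the highest-scoring socket.
 */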
103 static int compute_score(struct sock *sk, struct net *net,
104 			 const struct in6_addr *saddr, __be16 sport,
105 			 const struct in6_addr *daddr, unsigned short hnum,
106 			 int dif, int sdif)
107 {
108 	int bound_dev_if, score;
109 	struct inet_sock *inet;
110 	bool dev_match;
111 
112 	if (!net_eq(sock_net(sk), net) ||
113 	    udp_sk(sk)->udp_port_hash != hnum ||
114 	    sk->sk_family != PF_INET6)
115 		return -1;
116 
117 	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
118 		return -1;
119 
120 	score = 0;
121 	inet = inet_sk(sk);
122 
123 	if (inet->inet_dport) {
124 		if (inet->inet_dport != sport)
125 			return -1;
126 		score++;
127 	}
128 
129 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
130 		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
131 			return -1;
132 		score++;
133 	}
134 
135 	bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
136 	dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
137 	if (!dev_match)
138 		return -1;
139 	if (bound_dev_if)
140 		score++;
141 
142 	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
143 		score++;
144 
145 	return score;
146 }
147 
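/* If @sk belongs to a SO_REUSEPORT group and is not connected, pick a
 * group member based on the flow hash; returns NULL when no reuseport
 * selection applies.
 */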
148 static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
149 				     struct sk_buff *skb,
150 				     const struct in6_addr *saddr,
151 				     __be16 sport,
152 				     const struct in6_addr *daddr,
153 				     unsigned int hnum)
154 {
155 	struct sock *reuse_sk = NULL;
156 	u32 hash;
157 
158 	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
159 		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
160 		reuse_sk = reuseport_select_sock(sk, hash, skb,
161 						 sizeof(struct udphdr));
162 	}
163 	return reuse_sk;
164 }
165 
166 /* called with rcu_read_lock() */
167 static struct sock *udp6_lib_lookup2(struct net *net,
168 		const struct in6_addr *saddr, __be16 sport,
169 		const struct in6_addr *daddr, unsigned int hnum,
170 		int dif, int sdif, struct udp_hslot *hslot2,
171 		struct sk_buff *skb)
172 {
173 	struct sock *sk, *result;
174 	int score, badness;
175 
176 	result = NULL;
177 	badness = -1;
178 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
179 		score = compute_score(sk, net, saddr, sport,
180 				      daddr, hnum, dif, sdif);
181 		if (score > badness) {
182 			result = lookup_reuseport(net, sk, skb,
183 						  saddr, sport, daddr, hnum);
184 			/* Fall back to scoring if group has connections */
185 			if (result && !reuseport_has_conns(sk, false))
186 				return result;
187 
188 			result = result ? : sk;
189 			badness = score;
190 		}
191 	}
192 	return result;
193 }
194 
195 static inline struct sock *udp6_lookup_run_bpf(struct net *net,
196 					       struct udp_table *udptable,
197 					       struct sk_buff *skb,
198 					       const struct in6_addr *saddr,
199 					       __be16 sport,
200 					       const struct in6_addr *daddr,
201 					       u16 hnum, const int dif)
202 {
203 	struct sock *sk, *reuse_sk;
204 	bool no_reuseport;
205 
206 	if (udptable != &udp_table)
207 		return NULL; /* only UDP is supported */
208 
209 	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
210 					    daddr, hnum, dif, &sk);
211 	if (no_reuseport || IS_ERR_OR_NULL(sk))
212 		return sk;
213 
214 	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
215 	if (reuse_sk)
216 		sk = reuse_sk;
217 	return sk;
218 }
219 
220 /* rcu_read_lock() must be held */
221 struct sock *__udp6_lib_lookup(struct net *net,
222 			       const struct in6_addr *saddr, __be16 sport,
223 			       const struct in6_addr *daddr, __be16 dport,
224 			       int dif, int sdif, struct udp_table *udptable,
225 			       struct sk_buff *skb)
226 {
227 	unsigned short hnum = ntohs(dport);
228 	unsigned int hash2, slot2;
229 	struct udp_hslot *hslot2;
230 	struct sock *result, *sk;
231 
232 	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
233 	slot2 = hash2 & udptable->mask;
234 	hslot2 = &udptable->hash2[slot2];
235 
236 	/* Lookup connected or non-wildcard sockets */
237 	result = udp6_lib_lookup2(net, saddr, sport,
238 				  daddr, hnum, dif, sdif,
239 				  hslot2, skb);
240 	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
241 		goto done;
242 
243 	/* Lookup redirect from BPF */
244 	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
245 		sk = udp6_lookup_run_bpf(net, udptable, skb,
246 					 saddr, sport, daddr, hnum, dif);
247 		if (sk) {
248 			result = sk;
249 			goto done;
250 		}
251 	}
252 
253 	/* Got non-wildcard socket or error on first lookup */
254 	if (result)
255 		goto done;
256 
257 	/* Lookup wildcard sockets */
258 	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
259 	slot2 = hash2 & udptable->mask;
260 	hslot2 = &udptable->hash2[slot2];
261 
262 	result = udp6_lib_lookup2(net, saddr, sport,
263 				  &in6addr_any, hnum, dif, sdif,
264 				  hslot2, skb);
265 done:
266 	if (IS_ERR(result))
267 		return NULL;
268 	return result;
269 }
270 EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
271 
272 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
273 					  __be16 sport, __be16 dport,
274 					  struct udp_table *udptable)
275 {
276 	const struct ipv6hdr *iph = ipv6_hdr(skb);
277 
278 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
279 				 &iph->daddr, dport, inet6_iif(skb),
280 				 inet6_sdif(skb), udptable, skb);
281 }
282 
283 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
284 				 __be16 sport, __be16 dport)
285 {
286 	const struct ipv6hdr *iph = ipv6_hdr(skb);
287 
288 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
289 				 &iph->daddr, dport, inet6_iif(skb),
290 				 inet6_sdif(skb), &udp_table, NULL);
291 }
292 
293 /* Must be called under rcu_read_lock().
294  * Does increment socket refcount.
295  */
296 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
297 struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
298 			     const struct in6_addr *daddr, __be16 dport, int dif)
299 {
300 	struct sock *sk;
301 
302 	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
303 				dif, 0, &udp_table, NULL);
304 	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
305 		sk = NULL;
306 	return sk;
307 }
308 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
309 #endif
310 
311 /* do not use the scratch area len for jumbograms: their length exceeds the
312  * scratch area space; note that the IP6CB flags are still in the first
313  * cacheline, so checking for jumbograms is cheap
314  */
315 static int udp6_skb_len(struct sk_buff *skb)
316 {
317 	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
318 }
319 
320 /*
321  *	This should be easy: if there is something there, we
322  *	return it; otherwise we block.
323  */
324 
325 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
326 		  int flags, int *addr_len)
327 {
328 	struct ipv6_pinfo *np = inet6_sk(sk);
329 	struct inet_sock *inet = inet_sk(sk);
330 	struct sk_buff *skb;
331 	unsigned int ulen, copied;
332 	int off, err, peeking = flags & MSG_PEEK;
333 	int is_udplite = IS_UDPLITE(sk);
334 	struct udp_mib __percpu *mib;
335 	bool checksum_valid = false;
336 	int is_udp4;
337 
338 	if (flags & MSG_ERRQUEUE)
339 		return ipv6_recv_error(sk, msg, len, addr_len);
340 
341 	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
342 		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
343 
344 try_again:
345 	off = sk_peek_offset(sk, flags);
346 	skb = __skb_recv_udp(sk, flags, &off, &err);
347 	if (!skb)
348 		return err;
349 
350 	ulen = udp6_skb_len(skb);
351 	copied = len;
352 	if (copied > ulen - off)
353 		copied = ulen - off;
354 	else if (copied < ulen)
355 		msg->msg_flags |= MSG_TRUNC;
356 
357 	is_udp4 = (skb->protocol == htons(ETH_P_IP));
358 	mib = __UDPX_MIB(sk, is_udp4);
359 
360 	/*
361 	 * If checksum is needed at all, try to do it while copying the
362 	 * data.  If the data is truncated, or if we only want a partial
363 	 * coverage checksum (UDP-Lite), do it before the copy.
364 	 */
365 
366 	if (copied < ulen || peeking ||
367 	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
368 		checksum_valid = udp_skb_csum_unnecessary(skb) ||
369 				!__udp_lib_checksum_complete(skb);
370 		if (!checksum_valid)
371 			goto csum_copy_err;
372 	}
373 
374 	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
375 		if (udp_skb_is_linear(skb))
376 			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
377 		else
378 			err = skb_copy_datagram_msg(skb, off, msg, copied);
379 	} else {
380 		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
381 		if (err == -EINVAL)
382 			goto csum_copy_err;
383 	}
384 	if (unlikely(err)) {
385 		if (!peeking) {
386 			atomic_inc(&sk->sk_drops);
387 			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
388 		}
389 		kfree_skb(skb);
390 		return err;
391 	}
392 	if (!peeking)
393 		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
394 
395 	sock_recv_cmsgs(msg, sk, skb);
396 
397 	/* Copy the address. */
398 	if (msg->msg_name) {
399 		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
400 		sin6->sin6_family = AF_INET6;
401 		sin6->sin6_port = udp_hdr(skb)->source;
402 		sin6->sin6_flowinfo = 0;
403 
404 		if (is_udp4) {
405 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
406 					       &sin6->sin6_addr);
407 			sin6->sin6_scope_id = 0;
408 		} else {
409 			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
410 			sin6->sin6_scope_id =
411 				ipv6_iface_scope_id(&sin6->sin6_addr,
412 						    inet6_iif(skb));
413 		}
414 		*addr_len = sizeof(*sin6);
415 
416 		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
417 						      (struct sockaddr *)sin6);
418 	}
419 
420 	if (udp_sk(sk)->gro_enabled)
421 		udp_cmsg_recv(msg, sk, skb);
422 
423 	if (np->rxopt.all)
424 		ip6_datagram_recv_common_ctl(sk, msg, skb);
425 
426 	if (is_udp4) {
427 		if (inet->cmsg_flags)
428 			ip_cmsg_recv_offset(msg, sk, skb,
429 					    sizeof(struct udphdr), off);
430 	} else {
431 		if (np->rxopt.all)
432 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
433 	}
434 
435 	err = copied;
436 	if (flags & MSG_TRUNC)
437 		err = ulen;
438 
439 	skb_consume_udp(sk, skb, peeking ? -err : err);
440 	return err;
441 
442 csum_copy_err:
443 	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
444 				 udp_skb_destructor)) {
445 		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
446 		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
447 	}
448 	kfree_skb(skb);
449 
450 	/* starting over for a new packet, but check if we need to yield */
451 	cond_resched();
452 	msg->msg_flags &= ~MSG_TRUNC;
453 	goto try_again;
454 }
455 
456 DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
457 void udpv6_encap_enable(void)
458 {
459 	static_branch_inc(&udpv6_encap_needed_key);
460 }
461 EXPORT_SYMBOL(udpv6_encap_enable);
462 
463 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go
464  * through error handlers in encapsulations looking for a match.
465  */
466 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
467 				      struct inet6_skb_parm *opt,
468 				      u8 type, u8 code, int offset, __be32 info)
469 {
470 	int i;
471 
472 	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
473 		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
474 			       u8 type, u8 code, int offset, __be32 info);
475 		const struct ip6_tnl_encap_ops *encap;
476 
477 		encap = rcu_dereference(ip6tun_encaps[i]);
478 		if (!encap)
479 			continue;
480 		handler = encap->err_handler;
481 		if (handler && !handler(skb, opt, type, code, offset, info))
482 			return 0;
483 	}
484 
485 	return -ENOENT;
486 }
487 
488 /* Try to match ICMP errors to UDP tunnels by looking up a socket without
489  * reversing source and destination port: this will match tunnels that force the
490  * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
491  * lwtunnels might actually break this assumption by being configured with
492  * different destination ports on endpoints; in this case we won't be able to
493  * trace ICMP messages back to them.
494  *
495  * If this doesn't match any socket, probe tunnels with arbitrary destination
496  * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
497  * we've sent packets to won't necessarily match the local destination port.
498  *
499  * Then ask the tunnel implementation to match the error against a valid
500  * association.
501  *
502  * Return an error if we can't find a match, the socket if we need further
503  * processing, zero otherwise.
504  */
505 static struct sock *__udp6_lib_err_encap(struct net *net,
506 					 const struct ipv6hdr *hdr, int offset,
507 					 struct udphdr *uh,
508 					 struct udp_table *udptable,
509 					 struct sock *sk,
510 					 struct sk_buff *skb,
511 					 struct inet6_skb_parm *opt,
512 					 u8 type, u8 code, __be32 info)
513 {
514 	int (*lookup)(struct sock *sk, struct sk_buff *skb);
515 	int network_offset, transport_offset;
516 	struct udp_sock *up;
517 
518 	network_offset = skb_network_offset(skb);
519 	transport_offset = skb_transport_offset(skb);
520 
521 	/* Network header needs to point to the outer IPv6 header inside ICMP */
522 	skb_reset_network_header(skb);
523 
524 	/* Transport header needs to point to the UDP header */
525 	skb_set_transport_header(skb, offset);
526 
527 	if (sk) {
528 		up = udp_sk(sk);
529 
530 		lookup = READ_ONCE(up->encap_err_lookup);
531 		if (lookup && lookup(sk, skb))
532 			sk = NULL;
533 
534 		goto out;
535 	}
536 
537 	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
538 			       &hdr->saddr, uh->dest,
539 			       inet6_iif(skb), 0, udptable, skb);
540 	if (sk) {
541 		up = udp_sk(sk);
542 
543 		lookup = READ_ONCE(up->encap_err_lookup);
544 		if (!lookup || lookup(sk, skb))
545 			sk = NULL;
546 	}
547 
548 out:
549 	if (!sk) {
550 		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
551 							offset, info));
552 	}
553 
554 	skb_set_transport_header(skb, transport_offset);
555 	skb_set_network_header(skb, network_offset);
556 
557 	return sk;
558 }
559 
560 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
561 		   u8 type, u8 code, int offset, __be32 info,
562 		   struct udp_table *udptable)
563 {
564 	struct ipv6_pinfo *np;
565 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
566 	const struct in6_addr *saddr = &hdr->saddr;
567 	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
568 	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
569 	bool tunnel = false;
570 	struct sock *sk;
571 	int harderr;
572 	int err;
573 	struct net *net = dev_net(skb->dev);
574 
575 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
576 			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
577 
578 	if (!sk || udp_sk(sk)->encap_type) {
579 		/* No socket for error: try tunnels before discarding */
580 		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
581 			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
582 						  udptable, sk, skb,
583 						  opt, type, code, info);
584 			if (!sk)
585 				return 0;
586 		} else
587 			sk = ERR_PTR(-ENOENT);
588 
589 		if (IS_ERR(sk)) {
590 			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
591 					  ICMP6_MIB_INERRORS);
592 			return PTR_ERR(sk);
593 		}
594 
595 		tunnel = true;
596 	}
597 
598 	harderr = icmpv6_err_convert(type, code, &err);
599 	np = inet6_sk(sk);
600 
601 	if (type == ICMPV6_PKT_TOOBIG) {
602 		if (!ip6_sk_accept_pmtu(sk))
603 			goto out;
604 		ip6_sk_update_pmtu(skb, sk, info);
605 		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
606 			harderr = 1;
607 	}
608 	if (type == NDISC_REDIRECT) {
609 		if (tunnel) {
610 			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
611 				     sk->sk_mark, sk->sk_uid);
612 		} else {
613 			ip6_sk_redirect(skb, sk);
614 		}
615 		goto out;
616 	}
617 
618 	/* Tunnels don't have an application socket: don't pass errors back */
619 	if (tunnel) {
620 		if (udp_sk(sk)->encap_err_rcv)
621 			udp_sk(sk)->encap_err_rcv(sk, skb, offset);
622 		goto out;
623 	}
624 
625 	if (!np->recverr) {
626 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
627 			goto out;
628 	} else {
629 		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
630 	}
631 
632 	sk->sk_err = err;
633 	sk_error_report(sk);
634 out:
635 	return 0;
636 }
637 
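/* Final enqueue step: refresh the RPS/NAPI/CPU hints (rxhash only for
 * connected sockets) and charge the skb to the receive queue; on failure
 * the error is accounted and the skb is freed.
 */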
638 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
639 {
640 	int rc;
641 
642 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
643 		sock_rps_save_rxhash(sk, skb);
644 		sk_mark_napi_id(sk, skb);
645 		sk_incoming_cpu_update(sk);
646 	} else {
647 		sk_mark_napi_id_once(sk, skb);
648 	}
649 
650 	rc = __udp_enqueue_schedule_skb(sk, skb);
651 	if (rc < 0) {
652 		int is_udplite = IS_UDPLITE(sk);
653 		enum skb_drop_reason drop_reason;
654 
655 		/* Note that an ENOMEM error is charged twice */
656 		if (rc == -ENOMEM) {
657 			UDP6_INC_STATS(sock_net(sk),
658 					 UDP_MIB_RCVBUFERRORS, is_udplite);
659 			drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
660 		} else {
661 			UDP6_INC_STATS(sock_net(sk),
662 				       UDP_MIB_MEMERRORS, is_udplite);
663 			drop_reason = SKB_DROP_REASON_PROTO_MEM;
664 		}
665 		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
666 		kfree_skb_reason(skb, drop_reason);
667 		return -1;
668 	}
669 
670 	return 0;
671 }
672 
673 static __inline__ int udpv6_err(struct sk_buff *skb,
674 				struct inet6_skb_parm *opt, u8 type,
675 				u8 code, int offset, __be32 info)
676 {
677 	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
678 }
679 
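/* Receive one skb for @sk: run the XFRM policy check, hand encapsulated
 * packets to the encap handler, apply the UDP-Lite coverage rules, verify
 * the checksum where needed and finally queue the packet.
 */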
680 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
681 {
682 	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
683 	struct udp_sock *up = udp_sk(sk);
684 	int is_udplite = IS_UDPLITE(sk);
685 
686 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
687 		drop_reason = SKB_DROP_REASON_XFRM_POLICY;
688 		goto drop;
689 	}
690 
691 	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
692 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
693 
694 		/*
695 		 * This is an encapsulation socket so pass the skb to
696 		 * the socket's udp_encap_rcv() hook. Otherwise, just
697 		 * fall through and pass this up to the UDP socket.
698 		 * up->encap_rcv() returns the following value:
699 		 * =0 if skb was successfully passed to the encap
700 		 *    handler or was discarded by it.
701 		 * >0 if skb should be passed on to UDP.
702 		 * <0 if skb should be resubmitted as proto -N
703 		 */
704 
705 		/* if we're overly short, let UDP handle it */
706 		encap_rcv = READ_ONCE(up->encap_rcv);
707 		if (encap_rcv) {
708 			int ret;
709 
710 			/* Verify checksum before giving to encap */
711 			if (udp_lib_checksum_complete(skb))
712 				goto csum_error;
713 
714 			ret = encap_rcv(sk, skb);
715 			if (ret <= 0) {
716 				__UDP6_INC_STATS(sock_net(sk),
717 						 UDP_MIB_INDATAGRAMS,
718 						 is_udplite);
719 				return -ret;
720 			}
721 		}
722 
723 		/* FALLTHROUGH -- it's a UDP Packet */
724 	}
725 
726 	/*
727 	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
728 	 */
729 	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
730 
731 		if (up->pcrlen == 0) {          /* full coverage was set  */
732 			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
733 					    UDP_SKB_CB(skb)->cscov, skb->len);
734 			goto drop;
735 		}
736 		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
737 			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
738 					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
739 			goto drop;
740 		}
741 	}
742 
743 	prefetch(&sk->sk_rmem_alloc);
744 	if (rcu_access_pointer(sk->sk_filter) &&
745 	    udp_lib_checksum_complete(skb))
746 		goto csum_error;
747 
748 	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr))) {
749 		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
750 		goto drop;
751 	}
752 
753 	udp_csum_pull_header(skb);
754 
755 	skb_dst_drop(skb);
756 
757 	return __udpv6_queue_rcv_skb(sk, skb);
758 
759 csum_error:
760 	drop_reason = SKB_DROP_REASON_UDP_CSUM;
761 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
762 drop:
763 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
764 	atomic_inc(&sk->sk_drops);
765 	kfree_skb_reason(skb, drop_reason);
766 	return -1;
767 }
768 
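/* As above, but a GSO aggregate the socket did not expect is segmented
 * first; each segment is queued individually, and a positive return value
 * resubmits that segment to the matching protocol handler.
 */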
769 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
770 {
771 	struct sk_buff *next, *segs;
772 	int ret;
773 
774 	if (likely(!udp_unexpected_gso(sk, skb)))
775 		return udpv6_queue_rcv_one_skb(sk, skb);
776 
777 	__skb_push(skb, -skb_mac_offset(skb));
778 	segs = udp_rcv_segment(sk, skb, false);
779 	skb_list_walk_safe(segs, skb, next) {
780 		__skb_pull(skb, skb_transport_offset(skb));
781 
782 		udp_post_segment_fix_csum(skb);
783 		ret = udpv6_queue_rcv_one_skb(sk, skb);
784 		if (ret > 0)
785 			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
786 						 true);
787 	}
788 	return 0;
789 }
790 
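/* Check whether @sk may receive a multicast packet with the given
 * local/remote addresses and ports, including the group membership test
 * via inet6_mc_check().
 */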
791 static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
792 				   __be16 loc_port, const struct in6_addr *loc_addr,
793 				   __be16 rmt_port, const struct in6_addr *rmt_addr,
794 				   int dif, int sdif, unsigned short hnum)
795 {
796 	struct inet_sock *inet = inet_sk(sk);
797 
798 	if (!net_eq(sock_net(sk), net))
799 		return false;
800 
801 	if (udp_sk(sk)->udp_port_hash != hnum ||
802 	    sk->sk_family != PF_INET6 ||
803 	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
804 	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
805 		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
806 	    !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
807 	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
808 		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
809 		return false;
810 	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
811 		return false;
812 	return true;
813 }
814 
815 static void udp6_csum_zero_error(struct sk_buff *skb)
816 {
817 	/* RFC 2460 section 8.1 says that we SHOULD log
818 	 * this error. Well, it is reasonable.
819 	 */
820 	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
821 			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
822 			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
823 }
824 
825 /*
826  * Note: called only from the BH handler context,
827  * so we don't need to lock the hashes.
828  */
829 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
830 		const struct in6_addr *saddr, const struct in6_addr *daddr,
831 		struct udp_table *udptable, int proto)
832 {
833 	struct sock *sk, *first = NULL;
834 	const struct udphdr *uh = udp_hdr(skb);
835 	unsigned short hnum = ntohs(uh->dest);
836 	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
837 	unsigned int offset = offsetof(typeof(*sk), sk_node);
838 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
839 	int dif = inet6_iif(skb);
840 	int sdif = inet6_sdif(skb);
841 	struct hlist_node *node;
842 	struct sk_buff *nskb;
843 
844 	if (use_hash2) {
845 		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
846 			    udptable->mask;
847 		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
848 start_lookup:
849 		hslot = &udptable->hash2[hash2];
850 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
851 	}
852 
853 	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
854 		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
855 					    uh->source, saddr, dif, sdif,
856 					    hnum))
857 			continue;
858 		/* If the checksum is zero and zero-checksum reception
859 		 * (no_check6_rx) is not enabled on the socket, skip it.
860 		 */
861 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
862 			continue;
863 		if (!first) {
864 			first = sk;
865 			continue;
866 		}
867 		nskb = skb_clone(skb, GFP_ATOMIC);
868 		if (unlikely(!nskb)) {
869 			atomic_inc(&sk->sk_drops);
870 			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
871 					 IS_UDPLITE(sk));
872 			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
873 					 IS_UDPLITE(sk));
874 			continue;
875 		}
876 
877 		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
878 			consume_skb(nskb);
879 	}
880 
881 	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
882 	if (use_hash2 && hash2 != hash2_any) {
883 		hash2 = hash2_any;
884 		goto start_lookup;
885 	}
886 
887 	if (first) {
888 		if (udpv6_queue_rcv_skb(first, skb) > 0)
889 			consume_skb(skb);
890 	} else {
891 		kfree_skb(skb);
892 		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
893 				 proto == IPPROTO_UDPLITE);
894 	}
895 	return 0;
896 }
897 
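/* Cache the input route on the socket for early demux, together with the
 * cookie needed to revalidate it later.
 */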
898 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
899 {
900 	if (udp_sk_rx_dst_set(sk, dst)) {
901 		const struct rt6_info *rt = (const struct rt6_info *)dst;
902 
903 		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
904 	}
905 }
906 
907 /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
908  * return code conversion for ip layer consumption
909  */
910 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
911 				struct udphdr *uh)
912 {
913 	int ret;
914 
915 	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
916 		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
917 
918 	ret = udpv6_queue_rcv_skb(sk, skb);
919 
920 	/* a return value > 0 means to resubmit the input */
921 	if (ret > 0)
922 		return ret;
923 	return 0;
924 }
925 
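/* Main receive entry point shared by UDP and UDP-Lite: validate the UDP
 * length and checksum, then deliver via the early-demuxed socket if one is
 * attached, the multicast path, or a unicast lookup; unmatched packets
 * draw an ICMPv6 port unreachable.
 */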
926 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
927 		   int proto)
928 {
929 	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
930 	const struct in6_addr *saddr, *daddr;
931 	struct net *net = dev_net(skb->dev);
932 	struct udphdr *uh;
933 	struct sock *sk;
934 	bool refcounted;
935 	u32 ulen = 0;
936 
937 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
938 		goto discard;
939 
940 	saddr = &ipv6_hdr(skb)->saddr;
941 	daddr = &ipv6_hdr(skb)->daddr;
942 	uh = udp_hdr(skb);
943 
944 	ulen = ntohs(uh->len);
945 	if (ulen > skb->len)
946 		goto short_packet;
947 
948 	if (proto == IPPROTO_UDP) {
949 		/* UDP validates ulen. */
950 
951 		/* Check for jumbo payload */
952 		if (ulen == 0)
953 			ulen = skb->len;
954 
955 		if (ulen < sizeof(*uh))
956 			goto short_packet;
957 
958 		if (ulen < skb->len) {
959 			if (pskb_trim_rcsum(skb, ulen))
960 				goto short_packet;
961 			saddr = &ipv6_hdr(skb)->saddr;
962 			daddr = &ipv6_hdr(skb)->daddr;
963 			uh = udp_hdr(skb);
964 		}
965 	}
966 
967 	if (udp6_csum_init(skb, uh, proto))
968 		goto csum_error;
969 
970 	/* Check if the socket is already available, e.g. due to early demux */
971 	sk = skb_steal_sock(skb, &refcounted);
972 	if (sk) {
973 		struct dst_entry *dst = skb_dst(skb);
974 		int ret;
975 
976 		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
977 			udp6_sk_rx_dst_set(sk, dst);
978 
979 		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
980 			if (refcounted)
981 				sock_put(sk);
982 			goto report_csum_error;
983 		}
984 
985 		ret = udp6_unicast_rcv_skb(sk, skb, uh);
986 		if (refcounted)
987 			sock_put(sk);
988 		return ret;
989 	}
990 
991 	/*
992 	 *	Multicast receive code
993 	 */
994 	if (ipv6_addr_is_multicast(daddr))
995 		return __udp6_lib_mcast_deliver(net, skb,
996 				saddr, daddr, udptable, proto);
997 
998 	/* Unicast */
999 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1000 	if (sk) {
1001 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
1002 			goto report_csum_error;
1003 		return udp6_unicast_rcv_skb(sk, skb, uh);
1004 	}
1005 
1006 	reason = SKB_DROP_REASON_NO_SOCKET;
1007 
1008 	if (!uh->check)
1009 		goto report_csum_error;
1010 
1011 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1012 		goto discard;
1013 
1014 	if (udp_lib_checksum_complete(skb))
1015 		goto csum_error;
1016 
1017 	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1018 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1019 
1020 	kfree_skb_reason(skb, reason);
1021 	return 0;
1022 
1023 short_packet:
1024 	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1025 		reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1026 	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1027 			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
1028 			    saddr, ntohs(uh->source),
1029 			    ulen, skb->len,
1030 			    daddr, ntohs(uh->dest));
1031 	goto discard;
1032 
1033 report_csum_error:
1034 	udp6_csum_zero_error(skb);
1035 csum_error:
1036 	if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1037 		reason = SKB_DROP_REASON_UDP_CSUM;
1038 	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1039 discard:
1040 	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1041 	kfree_skb_reason(skb, reason);
1042 	return 0;
1043 }
1044 
1045 
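/* Early-demux lookup: only an exact, established 4-tuple match on the
 * first socket in the hash2 chain is accepted, keeping the fast path cheap.
 */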
1046 static struct sock *__udp6_lib_demux_lookup(struct net *net,
1047 			__be16 loc_port, const struct in6_addr *loc_addr,
1048 			__be16 rmt_port, const struct in6_addr *rmt_addr,
1049 			int dif, int sdif)
1050 {
1051 	unsigned short hnum = ntohs(loc_port);
1052 	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1053 	unsigned int slot2 = hash2 & udp_table.mask;
1054 	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
1055 	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
1056 	struct sock *sk;
1057 
1058 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1059 		if (sk->sk_state == TCP_ESTABLISHED &&
1060 		    inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
1061 			return sk;
1062 		/* Only check first socket in chain */
1063 		break;
1064 	}
1065 	return NULL;
1066 }
1067 
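/* Attach a connected socket (and, when still valid, its cached dst) to the
 * skb before the routing decision, so the later socket lookup can be
 * skipped. Only PACKET_HOST packets are considered.
 */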
1068 void udp_v6_early_demux(struct sk_buff *skb)
1069 {
1070 	struct net *net = dev_net(skb->dev);
1071 	const struct udphdr *uh;
1072 	struct sock *sk;
1073 	struct dst_entry *dst;
1074 	int dif = skb->dev->ifindex;
1075 	int sdif = inet6_sdif(skb);
1076 
1077 	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1078 	    sizeof(struct udphdr)))
1079 		return;
1080 
1081 	uh = udp_hdr(skb);
1082 
1083 	if (skb->pkt_type == PACKET_HOST)
1084 		sk = __udp6_lib_demux_lookup(net, uh->dest,
1085 					     &ipv6_hdr(skb)->daddr,
1086 					     uh->source, &ipv6_hdr(skb)->saddr,
1087 					     dif, sdif);
1088 	else
1089 		return;
1090 
1091 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
1092 		return;
1093 
1094 	skb->sk = sk;
1095 	skb->destructor = sock_efree;
1096 	dst = rcu_dereference(sk->sk_rx_dst);
1097 
1098 	if (dst)
1099 		dst = dst_check(dst, sk->sk_rx_dst_cookie);
1100 	if (dst) {
1101 		/* Set noref for now.
1102 		 * Any place that wants to hold the dst has to call
1103 		 * dst_hold_safe().
1104 		 */
1105 		skb_dst_set_noref(skb, dst);
1106 	}
1107 }
1108 
1109 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1110 {
1111 	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
1112 }
1113 
1114 /*
1115  * Throw away all pending data and cancel the corking. Socket is locked.
1116  */
1117 static void udp_v6_flush_pending_frames(struct sock *sk)
1118 {
1119 	struct udp_sock *up = udp_sk(sk);
1120 
1121 	if (up->pending == AF_INET)
1122 		udp_flush_pending_frames(sk);
1123 	else if (up->pending) {
1124 		up->len = 0;
1125 		up->pending = 0;
1126 		ip6_flush_pending_frames(sk);
1127 	}
1128 }
1129 
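/* connect() preamble: validate the address length for the given family and
 * give a cgroup BPF program a chance to act on the destination before the
 * connect proper.
 */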
1130 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1131 			     int addr_len)
1132 {
1133 	if (addr_len < offsetofend(struct sockaddr, sa_family))
1134 		return -EINVAL;
1135 	/* The following checks are replicated from __ip6_datagram_connect()
1136 	 * and intended to prevent BPF program called below from accessing
1137 	 * bytes that are out of the bound specified by user in addr_len.
1138 	 */
1139 	if (uaddr->sa_family == AF_INET) {
1140 		if (ipv6_only_sock(sk))
1141 			return -EAFNOSUPPORT;
1142 		return udp_pre_connect(sk, uaddr, addr_len);
1143 	}
1144 
1145 	if (addr_len < SIN6_LEN_RFC2133)
1146 		return -EINVAL;
1147 
1148 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
1149 }
1150 
1151 /**
1152  *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1153  *	@sk:	socket we are sending on
1154  *	@skb:	sk_buff containing the filled-in UDP header
1155  *		(checksum field must be zeroed out)
1156  *	@saddr: source address
1157  *	@daddr: destination address
1158  *	@len:	length of packet
1159  */
1160 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1161 				 const struct in6_addr *saddr,
1162 				 const struct in6_addr *daddr, int len)
1163 {
1164 	unsigned int offset;
1165 	struct udphdr *uh = udp_hdr(skb);
1166 	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1167 	__wsum csum = 0;
1168 
1169 	if (!frags) {
1170 		/* Only one fragment on the socket.  */
1171 		skb->csum_start = skb_transport_header(skb) - skb->head;
1172 		skb->csum_offset = offsetof(struct udphdr, check);
1173 		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1174 	} else {
1175 		/*
1176 		 * HW checksum won't work as there are two or more
1177 		 * fragments on the socket, so the csums of all sk_buffs
1178 		 * must be combined in software
1179 		 */
1180 		offset = skb_transport_offset(skb);
1181 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1182 		csum = skb->csum;
1183 
1184 		skb->ip_summed = CHECKSUM_NONE;
1185 
1186 		do {
1187 			csum = csum_add(csum, frags->csum);
1188 		} while ((frags = frags->next));
1189 
1190 		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1191 					    csum);
1192 		if (uh->check == 0)
1193 			uh->check = CSUM_MANGLED_0;
1194 	}
1195 }
1196 
1197 /*
1198  *	Sending
1199  */
1200 
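/* Fill in the UDP header of a prepared skb, resolve checksumming
 * (UDP-Lite, zero-checksum, hardware or software) and hand the skb to
 * ip6_send_skb(). When cork->gso_size is set the packet goes out as UDP
 * GSO, which requires the skb to be set up for checksum offload
 * (CHECKSUM_PARTIAL) and rejects UDP-Lite, zero-checksum sockets and xfrm.
 * A minimal userspace sketch of how gso_size gets set (illustrative only,
 * error handling omitted):
 *
 *	int gso = 1200;		// payload bytes per segment
 *	setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso, sizeof(gso));
 *	send(fd, buf, 12000, 0);	// transmitted as 10 segments
 */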
1201 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1202 			   struct inet_cork *cork)
1203 {
1204 	struct sock *sk = skb->sk;
1205 	struct udphdr *uh;
1206 	int err = 0;
1207 	int is_udplite = IS_UDPLITE(sk);
1208 	__wsum csum = 0;
1209 	int offset = skb_transport_offset(skb);
1210 	int len = skb->len - offset;
1211 	int datalen = len - sizeof(*uh);
1212 
1213 	/*
1214 	 * Create a UDP header
1215 	 */
1216 	uh = udp_hdr(skb);
1217 	uh->source = fl6->fl6_sport;
1218 	uh->dest = fl6->fl6_dport;
1219 	uh->len = htons(len);
1220 	uh->check = 0;
1221 
1222 	if (cork->gso_size) {
1223 		const int hlen = skb_network_header_len(skb) +
1224 				 sizeof(struct udphdr);
1225 
1226 		if (hlen + cork->gso_size > cork->fragsize) {
1227 			kfree_skb(skb);
1228 			return -EINVAL;
1229 		}
1230 		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1231 			kfree_skb(skb);
1232 			return -EINVAL;
1233 		}
1234 		if (udp_sk(sk)->no_check6_tx) {
1235 			kfree_skb(skb);
1236 			return -EINVAL;
1237 		}
1238 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1239 		    dst_xfrm(skb_dst(skb))) {
1240 			kfree_skb(skb);
1241 			return -EIO;
1242 		}
1243 
1244 		if (datalen > cork->gso_size) {
1245 			skb_shinfo(skb)->gso_size = cork->gso_size;
1246 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1247 			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1248 								 cork->gso_size);
1249 		}
1250 		goto csum_partial;
1251 	}
1252 
1253 	if (is_udplite)
1254 		csum = udplite_csum(skb);
1255 	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
1256 		skb->ip_summed = CHECKSUM_NONE;
1257 		goto send;
1258 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1259 csum_partial:
1260 		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1261 		goto send;
1262 	} else
1263 		csum = udp_csum(skb);
1264 
1265 	/* add protocol-dependent pseudo-header */
1266 	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1267 				    len, fl6->flowi6_proto, csum);
1268 	if (uh->check == 0)
1269 		uh->check = CSUM_MANGLED_0;
1270 
1271 send:
1272 	err = ip6_send_skb(skb);
1273 	if (err) {
1274 		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1275 			UDP6_INC_STATS(sock_net(sk),
1276 				       UDP_MIB_SNDBUFERRORS, is_udplite);
1277 			err = 0;
1278 		}
1279 	} else {
1280 		UDP6_INC_STATS(sock_net(sk),
1281 			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1282 	}
1283 	return err;
1284 }
1285 
1286 static int udp_v6_push_pending_frames(struct sock *sk)
1287 {
1288 	struct sk_buff *skb;
1289 	struct udp_sock  *up = udp_sk(sk);
1290 	int err = 0;
1291 
1292 	if (up->pending == AF_INET)
1293 		return udp_push_pending_frames(sk);
1294 
1295 	skb = ip6_finish_skb(sk);
1296 	if (!skb)
1297 		goto out;
1298 
1299 	err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
1300 			      &inet_sk(sk)->cork.base);
1301 out:
1302 	up->len = 0;
1303 	up->pending = 0;
1304 	return err;
1305 }
1306 
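/* sendmsg() for UDPv6. Handles the AF_INET and v4-mapped fallbacks to
 * udp_sendmsg(), cork continuation, control messages, flow label and
 * route resolution; the common uncorked case builds the skb locklessly
 * via ip6_make_skb() and sends it directly.
 */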
1307 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1308 {
1309 	struct ipv6_txoptions opt_space;
1310 	struct udp_sock *up = udp_sk(sk);
1311 	struct inet_sock *inet = inet_sk(sk);
1312 	struct ipv6_pinfo *np = inet6_sk(sk);
1313 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1314 	struct in6_addr *daddr, *final_p, final;
1315 	struct ipv6_txoptions *opt = NULL;
1316 	struct ipv6_txoptions *opt_to_free = NULL;
1317 	struct ip6_flowlabel *flowlabel = NULL;
1318 	struct inet_cork_full cork;
1319 	struct flowi6 *fl6 = &cork.fl.u.ip6;
1320 	struct dst_entry *dst;
1321 	struct ipcm6_cookie ipc6;
1322 	int addr_len = msg->msg_namelen;
1323 	bool connected = false;
1324 	int ulen = len;
1325 	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
1326 	int err;
1327 	int is_udplite = IS_UDPLITE(sk);
1328 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1329 
1330 	ipcm6_init(&ipc6);
1331 	ipc6.gso_size = READ_ONCE(up->gso_size);
1332 	ipc6.sockc.tsflags = sk->sk_tsflags;
1333 	ipc6.sockc.mark = sk->sk_mark;
1334 
1335 	/* destination address check */
1336 	if (sin6) {
1337 		if (addr_len < offsetof(struct sockaddr, sa_data))
1338 			return -EINVAL;
1339 
1340 		switch (sin6->sin6_family) {
1341 		case AF_INET6:
1342 			if (addr_len < SIN6_LEN_RFC2133)
1343 				return -EINVAL;
1344 			daddr = &sin6->sin6_addr;
1345 			if (ipv6_addr_any(daddr) &&
1346 			    ipv6_addr_v4mapped(&np->saddr))
1347 				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1348 						       daddr);
1349 			break;
1350 		case AF_INET:
1351 			goto do_udp_sendmsg;
1352 		case AF_UNSPEC:
1353 			msg->msg_name = sin6 = NULL;
1354 			msg->msg_namelen = addr_len = 0;
1355 			daddr = NULL;
1356 			break;
1357 		default:
1358 			return -EINVAL;
1359 		}
1360 	} else if (!up->pending) {
1361 		if (sk->sk_state != TCP_ESTABLISHED)
1362 			return -EDESTADDRREQ;
1363 		daddr = &sk->sk_v6_daddr;
1364 	} else
1365 		daddr = NULL;
1366 
1367 	if (daddr) {
1368 		if (ipv6_addr_v4mapped(daddr)) {
1369 			struct sockaddr_in sin;
1370 			sin.sin_family = AF_INET;
1371 			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1372 			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1373 			msg->msg_name = &sin;
1374 			msg->msg_namelen = sizeof(sin);
1375 do_udp_sendmsg:
1376 			if (ipv6_only_sock(sk))
1377 				return -ENETUNREACH;
1378 			return udp_sendmsg(sk, msg, len);
1379 		}
1380 	}
1381 
1382 	/* Rough check on arithmetic overflow;
1383 	   a better check is made in ip6_append_data().
1384 	   */
1385 	if (len > INT_MAX - sizeof(struct udphdr))
1386 		return -EMSGSIZE;
1387 
1388 	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1389 	if (up->pending) {
1390 		if (up->pending == AF_INET)
1391 			return udp_sendmsg(sk, msg, len);
1392 		/*
1393 		 * There are pending frames.
1394 		 * The socket lock must be held while it's corked.
1395 		 */
1396 		lock_sock(sk);
1397 		if (likely(up->pending)) {
1398 			if (unlikely(up->pending != AF_INET6)) {
1399 				release_sock(sk);
1400 				return -EAFNOSUPPORT;
1401 			}
1402 			dst = NULL;
1403 			goto do_append_data;
1404 		}
1405 		release_sock(sk);
1406 	}
1407 	ulen += sizeof(struct udphdr);
1408 
1409 	memset(fl6, 0, sizeof(*fl6));
1410 
1411 	if (sin6) {
1412 		if (sin6->sin6_port == 0)
1413 			return -EINVAL;
1414 
1415 		fl6->fl6_dport = sin6->sin6_port;
1416 		daddr = &sin6->sin6_addr;
1417 
1418 		if (np->sndflow) {
1419 			fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1420 			if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
1421 				flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1422 				if (IS_ERR(flowlabel))
1423 					return -EINVAL;
1424 			}
1425 		}
1426 
1427 		/*
1428 		 * Otherwise it will be difficult to maintain
1429 		 * sk->sk_dst_cache.
1430 		 */
1431 		if (sk->sk_state == TCP_ESTABLISHED &&
1432 		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1433 			daddr = &sk->sk_v6_daddr;
1434 
1435 		if (addr_len >= sizeof(struct sockaddr_in6) &&
1436 		    sin6->sin6_scope_id &&
1437 		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1438 			fl6->flowi6_oif = sin6->sin6_scope_id;
1439 	} else {
1440 		if (sk->sk_state != TCP_ESTABLISHED)
1441 			return -EDESTADDRREQ;
1442 
1443 		fl6->fl6_dport = inet->inet_dport;
1444 		daddr = &sk->sk_v6_daddr;
1445 		fl6->flowlabel = np->flow_label;
1446 		connected = true;
1447 	}
1448 
1449 	if (!fl6->flowi6_oif)
1450 		fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
1451 
1452 	if (!fl6->flowi6_oif)
1453 		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1454 
1455 	fl6->flowi6_uid = sk->sk_uid;
1456 
1457 	if (msg->msg_controllen) {
1458 		opt = &opt_space;
1459 		memset(opt, 0, sizeof(struct ipv6_txoptions));
1460 		opt->tot_len = sizeof(*opt);
1461 		ipc6.opt = opt;
1462 
1463 		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1464 		if (err > 0)
1465 			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
1466 						    &ipc6);
1467 		if (err < 0) {
1468 			fl6_sock_release(flowlabel);
1469 			return err;
1470 		}
1471 		if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1472 			flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1473 			if (IS_ERR(flowlabel))
1474 				return -EINVAL;
1475 		}
1476 		if (!(opt->opt_nflen|opt->opt_flen))
1477 			opt = NULL;
1478 		connected = false;
1479 	}
1480 	if (!opt) {
1481 		opt = txopt_get(np);
1482 		opt_to_free = opt;
1483 	}
1484 	if (flowlabel)
1485 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1486 	opt = ipv6_fixup_options(&opt_space, opt);
1487 	ipc6.opt = opt;
1488 
1489 	fl6->flowi6_proto = sk->sk_protocol;
1490 	fl6->flowi6_mark = ipc6.sockc.mark;
1491 	fl6->daddr = *daddr;
1492 	if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
1493 		fl6->saddr = np->saddr;
1494 	fl6->fl6_sport = inet->inet_sport;
1495 
1496 	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1497 		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1498 					   (struct sockaddr *)sin6,
1499 					   &fl6->saddr);
1500 		if (err)
1501 			goto out_no_dst;
1502 		if (sin6) {
1503 			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1504 				/* BPF program rewrote the IPv6 address to an
1505 				 * IPv4-mapped one, which is currently unsupported.
1506 				 */
1507 				err = -ENOTSUPP;
1508 				goto out_no_dst;
1509 			}
1510 			if (sin6->sin6_port == 0) {
1511 				/* BPF program set invalid port. Reject it. */
1512 				err = -EINVAL;
1513 				goto out_no_dst;
1514 			}
1515 			fl6->fl6_dport = sin6->sin6_port;
1516 			fl6->daddr = sin6->sin6_addr;
1517 		}
1518 	}
1519 
1520 	if (ipv6_addr_any(&fl6->daddr))
1521 		fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1522 
1523 	final_p = fl6_update_dst(fl6, opt, &final);
1524 	if (final_p)
1525 		connected = false;
1526 
1527 	if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
1528 		fl6->flowi6_oif = np->mcast_oif;
1529 		connected = false;
1530 	} else if (!fl6->flowi6_oif)
1531 		fl6->flowi6_oif = np->ucast_oif;
1532 
1533 	security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
1534 
1535 	if (ipc6.tclass < 0)
1536 		ipc6.tclass = np->tclass;
1537 
1538 	fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
1539 
1540 	dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
1541 	if (IS_ERR(dst)) {
1542 		err = PTR_ERR(dst);
1543 		dst = NULL;
1544 		goto out;
1545 	}
1546 
1547 	if (ipc6.hlimit < 0)
1548 		ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);
1549 
1550 	if (msg->msg_flags&MSG_CONFIRM)
1551 		goto do_confirm;
1552 back_from_confirm:
1553 
1554 	/* Lockless fast path for the non-corking case */
1555 	if (!corkreq) {
1556 		struct sk_buff *skb;
1557 
1558 		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1559 				   sizeof(struct udphdr), &ipc6,
1560 				   (struct rt6_info *)dst,
1561 				   msg->msg_flags, &cork);
1562 		err = PTR_ERR(skb);
1563 		if (!IS_ERR_OR_NULL(skb))
1564 			err = udp_v6_send_skb(skb, fl6, &cork.base);
1565 		/* ip6_make_skb steals dst reference */
1566 		goto out_no_dst;
1567 	}
1568 
1569 	lock_sock(sk);
1570 	if (unlikely(up->pending)) {
1571 		/* The socket is already corked while preparing it. */
1572 		/* ... which is an evident application bug. --ANK */
1573 		release_sock(sk);
1574 
1575 		net_dbg_ratelimited("udp cork app bug 2\n");
1576 		err = -EINVAL;
1577 		goto out;
1578 	}
1579 
1580 	up->pending = AF_INET6;
1581 
1582 do_append_data:
1583 	if (ipc6.dontfrag < 0)
1584 		ipc6.dontfrag = np->dontfrag;
1585 	up->len += ulen;
1586 	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1587 			      &ipc6, fl6, (struct rt6_info *)dst,
1588 			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1589 	if (err)
1590 		udp_v6_flush_pending_frames(sk);
1591 	else if (!corkreq)
1592 		err = udp_v6_push_pending_frames(sk);
1593 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1594 		up->pending = 0;
1595 
1596 	if (err > 0)
1597 		err = np->recverr ? net_xmit_errno(err) : 0;
1598 	release_sock(sk);
1599 
1600 out:
1601 	dst_release(dst);
1602 out_no_dst:
1603 	fl6_sock_release(flowlabel);
1604 	txopt_put(opt_to_free);
1605 	if (!err)
1606 		return len;
1607 	/*
1608 	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1609 	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1610 	 * we don't have a good statistic (IpOutDiscards but it can be too many
1611 	 * things).  We could add another new stat but at least for now that
1612 	 * seems like overkill.
1613 	 */
1614 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1615 		UDP6_INC_STATS(sock_net(sk),
1616 			       UDP_MIB_SNDBUFERRORS, is_udplite);
1617 	}
1618 	return err;
1619 
1620 do_confirm:
1621 	if (msg->msg_flags & MSG_PROBE)
1622 		dst_confirm_neigh(dst, &fl6->daddr);
1623 	if (!(msg->msg_flags&MSG_PROBE) || len)
1624 		goto back_from_confirm;
1625 	err = 0;
1626 	goto out;
1627 }
1628 
1629 void udpv6_destroy_sock(struct sock *sk)
1630 {
1631 	struct udp_sock *up = udp_sk(sk);
1632 	lock_sock(sk);
1633 
1634 	/* protects from races with udp_abort() */
1635 	sock_set_flag(sk, SOCK_DEAD);
1636 	udp_v6_flush_pending_frames(sk);
1637 	release_sock(sk);
1638 
1639 	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1640 		if (up->encap_type) {
1641 			void (*encap_destroy)(struct sock *sk);
1642 			encap_destroy = READ_ONCE(up->encap_destroy);
1643 			if (encap_destroy)
1644 				encap_destroy(sk);
1645 		}
1646 		if (up->encap_enabled) {
1647 			static_branch_dec(&udpv6_encap_needed_key);
1648 			udp_encap_disable();
1649 		}
1650 	}
1651 
1652 	inet6_destroy_sock(sk);
1653 }
1654 
1655 /*
1656  *	Socket option code for UDP
1657  */
1658 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1659 		     unsigned int optlen)
1660 {
1661 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1662 		return udp_lib_setsockopt(sk, level, optname,
1663 					  optval, optlen,
1664 					  udp_v6_push_pending_frames);
1665 	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1666 }
1667 
1668 int udpv6_getsockopt(struct sock *sk, int level, int optname,
1669 		     char __user *optval, int __user *optlen)
1670 {
1671 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1672 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1673 	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1674 }
1675 
1676 static const struct inet6_protocol udpv6_protocol = {
1677 	.handler	=	udpv6_rcv,
1678 	.err_handler	=	udpv6_err,
1679 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1680 };
1681 
1682 /* ------------------------------------------------------------------------ */
1683 #ifdef CONFIG_PROC_FS
1684 int udp6_seq_show(struct seq_file *seq, void *v)
1685 {
1686 	if (v == SEQ_START_TOKEN) {
1687 		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1688 	} else {
1689 		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1690 		struct inet_sock *inet = inet_sk(v);
1691 		__u16 srcp = ntohs(inet->inet_sport);
1692 		__u16 destp = ntohs(inet->inet_dport);
1693 		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1694 					  udp_rqueue_get(v), bucket);
1695 	}
1696 	return 0;
1697 }
1698 
1699 const struct seq_operations udp6_seq_ops = {
1700 	.start		= udp_seq_start,
1701 	.next		= udp_seq_next,
1702 	.stop		= udp_seq_stop,
1703 	.show		= udp6_seq_show,
1704 };
1705 EXPORT_SYMBOL(udp6_seq_ops);
1706 
1707 static struct udp_seq_afinfo udp6_seq_afinfo = {
1708 	.family		= AF_INET6,
1709 	.udp_table	= &udp_table,
1710 };
1711 
1712 int __net_init udp6_proc_init(struct net *net)
1713 {
1714 	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1715 			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1716 		return -ENOMEM;
1717 	return 0;
1718 }
1719 
1720 void udp6_proc_exit(struct net *net)
1721 {
1722 	remove_proc_entry("udp6", net->proc_net);
1723 }
1724 #endif /* CONFIG_PROC_FS */
1725 
1726 /* ------------------------------------------------------------------------ */
1727 
1728 struct proto udpv6_prot = {
1729 	.name			= "UDPv6",
1730 	.owner			= THIS_MODULE,
1731 	.close			= udp_lib_close,
1732 	.pre_connect		= udpv6_pre_connect,
1733 	.connect		= ip6_datagram_connect,
1734 	.disconnect		= udp_disconnect,
1735 	.ioctl			= udp_ioctl,
1736 	.init			= udp_init_sock,
1737 	.destroy		= udpv6_destroy_sock,
1738 	.setsockopt		= udpv6_setsockopt,
1739 	.getsockopt		= udpv6_getsockopt,
1740 	.sendmsg		= udpv6_sendmsg,
1741 	.recvmsg		= udpv6_recvmsg,
1742 	.release_cb		= ip6_datagram_release_cb,
1743 	.hash			= udp_lib_hash,
1744 	.unhash			= udp_lib_unhash,
1745 	.rehash			= udp_v6_rehash,
1746 	.get_port		= udp_v6_get_port,
1747 	.put_port		= udp_lib_unhash,
1748 #ifdef CONFIG_BPF_SYSCALL
1749 	.psock_update_sk_prot	= udp_bpf_update_proto,
1750 #endif
1751 
1752 	.memory_allocated	= &udp_memory_allocated,
1753 	.per_cpu_fw_alloc	= &udp_memory_per_cpu_fw_alloc,
1754 
1755 	.sysctl_mem		= sysctl_udp_mem,
1756 	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1757 	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1758 	.obj_size		= sizeof(struct udp6_sock),
1759 	.h.udp_table		= &udp_table,
1760 	.diag_destroy		= udp_abort,
1761 };
1762 
1763 static struct inet_protosw udpv6_protosw = {
1764 	.type =      SOCK_DGRAM,
1765 	.protocol =  IPPROTO_UDP,
1766 	.prot =      &udpv6_prot,
1767 	.ops =       &inet6_dgram_ops,
1768 	.flags =     INET_PROTOSW_PERMANENT,
1769 };
1770 
1771 int __init udpv6_init(void)
1772 {
1773 	int ret;
1774 
1775 	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1776 	if (ret)
1777 		goto out;
1778 
1779 	ret = inet6_register_protosw(&udpv6_protosw);
1780 	if (ret)
1781 		goto out_udpv6_protocol;
1782 out:
1783 	return ret;
1784 
1785 out_udpv6_protocol:
1786 	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1787 	goto out;
1788 }
1789 
1790 void udpv6_exit(void)
1791 {
1792 	inet6_unregister_protosw(&udpv6_protosw);
1793 	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1794 }
1795