xref: /linux/net/ipv6/udp.c (revision eb057b44dbe35ae14527830236a92f51de8f9184)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	UDP over IPv6
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  *
9  *	Based on linux/ipv4/udp.c
10  *
11  *	Fixes:
12  *	Hideaki YOSHIFUJI	:	sin6_scope_id support
13  *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
14  *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
15  *					a single port at the same time.
16  *      Kazunori MIYAZAWA @USAGI:       change process style to use ip6_append_data
17  *      YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
18  */
19 
20 #include <linux/bpf-cgroup.h>
21 #include <linux/errno.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/in6.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_arp.h>
29 #include <linux/ipv6.h>
30 #include <linux/icmpv6.h>
31 #include <linux/init.h>
32 #include <linux/module.h>
33 #include <linux/skbuff.h>
34 #include <linux/slab.h>
35 #include <linux/uaccess.h>
36 #include <linux/indirect_call_wrapper.h>
37 
38 #include <net/addrconf.h>
39 #include <net/ndisc.h>
40 #include <net/protocol.h>
41 #include <net/transp_v6.h>
42 #include <net/ip6_route.h>
43 #include <net/raw.h>
44 #include <net/seg6.h>
45 #include <net/tcp_states.h>
46 #include <net/ip6_checksum.h>
47 #include <net/ip6_tunnel.h>
48 #include <net/xfrm.h>
49 #include <net/inet_hashtables.h>
50 #include <net/inet6_hashtables.h>
51 #include <net/busy_poll.h>
52 #include <net/sock_reuseport.h>
53 
54 #include <linux/proc_fs.h>
55 #include <linux/seq_file.h>
56 #include <trace/events/skb.h>
57 #include "udp_impl.h"
58 
59 static u32 udp6_ehashfn(const struct net *net,
60 			const struct in6_addr *laddr,
61 			const u16 lport,
62 			const struct in6_addr *faddr,
63 			const __be16 fport)
64 {
65 	static u32 udp6_ehash_secret __read_mostly;
66 	static u32 udp_ipv6_hash_secret __read_mostly;
67 
68 	u32 lhash, fhash;
69 
70 	net_get_random_once(&udp6_ehash_secret,
71 			    sizeof(udp6_ehash_secret));
72 	net_get_random_once(&udp_ipv6_hash_secret,
73 			    sizeof(udp_ipv6_hash_secret));
74 
75 	lhash = (__force u32)laddr->s6_addr32[3];
76 	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
77 
78 	return __inet6_ehashfn(lhash, lport, fhash, fport,
79 			       udp_ipv6_hash_secret + net_hash_mix(net));
80 }
81 
82 int udp_v6_get_port(struct sock *sk, unsigned short snum)
83 {
84 	unsigned int hash2_nulladdr =
85 		ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
86 	unsigned int hash2_partial =
87 		ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
88 
89 	/* precompute partial secondary hash */
90 	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
91 	return udp_lib_get_port(sk, snum, hash2_nulladdr);
92 }
93 
94 void udp_v6_rehash(struct sock *sk)
95 {
96 	u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
97 					  &sk->sk_v6_rcv_saddr,
98 					  inet_sk(sk)->inet_num);
99 
100 	udp_lib_rehash(sk, new_hash);
101 }
102 
103 static int compute_score(struct sock *sk, struct net *net,
104 			 const struct in6_addr *saddr, __be16 sport,
105 			 const struct in6_addr *daddr, unsigned short hnum,
106 			 int dif, int sdif)
107 {
108 	int score;
109 	struct inet_sock *inet;
110 	bool dev_match;
111 
112 	if (!net_eq(sock_net(sk), net) ||
113 	    udp_sk(sk)->udp_port_hash != hnum ||
114 	    sk->sk_family != PF_INET6)
115 		return -1;
116 
117 	if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
118 		return -1;
119 
120 	score = 0;
121 	inet = inet_sk(sk);
122 
123 	if (inet->inet_dport) {
124 		if (inet->inet_dport != sport)
125 			return -1;
126 		score++;
127 	}
128 
129 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
130 		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
131 			return -1;
132 		score++;
133 	}
134 
135 	dev_match = udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif);
136 	if (!dev_match)
137 		return -1;
138 	if (sk->sk_bound_dev_if)
139 		score++;
140 
141 	if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
142 		score++;
143 
144 	return score;
145 }
146 
147 static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
148 				     struct sk_buff *skb,
149 				     const struct in6_addr *saddr,
150 				     __be16 sport,
151 				     const struct in6_addr *daddr,
152 				     unsigned int hnum)
153 {
154 	struct sock *reuse_sk = NULL;
155 	u32 hash;
156 
157 	if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
158 		hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
159 		reuse_sk = reuseport_select_sock(sk, hash, skb,
160 						 sizeof(struct udphdr));
161 	}
162 	return reuse_sk;
163 }
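/*
 * Editorial aside (illustrative userspace sketch, not part of this file):
 * lookup_reuseport() above only applies to sockets that enabled
 * SO_REUSEPORT and are not connected (sk_state != TCP_ESTABLISHED).  A
 * minimal way to create one member of such a group could look like the
 * following; the helper name is made up for illustration.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Open one member of a SO_REUSEPORT group bound to [::]:port.  Every
 * member must enable the option before bind(); incoming datagrams are
 * then spread across the group by the hash selected in lookup_reuseport().
 */
static int reuseport_member(uint16_t port)
{
	struct sockaddr_in6 addr;
	int one = 1;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0)
		goto err;
	memset(&addr, 0, sizeof(addr));
	addr.sin6_family = AF_INET6;
	addr.sin6_addr = in6addr_any;
	addr.sin6_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}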
164 
165 /* called with rcu_read_lock() */
166 static struct sock *udp6_lib_lookup2(struct net *net,
167 		const struct in6_addr *saddr, __be16 sport,
168 		const struct in6_addr *daddr, unsigned int hnum,
169 		int dif, int sdif, struct udp_hslot *hslot2,
170 		struct sk_buff *skb)
171 {
172 	struct sock *sk, *result;
173 	int score, badness;
174 
175 	result = NULL;
176 	badness = -1;
177 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
178 		score = compute_score(sk, net, saddr, sport,
179 				      daddr, hnum, dif, sdif);
180 		if (score > badness) {
181 			result = lookup_reuseport(net, sk, skb,
182 						  saddr, sport, daddr, hnum);
183 			/* Fall back to scoring if group has connections */
184 			if (result && !reuseport_has_conns(sk, false))
185 				return result;
186 
187 			result = result ? : sk;
188 			badness = score;
189 		}
190 	}
191 	return result;
192 }
193 
194 static inline struct sock *udp6_lookup_run_bpf(struct net *net,
195 					       struct udp_table *udptable,
196 					       struct sk_buff *skb,
197 					       const struct in6_addr *saddr,
198 					       __be16 sport,
199 					       const struct in6_addr *daddr,
200 					       u16 hnum, const int dif)
201 {
202 	struct sock *sk, *reuse_sk;
203 	bool no_reuseport;
204 
205 	if (udptable != &udp_table)
206 		return NULL; /* only UDP is supported */
207 
208 	no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
209 					    daddr, hnum, dif, &sk);
210 	if (no_reuseport || IS_ERR_OR_NULL(sk))
211 		return sk;
212 
213 	reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
214 	if (reuse_sk)
215 		sk = reuse_sk;
216 	return sk;
217 }
218 
219 /* rcu_read_lock() must be held */
220 struct sock *__udp6_lib_lookup(struct net *net,
221 			       const struct in6_addr *saddr, __be16 sport,
222 			       const struct in6_addr *daddr, __be16 dport,
223 			       int dif, int sdif, struct udp_table *udptable,
224 			       struct sk_buff *skb)
225 {
226 	unsigned short hnum = ntohs(dport);
227 	unsigned int hash2, slot2;
228 	struct udp_hslot *hslot2;
229 	struct sock *result, *sk;
230 
231 	hash2 = ipv6_portaddr_hash(net, daddr, hnum);
232 	slot2 = hash2 & udptable->mask;
233 	hslot2 = &udptable->hash2[slot2];
234 
235 	/* Lookup connected or non-wildcard sockets */
236 	result = udp6_lib_lookup2(net, saddr, sport,
237 				  daddr, hnum, dif, sdif,
238 				  hslot2, skb);
239 	if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
240 		goto done;
241 
242 	/* Lookup redirect from BPF */
243 	if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
244 		sk = udp6_lookup_run_bpf(net, udptable, skb,
245 					 saddr, sport, daddr, hnum, dif);
246 		if (sk) {
247 			result = sk;
248 			goto done;
249 		}
250 	}
251 
252 	/* Got non-wildcard socket or error on first lookup */
253 	if (result)
254 		goto done;
255 
256 	/* Lookup wildcard sockets */
257 	hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
258 	slot2 = hash2 & udptable->mask;
259 	hslot2 = &udptable->hash2[slot2];
260 
261 	result = udp6_lib_lookup2(net, saddr, sport,
262 				  &in6addr_any, hnum, dif, sdif,
263 				  hslot2, skb);
264 done:
265 	if (IS_ERR(result))
266 		return NULL;
267 	return result;
268 }
269 EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
270 
271 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
272 					  __be16 sport, __be16 dport,
273 					  struct udp_table *udptable)
274 {
275 	const struct ipv6hdr *iph = ipv6_hdr(skb);
276 
277 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
278 				 &iph->daddr, dport, inet6_iif(skb),
279 				 inet6_sdif(skb), udptable, skb);
280 }
281 
282 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
283 				 __be16 sport, __be16 dport)
284 {
285 	const struct ipv6hdr *iph = ipv6_hdr(skb);
286 
287 	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
288 				 &iph->daddr, dport, inet6_iif(skb),
289 				 inet6_sdif(skb), &udp_table, NULL);
290 }
291 
292 /* Must be called under rcu_read_lock().
293  * Does increment socket refcount.
294  */
295 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
296 struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
297 			     const struct in6_addr *daddr, __be16 dport, int dif)
298 {
299 	struct sock *sk;
300 
301 	sk =  __udp6_lib_lookup(net, saddr, sport, daddr, dport,
302 				dif, 0, &udp_table, NULL);
303 	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
304 		sk = NULL;
305 	return sk;
306 }
307 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
308 #endif
309 
310 /* do not use the scratch area len for jumbograms: their length exceeds the
311  * scratch area space; note that the IP6CB flags are still in the first
312  * cacheline, so checking for jumbograms is cheap
313  */
314 static int udp6_skb_len(struct sk_buff *skb)
315 {
316 	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
317 }
318 
319 /*
320  *	This should be easy: if there is something there we
321  *	return it, otherwise we block.
322  */
323 
324 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
325 		  int noblock, int flags, int *addr_len)
326 {
327 	struct ipv6_pinfo *np = inet6_sk(sk);
328 	struct inet_sock *inet = inet_sk(sk);
329 	struct sk_buff *skb;
330 	unsigned int ulen, copied;
331 	int off, err, peeking = flags & MSG_PEEK;
332 	int is_udplite = IS_UDPLITE(sk);
333 	struct udp_mib __percpu *mib;
334 	bool checksum_valid = false;
335 	int is_udp4;
336 
337 	if (flags & MSG_ERRQUEUE)
338 		return ipv6_recv_error(sk, msg, len, addr_len);
339 
340 	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
341 		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
342 
343 try_again:
344 	off = sk_peek_offset(sk, flags);
345 	skb = __skb_recv_udp(sk, flags, noblock, &off, &err);
346 	if (!skb)
347 		return err;
348 
349 	ulen = udp6_skb_len(skb);
350 	copied = len;
351 	if (copied > ulen - off)
352 		copied = ulen - off;
353 	else if (copied < ulen)
354 		msg->msg_flags |= MSG_TRUNC;
355 
356 	is_udp4 = (skb->protocol == htons(ETH_P_IP));
357 	mib = __UDPX_MIB(sk, is_udp4);
358 
359 	/*
360 	 * If checksum is needed at all, try to do it while copying the
361 	 * data.  If the data is truncated, or if we only want a partial
362 	 * coverage checksum (UDP-Lite), do it before the copy.
363 	 */
364 
365 	if (copied < ulen || peeking ||
366 	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
367 		checksum_valid = udp_skb_csum_unnecessary(skb) ||
368 				!__udp_lib_checksum_complete(skb);
369 		if (!checksum_valid)
370 			goto csum_copy_err;
371 	}
372 
373 	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
374 		if (udp_skb_is_linear(skb))
375 			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
376 		else
377 			err = skb_copy_datagram_msg(skb, off, msg, copied);
378 	} else {
379 		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
380 		if (err == -EINVAL)
381 			goto csum_copy_err;
382 	}
383 	if (unlikely(err)) {
384 		if (!peeking) {
385 			atomic_inc(&sk->sk_drops);
386 			SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
387 		}
388 		kfree_skb(skb);
389 		return err;
390 	}
391 	if (!peeking)
392 		SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
393 
394 	sock_recv_ts_and_drops(msg, sk, skb);
395 
396 	/* Copy the address. */
397 	if (msg->msg_name) {
398 		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
399 		sin6->sin6_family = AF_INET6;
400 		sin6->sin6_port = udp_hdr(skb)->source;
401 		sin6->sin6_flowinfo = 0;
402 
403 		if (is_udp4) {
404 			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
405 					       &sin6->sin6_addr);
406 			sin6->sin6_scope_id = 0;
407 		} else {
408 			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
409 			sin6->sin6_scope_id =
410 				ipv6_iface_scope_id(&sin6->sin6_addr,
411 						    inet6_iif(skb));
412 		}
413 		*addr_len = sizeof(*sin6);
414 
415 		BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
416 						      (struct sockaddr *)sin6);
417 	}
418 
419 	if (udp_sk(sk)->gro_enabled)
420 		udp_cmsg_recv(msg, sk, skb);
421 
422 	if (np->rxopt.all)
423 		ip6_datagram_recv_common_ctl(sk, msg, skb);
424 
425 	if (is_udp4) {
426 		if (inet->cmsg_flags)
427 			ip_cmsg_recv_offset(msg, sk, skb,
428 					    sizeof(struct udphdr), off);
429 	} else {
430 		if (np->rxopt.all)
431 			ip6_datagram_recv_specific_ctl(sk, msg, skb);
432 	}
433 
434 	err = copied;
435 	if (flags & MSG_TRUNC)
436 		err = ulen;
437 
438 	skb_consume_udp(sk, skb, peeking ? -err : err);
439 	return err;
440 
441 csum_copy_err:
442 	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
443 				 udp_skb_destructor)) {
444 		SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
445 		SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
446 	}
447 	kfree_skb(skb);
448 
449 	/* starting over for a new packet, but check if we need to yield */
450 	cond_resched();
451 	msg->msg_flags &= ~MSG_TRUNC;
452 	goto try_again;
453 }
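/*
 * Editorial aside (illustrative userspace sketch, not part of this file):
 * the "flags & MSG_TRUNC" handling above is what makes the Linux-specific
 * trick of sizing a queued datagram with MSG_PEEK | MSG_TRUNC work.  The
 * helper name below is made up for illustration.
 */
#include <sys/socket.h>
#include <sys/types.h>

/* Return the full length of the next queued datagram without consuming
 * it.  With MSG_TRUNC, udpv6_recvmsg() reports the real datagram length
 * (ulen) even though only one byte is copied into the buffer.
 */
static ssize_t udp6_next_datagram_len(int fd)
{
	char byte;

	return recv(fd, &byte, sizeof(byte), MSG_PEEK | MSG_TRUNC);
}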
454 
455 DEFINE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
456 void udpv6_encap_enable(void)
457 {
458 	static_branch_inc(&udpv6_encap_needed_key);
459 }
460 EXPORT_SYMBOL(udpv6_encap_enable);
461 
462 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go
463  * through error handlers in encapsulations looking for a match.
464  */
465 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
466 				      struct inet6_skb_parm *opt,
467 				      u8 type, u8 code, int offset, __be32 info)
468 {
469 	int i;
470 
471 	for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
472 		int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
473 			       u8 type, u8 code, int offset, __be32 info);
474 		const struct ip6_tnl_encap_ops *encap;
475 
476 		encap = rcu_dereference(ip6tun_encaps[i]);
477 		if (!encap)
478 			continue;
479 		handler = encap->err_handler;
480 		if (handler && !handler(skb, opt, type, code, offset, info))
481 			return 0;
482 	}
483 
484 	return -ENOENT;
485 }
486 
487 /* Try to match ICMP errors to UDP tunnels by looking up a socket without
488  * reversing source and destination port: this will match tunnels that force the
489  * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
490  * lwtunnels might actually break this assumption by being configured with
491  * different destination ports on endpoints, in this case we won't be able to
492  * trace ICMP messages back to them.
493  *
494  * If this doesn't match any socket, probe tunnels with arbitrary destination
495  * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
496  * we've sent packets to won't necessarily match the local destination port.
497  *
498  * Then ask the tunnel implementation to match the error against a valid
499  * association.
500  *
501  * Return an error if we can't find a match, the socket if we need further
502  * processing, zero otherwise.
503  */
504 static struct sock *__udp6_lib_err_encap(struct net *net,
505 					 const struct ipv6hdr *hdr, int offset,
506 					 struct udphdr *uh,
507 					 struct udp_table *udptable,
508 					 struct sock *sk,
509 					 struct sk_buff *skb,
510 					 struct inet6_skb_parm *opt,
511 					 u8 type, u8 code, __be32 info)
512 {
513 	int (*lookup)(struct sock *sk, struct sk_buff *skb);
514 	int network_offset, transport_offset;
515 	struct udp_sock *up;
516 
517 	network_offset = skb_network_offset(skb);
518 	transport_offset = skb_transport_offset(skb);
519 
520 	/* Network header needs to point to the outer IPv6 header inside ICMP */
521 	skb_reset_network_header(skb);
522 
523 	/* Transport header needs to point to the UDP header */
524 	skb_set_transport_header(skb, offset);
525 
526 	if (sk) {
527 		up = udp_sk(sk);
528 
529 		lookup = READ_ONCE(up->encap_err_lookup);
530 		if (lookup && lookup(sk, skb))
531 			sk = NULL;
532 
533 		goto out;
534 	}
535 
536 	sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
537 			       &hdr->saddr, uh->dest,
538 			       inet6_iif(skb), 0, udptable, skb);
539 	if (sk) {
540 		up = udp_sk(sk);
541 
542 		lookup = READ_ONCE(up->encap_err_lookup);
543 		if (!lookup || lookup(sk, skb))
544 			sk = NULL;
545 	}
546 
547 out:
548 	if (!sk) {
549 		sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
550 							offset, info));
551 	}
552 
553 	skb_set_transport_header(skb, transport_offset);
554 	skb_set_network_header(skb, network_offset);
555 
556 	return sk;
557 }
558 
559 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
560 		   u8 type, u8 code, int offset, __be32 info,
561 		   struct udp_table *udptable)
562 {
563 	struct ipv6_pinfo *np;
564 	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
565 	const struct in6_addr *saddr = &hdr->saddr;
566 	const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
567 	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
568 	bool tunnel = false;
569 	struct sock *sk;
570 	int harderr;
571 	int err;
572 	struct net *net = dev_net(skb->dev);
573 
574 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
575 			       inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
576 
577 	if (!sk || udp_sk(sk)->encap_type) {
578 		/* No socket for error: try tunnels before discarding */
579 		if (static_branch_unlikely(&udpv6_encap_needed_key)) {
580 			sk = __udp6_lib_err_encap(net, hdr, offset, uh,
581 						  udptable, sk, skb,
582 						  opt, type, code, info);
583 			if (!sk)
584 				return 0;
585 		} else
586 			sk = ERR_PTR(-ENOENT);
587 
588 		if (IS_ERR(sk)) {
589 			__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
590 					  ICMP6_MIB_INERRORS);
591 			return PTR_ERR(sk);
592 		}
593 
594 		tunnel = true;
595 	}
596 
597 	harderr = icmpv6_err_convert(type, code, &err);
598 	np = inet6_sk(sk);
599 
600 	if (type == ICMPV6_PKT_TOOBIG) {
601 		if (!ip6_sk_accept_pmtu(sk))
602 			goto out;
603 		ip6_sk_update_pmtu(skb, sk, info);
604 		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
605 			harderr = 1;
606 	}
607 	if (type == NDISC_REDIRECT) {
608 		if (tunnel) {
609 			ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
610 				     sk->sk_mark, sk->sk_uid);
611 		} else {
612 			ip6_sk_redirect(skb, sk);
613 		}
614 		goto out;
615 	}
616 
617 	/* Tunnels don't have an application socket: don't pass errors back */
618 	if (tunnel)
619 		goto out;
620 
621 	if (!np->recverr) {
622 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
623 			goto out;
624 	} else {
625 		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
626 	}
627 
628 	sk->sk_err = err;
629 	sk_error_report(sk);
630 out:
631 	return 0;
632 }
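/*
 * Editorial aside (illustrative userspace sketch, not part of this file):
 * as the np->recverr test above shows, ICMPv6 errors are only queued to
 * the application if it enabled IPV6_RECVERR; they are then read via the
 * MSG_ERRQUEUE branch at the top of udpv6_recvmsg().  A rough sketch,
 * with a made-up helper name:
 */
#include <linux/errqueue.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Enable ICMPv6 error reporting and drain one queued error.  Returns the
 * ICMPv6 type (e.g. destination unreachable) or -1 if nothing was queued.
 */
static int udp6_read_icmp_error(int fd)
{
	char cbuf[256], dbuf[64];
	struct iovec iov = { .iov_base = dbuf, .iov_len = sizeof(dbuf) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cm;
	int on = 1;

	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVERR, &on, sizeof(on));

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return -1;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IPV6 &&
		    cm->cmsg_type == IPV6_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);

			return ee->ee_type;
		}
	}
	return -1;
}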
633 
634 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
635 {
636 	int rc;
637 
638 	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
639 		sock_rps_save_rxhash(sk, skb);
640 		sk_mark_napi_id(sk, skb);
641 		sk_incoming_cpu_update(sk);
642 	} else {
643 		sk_mark_napi_id_once(sk, skb);
644 	}
645 
646 	rc = __udp_enqueue_schedule_skb(sk, skb);
647 	if (rc < 0) {
648 		int is_udplite = IS_UDPLITE(sk);
649 
650 		/* Note that an ENOMEM error is charged twice */
651 		if (rc == -ENOMEM)
652 			UDP6_INC_STATS(sock_net(sk),
653 					 UDP_MIB_RCVBUFERRORS, is_udplite);
654 		else
655 			UDP6_INC_STATS(sock_net(sk),
656 				       UDP_MIB_MEMERRORS, is_udplite);
657 		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
658 		kfree_skb(skb);
659 		return -1;
660 	}
661 
662 	return 0;
663 }
664 
665 static __inline__ int udpv6_err(struct sk_buff *skb,
666 				struct inet6_skb_parm *opt, u8 type,
667 				u8 code, int offset, __be32 info)
668 {
669 	return __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
670 }
671 
672 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
673 {
674 	struct udp_sock *up = udp_sk(sk);
675 	int is_udplite = IS_UDPLITE(sk);
676 
677 	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
678 		goto drop;
679 
680 	if (static_branch_unlikely(&udpv6_encap_needed_key) && up->encap_type) {
681 		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
682 
683 		/*
684 		 * This is an encapsulation socket so pass the skb to
685 		 * the socket's udp_encap_rcv() hook. Otherwise, just
686 		 * fall through and pass this up the UDP socket.
687 		 * up->encap_rcv() returns the following value:
688 		 * =0 if skb was successfully passed to the encap
689 		 *    handler or was discarded by it.
690 		 * >0 if skb should be passed on to UDP.
691 		 * <0 if skb should be resubmitted as proto -N
692 		 */
693 
694 		/* if we're overly short, let UDP handle it */
695 		encap_rcv = READ_ONCE(up->encap_rcv);
696 		if (encap_rcv) {
697 			int ret;
698 
699 			/* Verify checksum before giving to encap */
700 			if (udp_lib_checksum_complete(skb))
701 				goto csum_error;
702 
703 			ret = encap_rcv(sk, skb);
704 			if (ret <= 0) {
705 				__UDP6_INC_STATS(sock_net(sk),
706 						 UDP_MIB_INDATAGRAMS,
707 						 is_udplite);
708 				return -ret;
709 			}
710 		}
711 
712 		/* FALLTHROUGH -- it's a UDP packet */
713 	}
714 
715 	/*
716 	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
717 	 */
718 	if ((up->pcflag & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {
719 
720 		if (up->pcrlen == 0) {          /* full coverage was set  */
721 			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
722 					    UDP_SKB_CB(skb)->cscov, skb->len);
723 			goto drop;
724 		}
725 		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
726 			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
727 					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
728 			goto drop;
729 		}
730 	}
731 
732 	prefetch(&sk->sk_rmem_alloc);
733 	if (rcu_access_pointer(sk->sk_filter) &&
734 	    udp_lib_checksum_complete(skb))
735 		goto csum_error;
736 
737 	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
738 		goto drop;
739 
740 	udp_csum_pull_header(skb);
741 
742 	skb_dst_drop(skb);
743 
744 	return __udpv6_queue_rcv_skb(sk, skb);
745 
746 csum_error:
747 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
748 drop:
749 	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
750 	atomic_inc(&sk->sk_drops);
751 	kfree_skb(skb);
752 	return -1;
753 }
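/*
 * Editorial aside (illustrative userspace sketch, not part of this file):
 * the pcflag/pcrlen checks above enforce the receiver's minimum checksum
 * coverage for UDP-Lite.  Coverage is configured per socket; the helper
 * name below is made up for illustration.
 */
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/udp.h>		/* UDPLITE_SEND_CSCOV, UDPLITE_RECV_CSCOV */

/* For a socket created with socket(AF_INET6, SOCK_DGRAM, IPPROTO_UDPLITE):
 * checksum only the first cscov bytes of each sent datagram (header
 * included) and drop received datagrams whose coverage is below cscov.
 */
static int udplite6_set_coverage(int fd, int cscov)
{
	if (setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV,
		       &cscov, sizeof(cscov)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV,
			  &cscov, sizeof(cscov));
}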
754 
755 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
756 {
757 	struct sk_buff *next, *segs;
758 	int ret;
759 
760 	if (likely(!udp_unexpected_gso(sk, skb)))
761 		return udpv6_queue_rcv_one_skb(sk, skb);
762 
763 	__skb_push(skb, -skb_mac_offset(skb));
764 	segs = udp_rcv_segment(sk, skb, false);
765 	skb_list_walk_safe(segs, skb, next) {
766 		__skb_pull(skb, skb_transport_offset(skb));
767 
768 		udp_post_segment_fix_csum(skb);
769 		ret = udpv6_queue_rcv_one_skb(sk, skb);
770 		if (ret > 0)
771 			ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
772 						 true);
773 	}
774 	return 0;
775 }
776 
777 static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
778 				   __be16 loc_port, const struct in6_addr *loc_addr,
779 				   __be16 rmt_port, const struct in6_addr *rmt_addr,
780 				   int dif, int sdif, unsigned short hnum)
781 {
782 	struct inet_sock *inet = inet_sk(sk);
783 
784 	if (!net_eq(sock_net(sk), net))
785 		return false;
786 
787 	if (udp_sk(sk)->udp_port_hash != hnum ||
788 	    sk->sk_family != PF_INET6 ||
789 	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
790 	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
791 		    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
792 	    !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif) ||
793 	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
794 		    !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
795 		return false;
796 	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
797 		return false;
798 	return true;
799 }
800 
801 static void udp6_csum_zero_error(struct sk_buff *skb)
802 {
803 	/* RFC 2460 section 8.1 says that we SHOULD log
804 	 * this error. Well, it is reasonable.
805 	 */
806 	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
807 			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
808 			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
809 }
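/*
 * Editorial aside (illustrative userspace sketch, not part of this file):
 * RFC 6935/6936 allow zero UDP checksums over IPv6 for tunnel traffic.  A
 * tunnel endpoint opts in per socket, which is what the no_check6_rx and
 * no_check6_tx tests in this file look at; everyone else has zero-checksum
 * datagrams dropped and logged as above.  Helper name made up:
 */
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/udp.h>		/* UDP_NO_CHECK6_TX, UDP_NO_CHECK6_RX */

/* Allow sending and accepting IPv6 UDP datagrams with a zero checksum on
 * this socket (IPPROTO_UDP has the same value as SOL_UDP).
 */
static int udp6_allow_zero_checksum(int fd)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_TX,
		       &one, sizeof(one)) < 0)
		return -1;
	return setsockopt(fd, IPPROTO_UDP, UDP_NO_CHECK6_RX,
			  &one, sizeof(one));
}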
810 
811 /*
812  * Note: called only from the BH handler context,
813  * so we don't need to lock the hashes.
814  */
815 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
816 		const struct in6_addr *saddr, const struct in6_addr *daddr,
817 		struct udp_table *udptable, int proto)
818 {
819 	struct sock *sk, *first = NULL;
820 	const struct udphdr *uh = udp_hdr(skb);
821 	unsigned short hnum = ntohs(uh->dest);
822 	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
823 	unsigned int offset = offsetof(typeof(*sk), sk_node);
824 	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
825 	int dif = inet6_iif(skb);
826 	int sdif = inet6_sdif(skb);
827 	struct hlist_node *node;
828 	struct sk_buff *nskb;
829 
830 	if (use_hash2) {
831 		hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
832 			    udptable->mask;
833 		hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
834 start_lookup:
835 		hslot = &udptable->hash2[hash2];
836 		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
837 	}
838 
839 	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
840 		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
841 					    uh->source, saddr, dif, sdif,
842 					    hnum))
843 			continue;
844 		/* If the checksum is zero and zero-checksum reception is not
845 		 * enabled on the socket (no_check6_rx), skip it.
846 		 */
847 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
848 			continue;
849 		if (!first) {
850 			first = sk;
851 			continue;
852 		}
853 		nskb = skb_clone(skb, GFP_ATOMIC);
854 		if (unlikely(!nskb)) {
855 			atomic_inc(&sk->sk_drops);
856 			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
857 					 IS_UDPLITE(sk));
858 			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
859 					 IS_UDPLITE(sk));
860 			continue;
861 		}
862 
863 		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
864 			consume_skb(nskb);
865 	}
866 
867 	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
868 	if (use_hash2 && hash2 != hash2_any) {
869 		hash2 = hash2_any;
870 		goto start_lookup;
871 	}
872 
873 	if (first) {
874 		if (udpv6_queue_rcv_skb(first, skb) > 0)
875 			consume_skb(skb);
876 	} else {
877 		kfree_skb(skb);
878 		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
879 				 proto == IPPROTO_UDPLITE);
880 	}
881 	return 0;
882 }
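/*
 * Editorial aside (illustrative userspace sketch, not part of this file):
 * for a socket to be considered by __udp6_lib_mcast_deliver() it must be
 * bound to the destination port and joined to the group, roughly as below.
 * The helper name is made up for illustration.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <sys/socket.h>
#include <unistd.h>

/* Bind to [::]:port and join an IPv6 multicast group so that the delivery
 * loop above clones matching datagrams to this socket.  ifindex selects
 * the interface; 0 lets the kernel pick one.
 */
static int udp6_mcast_listen(const struct in6_addr *group, uint16_t port,
			     unsigned int ifindex)
{
	struct sockaddr_in6 local = {
		.sin6_family = AF_INET6,
		.sin6_addr   = in6addr_any,
		.sin6_port   = htons(port),
	};
	struct ipv6_mreq mreq = {
		.ipv6mr_multiaddr = *group,
		.ipv6mr_interface = ifindex,
	};
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0 ||
	    setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP,
		       &mreq, sizeof(mreq)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}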
883 
884 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
885 {
886 	if (udp_sk_rx_dst_set(sk, dst)) {
887 		const struct rt6_info *rt = (const struct rt6_info *)dst;
888 
889 		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
890 	}
891 }
892 
893 /* wrapper for udpv6_queue_rcv_skb taking care of csum conversion and
894  * return code conversion for ip layer consumption
895  */
896 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
897 				struct udphdr *uh)
898 {
899 	int ret;
900 
901 	if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
902 		skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
903 
904 	ret = udpv6_queue_rcv_skb(sk, skb);
905 
906 	/* a return value > 0 means to resubmit the input */
907 	if (ret > 0)
908 		return ret;
909 	return 0;
910 }
911 
912 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
913 		   int proto)
914 {
915 	const struct in6_addr *saddr, *daddr;
916 	struct net *net = dev_net(skb->dev);
917 	struct udphdr *uh;
918 	struct sock *sk;
919 	bool refcounted;
920 	u32 ulen = 0;
921 
922 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
923 		goto discard;
924 
925 	saddr = &ipv6_hdr(skb)->saddr;
926 	daddr = &ipv6_hdr(skb)->daddr;
927 	uh = udp_hdr(skb);
928 
929 	ulen = ntohs(uh->len);
930 	if (ulen > skb->len)
931 		goto short_packet;
932 
933 	if (proto == IPPROTO_UDP) {
934 		/* UDP validates ulen. */
935 
936 		/* Check for jumbo payload */
937 		if (ulen == 0)
938 			ulen = skb->len;
939 
940 		if (ulen < sizeof(*uh))
941 			goto short_packet;
942 
943 		if (ulen < skb->len) {
944 			if (pskb_trim_rcsum(skb, ulen))
945 				goto short_packet;
946 			saddr = &ipv6_hdr(skb)->saddr;
947 			daddr = &ipv6_hdr(skb)->daddr;
948 			uh = udp_hdr(skb);
949 		}
950 	}
951 
952 	if (udp6_csum_init(skb, uh, proto))
953 		goto csum_error;
954 
955 	/* Check if the socket is already available, e.g. due to early demux */
956 	sk = skb_steal_sock(skb, &refcounted);
957 	if (sk) {
958 		struct dst_entry *dst = skb_dst(skb);
959 		int ret;
960 
961 		if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
962 			udp6_sk_rx_dst_set(sk, dst);
963 
964 		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
965 			if (refcounted)
966 				sock_put(sk);
967 			goto report_csum_error;
968 		}
969 
970 		ret = udp6_unicast_rcv_skb(sk, skb, uh);
971 		if (refcounted)
972 			sock_put(sk);
973 		return ret;
974 	}
975 
976 	/*
977 	 *	Multicast receive code
978 	 */
979 	if (ipv6_addr_is_multicast(daddr))
980 		return __udp6_lib_mcast_deliver(net, skb,
981 				saddr, daddr, udptable, proto);
982 
983 	/* Unicast */
984 	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
985 	if (sk) {
986 		if (!uh->check && !udp_sk(sk)->no_check6_rx)
987 			goto report_csum_error;
988 		return udp6_unicast_rcv_skb(sk, skb, uh);
989 	}
990 
991 	if (!uh->check)
992 		goto report_csum_error;
993 
994 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
995 		goto discard;
996 
997 	if (udp_lib_checksum_complete(skb))
998 		goto csum_error;
999 
1000 	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1001 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1002 
1003 	kfree_skb(skb);
1004 	return 0;
1005 
1006 short_packet:
1007 	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1008 			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
1009 			    saddr, ntohs(uh->source),
1010 			    ulen, skb->len,
1011 			    daddr, ntohs(uh->dest));
1012 	goto discard;
1013 
1014 report_csum_error:
1015 	udp6_csum_zero_error(skb);
1016 csum_error:
1017 	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1018 discard:
1019 	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1020 	kfree_skb(skb);
1021 	return 0;
1022 }
1023 
1024 
1025 static struct sock *__udp6_lib_demux_lookup(struct net *net,
1026 			__be16 loc_port, const struct in6_addr *loc_addr,
1027 			__be16 rmt_port, const struct in6_addr *rmt_addr,
1028 			int dif, int sdif)
1029 {
1030 	unsigned short hnum = ntohs(loc_port);
1031 	unsigned int hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1032 	unsigned int slot2 = hash2 & udp_table.mask;
1033 	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
1034 	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
1035 	struct sock *sk;
1036 
1037 	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1038 		if (sk->sk_state == TCP_ESTABLISHED &&
1039 		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
1040 			return sk;
1041 		/* Only check first socket in chain */
1042 		break;
1043 	}
1044 	return NULL;
1045 }
1046 
1047 INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
1048 {
1049 	struct net *net = dev_net(skb->dev);
1050 	const struct udphdr *uh;
1051 	struct sock *sk;
1052 	struct dst_entry *dst;
1053 	int dif = skb->dev->ifindex;
1054 	int sdif = inet6_sdif(skb);
1055 
1056 	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1057 	    sizeof(struct udphdr)))
1058 		return;
1059 
1060 	uh = udp_hdr(skb);
1061 
1062 	if (skb->pkt_type == PACKET_HOST)
1063 		sk = __udp6_lib_demux_lookup(net, uh->dest,
1064 					     &ipv6_hdr(skb)->daddr,
1065 					     uh->source, &ipv6_hdr(skb)->saddr,
1066 					     dif, sdif);
1067 	else
1068 		return;
1069 
1070 	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
1071 		return;
1072 
1073 	skb->sk = sk;
1074 	skb->destructor = sock_efree;
1075 	dst = rcu_dereference(sk->sk_rx_dst);
1076 
1077 	if (dst)
1078 		dst = dst_check(dst, sk->sk_rx_dst_cookie);
1079 	if (dst) {
1080 		/* set noref for now.
1081 		 * any place which wants to hold dst has to call
1082 		 * dst_hold_safe()
1083 		 */
1084 		skb_dst_set_noref(skb, dst);
1085 	}
1086 }
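/*
 * Editorial aside (illustrative userspace sketch, not part of this file):
 * early demux and the first-pass lookup in __udp6_lib_lookup() only match
 * sockets in TCP_ESTABLISHED state, which for UDP simply means the
 * application called connect().  Helper name made up:
 */
#include <netinet/in.h>
#include <sys/socket.h>

/* Pin a UDP socket to one peer.  The socket moves to TCP_ESTABLISHED, so
 * later datagrams from that peer can be matched by udp_v6_early_demux()
 * and by the connected-socket pass of __udp6_lib_lookup().
 */
static int udp6_connect_peer(int fd, const struct sockaddr_in6 *peer)
{
	return connect(fd, (const struct sockaddr *)peer, sizeof(*peer));
}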
1087 
1088 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1089 {
1090 	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
1091 }
1092 
1093 /*
1094  * Throw away all pending data and cancel the corking. Socket is locked.
1095  */
1096 static void udp_v6_flush_pending_frames(struct sock *sk)
1097 {
1098 	struct udp_sock *up = udp_sk(sk);
1099 
1100 	if (up->pending == AF_INET)
1101 		udp_flush_pending_frames(sk);
1102 	else if (up->pending) {
1103 		up->len = 0;
1104 		up->pending = 0;
1105 		ip6_flush_pending_frames(sk);
1106 	}
1107 }
1108 
1109 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1110 			     int addr_len)
1111 {
1112 	if (addr_len < offsetofend(struct sockaddr, sa_family))
1113 		return -EINVAL;
1114 	/* The following checks are replicated from __ip6_datagram_connect()
1115 	 * and intended to prevent the BPF program called below from accessing
1116 	 * bytes that are outside the bound specified by the user in addr_len.
1117 	 */
1118 	if (uaddr->sa_family == AF_INET) {
1119 		if (__ipv6_only_sock(sk))
1120 			return -EAFNOSUPPORT;
1121 		return udp_pre_connect(sk, uaddr, addr_len);
1122 	}
1123 
1124 	if (addr_len < SIN6_LEN_RFC2133)
1125 		return -EINVAL;
1126 
1127 	return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr);
1128 }
1129 
1130 /**
1131  *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
1132  *	@sk:	socket we are sending on
1133  *	@skb:	sk_buff containing the filled-in UDP header
1134  *		(checksum field must be zeroed out)
1135  *	@saddr: source address
1136  *	@daddr: destination address
1137  *	@len:	length of packet
1138  */
1139 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1140 				 const struct in6_addr *saddr,
1141 				 const struct in6_addr *daddr, int len)
1142 {
1143 	unsigned int offset;
1144 	struct udphdr *uh = udp_hdr(skb);
1145 	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1146 	__wsum csum = 0;
1147 
1148 	if (!frags) {
1149 		/* Only one fragment on the socket.  */
1150 		skb->csum_start = skb_transport_header(skb) - skb->head;
1151 		skb->csum_offset = offsetof(struct udphdr, check);
1152 		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1153 	} else {
1154 		/*
1155 		 * HW-checksum won't work as there are two or more
1156 		 * fragments on the socket, so the csums of all sk_buffs
1157 		 * have to be combined in software.
1158 		 */
1159 		offset = skb_transport_offset(skb);
1160 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1161 		csum = skb->csum;
1162 
1163 		skb->ip_summed = CHECKSUM_NONE;
1164 
1165 		do {
1166 			csum = csum_add(csum, frags->csum);
1167 		} while ((frags = frags->next));
1168 
1169 		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1170 					    csum);
1171 		if (uh->check == 0)
1172 			uh->check = CSUM_MANGLED_0;
1173 	}
1174 }
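/*
 * Editorial aside (illustrative sketch, not part of this file): the
 * checksums above use 16-bit ones'-complement arithmetic.  Folding and
 * inverting a 32-bit running sum, plus the CSUM_MANGLED_0 substitution,
 * look roughly like this userspace analogue (toy name, not a kernel API):
 */
#include <stdint.h>

/* Fold a 32-bit ones'-complement accumulator to 16 bits and invert it.
 * A result of 0 must be transmitted as 0xffff, since an all-zero UDP
 * checksum means "no checksum present".
 */
static uint16_t toy_udp_csum_finish(uint32_t sum)
{
	uint16_t folded;

	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	folded = (uint16_t)~sum;

	return folded ? folded : 0xffff;	/* CSUM_MANGLED_0 */
}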
1175 
1176 /*
1177  *	Sending
1178  */
1179 
1180 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1181 			   struct inet_cork *cork)
1182 {
1183 	struct sock *sk = skb->sk;
1184 	struct udphdr *uh;
1185 	int err = 0;
1186 	int is_udplite = IS_UDPLITE(sk);
1187 	__wsum csum = 0;
1188 	int offset = skb_transport_offset(skb);
1189 	int len = skb->len - offset;
1190 	int datalen = len - sizeof(*uh);
1191 
1192 	/*
1193 	 * Create a UDP header
1194 	 */
1195 	uh = udp_hdr(skb);
1196 	uh->source = fl6->fl6_sport;
1197 	uh->dest = fl6->fl6_dport;
1198 	uh->len = htons(len);
1199 	uh->check = 0;
1200 
1201 	if (cork->gso_size) {
1202 		const int hlen = skb_network_header_len(skb) +
1203 				 sizeof(struct udphdr);
1204 
1205 		if (hlen + cork->gso_size > cork->fragsize) {
1206 			kfree_skb(skb);
1207 			return -EINVAL;
1208 		}
1209 		if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1210 			kfree_skb(skb);
1211 			return -EINVAL;
1212 		}
1213 		if (udp_sk(sk)->no_check6_tx) {
1214 			kfree_skb(skb);
1215 			return -EINVAL;
1216 		}
1217 		if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite ||
1218 		    dst_xfrm(skb_dst(skb))) {
1219 			kfree_skb(skb);
1220 			return -EIO;
1221 		}
1222 
1223 		if (datalen > cork->gso_size) {
1224 			skb_shinfo(skb)->gso_size = cork->gso_size;
1225 			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1226 			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1227 								 cork->gso_size);
1228 		}
1229 		goto csum_partial;
1230 	}
1231 
1232 	if (is_udplite)
1233 		csum = udplite_csum(skb);
1234 	else if (udp_sk(sk)->no_check6_tx) {   /* UDP csum disabled */
1235 		skb->ip_summed = CHECKSUM_NONE;
1236 		goto send;
1237 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1238 csum_partial:
1239 		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1240 		goto send;
1241 	} else
1242 		csum = udp_csum(skb);
1243 
1244 	/* add protocol-dependent pseudo-header */
1245 	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1246 				    len, fl6->flowi6_proto, csum);
1247 	if (uh->check == 0)
1248 		uh->check = CSUM_MANGLED_0;
1249 
1250 send:
1251 	err = ip6_send_skb(skb);
1252 	if (err) {
1253 		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
1254 			UDP6_INC_STATS(sock_net(sk),
1255 				       UDP_MIB_SNDBUFERRORS, is_udplite);
1256 			err = 0;
1257 		}
1258 	} else {
1259 		UDP6_INC_STATS(sock_net(sk),
1260 			       UDP_MIB_OUTDATAGRAMS, is_udplite);
1261 	}
1262 	return err;
1263 }
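/*
 * Editorial aside (illustrative userspace sketch, not part of this file):
 * the cork->gso_size branch above is taken when the application opted in
 * to UDP GSO, either per socket as below or per sendmsg() via a
 * UDP_SEGMENT cmsg.  Helper name made up for illustration.
 */
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/udp.h>		/* UDP_SEGMENT */

/* Let the stack split each large send into 1400-byte datagrams.  As the
 * checks above enforce, this is refused for UDP-Lite, for sockets with
 * UDP_NO_CHECK6_TX set, over xfrm routes, and when one segment plus
 * headers would not fit into the cork fragment size.
 */
static int udp6_enable_gso(int fd)
{
	int gso_size = 1400;

	return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
			  &gso_size, sizeof(gso_size));
}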
1264 
1265 static int udp_v6_push_pending_frames(struct sock *sk)
1266 {
1267 	struct sk_buff *skb;
1268 	struct udp_sock  *up = udp_sk(sk);
1269 	struct flowi6 fl6;
1270 	int err = 0;
1271 
1272 	if (up->pending == AF_INET)
1273 		return udp_push_pending_frames(sk);
1274 
1275 	/* ip6_finish_skb will release the cork, so make a copy of
1276 	 * fl6 here.
1277 	 */
1278 	fl6 = inet_sk(sk)->cork.fl.u.ip6;
1279 
1280 	skb = ip6_finish_skb(sk);
1281 	if (!skb)
1282 		goto out;
1283 
1284 	err = udp_v6_send_skb(skb, &fl6, &inet_sk(sk)->cork.base);
1285 
1286 out:
1287 	up->len = 0;
1288 	up->pending = 0;
1289 	return err;
1290 }
1291 
1292 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1293 {
1294 	struct ipv6_txoptions opt_space;
1295 	struct udp_sock *up = udp_sk(sk);
1296 	struct inet_sock *inet = inet_sk(sk);
1297 	struct ipv6_pinfo *np = inet6_sk(sk);
1298 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1299 	struct in6_addr *daddr, *final_p, final;
1300 	struct ipv6_txoptions *opt = NULL;
1301 	struct ipv6_txoptions *opt_to_free = NULL;
1302 	struct ip6_flowlabel *flowlabel = NULL;
1303 	struct flowi6 fl6;
1304 	struct dst_entry *dst;
1305 	struct ipcm6_cookie ipc6;
1306 	int addr_len = msg->msg_namelen;
1307 	bool connected = false;
1308 	int ulen = len;
1309 	int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
1310 	int err;
1311 	int is_udplite = IS_UDPLITE(sk);
1312 	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1313 
1314 	ipcm6_init(&ipc6);
1315 	ipc6.gso_size = READ_ONCE(up->gso_size);
1316 	ipc6.sockc.tsflags = sk->sk_tsflags;
1317 	ipc6.sockc.mark = sk->sk_mark;
1318 
1319 	/* destination address check */
1320 	if (sin6) {
1321 		if (addr_len < offsetof(struct sockaddr, sa_data))
1322 			return -EINVAL;
1323 
1324 		switch (sin6->sin6_family) {
1325 		case AF_INET6:
1326 			if (addr_len < SIN6_LEN_RFC2133)
1327 				return -EINVAL;
1328 			daddr = &sin6->sin6_addr;
1329 			if (ipv6_addr_any(daddr) &&
1330 			    ipv6_addr_v4mapped(&np->saddr))
1331 				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1332 						       daddr);
1333 			break;
1334 		case AF_INET:
1335 			goto do_udp_sendmsg;
1336 		case AF_UNSPEC:
1337 			msg->msg_name = sin6 = NULL;
1338 			msg->msg_namelen = addr_len = 0;
1339 			daddr = NULL;
1340 			break;
1341 		default:
1342 			return -EINVAL;
1343 		}
1344 	} else if (!up->pending) {
1345 		if (sk->sk_state != TCP_ESTABLISHED)
1346 			return -EDESTADDRREQ;
1347 		daddr = &sk->sk_v6_daddr;
1348 	} else
1349 		daddr = NULL;
1350 
1351 	if (daddr) {
1352 		if (ipv6_addr_v4mapped(daddr)) {
1353 			struct sockaddr_in sin;
1354 			sin.sin_family = AF_INET;
1355 			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1356 			sin.sin_addr.s_addr = daddr->s6_addr32[3];
1357 			msg->msg_name = &sin;
1358 			msg->msg_namelen = sizeof(sin);
1359 do_udp_sendmsg:
1360 			if (__ipv6_only_sock(sk))
1361 				return -ENETUNREACH;
1362 			return udp_sendmsg(sk, msg, len);
1363 		}
1364 	}
1365 
1366 	if (up->pending == AF_INET)
1367 		return udp_sendmsg(sk, msg, len);
1368 
1369 	/* Rough check on arithmetic overflow,
1370 	   a better check is made in ip6_append_data().
1371 	   */
1372 	if (len > INT_MAX - sizeof(struct udphdr))
1373 		return -EMSGSIZE;
1374 
1375 	getfrag  =  is_udplite ?  udplite_getfrag : ip_generic_getfrag;
1376 	if (up->pending) {
1377 		/*
1378 		 * There are pending frames.
1379 		 * The socket lock must be held while it's corked.
1380 		 */
1381 		lock_sock(sk);
1382 		if (likely(up->pending)) {
1383 			if (unlikely(up->pending != AF_INET6)) {
1384 				release_sock(sk);
1385 				return -EAFNOSUPPORT;
1386 			}
1387 			dst = NULL;
1388 			goto do_append_data;
1389 		}
1390 		release_sock(sk);
1391 	}
1392 	ulen += sizeof(struct udphdr);
1393 
1394 	memset(&fl6, 0, sizeof(fl6));
1395 
1396 	if (sin6) {
1397 		if (sin6->sin6_port == 0)
1398 			return -EINVAL;
1399 
1400 		fl6.fl6_dport = sin6->sin6_port;
1401 		daddr = &sin6->sin6_addr;
1402 
1403 		if (np->sndflow) {
1404 			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1405 			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
1406 				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1407 				if (IS_ERR(flowlabel))
1408 					return -EINVAL;
1409 			}
1410 		}
1411 
1412 		/*
1413 		 * Otherwise it will be difficult to maintain
1414 		 * sk->sk_dst_cache.
1415 		 */
1416 		if (sk->sk_state == TCP_ESTABLISHED &&
1417 		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1418 			daddr = &sk->sk_v6_daddr;
1419 
1420 		if (addr_len >= sizeof(struct sockaddr_in6) &&
1421 		    sin6->sin6_scope_id &&
1422 		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1423 			fl6.flowi6_oif = sin6->sin6_scope_id;
1424 	} else {
1425 		if (sk->sk_state != TCP_ESTABLISHED)
1426 			return -EDESTADDRREQ;
1427 
1428 		fl6.fl6_dport = inet->inet_dport;
1429 		daddr = &sk->sk_v6_daddr;
1430 		fl6.flowlabel = np->flow_label;
1431 		connected = true;
1432 	}
1433 
1434 	if (!fl6.flowi6_oif)
1435 		fl6.flowi6_oif = sk->sk_bound_dev_if;
1436 
1437 	if (!fl6.flowi6_oif)
1438 		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1439 
1440 	fl6.flowi6_uid = sk->sk_uid;
1441 
1442 	if (msg->msg_controllen) {
1443 		opt = &opt_space;
1444 		memset(opt, 0, sizeof(struct ipv6_txoptions));
1445 		opt->tot_len = sizeof(*opt);
1446 		ipc6.opt = opt;
1447 
1448 		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1449 		if (err > 0)
1450 			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
1451 						    &ipc6);
1452 		if (err < 0) {
1453 			fl6_sock_release(flowlabel);
1454 			return err;
1455 		}
1456 		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1457 			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
1458 			if (IS_ERR(flowlabel))
1459 				return -EINVAL;
1460 		}
1461 		if (!(opt->opt_nflen|opt->opt_flen))
1462 			opt = NULL;
1463 		connected = false;
1464 	}
1465 	if (!opt) {
1466 		opt = txopt_get(np);
1467 		opt_to_free = opt;
1468 	}
1469 	if (flowlabel)
1470 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
1471 	opt = ipv6_fixup_options(&opt_space, opt);
1472 	ipc6.opt = opt;
1473 
1474 	fl6.flowi6_proto = sk->sk_protocol;
1475 	fl6.flowi6_mark = ipc6.sockc.mark;
1476 	fl6.daddr = *daddr;
1477 	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
1478 		fl6.saddr = np->saddr;
1479 	fl6.fl6_sport = inet->inet_sport;
1480 
1481 	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1482 		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1483 					   (struct sockaddr *)sin6, &fl6.saddr);
1484 		if (err)
1485 			goto out_no_dst;
1486 		if (sin6) {
1487 			if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1488 				/* BPF program rewrote the IPv6-only address to an
1489 				 * IPv4-mapped one. It's currently unsupported.
1490 				 */
1491 				err = -ENOTSUPP;
1492 				goto out_no_dst;
1493 			}
1494 			if (sin6->sin6_port == 0) {
1495 				/* BPF program set invalid port. Reject it. */
1496 				err = -EINVAL;
1497 				goto out_no_dst;
1498 			}
1499 			fl6.fl6_dport = sin6->sin6_port;
1500 			fl6.daddr = sin6->sin6_addr;
1501 		}
1502 	}
1503 
1504 	if (ipv6_addr_any(&fl6.daddr))
1505 		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1506 
1507 	final_p = fl6_update_dst(&fl6, opt, &final);
1508 	if (final_p)
1509 		connected = false;
1510 
1511 	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
1512 		fl6.flowi6_oif = np->mcast_oif;
1513 		connected = false;
1514 	} else if (!fl6.flowi6_oif)
1515 		fl6.flowi6_oif = np->ucast_oif;
1516 
1517 	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
1518 
1519 	if (ipc6.tclass < 0)
1520 		ipc6.tclass = np->tclass;
1521 
1522 	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
1523 
1524 	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, connected);
1525 	if (IS_ERR(dst)) {
1526 		err = PTR_ERR(dst);
1527 		dst = NULL;
1528 		goto out;
1529 	}
1530 
1531 	if (ipc6.hlimit < 0)
1532 		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
1533 
1534 	if (msg->msg_flags&MSG_CONFIRM)
1535 		goto do_confirm;
1536 back_from_confirm:
1537 
1538 	/* Lockless fast path for the non-corking case */
1539 	if (!corkreq) {
1540 		struct inet_cork_full cork;
1541 		struct sk_buff *skb;
1542 
1543 		skb = ip6_make_skb(sk, getfrag, msg, ulen,
1544 				   sizeof(struct udphdr), &ipc6,
1545 				   &fl6, (struct rt6_info *)dst,
1546 				   msg->msg_flags, &cork);
1547 		err = PTR_ERR(skb);
1548 		if (!IS_ERR_OR_NULL(skb))
1549 			err = udp_v6_send_skb(skb, &fl6, &cork.base);
1550 		goto out;
1551 	}
1552 
1553 	lock_sock(sk);
1554 	if (unlikely(up->pending)) {
1555 		/* The socket is already corked while preparing it. */
1556 		/* ... which is an evident application bug. --ANK */
1557 		release_sock(sk);
1558 
1559 		net_dbg_ratelimited("udp cork app bug 2\n");
1560 		err = -EINVAL;
1561 		goto out;
1562 	}
1563 
1564 	up->pending = AF_INET6;
1565 
1566 do_append_data:
1567 	if (ipc6.dontfrag < 0)
1568 		ipc6.dontfrag = np->dontfrag;
1569 	up->len += ulen;
1570 	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1571 			      &ipc6, &fl6, (struct rt6_info *)dst,
1572 			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1573 	if (err)
1574 		udp_v6_flush_pending_frames(sk);
1575 	else if (!corkreq)
1576 		err = udp_v6_push_pending_frames(sk);
1577 	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1578 		up->pending = 0;
1579 
1580 	if (err > 0)
1581 		err = np->recverr ? net_xmit_errno(err) : 0;
1582 	release_sock(sk);
1583 
1584 out:
1585 	dst_release(dst);
1586 out_no_dst:
1587 	fl6_sock_release(flowlabel);
1588 	txopt_put(opt_to_free);
1589 	if (!err)
1590 		return len;
1591 	/*
1592 	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
1593 	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1594 	 * we don't have a good statistic (IpOutDiscards but it can be too many
1595 	 * things).  We could add another new stat but at least for now that
1596 	 * seems like overkill.
1597 	 */
1598 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1599 		UDP6_INC_STATS(sock_net(sk),
1600 			       UDP_MIB_SNDBUFERRORS, is_udplite);
1601 	}
1602 	return err;
1603 
1604 do_confirm:
1605 	if (msg->msg_flags & MSG_PROBE)
1606 		dst_confirm_neigh(dst, &fl6.daddr);
1607 	if (!(msg->msg_flags&MSG_PROBE) || len)
1608 		goto back_from_confirm;
1609 	err = 0;
1610 	goto out;
1611 }
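/*
 * Editorial aside (illustrative userspace sketch, not part of this file):
 * the corking path above (up->pending == AF_INET6) is entered with
 * UDP_CORK or MSG_MORE; data is accumulated via ip6_append_data() and
 * transmitted by udp_v6_push_pending_frames() once the cork is removed.
 * On a connected socket, with a made-up helper name:
 */
#include <netinet/in.h>
#include <stddef.h>
#include <sys/socket.h>
#include <linux/udp.h>		/* UDP_CORK */

/* Build a single datagram from two writes on a connected IPv6 UDP socket. */
static int udp6_send_corked(int fd, const void *hdr, size_t hlen,
			    const void *payload, size_t plen)
{
	int on = 1, off = 0;

	if (setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on)) < 0)
		return -1;
	if (send(fd, hdr, hlen, 0) < 0 ||
	    send(fd, payload, plen, 0) < 0)
		return -1;
	/* uncorking pushes the pending frames out as one datagram */
	return setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}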
1612 
1613 void udpv6_destroy_sock(struct sock *sk)
1614 {
1615 	struct udp_sock *up = udp_sk(sk);
1616 	lock_sock(sk);
1617 
1618 	/* protects from races with udp_abort() */
1619 	sock_set_flag(sk, SOCK_DEAD);
1620 	udp_v6_flush_pending_frames(sk);
1621 	release_sock(sk);
1622 
1623 	if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1624 		if (up->encap_type) {
1625 			void (*encap_destroy)(struct sock *sk);
1626 			encap_destroy = READ_ONCE(up->encap_destroy);
1627 			if (encap_destroy)
1628 				encap_destroy(sk);
1629 		}
1630 		if (up->encap_enabled) {
1631 			static_branch_dec(&udpv6_encap_needed_key);
1632 			udp_encap_disable();
1633 		}
1634 	}
1635 
1636 	inet6_destroy_sock(sk);
1637 }
1638 
1639 /*
1640  *	Socket option code for UDP
1641  */
1642 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1643 		     unsigned int optlen)
1644 {
1645 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1646 		return udp_lib_setsockopt(sk, level, optname,
1647 					  optval, optlen,
1648 					  udp_v6_push_pending_frames);
1649 	return ipv6_setsockopt(sk, level, optname, optval, optlen);
1650 }
1651 
1652 int udpv6_getsockopt(struct sock *sk, int level, int optname,
1653 		     char __user *optval, int __user *optlen)
1654 {
1655 	if (level == SOL_UDP  ||  level == SOL_UDPLITE)
1656 		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1657 	return ipv6_getsockopt(sk, level, optname, optval, optlen);
1658 }
1659 
1660 /* thinking of making this const? Don't.
1661  * early_demux can change based on sysctl.
1662  */
1663 static struct inet6_protocol udpv6_protocol = {
1664 	.early_demux	=	udp_v6_early_demux,
1665 	.early_demux_handler =  udp_v6_early_demux,
1666 	.handler	=	udpv6_rcv,
1667 	.err_handler	=	udpv6_err,
1668 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1669 };
1670 
1671 /* ------------------------------------------------------------------------ */
1672 #ifdef CONFIG_PROC_FS
1673 int udp6_seq_show(struct seq_file *seq, void *v)
1674 {
1675 	if (v == SEQ_START_TOKEN) {
1676 		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1677 	} else {
1678 		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1679 		struct inet_sock *inet = inet_sk(v);
1680 		__u16 srcp = ntohs(inet->inet_sport);
1681 		__u16 destp = ntohs(inet->inet_dport);
1682 		__ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1683 					  udp_rqueue_get(v), bucket);
1684 	}
1685 	return 0;
1686 }
1687 
1688 const struct seq_operations udp6_seq_ops = {
1689 	.start		= udp_seq_start,
1690 	.next		= udp_seq_next,
1691 	.stop		= udp_seq_stop,
1692 	.show		= udp6_seq_show,
1693 };
1694 EXPORT_SYMBOL(udp6_seq_ops);
1695 
1696 static struct udp_seq_afinfo udp6_seq_afinfo = {
1697 	.family		= AF_INET6,
1698 	.udp_table	= &udp_table,
1699 };
1700 
1701 int __net_init udp6_proc_init(struct net *net)
1702 {
1703 	if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1704 			sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1705 		return -ENOMEM;
1706 	return 0;
1707 }
1708 
1709 void udp6_proc_exit(struct net *net)
1710 {
1711 	remove_proc_entry("udp6", net->proc_net);
1712 }
1713 #endif /* CONFIG_PROC_FS */
1714 
1715 /* ------------------------------------------------------------------------ */
1716 
1717 struct proto udpv6_prot = {
1718 	.name			= "UDPv6",
1719 	.owner			= THIS_MODULE,
1720 	.close			= udp_lib_close,
1721 	.pre_connect		= udpv6_pre_connect,
1722 	.connect		= ip6_datagram_connect,
1723 	.disconnect		= udp_disconnect,
1724 	.ioctl			= udp_ioctl,
1725 	.init			= udp_init_sock,
1726 	.destroy		= udpv6_destroy_sock,
1727 	.setsockopt		= udpv6_setsockopt,
1728 	.getsockopt		= udpv6_getsockopt,
1729 	.sendmsg		= udpv6_sendmsg,
1730 	.recvmsg		= udpv6_recvmsg,
1731 	.release_cb		= ip6_datagram_release_cb,
1732 	.hash			= udp_lib_hash,
1733 	.unhash			= udp_lib_unhash,
1734 	.rehash			= udp_v6_rehash,
1735 	.get_port		= udp_v6_get_port,
1736 	.put_port		= udp_lib_unhash,
1737 #ifdef CONFIG_BPF_SYSCALL
1738 	.psock_update_sk_prot	= udp_bpf_update_proto,
1739 #endif
1740 	.memory_allocated	= &udp_memory_allocated,
1741 	.sysctl_mem		= sysctl_udp_mem,
1742 	.sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1743 	.sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1744 	.obj_size		= sizeof(struct udp6_sock),
1745 	.h.udp_table		= &udp_table,
1746 	.diag_destroy		= udp_abort,
1747 };
1748 
1749 static struct inet_protosw udpv6_protosw = {
1750 	.type =      SOCK_DGRAM,
1751 	.protocol =  IPPROTO_UDP,
1752 	.prot =      &udpv6_prot,
1753 	.ops =       &inet6_dgram_ops,
1754 	.flags =     INET_PROTOSW_PERMANENT,
1755 };
1756 
1757 int __init udpv6_init(void)
1758 {
1759 	int ret;
1760 
1761 	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
1762 	if (ret)
1763 		goto out;
1764 
1765 	ret = inet6_register_protosw(&udpv6_protosw);
1766 	if (ret)
1767 		goto out_udpv6_protocol;
1768 out:
1769 	return ret;
1770 
1771 out_udpv6_protocol:
1772 	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1773 	goto out;
1774 }
1775 
1776 void udpv6_exit(void)
1777 {
1778 	inet6_unregister_protosw(&udpv6_protosw);
1779 	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
1780 }
1781