1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * UDP over IPv6
4 * Linux INET6 implementation
5 *
6 * Authors:
7 * Pedro Roque <roque@di.fc.ul.pt>
8 *
9 * Based on linux/ipv4/udp.c
10 *
11 * Fixes:
12 * Hideaki YOSHIFUJI : sin6_scope_id support
13 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
14 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
15 * a single port at the same time.
16 * Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
17 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
18 */
19
20 #include <linux/bpf-cgroup.h>
21 #include <linux/errno.h>
22 #include <linux/types.h>
23 #include <linux/socket.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/in6.h>
27 #include <linux/netdevice.h>
28 #include <linux/if_arp.h>
29 #include <linux/ipv6.h>
30 #include <linux/icmpv6.h>
31 #include <linux/init.h>
32 #include <linux/module.h>
33 #include <linux/skbuff.h>
34 #include <linux/slab.h>
35 #include <linux/uaccess.h>
36 #include <linux/indirect_call_wrapper.h>
37 #include <trace/events/udp.h>
38
39 #include <net/addrconf.h>
40 #include <net/ndisc.h>
41 #include <net/protocol.h>
42 #include <net/transp_v6.h>
43 #include <net/ip6_route.h>
44 #include <net/raw.h>
45 #include <net/seg6.h>
46 #include <net/tcp_states.h>
47 #include <net/ip6_checksum.h>
48 #include <net/ip6_tunnel.h>
49 #include <net/udp_tunnel.h>
50 #include <net/xfrm.h>
51 #include <net/inet_hashtables.h>
52 #include <net/inet6_hashtables.h>
53 #include <net/busy_poll.h>
54 #include <net/sock_reuseport.h>
55 #include <net/gro.h>
56
57 #include <linux/proc_fs.h>
58 #include <linux/seq_file.h>
59 #include <trace/events/skb.h>
60 #include "udp_impl.h"
61
62 static void udpv6_destruct_sock(struct sock *sk)
63 {
64 udp_destruct_common(sk);
65 inet6_sock_destruct(sk);
66 }
67
68 int udpv6_init_sock(struct sock *sk)
69 {
70 udp_lib_init_sock(sk);
71 sk->sk_destruct = udpv6_destruct_sock;
72 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
73 return 0;
74 }
75
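/* udp6_ehashfn() - 4-tuple (connection) hash for a UDP/IPv6 flow.
 * Seeds the hash secrets once, folds the last 32 bits of the local
 * address with a jhash of the foreign address, and mixes in
 * per-namespace entropy so hash values differ between netns.
 */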
76 INDIRECT_CALLABLE_SCOPE
77 u32 udp6_ehashfn(const struct net *net,
78 const struct in6_addr *laddr,
79 const u16 lport,
80 const struct in6_addr *faddr,
81 const __be16 fport)
82 {
83 u32 lhash, fhash;
84
85 net_get_random_once(&udp6_ehash_secret,
86 sizeof(udp6_ehash_secret));
87 net_get_random_once(&udp_ipv6_hash_secret,
88 sizeof(udp_ipv6_hash_secret));
89
90 lhash = (__force u32)laddr->s6_addr32[3];
91 fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
92
93 return __inet6_ehashfn(lhash, lport, fhash, fport,
94 udp6_ehash_secret + net_hash_mix(net));
95 }
96
97 int udp_v6_get_port(struct sock *sk, unsigned short snum)
98 {
99 unsigned int hash2_nulladdr =
100 ipv6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
101 unsigned int hash2_partial =
102 ipv6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
103
104 /* precompute partial secondary hash */
105 udp_sk(sk)->udp_portaddr_hash = hash2_partial;
106 return udp_lib_get_port(sk, snum, hash2_nulladdr);
107 }
108
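/* Recompute the socket's hashes after its local binding changed: the
 * secondary (local port + address) hash always, plus the 4-tuple hash,
 * using the IPv4 flavour of the latter for v4-mapped addresses.
 */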
109 void udp_v6_rehash(struct sock *sk)
110 {
111 u16 new_hash = ipv6_portaddr_hash(sock_net(sk),
112 &sk->sk_v6_rcv_saddr,
113 inet_sk(sk)->inet_num);
114 u16 new_hash4;
115
116 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
117 new_hash4 = udp_ehashfn(sock_net(sk),
118 sk->sk_rcv_saddr, sk->sk_num,
119 sk->sk_daddr, sk->sk_dport);
120 } else {
121 new_hash4 = udp6_ehashfn(sock_net(sk),
122 &sk->sk_v6_rcv_saddr, sk->sk_num,
123 &sk->sk_v6_daddr, sk->sk_dport);
124 }
125
126 udp_lib_rehash(sk, new_hash, new_hash4);
127 }
128
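/* Score how well @sk matches an incoming packet: -1 means no match,
 * and one point is added for each specific attribute that matches
 * (destination port, destination address, bound device), plus one for
 * CPU locality, so the most specific socket wins.
 */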
129 static int compute_score(struct sock *sk, const struct net *net,
130 const struct in6_addr *saddr, __be16 sport,
131 const struct in6_addr *daddr, unsigned short hnum,
132 int dif, int sdif)
133 {
134 int bound_dev_if, score;
135 struct inet_sock *inet;
136 bool dev_match;
137
138 if (!net_eq(sock_net(sk), net) ||
139 udp_sk(sk)->udp_port_hash != hnum ||
140 sk->sk_family != PF_INET6)
141 return -1;
142
143 if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
144 return -1;
145
146 score = 0;
147 inet = inet_sk(sk);
148
149 if (inet->inet_dport) {
150 if (inet->inet_dport != sport)
151 return -1;
152 score++;
153 }
154
155 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
156 if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
157 return -1;
158 score++;
159 }
160
161 bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
162 dev_match = udp_sk_bound_dev_eq(net, bound_dev_if, dif, sdif);
163 if (!dev_match)
164 return -1;
165 if (bound_dev_if)
166 score++;
167
168 if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
169 score++;
170
171 return score;
172 }
173
174 /**
175 * udp6_lib_lookup1() - Simplified lookup using primary hash (destination port)
176 * @net: Network namespace
177 * @saddr: Source address, network order
178 * @sport: Source port, network order
179 * @daddr: Destination address, network order
180 * @hnum: Destination port, host order
181 * @dif: Destination interface index
182 * @sdif: Destination bridge port index, if relevant
183 * @udptable: Set of UDP hash tables
184 *
185 * Simplified lookup to be used as fallback if no sockets are found due to a
186 * potential race between (receive) address change, and lookup happening before
187 * the rehash operation. This function ignores SO_REUSEPORT groups while scoring
188 * result sockets, because if we have one, we don't need the fallback at all.
189 *
190 * Called under rcu_read_lock().
191 *
192 * Return: socket with highest matching score if any, NULL if none
193 */
194 static struct sock *udp6_lib_lookup1(const struct net *net,
195 const struct in6_addr *saddr, __be16 sport,
196 const struct in6_addr *daddr,
197 unsigned int hnum, int dif, int sdif,
198 const struct udp_table *udptable)
199 {
200 unsigned int slot = udp_hashfn(net, hnum, udptable->mask);
201 struct udp_hslot *hslot = &udptable->hash[slot];
202 struct sock *sk, *result = NULL;
203 int score, badness = 0;
204
205 sk_for_each_rcu(sk, &hslot->head) {
206 score = compute_score(sk, net,
207 saddr, sport, daddr, hnum, dif, sdif);
208 if (score > badness) {
209 result = sk;
210 badness = score;
211 }
212 }
213
214 return result;
215 }
216
217 /* called with rcu_read_lock() */
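/* Walk the secondary (port + address) hash chain: connected sockets
 * win outright, otherwise SO_REUSEPORT selection is attempted, with a
 * fallback to rescoring when the reuseport group contains connections.
 */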
218 static struct sock *udp6_lib_lookup2(const struct net *net,
219 const struct in6_addr *saddr, __be16 sport,
220 const struct in6_addr *daddr, unsigned int hnum,
221 int dif, int sdif, struct udp_hslot *hslot2,
222 struct sk_buff *skb)
223 {
224 struct sock *sk, *result;
225 int score, badness;
226 bool need_rescore;
227
228 result = NULL;
229 badness = -1;
230 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
231 need_rescore = false;
232 rescore:
233 score = compute_score(need_rescore ? result : sk, net, saddr,
234 sport, daddr, hnum, dif, sdif);
235 if (score > badness) {
236 badness = score;
237
238 if (need_rescore)
239 continue;
240
241 if (sk->sk_state == TCP_ESTABLISHED) {
242 result = sk;
243 continue;
244 }
245
246 result = inet6_lookup_reuseport(net, sk, skb, sizeof(struct udphdr),
247 saddr, sport, daddr, hnum, udp6_ehashfn);
248 if (!result) {
249 result = sk;
250 continue;
251 }
252
253 /* Fall back to scoring if group has connections */
254 if (!reuseport_has_conns(sk))
255 return result;
256
257 /* Reuseport logic returned an error, keep original score. */
258 if (IS_ERR(result))
259 continue;
260
261 /* compute_score is too long of a function to be
262 * inlined, and calling it again here yields
263 * measurable overhead for some
264 * workloads. Work around it by jumping
265 * backwards to rescore 'result'.
266 */
267 need_rescore = true;
268 goto rescore;
269 }
270 }
271 return result;
272 }
273
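/* With CONFIG_BASE_SMALL the 4-tuple hash table is not built, so the
 * lookup and hashing helpers below degenerate to no-ops.
 */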
274 #if IS_ENABLED(CONFIG_BASE_SMALL)
275 static struct sock *udp6_lib_lookup4(const struct net *net,
276 const struct in6_addr *saddr, __be16 sport,
277 const struct in6_addr *daddr,
278 unsigned int hnum, int dif, int sdif,
279 struct udp_table *udptable)
280 {
281 return NULL;
282 }
283
284 static void udp6_hash4(struct sock *sk)
285 {
286 }
287 #else /* !CONFIG_BASE_SMALL */
288 static struct sock *udp6_lib_lookup4(const struct net *net,
289 const struct in6_addr *saddr, __be16 sport,
290 const struct in6_addr *daddr,
291 unsigned int hnum, int dif, int sdif,
292 struct udp_table *udptable)
293 {
294 const __portpair ports = INET_COMBINED_PORTS(sport, hnum);
295 const struct hlist_nulls_node *node;
296 struct udp_hslot *hslot4;
297 unsigned int hash4, slot;
298 struct udp_sock *up;
299 struct sock *sk;
300
301 hash4 = udp6_ehashfn(net, daddr, hnum, saddr, sport);
302 slot = hash4 & udptable->mask;
303 hslot4 = &udptable->hash4[slot];
304
305 begin:
306 udp_lrpa_for_each_entry_rcu(up, node, &hslot4->nulls_head) {
307 sk = (struct sock *)up;
308 if (inet6_match(net, sk, saddr, daddr, ports, dif, sdif))
309 return sk;
310 }
311
312 /* if the nulls value we got at the end of this lookup is not the
313 * expected one, we must restart lookup. We probably met an item that
314 * was moved to another chain due to rehash.
315 */
316 if (get_nulls_value(node) != slot)
317 goto begin;
318
319 return NULL;
320 }
321
322 static void udp6_hash4(struct sock *sk)
323 {
324 struct net *net = sock_net(sk);
325 unsigned int hash;
326
327 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)) {
328 udp4_hash4(sk);
329 return;
330 }
331
332 if (sk_unhashed(sk) || ipv6_addr_any(&sk->sk_v6_rcv_saddr))
333 return;
334
335 hash = udp6_ehashfn(net, &sk->sk_v6_rcv_saddr, sk->sk_num,
336 &sk->sk_v6_daddr, sk->sk_dport);
337
338 udp_lib_hash4(sk, hash);
339 }
340 #endif /* CONFIG_BASE_SMALL */
341
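/* Full receive lookup, trying progressively broader matches: exact
 * 4-tuple (hash4), connected/specific sockets via the port+address
 * hash, an optional BPF sk_lookup redirect, wildcard-bound sockets,
 * and finally a port-only scan to cover the rehash race.
 */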
342 /* rcu_read_lock() must be held */
343 struct sock *__udp6_lib_lookup(const struct net *net,
344 const struct in6_addr *saddr, __be16 sport,
345 const struct in6_addr *daddr, __be16 dport,
346 int dif, int sdif, struct udp_table *udptable,
347 struct sk_buff *skb)
348 {
349 unsigned short hnum = ntohs(dport);
350 struct udp_hslot *hslot2;
351 struct sock *result, *sk;
352 unsigned int hash2;
353
354 hash2 = ipv6_portaddr_hash(net, daddr, hnum);
355 hslot2 = udp_hashslot2(udptable, hash2);
356
357 if (udp_has_hash4(hslot2)) {
358 result = udp6_lib_lookup4(net, saddr, sport, daddr, hnum,
359 dif, sdif, udptable);
360 if (result) /* udp6_lib_lookup4 returns sk or NULL */
361 return result;
362 }
363
364 /* Lookup connected or non-wildcard sockets */
365 result = udp6_lib_lookup2(net, saddr, sport,
366 daddr, hnum, dif, sdif,
367 hslot2, skb);
368 if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
369 goto done;
370
371 /* Lookup redirect from BPF */
372 if (static_branch_unlikely(&bpf_sk_lookup_enabled) &&
373 udptable == net->ipv4.udp_table) {
374 sk = inet6_lookup_run_sk_lookup(net, IPPROTO_UDP, skb, sizeof(struct udphdr),
375 saddr, sport, daddr, hnum, dif,
376 udp6_ehashfn);
377 if (sk) {
378 result = sk;
379 goto done;
380 }
381 }
382
383 /* Got non-wildcard socket or error on first lookup */
384 if (result)
385 goto done;
386
387 /* Lookup wildcard sockets */
388 hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
389 hslot2 = udp_hashslot2(udptable, hash2);
390
391 result = udp6_lib_lookup2(net, saddr, sport,
392 &in6addr_any, hnum, dif, sdif,
393 hslot2, skb);
394 if (!IS_ERR_OR_NULL(result))
395 goto done;
396
397 /* Cover address change/lookup/rehash race: see __udp4_lib_lookup() */
398 result = udp6_lib_lookup1(net, saddr, sport, daddr, hnum, dif, sdif,
399 udptable);
400
401 done:
402 if (IS_ERR(result))
403 return NULL;
404 return result;
405 }
406 EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
407
408 static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
409 __be16 sport, __be16 dport,
410 struct udp_table *udptable)
411 {
412 const struct ipv6hdr *iph = ipv6_hdr(skb);
413
414 return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
415 &iph->daddr, dport, inet6_iif(skb),
416 inet6_sdif(skb), udptable, skb);
417 }
418
419 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
420 __be16 sport, __be16 dport)
421 {
422 const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
423 const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
424 struct net *net = dev_net(skb->dev);
425 int iif, sdif;
426
427 inet6_get_iif_sdif(skb, &iif, &sdif);
428
429 return __udp6_lib_lookup(net, &iph->saddr, sport,
430 &iph->daddr, dport, iif,
431 sdif, net->ipv4.udp_table, NULL);
432 }
433
434 /* Must be called under rcu_read_lock().
435 * Does increment socket refcount.
436 */
437 #if IS_ENABLED(CONFIG_NF_TPROXY_IPV6) || IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
438 struct sock *udp6_lib_lookup(const struct net *net, const struct in6_addr *saddr, __be16 sport,
439 const struct in6_addr *daddr, __be16 dport, int dif)
440 {
441 struct sock *sk;
442
443 sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
444 dif, 0, net->ipv4.udp_table, NULL);
445 if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
446 sk = NULL;
447 return sk;
448 }
449 EXPORT_SYMBOL_GPL(udp6_lib_lookup);
450 #endif
451
452 /* do not use the scratch area len for jumbograms: their length exceeds the
453 * scratch area space; note that the IP6CB flags are still in the first
454 * cacheline, so checking for jumbograms is cheap
455 */
456 static int udp6_skb_len(struct sk_buff *skb)
457 {
458 return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
459 }
460
461 /*
462 * This should be easy, if there is something there we
463 * return it, otherwise we block.
464 */
465
466 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
467 int flags, int *addr_len)
468 {
469 struct ipv6_pinfo *np = inet6_sk(sk);
470 struct inet_sock *inet = inet_sk(sk);
471 struct sk_buff *skb;
472 unsigned int ulen, copied;
473 int off, err, peeking = flags & MSG_PEEK;
474 int is_udplite = IS_UDPLITE(sk);
475 struct udp_mib __percpu *mib;
476 bool checksum_valid = false;
477 int is_udp4;
478
479 if (flags & MSG_ERRQUEUE)
480 return ipv6_recv_error(sk, msg, len, addr_len);
481
482 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
483 return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
484
485 try_again:
486 off = sk_peek_offset(sk, flags);
487 skb = __skb_recv_udp(sk, flags, &off, &err);
488 if (!skb)
489 return err;
490
491 ulen = udp6_skb_len(skb);
492 copied = len;
493 if (copied > ulen - off)
494 copied = ulen - off;
495 else if (copied < ulen)
496 msg->msg_flags |= MSG_TRUNC;
497
498 is_udp4 = (skb->protocol == htons(ETH_P_IP));
499 mib = __UDPX_MIB(sk, is_udp4);
500
501 /*
502 * If checksum is needed at all, try to do it while copying the
503 * data. If the data is truncated, or if we only want a partial
504 * coverage checksum (UDP-Lite), do it before the copy.
505 */
506
507 if (copied < ulen || peeking ||
508 (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
509 checksum_valid = udp_skb_csum_unnecessary(skb) ||
510 !__udp_lib_checksum_complete(skb);
511 if (!checksum_valid)
512 goto csum_copy_err;
513 }
514
515 if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
516 if (udp_skb_is_linear(skb))
517 err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
518 else
519 err = skb_copy_datagram_msg(skb, off, msg, copied);
520 } else {
521 err = skb_copy_and_csum_datagram_msg(skb, off, msg);
522 if (err == -EINVAL)
523 goto csum_copy_err;
524 }
525 if (unlikely(err)) {
526 if (!peeking) {
527 atomic_inc(&sk->sk_drops);
528 SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
529 }
530 kfree_skb(skb);
531 return err;
532 }
533 if (!peeking)
534 SNMP_INC_STATS(mib, UDP_MIB_INDATAGRAMS);
535
536 sock_recv_cmsgs(msg, sk, skb);
537
538 /* Copy the address. */
539 if (msg->msg_name) {
540 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
541 sin6->sin6_family = AF_INET6;
542 sin6->sin6_port = udp_hdr(skb)->source;
543 sin6->sin6_flowinfo = 0;
544
545 if (is_udp4) {
546 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
547 &sin6->sin6_addr);
548 sin6->sin6_scope_id = 0;
549 } else {
550 sin6->sin6_addr = ipv6_hdr(skb)->saddr;
551 sin6->sin6_scope_id =
552 ipv6_iface_scope_id(&sin6->sin6_addr,
553 inet6_iif(skb));
554 }
555 *addr_len = sizeof(*sin6);
556
557 BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk,
558 (struct sockaddr *)sin6,
559 addr_len);
560 }
561
562 if (udp_test_bit(GRO_ENABLED, sk))
563 udp_cmsg_recv(msg, sk, skb);
564
565 if (np->rxopt.all)
566 ip6_datagram_recv_common_ctl(sk, msg, skb);
567
568 if (is_udp4) {
569 if (inet_cmsg_flags(inet))
570 ip_cmsg_recv_offset(msg, sk, skb,
571 sizeof(struct udphdr), off);
572 } else {
573 if (np->rxopt.all)
574 ip6_datagram_recv_specific_ctl(sk, msg, skb);
575 }
576
577 err = copied;
578 if (flags & MSG_TRUNC)
579 err = ulen;
580
581 skb_consume_udp(sk, skb, peeking ? -err : err);
582 return err;
583
584 csum_copy_err:
585 if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
586 udp_skb_destructor)) {
587 SNMP_INC_STATS(mib, UDP_MIB_CSUMERRORS);
588 SNMP_INC_STATS(mib, UDP_MIB_INERRORS);
589 }
590 kfree_skb_reason(skb, SKB_DROP_REASON_UDP_CSUM);
591
592 /* starting over for a new packet, but check if we need to yield */
593 cond_resched();
594 msg->msg_flags &= ~MSG_TRUNC;
595 goto try_again;
596 }
597
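/* Static key keeping the encapsulation hooks out of the receive fast
 * path until the first encap socket is set up.
 */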
598 DECLARE_STATIC_KEY_FALSE(udpv6_encap_needed_key);
599 void udpv6_encap_enable(void)
600 {
601 static_branch_inc(&udpv6_encap_needed_key);
602 }
603 EXPORT_SYMBOL(udpv6_encap_enable);
604
605 /* Handler for tunnels with arbitrary destination ports: no socket lookup, go
606 * through error handlers in encapsulations looking for a match.
607 */
608 static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb,
609 struct inet6_skb_parm *opt,
610 u8 type, u8 code, int offset, __be32 info)
611 {
612 int i;
613
614 for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) {
615 int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
616 u8 type, u8 code, int offset, __be32 info);
617 const struct ip6_tnl_encap_ops *encap;
618
619 encap = rcu_dereference(ip6tun_encaps[i]);
620 if (!encap)
621 continue;
622 handler = encap->err_handler;
623 if (handler && !handler(skb, opt, type, code, offset, info))
624 return 0;
625 }
626
627 return -ENOENT;
628 }
629
630 /* Try to match ICMP errors to UDP tunnels by looking up a socket without
631 * reversing source and destination port: this will match tunnels that force the
632 * same destination port on both endpoints (e.g. VXLAN, GENEVE). Note that
633 * lwtunnels might actually break this assumption by being configured with
634 * different destination ports on endpoints, in this case we won't be able to
635 * trace ICMP messages back to them.
636 *
637 * If this doesn't match any socket, probe tunnels with arbitrary destination
638 * ports (e.g. FoU, GUE): there, the receiving socket is useless, as the port
639 * we've sent packets to won't necessarily match the local destination port.
640 *
641 * Then ask the tunnel implementation to match the error against a valid
642 * association.
643 *
644 * Return an error if we can't find a match, the socket if we need further
645 * processing, zero otherwise.
646 */
647 static struct sock *__udp6_lib_err_encap(struct net *net,
648 const struct ipv6hdr *hdr, int offset,
649 struct udphdr *uh,
650 struct udp_table *udptable,
651 struct sock *sk,
652 struct sk_buff *skb,
653 struct inet6_skb_parm *opt,
654 u8 type, u8 code, __be32 info)
655 {
656 int (*lookup)(struct sock *sk, struct sk_buff *skb);
657 int network_offset, transport_offset;
658 struct udp_sock *up;
659
660 network_offset = skb_network_offset(skb);
661 transport_offset = skb_transport_offset(skb);
662
663 /* Network header needs to point to the outer IPv6 header inside ICMP */
664 skb_reset_network_header(skb);
665
666 /* Transport header needs to point to the UDP header */
667 skb_set_transport_header(skb, offset);
668
669 if (sk) {
670 up = udp_sk(sk);
671
672 lookup = READ_ONCE(up->encap_err_lookup);
673 if (lookup && lookup(sk, skb))
674 sk = NULL;
675
676 goto out;
677 }
678
679 sk = __udp6_lib_lookup(net, &hdr->daddr, uh->source,
680 &hdr->saddr, uh->dest,
681 inet6_iif(skb), 0, udptable, skb);
682 if (sk) {
683 up = udp_sk(sk);
684
685 lookup = READ_ONCE(up->encap_err_lookup);
686 if (!lookup || lookup(sk, skb))
687 sk = NULL;
688 }
689
690 out:
691 if (!sk) {
692 sk = ERR_PTR(__udp6_lib_err_encap_no_sk(skb, opt, type, code,
693 offset, info));
694 }
695
696 skb_set_transport_header(skb, transport_offset);
697 skb_set_network_header(skb, network_offset);
698
699 return sk;
700 }
701
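/* Handle an ICMPv6 error for UDP/UDP-Lite: find the originating socket
 * (or a matching tunnel), apply PMTU and redirect processing, and
 * report the error to sockets that enabled IPV6_RECVERR.
 */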
702 int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
703 u8 type, u8 code, int offset, __be32 info,
704 struct udp_table *udptable)
705 {
706 struct ipv6_pinfo *np;
707 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
708 const struct in6_addr *saddr = &hdr->saddr;
709 const struct in6_addr *daddr = seg6_get_daddr(skb, opt) ? : &hdr->daddr;
710 struct udphdr *uh = (struct udphdr *)(skb->data+offset);
711 bool tunnel = false;
712 struct sock *sk;
713 int harderr;
714 int err;
715 struct net *net = dev_net(skb->dev);
716
717 sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
718 inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
719
720 if (!sk || READ_ONCE(udp_sk(sk)->encap_type)) {
721 /* No socket for error: try tunnels before discarding */
722 if (static_branch_unlikely(&udpv6_encap_needed_key)) {
723 sk = __udp6_lib_err_encap(net, hdr, offset, uh,
724 udptable, sk, skb,
725 opt, type, code, info);
726 if (!sk)
727 return 0;
728 } else
729 sk = ERR_PTR(-ENOENT);
730
731 if (IS_ERR(sk)) {
732 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
733 ICMP6_MIB_INERRORS);
734 return PTR_ERR(sk);
735 }
736
737 tunnel = true;
738 }
739
740 harderr = icmpv6_err_convert(type, code, &err);
741 np = inet6_sk(sk);
742
743 if (type == ICMPV6_PKT_TOOBIG) {
744 if (!ip6_sk_accept_pmtu(sk))
745 goto out;
746 ip6_sk_update_pmtu(skb, sk, info);
747 if (READ_ONCE(np->pmtudisc) != IPV6_PMTUDISC_DONT)
748 harderr = 1;
749 }
750 if (type == NDISC_REDIRECT) {
751 if (tunnel) {
752 ip6_redirect(skb, sock_net(sk), inet6_iif(skb),
753 READ_ONCE(sk->sk_mark),
754 sk_uid(sk));
755 } else {
756 ip6_sk_redirect(skb, sk);
757 }
758 goto out;
759 }
760
761 /* Tunnels don't have an application socket: don't pass errors back */
762 if (tunnel) {
763 if (udp_sk(sk)->encap_err_rcv)
764 udp_sk(sk)->encap_err_rcv(sk, skb, err, uh->dest,
765 ntohl(info), (u8 *)(uh+1));
766 goto out;
767 }
768
769 if (!inet6_test_bit(RECVERR6, sk)) {
770 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
771 goto out;
772 } else {
773 ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
774 }
775
776 sk->sk_err = err;
777 sk_error_report(sk);
778 out:
779 return 0;
780 }
781
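/* Charge the skb to the socket receive queue, accounting failures as
 * receive-buffer or memory errors.
 */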
782 static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
783 {
784 int rc;
785
786 if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
787 sock_rps_save_rxhash(sk, skb);
788 sk_mark_napi_id(sk, skb);
789 sk_incoming_cpu_update(sk);
790 } else {
791 sk_mark_napi_id_once(sk, skb);
792 }
793
794 rc = __udp_enqueue_schedule_skb(sk, skb);
795 if (rc < 0) {
796 int is_udplite = IS_UDPLITE(sk);
797 enum skb_drop_reason drop_reason;
798
799 /* Note that an ENOMEM error is charged twice */
800 if (rc == -ENOMEM) {
801 UDP6_INC_STATS(sock_net(sk),
802 UDP_MIB_RCVBUFERRORS, is_udplite);
803 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
804 } else {
805 UDP6_INC_STATS(sock_net(sk),
806 UDP_MIB_MEMERRORS, is_udplite);
807 drop_reason = SKB_DROP_REASON_PROTO_MEM;
808 }
809 UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
810 trace_udp_fail_queue_rcv_skb(rc, sk, skb);
811 sk_skb_reason_drop(sk, skb, drop_reason);
812 return -1;
813 }
814
815 return 0;
816 }
817
818 static __inline__ int udpv6_err(struct sk_buff *skb,
819 struct inet6_skb_parm *opt, u8 type,
820 u8 code, int offset, __be32 info)
821 {
822 return __udp6_lib_err(skb, opt, type, code, offset, info,
823 dev_net(skb->dev)->ipv4.udp_table);
824 }
825
826 static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
827 {
828 enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
829 struct udp_sock *up = udp_sk(sk);
830 int is_udplite = IS_UDPLITE(sk);
831
832 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
833 drop_reason = SKB_DROP_REASON_XFRM_POLICY;
834 goto drop;
835 }
836 nf_reset_ct(skb);
837
838 if (static_branch_unlikely(&udpv6_encap_needed_key) &&
839 READ_ONCE(up->encap_type)) {
840 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
841
842 /*
843 * This is an encapsulation socket so pass the skb to
844 * the socket's udp_encap_rcv() hook. Otherwise, just
845 * fall through and pass this up the UDP socket.
846 * up->encap_rcv() returns the following value:
847 * =0 if skb was successfully passed to the encap
848 * handler or was discarded by it.
849 * >0 if skb should be passed on to UDP.
850 * <0 if skb should be resubmitted as proto -N
851 */
852
853 /* if we're overly short, let UDP handle it */
854 encap_rcv = READ_ONCE(up->encap_rcv);
855 if (encap_rcv) {
856 int ret;
857
858 /* Verify checksum before giving to encap */
859 if (udp_lib_checksum_complete(skb))
860 goto csum_error;
861
862 ret = encap_rcv(sk, skb);
863 if (ret <= 0) {
864 __UDP6_INC_STATS(sock_net(sk),
865 UDP_MIB_INDATAGRAMS,
866 is_udplite);
867 return -ret;
868 }
869 }
870
871 /* FALLTHROUGH -- it's a UDP Packet */
872 }
873
874 /*
875 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
876 */
877 if (udp_test_bit(UDPLITE_RECV_CC, sk) && UDP_SKB_CB(skb)->partial_cov) {
878 u16 pcrlen = READ_ONCE(up->pcrlen);
879
880 if (pcrlen == 0) { /* full coverage was set */
881 net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
882 UDP_SKB_CB(skb)->cscov, skb->len);
883 goto drop;
884 }
885 if (UDP_SKB_CB(skb)->cscov < pcrlen) {
886 net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
887 UDP_SKB_CB(skb)->cscov, pcrlen);
888 goto drop;
889 }
890 }
891
892 prefetch(&sk->sk_rmem_alloc);
893 if (rcu_access_pointer(sk->sk_filter) &&
894 udp_lib_checksum_complete(skb))
895 goto csum_error;
896
897 if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr), &drop_reason))
898 goto drop;
899
900 udp_csum_pull_header(skb);
901
902 skb_dst_drop(skb);
903
904 return __udpv6_queue_rcv_skb(sk, skb);
905
906 csum_error:
907 drop_reason = SKB_DROP_REASON_UDP_CSUM;
908 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
909 drop:
910 __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
911 atomic_inc(&sk->sk_drops);
912 sk_skb_reason_drop(sk, skb, drop_reason);
913 return -1;
914 }
915
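/* GRO packets the socket cannot accept as-is (udp_unexpected_gso) are
 * software-segmented and every resulting segment is queued individually.
 */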
916 static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
917 {
918 struct sk_buff *next, *segs;
919 int ret;
920
921 if (likely(!udp_unexpected_gso(sk, skb)))
922 return udpv6_queue_rcv_one_skb(sk, skb);
923
924 __skb_push(skb, -skb_mac_offset(skb));
925 segs = udp_rcv_segment(sk, skb, false);
926 skb_list_walk_safe(segs, skb, next) {
927 __skb_pull(skb, skb_transport_offset(skb));
928
929 udp_post_segment_fix_csum(skb);
930 ret = udpv6_queue_rcv_one_skb(sk, skb);
931 if (ret > 0)
932 ip6_protocol_deliver_rcu(dev_net(skb->dev), skb, ret,
933 true);
934 }
935 return 0;
936 }
937
938 static bool __udp_v6_is_mcast_sock(struct net *net, const struct sock *sk,
939 __be16 loc_port, const struct in6_addr *loc_addr,
940 __be16 rmt_port, const struct in6_addr *rmt_addr,
941 int dif, int sdif, unsigned short hnum)
942 {
943 const struct inet_sock *inet = inet_sk(sk);
944
945 if (!net_eq(sock_net(sk), net))
946 return false;
947
948 if (udp_sk(sk)->udp_port_hash != hnum ||
949 sk->sk_family != PF_INET6 ||
950 (inet->inet_dport && inet->inet_dport != rmt_port) ||
951 (!ipv6_addr_any(&sk->sk_v6_daddr) &&
952 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
953 !udp_sk_bound_dev_eq(net, READ_ONCE(sk->sk_bound_dev_if), dif, sdif) ||
954 (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
955 !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
956 return false;
957 if (!inet6_mc_check(sk, loc_addr, rmt_addr))
958 return false;
959 return true;
960 }
961
962 static void udp6_csum_zero_error(struct sk_buff *skb)
963 {
964 /* RFC 2460 section 8.1 says that we SHOULD log
965 * this error. Well, it is reasonable.
966 */
967 net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
968 &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
969 &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
970 }
971
972 /*
973 * Note: called only from the BH handler context,
974 * so we don't need to lock the hashes.
975 */
976 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
977 const struct in6_addr *saddr, const struct in6_addr *daddr,
978 struct udp_table *udptable, int proto)
979 {
980 struct sock *sk, *first = NULL;
981 const struct udphdr *uh = udp_hdr(skb);
982 unsigned short hnum = ntohs(uh->dest);
983 struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
984 unsigned int offset = offsetof(typeof(*sk), sk_node);
985 unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
986 int dif = inet6_iif(skb);
987 int sdif = inet6_sdif(skb);
988 struct hlist_node *node;
989 struct sk_buff *nskb;
990
991 if (use_hash2) {
992 hash2_any = ipv6_portaddr_hash(net, &in6addr_any, hnum) &
993 udptable->mask;
994 hash2 = ipv6_portaddr_hash(net, daddr, hnum) & udptable->mask;
995 start_lookup:
996 hslot = &udptable->hash2[hash2].hslot;
997 offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
998 }
999
1000 sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
1001 if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
1002 uh->source, saddr, dif, sdif,
1003 hnum))
1004 continue;
1005 /* If the checksum is zero and zero-checksum reception
1006 * (no_check) is not enabled on the socket, skip it.
1007 */
1008 if (!uh->check && !udp_get_no_check6_rx(sk))
1009 continue;
1010 if (!first) {
1011 first = sk;
1012 continue;
1013 }
1014 nskb = skb_clone(skb, GFP_ATOMIC);
1015 if (unlikely(!nskb)) {
1016 atomic_inc(&sk->sk_drops);
1017 __UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
1018 IS_UDPLITE(sk));
1019 __UDP6_INC_STATS(net, UDP_MIB_INERRORS,
1020 IS_UDPLITE(sk));
1021 continue;
1022 }
1023
1024 if (udpv6_queue_rcv_skb(sk, nskb) > 0)
1025 consume_skb(nskb);
1026 }
1027
1028 /* Also lookup *:port if we are using hash2 and haven't done so yet. */
1029 if (use_hash2 && hash2 != hash2_any) {
1030 hash2 = hash2_any;
1031 goto start_lookup;
1032 }
1033
1034 if (first) {
1035 if (udpv6_queue_rcv_skb(first, skb) > 0)
1036 consume_skb(skb);
1037 } else {
1038 kfree_skb(skb);
1039 __UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
1040 proto == IPPROTO_UDPLITE);
1041 }
1042 return 0;
1043 }
1044
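/* Cache the receive dst together with a routing cookie so later
 * packets can validate it cheaply.
 */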
1045 static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
1046 {
1047 if (udp_sk_rx_dst_set(sk, dst))
1048 sk->sk_rx_dst_cookie = rt6_get_cookie(dst_rt6_info(dst));
1049 }
1050
1051 /* wrapper for udp_queue_rcv_skb taking care of csum conversion and
1052 * return code conversion for ip layer consumption
1053 */
1054 static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
1055 struct udphdr *uh)
1056 {
1057 int ret;
1058
1059 if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
1060 skb_checksum_try_convert(skb, IPPROTO_UDP, ip6_compute_pseudo);
1061
1062 ret = udpv6_queue_rcv_skb(sk, skb);
1063
1064 /* a return value > 0 means to resubmit the input */
1065 if (ret > 0)
1066 return ret;
1067 return 0;
1068 }
1069
1070 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
1071 int proto)
1072 {
1073 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
1074 const struct in6_addr *saddr, *daddr;
1075 struct net *net = dev_net(skb->dev);
1076 struct sock *sk = NULL;
1077 struct udphdr *uh;
1078 bool refcounted;
1079 u32 ulen = 0;
1080
1081 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
1082 goto discard;
1083
1084 saddr = &ipv6_hdr(skb)->saddr;
1085 daddr = &ipv6_hdr(skb)->daddr;
1086 uh = udp_hdr(skb);
1087
1088 ulen = ntohs(uh->len);
1089 if (ulen > skb->len)
1090 goto short_packet;
1091
1092 if (proto == IPPROTO_UDP) {
1093 /* UDP validates ulen. */
1094
1095 /* Check for jumbo payload */
1096 if (ulen == 0)
1097 ulen = skb->len;
1098
1099 if (ulen < sizeof(*uh))
1100 goto short_packet;
1101
1102 if (ulen < skb->len) {
1103 if (pskb_trim_rcsum(skb, ulen))
1104 goto short_packet;
1105 saddr = &ipv6_hdr(skb)->saddr;
1106 daddr = &ipv6_hdr(skb)->daddr;
1107 uh = udp_hdr(skb);
1108 }
1109 }
1110
1111 if (udp6_csum_init(skb, uh, proto))
1112 goto csum_error;
1113
1114 /* Check if the socket is already available, e.g. due to early demux */
1115 sk = inet6_steal_sock(net, skb, sizeof(struct udphdr), saddr, uh->source, daddr, uh->dest,
1116 &refcounted, udp6_ehashfn);
1117 if (IS_ERR(sk))
1118 goto no_sk;
1119
1120 if (sk) {
1121 struct dst_entry *dst = skb_dst(skb);
1122 int ret;
1123
1124 if (unlikely(rcu_dereference(sk->sk_rx_dst) != dst))
1125 udp6_sk_rx_dst_set(sk, dst);
1126
1127 if (!uh->check && !udp_get_no_check6_rx(sk)) {
1128 if (refcounted)
1129 sock_put(sk);
1130 goto report_csum_error;
1131 }
1132
1133 ret = udp6_unicast_rcv_skb(sk, skb, uh);
1134 if (refcounted)
1135 sock_put(sk);
1136 return ret;
1137 }
1138
1139 /*
1140 * Multicast receive code
1141 */
1142 if (ipv6_addr_is_multicast(daddr))
1143 return __udp6_lib_mcast_deliver(net, skb,
1144 saddr, daddr, udptable, proto);
1145
1146 /* Unicast */
1147 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
1148 if (sk) {
1149 if (!uh->check && !udp_get_no_check6_rx(sk))
1150 goto report_csum_error;
1151 return udp6_unicast_rcv_skb(sk, skb, uh);
1152 }
1153 no_sk:
1154 reason = SKB_DROP_REASON_NO_SOCKET;
1155
1156 if (!uh->check)
1157 goto report_csum_error;
1158
1159 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1160 goto discard;
1161 nf_reset_ct(skb);
1162
1163 if (udp_lib_checksum_complete(skb))
1164 goto csum_error;
1165
1166 __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
1167 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
1168
1169 sk_skb_reason_drop(sk, skb, reason);
1170 return 0;
1171
1172 short_packet:
1173 if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1174 reason = SKB_DROP_REASON_PKT_TOO_SMALL;
1175 net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
1176 proto == IPPROTO_UDPLITE ? "-Lite" : "",
1177 saddr, ntohs(uh->source),
1178 ulen, skb->len,
1179 daddr, ntohs(uh->dest));
1180 goto discard;
1181
1182 report_csum_error:
1183 udp6_csum_zero_error(skb);
1184 csum_error:
1185 if (reason == SKB_DROP_REASON_NOT_SPECIFIED)
1186 reason = SKB_DROP_REASON_UDP_CSUM;
1187 __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
1188 discard:
1189 __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
1190 sk_skb_reason_drop(sk, skb, reason);
1191 return 0;
1192 }
1193
1194
1195 static struct sock *__udp6_lib_demux_lookup(struct net *net,
1196 __be16 loc_port, const struct in6_addr *loc_addr,
1197 __be16 rmt_port, const struct in6_addr *rmt_addr,
1198 int dif, int sdif)
1199 {
1200 struct udp_table *udptable = net->ipv4.udp_table;
1201 unsigned short hnum = ntohs(loc_port);
1202 struct udp_hslot *hslot2;
1203 unsigned int hash2;
1204 __portpair ports;
1205 struct sock *sk;
1206
1207 hash2 = ipv6_portaddr_hash(net, loc_addr, hnum);
1208 hslot2 = udp_hashslot2(udptable, hash2);
1209 ports = INET_COMBINED_PORTS(rmt_port, hnum);
1210
1211 udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
1212 if (sk->sk_state == TCP_ESTABLISHED &&
1213 inet6_match(net, sk, rmt_addr, loc_addr, ports, dif, sdif))
1214 return sk;
1215 /* Only check first socket in chain */
1216 break;
1217 }
1218 return NULL;
1219 }
1220
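/* Early demux: match an established UDP socket before routing so its
 * cached dst can be reused; only PACKET_HOST unicast is considered.
 */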
1221 void udp_v6_early_demux(struct sk_buff *skb)
1222 {
1223 struct net *net = dev_net(skb->dev);
1224 const struct udphdr *uh;
1225 struct sock *sk;
1226 struct dst_entry *dst;
1227 int dif = skb->dev->ifindex;
1228 int sdif = inet6_sdif(skb);
1229
1230 if (!pskb_may_pull(skb, skb_transport_offset(skb) +
1231 sizeof(struct udphdr)))
1232 return;
1233
1234 uh = udp_hdr(skb);
1235
1236 if (skb->pkt_type == PACKET_HOST)
1237 sk = __udp6_lib_demux_lookup(net, uh->dest,
1238 &ipv6_hdr(skb)->daddr,
1239 uh->source, &ipv6_hdr(skb)->saddr,
1240 dif, sdif);
1241 else
1242 return;
1243
1244 if (!sk)
1245 return;
1246
1247 skb->sk = sk;
1248 DEBUG_NET_WARN_ON_ONCE(sk_is_refcounted(sk));
1249 skb->destructor = sock_pfree;
1250 dst = rcu_dereference(sk->sk_rx_dst);
1251
1252 if (dst)
1253 dst = dst_check(dst, sk->sk_rx_dst_cookie);
1254 if (dst) {
1255 /* set noref for now.
1256 * any place which wants to hold dst has to call
1257 * dst_hold_safe()
1258 */
1259 skb_dst_set_noref(skb, dst);
1260 }
1261 }
1262
1263 INDIRECT_CALLABLE_SCOPE int udpv6_rcv(struct sk_buff *skb)
1264 {
1265 return __udp6_lib_rcv(skb, dev_net(skb->dev)->ipv4.udp_table, IPPROTO_UDP);
1266 }
1267
1268 /*
1269 * Throw away all pending data and cancel the corking. Socket is locked.
1270 */
1271 static void udp_v6_flush_pending_frames(struct sock *sk)
1272 {
1273 struct udp_sock *up = udp_sk(sk);
1274
1275 if (up->pending == AF_INET)
1276 udp_flush_pending_frames(sk);
1277 else if (up->pending) {
1278 up->len = 0;
1279 WRITE_ONCE(up->pending, 0);
1280 ip6_flush_pending_frames(sk);
1281 }
1282 }
1283
1284 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
1285 int addr_len)
1286 {
1287 if (addr_len < offsetofend(struct sockaddr, sa_family))
1288 return -EINVAL;
1289 /* The following checks are replicated from __ip6_datagram_connect()
1290 * and intended to prevent BPF program called below from accessing
1291 * bytes that are out of the bound specified by user in addr_len.
1292 */
1293 if (uaddr->sa_family == AF_INET) {
1294 if (ipv6_only_sock(sk))
1295 return -EAFNOSUPPORT;
1296 return udp_pre_connect(sk, uaddr, addr_len);
1297 }
1298
1299 if (addr_len < SIN6_LEN_RFC2133)
1300 return -EINVAL;
1301
1302 return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len);
1303 }
1304
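/* connect() for UDP/IPv6: once the datagram connect succeeds, hash the
 * now fully specified 4-tuple for fast receive lookups.
 */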
1305 static int udpv6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
1306 {
1307 int res;
1308
1309 lock_sock(sk);
1310 res = __ip6_datagram_connect(sk, uaddr, addr_len);
1311 if (!res)
1312 udp6_hash4(sk);
1313 release_sock(sk);
1314 return res;
1315 }
1316
1317 /**
1318 * udp6_hwcsum_outgoing - handle outgoing HW checksumming
1319 * @sk: socket we are sending on
1320 * @skb: sk_buff containing the filled-in UDP header
1321 * (checksum field must be zeroed out)
1322 * @saddr: source address
1323 * @daddr: destination address
1324 * @len: length of packet
1325 */
1326 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
1327 const struct in6_addr *saddr,
1328 const struct in6_addr *daddr, int len)
1329 {
1330 unsigned int offset;
1331 struct udphdr *uh = udp_hdr(skb);
1332 struct sk_buff *frags = skb_shinfo(skb)->frag_list;
1333 __wsum csum = 0;
1334
1335 if (!frags) {
1336 /* Only one fragment on the socket. */
1337 skb->csum_start = skb_transport_header(skb) - skb->head;
1338 skb->csum_offset = offsetof(struct udphdr, check);
1339 uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
1340 } else {
1341 /*
1342 * HW checksum won't work as there are two or more
1343 * fragments on the socket, so the csums of all sk_buffs
1344 * must be combined
1345 */
1346 offset = skb_transport_offset(skb);
1347 skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
1348 csum = skb->csum;
1349
1350 skb->ip_summed = CHECKSUM_NONE;
1351
1352 do {
1353 csum = csum_add(csum, frags->csum);
1354 } while ((frags = frags->next));
1355
1356 uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
1357 csum);
1358 if (uh->check == 0)
1359 uh->check = CSUM_MANGLED_0;
1360 }
1361 }
1362
1363 /*
1364 * Sending
1365 */
1366
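/* Build the UDP header on a prepared skb and pick a checksum strategy:
 * UDP-Lite coverage, checksum disabled, hardware offload (also used
 * for GSO), or a software checksum over the pseudo-header, then hand
 * the result to ip6_send_skb().
 */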
1367 static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
1368 struct inet_cork *cork)
1369 {
1370 struct sock *sk = skb->sk;
1371 struct udphdr *uh;
1372 int err = 0;
1373 int is_udplite = IS_UDPLITE(sk);
1374 __wsum csum = 0;
1375 int offset = skb_transport_offset(skb);
1376 int len = skb->len - offset;
1377 int datalen = len - sizeof(*uh);
1378
1379 /*
1380 * Create a UDP header
1381 */
1382 uh = udp_hdr(skb);
1383 uh->source = fl6->fl6_sport;
1384 uh->dest = fl6->fl6_dport;
1385 uh->len = htons(len);
1386 uh->check = 0;
1387
1388 if (cork->gso_size) {
1389 const int hlen = skb_network_header_len(skb) +
1390 sizeof(struct udphdr);
1391
1392 if (hlen + min(datalen, cork->gso_size) > cork->fragsize) {
1393 kfree_skb(skb);
1394 return -EMSGSIZE;
1395 }
1396 if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
1397 kfree_skb(skb);
1398 return -EINVAL;
1399 }
1400 if (udp_get_no_check6_tx(sk)) {
1401 kfree_skb(skb);
1402 return -EINVAL;
1403 }
1404 if (is_udplite || dst_xfrm(skb_dst(skb))) {
1405 kfree_skb(skb);
1406 return -EIO;
1407 }
1408
1409 if (datalen > cork->gso_size) {
1410 skb_shinfo(skb)->gso_size = cork->gso_size;
1411 skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
1412 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
1413 cork->gso_size);
1414
1415 /* Don't checksum the payload, skb will get segmented */
1416 goto csum_partial;
1417 }
1418 }
1419
1420 if (is_udplite)
1421 csum = udplite_csum(skb);
1422 else if (udp_get_no_check6_tx(sk)) { /* UDP csum disabled */
1423 skb->ip_summed = CHECKSUM_NONE;
1424 goto send;
1425 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
1426 csum_partial:
1427 udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
1428 goto send;
1429 } else
1430 csum = udp_csum(skb);
1431
1432 /* add protocol-dependent pseudo-header */
1433 uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
1434 len, fl6->flowi6_proto, csum);
1435 if (uh->check == 0)
1436 uh->check = CSUM_MANGLED_0;
1437
1438 send:
1439 err = ip6_send_skb(skb);
1440 if (err) {
1441 if (err == -ENOBUFS && !inet6_test_bit(RECVERR6, sk)) {
1442 UDP6_INC_STATS(sock_net(sk),
1443 UDP_MIB_SNDBUFERRORS, is_udplite);
1444 err = 0;
1445 }
1446 } else {
1447 UDP6_INC_STATS(sock_net(sk),
1448 UDP_MIB_OUTDATAGRAMS, is_udplite);
1449 }
1450 return err;
1451 }
1452
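/* Flush corked data: IPv4-pending sockets go through the v4 path,
 * otherwise finish the pending skb and transmit it.
 */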
1453 static int udp_v6_push_pending_frames(struct sock *sk)
1454 {
1455 struct sk_buff *skb;
1456 struct udp_sock *up = udp_sk(sk);
1457 int err = 0;
1458
1459 if (up->pending == AF_INET)
1460 return udp_push_pending_frames(sk);
1461
1462 skb = ip6_finish_skb(sk);
1463 if (!skb)
1464 goto out;
1465
1466 err = udp_v6_send_skb(skb, &inet_sk(sk)->cork.fl.u.ip6,
1467 &inet_sk(sk)->cork.base);
1468 out:
1469 up->len = 0;
1470 WRITE_ONCE(up->pending, 0);
1471 return err;
1472 }
1473
1474 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1475 {
1476 struct ipv6_txoptions opt_space;
1477 struct udp_sock *up = udp_sk(sk);
1478 struct inet_sock *inet = inet_sk(sk);
1479 struct ipv6_pinfo *np = inet6_sk(sk);
1480 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
1481 struct in6_addr *daddr, *final_p, final;
1482 struct ipv6_txoptions *opt = NULL;
1483 struct ipv6_txoptions *opt_to_free = NULL;
1484 struct ip6_flowlabel *flowlabel = NULL;
1485 struct inet_cork_full cork;
1486 struct flowi6 *fl6 = &cork.fl.u.ip6;
1487 struct dst_entry *dst;
1488 struct ipcm6_cookie ipc6;
1489 int addr_len = msg->msg_namelen;
1490 bool connected = false;
1491 int ulen = len;
1492 int corkreq = udp_test_bit(CORK, sk) || msg->msg_flags & MSG_MORE;
1493 int err;
1494 int is_udplite = IS_UDPLITE(sk);
1495 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
1496
1497 ipcm6_init_sk(&ipc6, sk);
1498 ipc6.gso_size = READ_ONCE(up->gso_size);
1499
1500 /* destination address check */
1501 if (sin6) {
1502 if (addr_len < offsetof(struct sockaddr, sa_data))
1503 return -EINVAL;
1504
1505 switch (sin6->sin6_family) {
1506 case AF_INET6:
1507 if (addr_len < SIN6_LEN_RFC2133)
1508 return -EINVAL;
1509 daddr = &sin6->sin6_addr;
1510 if (ipv6_addr_any(daddr) &&
1511 ipv6_addr_v4mapped(&np->saddr))
1512 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
1513 daddr);
1514 break;
1515 case AF_INET:
1516 goto do_udp_sendmsg;
1517 case AF_UNSPEC:
1518 msg->msg_name = sin6 = NULL;
1519 msg->msg_namelen = addr_len = 0;
1520 daddr = NULL;
1521 break;
1522 default:
1523 return -EINVAL;
1524 }
1525 } else if (!READ_ONCE(up->pending)) {
1526 if (sk->sk_state != TCP_ESTABLISHED)
1527 return -EDESTADDRREQ;
1528 daddr = &sk->sk_v6_daddr;
1529 } else
1530 daddr = NULL;
1531
1532 if (daddr) {
1533 if (ipv6_addr_v4mapped(daddr)) {
1534 struct sockaddr_in sin;
1535 sin.sin_family = AF_INET;
1536 sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
1537 sin.sin_addr.s_addr = daddr->s6_addr32[3];
1538 msg->msg_name = &sin;
1539 msg->msg_namelen = sizeof(sin);
1540 do_udp_sendmsg:
1541 err = ipv6_only_sock(sk) ?
1542 -ENETUNREACH : udp_sendmsg(sk, msg, len);
1543 msg->msg_name = sin6;
1544 msg->msg_namelen = addr_len;
1545 return err;
1546 }
1547 }
1548
1549 /* Rough check on arithmetic overflow,
1550 * a better check is made in ip6_append_data().
1551 */
1552 if (len > INT_MAX - sizeof(struct udphdr))
1553 return -EMSGSIZE;
1554
1555 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
1556 if (READ_ONCE(up->pending)) {
1557 if (READ_ONCE(up->pending) == AF_INET)
1558 return udp_sendmsg(sk, msg, len);
1559 /*
1560 * There are pending frames.
1561 * The socket lock must be held while it's corked.
1562 */
1563 lock_sock(sk);
1564 if (likely(up->pending)) {
1565 if (unlikely(up->pending != AF_INET6)) {
1566 release_sock(sk);
1567 return -EAFNOSUPPORT;
1568 }
1569 dst = NULL;
1570 goto do_append_data;
1571 }
1572 release_sock(sk);
1573 }
1574 ulen += sizeof(struct udphdr);
1575
1576 memset(fl6, 0, sizeof(*fl6));
1577
1578 if (sin6) {
1579 if (sin6->sin6_port == 0)
1580 return -EINVAL;
1581
1582 fl6->fl6_dport = sin6->sin6_port;
1583 daddr = &sin6->sin6_addr;
1584
1585 if (inet6_test_bit(SNDFLOW, sk)) {
1586 fl6->flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
1587 if (fl6->flowlabel & IPV6_FLOWLABEL_MASK) {
1588 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1589 if (IS_ERR(flowlabel))
1590 return -EINVAL;
1591 }
1592 }
1593
1594 /*
1595 * Otherwise it will be difficult to maintain
1596 * sk->sk_dst_cache.
1597 */
1598 if (sk->sk_state == TCP_ESTABLISHED &&
1599 ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
1600 daddr = &sk->sk_v6_daddr;
1601
1602 if (addr_len >= sizeof(struct sockaddr_in6) &&
1603 sin6->sin6_scope_id &&
1604 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1605 fl6->flowi6_oif = sin6->sin6_scope_id;
1606 } else {
1607 if (sk->sk_state != TCP_ESTABLISHED)
1608 return -EDESTADDRREQ;
1609
1610 fl6->fl6_dport = inet->inet_dport;
1611 daddr = &sk->sk_v6_daddr;
1612 fl6->flowlabel = np->flow_label;
1613 connected = true;
1614 }
1615
1616 if (!fl6->flowi6_oif)
1617 fl6->flowi6_oif = READ_ONCE(sk->sk_bound_dev_if);
1618
1619 if (!fl6->flowi6_oif)
1620 fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
1621
1622 fl6->flowi6_uid = sk_uid(sk);
1623
1624 if (msg->msg_controllen) {
1625 opt = &opt_space;
1626 memset(opt, 0, sizeof(struct ipv6_txoptions));
1627 opt->tot_len = sizeof(*opt);
1628 ipc6.opt = opt;
1629
1630 err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
1631 if (err > 0) {
1632 err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
1633 &ipc6);
1634 connected = false;
1635 }
1636 if (err < 0) {
1637 fl6_sock_release(flowlabel);
1638 return err;
1639 }
1640 if ((fl6->flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
1641 flowlabel = fl6_sock_lookup(sk, fl6->flowlabel);
1642 if (IS_ERR(flowlabel))
1643 return -EINVAL;
1644 }
1645 if (!(opt->opt_nflen|opt->opt_flen))
1646 opt = NULL;
1647 }
1648 if (!opt) {
1649 opt = txopt_get(np);
1650 opt_to_free = opt;
1651 }
1652 if (flowlabel)
1653 opt = fl6_merge_options(&opt_space, flowlabel, opt);
1654 opt = ipv6_fixup_options(&opt_space, opt);
1655 ipc6.opt = opt;
1656
1657 fl6->flowi6_proto = sk->sk_protocol;
1658 fl6->flowi6_mark = ipc6.sockc.mark;
1659 fl6->daddr = *daddr;
1660 if (ipv6_addr_any(&fl6->saddr) && !ipv6_addr_any(&np->saddr))
1661 fl6->saddr = np->saddr;
1662 fl6->fl6_sport = inet->inet_sport;
1663
1664 if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
1665 err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
1666 (struct sockaddr *)sin6,
1667 &addr_len,
1668 &fl6->saddr);
1669 if (err)
1670 goto out_no_dst;
1671 if (sin6) {
1672 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
1673 /* BPF program rewrote IPv6-only by IPv4-mapped
1674 * IPv6. It's currently unsupported.
1675 */
1676 err = -ENOTSUPP;
1677 goto out_no_dst;
1678 }
1679 if (sin6->sin6_port == 0) {
1680 /* BPF program set invalid port. Reject it. */
1681 err = -EINVAL;
1682 goto out_no_dst;
1683 }
1684 fl6->fl6_dport = sin6->sin6_port;
1685 fl6->daddr = sin6->sin6_addr;
1686 }
1687 }
1688
1689 if (ipv6_addr_any(&fl6->daddr))
1690 fl6->daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
1691
1692 final_p = fl6_update_dst(fl6, opt, &final);
1693 if (final_p)
1694 connected = false;
1695
1696 if (!fl6->flowi6_oif && ipv6_addr_is_multicast(&fl6->daddr)) {
1697 fl6->flowi6_oif = READ_ONCE(np->mcast_oif);
1698 connected = false;
1699 } else if (!fl6->flowi6_oif)
1700 fl6->flowi6_oif = READ_ONCE(np->ucast_oif);
1701
1702 security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
1703
1704 fl6->flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6->flowlabel);
1705
1706 dst = ip6_sk_dst_lookup_flow(sk, fl6, final_p, connected);
1707 if (IS_ERR(dst)) {
1708 err = PTR_ERR(dst);
1709 dst = NULL;
1710 goto out;
1711 }
1712
1713 if (ipc6.hlimit < 0)
1714 ipc6.hlimit = ip6_sk_dst_hoplimit(np, fl6, dst);
1715
1716 if (msg->msg_flags&MSG_CONFIRM)
1717 goto do_confirm;
1718 back_from_confirm:
1719
1720 /* Lockless fast path for the non-corking case */
1721 if (!corkreq) {
1722 struct sk_buff *skb;
1723
1724 skb = ip6_make_skb(sk, getfrag, msg, ulen,
1725 sizeof(struct udphdr), &ipc6,
1726 dst_rt6_info(dst),
1727 msg->msg_flags, &cork);
1728 err = PTR_ERR(skb);
1729 if (!IS_ERR_OR_NULL(skb))
1730 err = udp_v6_send_skb(skb, fl6, &cork.base);
1731 /* ip6_make_skb steals dst reference */
1732 goto out_no_dst;
1733 }
1734
1735 lock_sock(sk);
1736 if (unlikely(up->pending)) {
1737 /* The socket is already corked while preparing it. */
1738 /* ... which is an evident application bug. --ANK */
1739 release_sock(sk);
1740
1741 net_dbg_ratelimited("udp cork app bug 2\n");
1742 err = -EINVAL;
1743 goto out;
1744 }
1745
1746 WRITE_ONCE(up->pending, AF_INET6);
1747
1748 do_append_data:
1749 up->len += ulen;
1750 err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
1751 &ipc6, fl6, dst_rt6_info(dst),
1752 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
1753 if (err)
1754 udp_v6_flush_pending_frames(sk);
1755 else if (!corkreq)
1756 err = udp_v6_push_pending_frames(sk);
1757 else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
1758 WRITE_ONCE(up->pending, 0);
1759
1760 if (err > 0)
1761 err = inet6_test_bit(RECVERR6, sk) ? net_xmit_errno(err) : 0;
1762 release_sock(sk);
1763
1764 out:
1765 dst_release(dst);
1766 out_no_dst:
1767 fl6_sock_release(flowlabel);
1768 txopt_put(opt_to_free);
1769 if (!err)
1770 return len;
1771 /*
1772 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
1773 * ENOBUFS might not be good (it's not tunable per se), but otherwise
1774 * we don't have a good statistic (IpOutDiscards but it can be too many
1775 * things). We could add another new stat but at least for now that
1776 * seems like overkill.
1777 */
1778 if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1779 UDP6_INC_STATS(sock_net(sk),
1780 UDP_MIB_SNDBUFERRORS, is_udplite);
1781 }
1782 return err;
1783
1784 do_confirm:
1785 if (msg->msg_flags & MSG_PROBE)
1786 dst_confirm_neigh(dst, &fl6->daddr);
1787 if (!(msg->msg_flags&MSG_PROBE) || len)
1788 goto back_from_confirm;
1789 err = 0;
1790 goto out;
1791 }
1792 EXPORT_SYMBOL(udpv6_sendmsg);
1793
1794 static void udpv6_splice_eof(struct socket *sock)
1795 {
1796 struct sock *sk = sock->sk;
1797 struct udp_sock *up = udp_sk(sk);
1798
1799 if (!READ_ONCE(up->pending) || udp_test_bit(CORK, sk))
1800 return;
1801
1802 lock_sock(sk);
1803 if (up->pending && !udp_test_bit(CORK, sk))
1804 udp_v6_push_pending_frames(sk);
1805 release_sock(sk);
1806 }
1807
1808 void udpv6_destroy_sock(struct sock *sk)
1809 {
1810 struct udp_sock *up = udp_sk(sk);
1811 lock_sock(sk);
1812
1813 /* protects from races with udp_abort() */
1814 sock_set_flag(sk, SOCK_DEAD);
1815 udp_v6_flush_pending_frames(sk);
1816 release_sock(sk);
1817
1818 if (static_branch_unlikely(&udpv6_encap_needed_key)) {
1819 if (up->encap_type) {
1820 void (*encap_destroy)(struct sock *sk);
1821 encap_destroy = READ_ONCE(up->encap_destroy);
1822 if (encap_destroy)
1823 encap_destroy(sk);
1824 }
1825 if (udp_test_bit(ENCAP_ENABLED, sk)) {
1826 static_branch_dec(&udpv6_encap_needed_key);
1827 udp_encap_disable();
1828 udp_tunnel_cleanup_gro(sk);
1829 }
1830 }
1831 }
1832
1833 /*
1834 * Socket option code for UDP
1835 */
1836 int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
1837 unsigned int optlen)
1838 {
1839 if (level == SOL_UDP || level == SOL_UDPLITE || level == SOL_SOCKET)
1840 return udp_lib_setsockopt(sk, level, optname,
1841 optval, optlen,
1842 udp_v6_push_pending_frames);
1843 return ipv6_setsockopt(sk, level, optname, optval, optlen);
1844 }
1845
1846 int udpv6_getsockopt(struct sock *sk, int level, int optname,
1847 char __user *optval, int __user *optlen)
1848 {
1849 if (level == SOL_UDP || level == SOL_UDPLITE)
1850 return udp_lib_getsockopt(sk, level, optname, optval, optlen);
1851 return ipv6_getsockopt(sk, level, optname, optval, optlen);
1852 }
1853
1854
1855 /* ------------------------------------------------------------------------ */
1856 #ifdef CONFIG_PROC_FS
1857 int udp6_seq_show(struct seq_file *seq, void *v)
1858 {
1859 if (v == SEQ_START_TOKEN) {
1860 seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
1861 } else {
1862 int bucket = ((struct udp_iter_state *)seq->private)->bucket;
1863 const struct inet_sock *inet = inet_sk((const struct sock *)v);
1864 __u16 srcp = ntohs(inet->inet_sport);
1865 __u16 destp = ntohs(inet->inet_dport);
1866 __ip6_dgram_sock_seq_show(seq, v, srcp, destp,
1867 udp_rqueue_get(v), bucket);
1868 }
1869 return 0;
1870 }
1871
1872 const struct seq_operations udp6_seq_ops = {
1873 .start = udp_seq_start,
1874 .next = udp_seq_next,
1875 .stop = udp_seq_stop,
1876 .show = udp6_seq_show,
1877 };
1878 EXPORT_SYMBOL(udp6_seq_ops);
1879
1880 static struct udp_seq_afinfo udp6_seq_afinfo = {
1881 .family = AF_INET6,
1882 .udp_table = NULL,
1883 };
1884
1885 int __net_init udp6_proc_init(struct net *net)
1886 {
1887 if (!proc_create_net_data("udp6", 0444, net->proc_net, &udp6_seq_ops,
1888 sizeof(struct udp_iter_state), &udp6_seq_afinfo))
1889 return -ENOMEM;
1890 return 0;
1891 }
1892
1893 void udp6_proc_exit(struct net *net)
1894 {
1895 remove_proc_entry("udp6", net->proc_net);
1896 }
1897 #endif /* CONFIG_PROC_FS */
1898
1899 /* ------------------------------------------------------------------------ */
1900
1901 struct proto udpv6_prot = {
1902 .name = "UDPv6",
1903 .owner = THIS_MODULE,
1904 .close = udp_lib_close,
1905 .pre_connect = udpv6_pre_connect,
1906 .connect = udpv6_connect,
1907 .disconnect = udp_disconnect,
1908 .ioctl = udp_ioctl,
1909 .init = udpv6_init_sock,
1910 .destroy = udpv6_destroy_sock,
1911 .setsockopt = udpv6_setsockopt,
1912 .getsockopt = udpv6_getsockopt,
1913 .sendmsg = udpv6_sendmsg,
1914 .recvmsg = udpv6_recvmsg,
1915 .splice_eof = udpv6_splice_eof,
1916 .release_cb = ip6_datagram_release_cb,
1917 .hash = udp_lib_hash,
1918 .unhash = udp_lib_unhash,
1919 .rehash = udp_v6_rehash,
1920 .get_port = udp_v6_get_port,
1921 .put_port = udp_lib_unhash,
1922 #ifdef CONFIG_BPF_SYSCALL
1923 .psock_update_sk_prot = udp_bpf_update_proto,
1924 #endif
1925
1926 .memory_allocated = &net_aligned_data.udp_memory_allocated,
1927 .per_cpu_fw_alloc = &udp_memory_per_cpu_fw_alloc,
1928
1929 .sysctl_mem = sysctl_udp_mem,
1930 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
1931 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
1932 .obj_size = sizeof(struct udp6_sock),
1933 .ipv6_pinfo_offset = offsetof(struct udp6_sock, inet6),
1934 .h.udp_table = NULL,
1935 .diag_destroy = udp_abort,
1936 };
1937
1938 static struct inet_protosw udpv6_protosw = {
1939 .type = SOCK_DGRAM,
1940 .protocol = IPPROTO_UDP,
1941 .prot = &udpv6_prot,
1942 .ops = &inet6_dgram_ops,
1943 .flags = INET_PROTOSW_PERMANENT,
1944 };
1945
1946 int __init udpv6_init(void)
1947 {
1948 int ret;
1949
1950 net_hotdata.udpv6_protocol = (struct inet6_protocol) {
1951 .handler = udpv6_rcv,
1952 .err_handler = udpv6_err,
1953 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
1954 };
1955 ret = inet6_add_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1956 if (ret)
1957 goto out;
1958
1959 ret = inet6_register_protosw(&udpv6_protosw);
1960 if (ret)
1961 goto out_udpv6_protocol;
1962 out:
1963 return ret;
1964
1965 out_udpv6_protocol:
1966 inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1967 goto out;
1968 }
1969
1970 void udpv6_exit(void)
1971 {
1972 inet6_unregister_protosw(&udpv6_protosw);
1973 inet6_del_protocol(&net_hotdata.udpv6_protocol, IPPROTO_UDP);
1974 }
1975