/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	$Id: udp.c,v 1.65 2002/02/01 22:01:04 davem Exp $
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/sched.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <asm/uaccess.h>

#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "udp_impl.h"

DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;

static inline int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return udp_get_port(sk, snum, ipv6_rcv_saddr_equal);
}

static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,
				      struct in6_addr *daddr, __be16 dport,
				      int dif, struct hlist_head udptable[])
{
	struct sock *sk, *result = NULL;
	struct hlist_node *node;
	unsigned short hnum = ntohs(dport);
	int badness = -1;

	read_lock(&udp_hash_lock);
	sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
		struct inet_sock *inet = inet_sk(sk);

		if (inet->num == hnum && sk->sk_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			int score = 0;
			if (inet->dport) {
				if (inet->dport != sport)
					continue;
				score++;
			}
			if (!ipv6_addr_any(&np->rcv_saddr)) {
				if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
					continue;
				score++;
			}
			if (!ipv6_addr_any(&np->daddr)) {
				if (!ipv6_addr_equal(&np->daddr, saddr))
					continue;
				score++;
			}
			if (sk->sk_bound_dev_if) {
				if (sk->sk_bound_dev_if != dif)
					continue;
				score++;
			}
			if (score == 4) {
				result = sk;
				break;
			} else if (score > badness) {
				result = sk;
				badness = score;
			}
		}
	}
	if (result)
		sock_hold(result);
	read_unlock(&udp_hash_lock);
	return result;
}

/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
		  struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	size_t copied;
	int err, copy_only, is_udplite = IS_UDPLITE(sk);

	if (addr_len)
		*addr_len = sizeof(struct sockaddr_in6);

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len);

try_again:
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len - sizeof(struct udphdr);
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	/*
	 *	Decide whether to checksum and/or copy data.
	 */
	copy_only = (skb->ip_summed == CHECKSUM_UNNECESSARY);

	if (is_udplite || (!copy_only && msg->msg_flags & MSG_TRUNC)) {
		if (__udp_lib_checksum_complete(skb))
			goto csum_copy_err;
		copy_only = 1;
	}

	if (copy_only)
		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
					      msg->msg_iov, copied);
	else {
		err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *) msg->msg_name;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = skb->h.uh->source;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = 0;

		if (skb->protocol == htons(ETH_P_IP))
			ipv6_addr_set(&sin6->sin6_addr, 0, 0,
				      htonl(0xffff), skb->nh.iph->saddr);
		else {
			ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr);
			if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				sin6->sin6_scope_id = IP6CB(skb)->iif;
		}

	}
	if (skb->protocol == htons(ETH_P_IP)) {
		if (inet->cmsg_flags)
			ip_cmsg_recv(msg, skb);
	} else {
		if (np->rxopt.all)
			datagram_recv_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len - sizeof(struct udphdr);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	skb_kill_datagram(sk, skb, flags);

	if (flags & MSG_DONTWAIT) {
		UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
		return -EAGAIN;
	}
	goto try_again;
}

void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    int type, int code, int offset, __be32 info,
		    struct hlist_head udptable[])
{
	struct ipv6_pinfo *np;
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	struct in6_addr *saddr = &hdr->saddr;
	struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data + offset);
	struct sock *sk;
	int err;

	sk = __udp6_lib_lookup(daddr, uh->dest,
			       saddr, uh->source, inet6_iif(skb), udptable);
	if (sk == NULL)
		return;

	np = inet6_sk(sk);

	if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
		goto out;

	if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
		goto out;

	if (np->recverr)
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh + 1));

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	sock_put(sk);
}

static __inline__ void udpv6_err(struct sk_buff *skb,
				 struct inet6_skb_parm *opt, int type,
				 int code, int offset, __be32 info)
{
	return __udp6_lib_err(skb, opt, type, code, offset, info, udp_hash);
}

int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {		/* full coverage was set */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage"
				" %d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d "
						    "too small, need min %d\n",
				       UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (udp_lib_checksum_complete(skb))
		goto drop;

	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS_BH(UDP_MIB_RCVBUFERRORS, up->pcflag);
		goto drop;
	}
	UDP6_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
	return 0;
drop:
	UDP6_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
	kfree_skb(skb);
	return -1;
}

static struct sock *udp_v6_mcast_next(struct sock *sk,
				      __be16 loc_port, struct in6_addr *loc_addr,
				      __be16 rmt_port, struct in6_addr *rmt_addr,
				      int dif)
{
	struct hlist_node *node;
	struct sock *s = sk;
	unsigned short num = ntohs(loc_port);

	sk_for_each_from(s, node) {
		struct inet_sock *inet = inet_sk(s);

		if (inet->num == num && s->sk_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(s);
			if (inet->dport) {
				if (inet->dport != rmt_port)
					continue;
			}
			if (!ipv6_addr_any(&np->daddr) &&
			    !ipv6_addr_equal(&np->daddr, rmt_addr))
				continue;

			if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
				continue;

			if (!ipv6_addr_any(&np->rcv_saddr)) {
				if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
					continue;
			}
			if (!inet6_mc_check(s, loc_addr, rmt_addr))
				continue;
			return s;
		}
	}
	return NULL;
}

/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct sk_buff *skb, struct in6_addr *saddr,
				    struct in6_addr *daddr, struct hlist_head udptable[])
{
	struct sock *sk, *sk2;
	const struct udphdr *uh = skb->h.uh;
	int dif;

	read_lock(&udp_hash_lock);
	sk = sk_head(&udptable[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]);
	dif = inet6_iif(skb);
	sk = udp_v6_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
	if (!sk) {
		kfree_skb(skb);
		goto out;
	}

	sk2 = sk;
	while ((sk2 = udp_v6_mcast_next(sk_next(sk2), uh->dest, daddr,
					uh->source, saddr, dif))) {
		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
		if (buff)
			udpv6_queue_rcv_skb(sk2, buff);
	}
	udpv6_queue_rcv_skb(sk, skb);
out:
	read_unlock(&udp_hash_lock);
	return 0;
}
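
/*
 * Validate/prepare the checksum on receive.  A UDP checksum of zero is
 * illegal over IPv6 (RFC 2460, section 8.1), so such packets are rejected.
 * If the hardware has already verified the checksum it is accepted as-is;
 * otherwise the pseudo-header sum is precomputed here so that verification
 * can be completed later (possibly during the copy to user space).
 */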
static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh)
{
	if (uh->check == 0) {
		/* RFC 2460 section 8.1 says that we SHOULD log
		   this error. Well, it is reasonable.
		 */
		LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
		return 1;
	}
	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
			     skb->len, IPPROTO_UDP, skb->csum))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
		skb->csum = ~csum_unfold(csum_ipv6_magic(&skb->nh.ipv6h->saddr,
							 &skb->nh.ipv6h->daddr,
							 skb->len, IPPROTO_UDP,
							 0));

	return (UDP_SKB_CB(skb)->partial_cov = 0);
}

/*
 * Shared receive path for UDP and UDP-Lite: validate the length and
 * checksum, then deliver to the matching socket(s).  Multicast traffic is
 * fanned out to every member socket; unmatched unicast packets trigger an
 * ICMPv6 port-unreachable.
 */
int __udp6_lib_rcv(struct sk_buff **pskb, struct hlist_head udptable[],
		   int is_udplite)
{
	struct sk_buff *skb = *pskb;
	struct sock *sk;
	struct udphdr *uh;
	struct net_device *dev = skb->dev;
	struct in6_addr *saddr, *daddr;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto short_packet;

	saddr = &skb->nh.ipv6h->saddr;
	daddr = &skb->nh.ipv6h->daddr;
	uh = skb->h.uh;

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (!is_udplite) {		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &skb->nh.ipv6h->saddr;
			daddr = &skb->nh.ipv6h->daddr;
			uh = skb->h.uh;
		}

		if (udp6_csum_init(skb, uh))
			goto discard;

	} else {			/* UDP-Lite validates cscov. */
		if (udplite6_csum_init(skb, uh))
			goto discard;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(skb, saddr, daddr, udptable);

	/* Unicast */

	/*
	 * check socket cache ... must talk to Alan about his plans
	 * for sock caches... i'll skip this for now.
	 */
	sk = __udp6_lib_lookup(saddr, uh->source,
			       daddr, uh->dest, inet6_iif(skb), udptable);

	if (sk == NULL) {
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto discard;

		if (udp_lib_checksum_complete(skb))
			goto discard;
		UDP6_INC_STATS_BH(UDP_MIB_NOPORTS, is_udplite);

		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);

		kfree_skb(skb);
		return 0;
	}

	/* deliver */

	udpv6_queue_rcv_skb(sk, skb);
	sock_put(sk);
	return 0;

short_packet:
	LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: %d/%u\n",
		       is_udplite ? "-Lite" : "", ulen, skb->len);

discard:
	UDP6_INC_STATS_BH(UDP_MIB_INERRORS, is_udplite);
	kfree_skb(skb);
	return 0;
}

static __inline__ int udpv6_rcv(struct sk_buff **pskb)
{
	return __udp6_lib_rcv(pskb, udp_hash, 0);
}

/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}

/*
 *	Sending
 */
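
/*
 * Fill in the UDP header for the frames queued by ip6_append_data() and
 * push them out.  The checksum covers the IPv6 pseudo-header; a computed
 * value of zero is sent as CSUM_MANGLED_0 (all ones), since zero means
 * "no checksum" and is not allowed for UDP over IPv6.
 */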
static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udphdr *uh;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi *fl = &inet->cork.fl;
	int err = 0;
	__wsum csum = 0;

	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;

	/*
	 * Create a UDP header
	 */
	uh = skb->h.uh;
	uh->source = fl->fl_ip_sport;
	uh->dest = fl->fl_ip_dport;
	uh->len = htons(up->len);
	uh->check = 0;

	if (up->pcflag)
		csum = udplite_csum_outgoing(sk, skb);
	else
		csum = udp_csum_outgoing(sk, skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl->fl6_src, &fl->fl6_dst,
				    up->len, fl->proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	err = ip6_push_pending_frames(sk);
out:
	up->len = 0;
	up->pending = 0;
	return err;
}

int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
		  struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
	struct in6_addr *daddr, *final_p = NULL, final;
	struct ipv6_txoptions *opt = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_len = msg->msg_namelen;
	int ulen = len;
	int hlimit = -1;
	int tclass = -1;
	int corkreq = up->corkflag || msg->msg_flags & MSG_MORE;
	int err;
	int connected = 0;
	int is_udplite = up->pcflag;
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &np->daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_type(daddr) == IPV6_ADDR_MAPPED) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(iocb, sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(iocb, sk, msg, len);

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_build_xmit
	 */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl, 0, sizeof(fl));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl.fl_ip_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl.fl6_flowlabel = sin6->sin6_flowinfo & IPV6_FLOWINFO_MASK;
			if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
				if (flowlabel == NULL)
					return -EINVAL;
				daddr = &flowlabel->dst;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &np->daddr))
			daddr = &np->daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
			fl.oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl.fl_ip_dport = inet->dport;
		daddr = &np->daddr;
		fl.fl6_flowlabel = np->flow_label;
		connected = 1;
	}

	if (!fl.oif)
		fl.oif = sk->sk_bound_dev_if;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);

		err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
		}
		if (!(opt->opt_nflen | opt->opt_flen))
			opt = NULL;
		connected = 0;
	}
	if (opt == NULL)
		opt = np->opt;
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);

	fl.proto = sk->sk_protocol;
	ipv6_addr_copy(&fl.fl6_dst, daddr);
	if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
		ipv6_addr_copy(&fl.fl6_src, &np->saddr);
	fl.fl_ip_sport = inet->sport;

	/* merge ip6_build_xmit from ip6_output */
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
		connected = 0;
	}

	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
		fl.oif = np->mcast_oif;
		connected = 0;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_sk_dst_lookup(sk, &dst, &fl);
	if (err)
		goto out;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto out;

	if (hlimit < 0) {
		if (ipv6_addr_is_multicast(&fl.fl6_dst))
			hlimit = np->mcast_hops;
		else
			hlimit = np->hop_limit;
		if (hlimit < 0)
			hlimit = dst_metric(dst, RTAX_HOPLIMIT);
		if (hlimit < 0)
			hlimit = ipv6_get_hoplimit(dst->dev);
	}

	if (tclass < 0) {
		tclass = np->tclass;
		if (tclass < 0)
			tclass = 0;
	}

	if (msg->msg_flags & MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug.
		   --ANK */
		release_sock(sk);

		LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	up->len += ulen;
	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
			      sizeof(struct udphdr), hlimit, tclass, opt, &fl,
			      (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags | MSG_MORE : msg->msg_flags);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (dst) {
		if (connected) {
			ip6_dst_store(sk, dst,
				      ipv6_addr_equal(&fl.fl6_dst, &np->daddr) ?
				      &np->daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
				      ipv6_addr_equal(&fl.fl6_src, &np->saddr) ?
				      &np->saddr :
#endif
				      NULL);
		} else {
			dst_release(dst);
		}
	}

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);
out:
	fl6_sock_release(flowlabel);
	if (!err) {
		UDP6_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS, is_udplite);
		return len;
	}
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS_USER(UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	dst_confirm(dst);
	if (!(msg->msg_flags & MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}

int udpv6_destroy_sock(struct sock *sk)
{
	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	inet6_destroy_sock(sk);

	return 0;
}

/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif

int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif

static struct inet6_protocol udpv6_protocol = {
	.handler	= udpv6_rcv,
	.err_handler	= udpv6_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);
	struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);
	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p\n",
		   bucket,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   atomic_read(&sp->sk_wmem_alloc),
		   atomic_read(&sp->sk_rmem_alloc),
		   0, 0L, 0,
		   sock_i_uid(sp), 0,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp);
}

int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "  sl  "
			   "local_address                         "
			   "remote_address                        "
			   "st tx_queue rx_queue tr tm->when retrnsmt"
			   "   uid  timeout inode\n");
	else
		udp6_sock_seq_show(seq, v, ((struct udp_iter_state *)seq->private)->bucket);
	return 0;
}

static struct file_operations udp6_seq_fops;
static struct udp_seq_afinfo udp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "udp6",
	.family		= AF_INET6,
	.hashtable	= udp_hash,
	.seq_show	= udp6_seq_show,
	.seq_fops	= &udp6_seq_fops,
};

int __init udp6_proc_init(void)
{
	return udp_proc_register(&udp6_seq_afinfo);
}

void udp6_proc_exit(void)
{
	udp_proc_unregister(&udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */

/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name		= "UDPv6",
	.owner		= THIS_MODULE,
	.close		= udp_lib_close,
	.connect	= ip6_datagram_connect,
	.disconnect	= udp_disconnect,
	.ioctl		= udp_ioctl,
	.destroy	= udpv6_destroy_sock,
	.setsockopt	= udpv6_setsockopt,
	.getsockopt	= udpv6_getsockopt,
	.sendmsg	= udpv6_sendmsg,
	.recvmsg	= udpv6_recvmsg,
	.backlog_rcv	= udpv6_queue_rcv_skb,
	.hash		= udp_lib_hash,
	.unhash		= udp_lib_unhash,
	.get_port	= udp_v6_get_port,
	.obj_size	= sizeof(struct udp6_sock),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_udpv6_setsockopt,
	.compat_getsockopt = compat_udpv6_getsockopt,
#endif
};

static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.capability	= -1,
	.no_check	= UDP_CSUM_DEFAULT,
	.flags		= INET_PROTOSW_PERMANENT,
};


void __init udpv6_init(void)
{
	if (inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP) < 0)
		printk(KERN_ERR "udpv6_init: Could not register protocol\n");
	inet6_register_protosw(&udpv6_protosw);
}