/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo :	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-21"       , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_MAX"
};
static const char *af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-21"       , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_MAX"
};
static const char *af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-21"       , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
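
/*
 * Worked example (illustrative only, assuming sizeof(struct sk_buff) == 256
 * on the target build; the real value varies by architecture and config):
 *
 *	_SK_MEM_OVERHEAD = 256 + 256 = 512 bytes of worst-case overhead
 *	SK_WMEM_MAX      = 512 * 256 = 131072 bytes (128 KiB)
 *
 * so the default send/receive limits below come out to about 128 KiB on
 * such a build, and scale automatically where sk_buff is larger.
 */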

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
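
/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * setting a 2.5 second receive timeout, which reaches the function above
 * via SO_RCVTIMEO and lands in sk->sk_rcvtimeo.
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("setsockopt(SO_RCVTIMEO)");
 *
 * A zeroed timeval means "wait forever" (MAX_SCHEDULE_TIMEOUT above).
 */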
297 */ 298 skb_len = skb->len; 299 300 skb_queue_tail(&sk->sk_receive_queue, skb); 301 302 if (!sock_flag(sk, SOCK_DEAD)) 303 sk->sk_data_ready(sk, skb_len); 304 out: 305 return err; 306 } 307 EXPORT_SYMBOL(sock_queue_rcv_skb); 308 309 int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested) 310 { 311 int rc = NET_RX_SUCCESS; 312 313 if (sk_filter(sk, skb)) 314 goto discard_and_relse; 315 316 skb->dev = NULL; 317 318 if (nested) 319 bh_lock_sock_nested(sk); 320 else 321 bh_lock_sock(sk); 322 if (!sock_owned_by_user(sk)) { 323 /* 324 * trylock + unlock semantics: 325 */ 326 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); 327 328 rc = sk_backlog_rcv(sk, skb); 329 330 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 331 } else 332 sk_add_backlog(sk, skb); 333 bh_unlock_sock(sk); 334 out: 335 sock_put(sk); 336 return rc; 337 discard_and_relse: 338 kfree_skb(skb); 339 goto out; 340 } 341 EXPORT_SYMBOL(sk_receive_skb); 342 343 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 344 { 345 struct dst_entry *dst = sk->sk_dst_cache; 346 347 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 348 sk->sk_dst_cache = NULL; 349 dst_release(dst); 350 return NULL; 351 } 352 353 return dst; 354 } 355 EXPORT_SYMBOL(__sk_dst_check); 356 357 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) 358 { 359 struct dst_entry *dst = sk_dst_get(sk); 360 361 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 362 sk_dst_reset(sk); 363 dst_release(dst); 364 return NULL; 365 } 366 367 return dst; 368 } 369 EXPORT_SYMBOL(sk_dst_check); 370 371 static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen) 372 { 373 int ret = -ENOPROTOOPT; 374 #ifdef CONFIG_NETDEVICES 375 struct net *net = sock_net(sk); 376 char devname[IFNAMSIZ]; 377 int index; 378 379 /* Sorry... */ 380 ret = -EPERM; 381 if (!capable(CAP_NET_RAW)) 382 goto out; 383 384 ret = -EINVAL; 385 if (optlen < 0) 386 goto out; 387 388 /* Bind this socket to a particular device like "eth0", 389 * as specified in the passed interface name. If the 390 * name is "" or the option length is zero the socket 391 * is not bound. 392 */ 393 if (optlen > IFNAMSIZ - 1) 394 optlen = IFNAMSIZ - 1; 395 memset(devname, 0, sizeof(devname)); 396 397 ret = -EFAULT; 398 if (copy_from_user(devname, optval, optlen)) 399 goto out; 400 401 if (devname[0] == '\0') { 402 index = 0; 403 } else { 404 struct net_device *dev = dev_get_by_name(net, devname); 405 406 ret = -ENODEV; 407 if (!dev) 408 goto out; 409 410 index = dev->ifindex; 411 dev_put(dev); 412 } 413 414 lock_sock(sk); 415 sk->sk_bound_dev_if = index; 416 sk_dst_reset(sk); 417 release_sock(sk); 418 419 ret = 0; 420 421 out: 422 #endif 423 424 return ret; 425 } 426 427 static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool) 428 { 429 if (valbool) 430 sock_set_flag(sk, bit); 431 else 432 sock_reset_flag(sk, bit); 433 } 434 435 /* 436 * This is meant for all protocols to use and covers goings on 437 * at the socket level. Everything here is generic. 
438 */ 439 440 int sock_setsockopt(struct socket *sock, int level, int optname, 441 char __user *optval, int optlen) 442 { 443 struct sock *sk=sock->sk; 444 int val; 445 int valbool; 446 struct linger ling; 447 int ret = 0; 448 449 /* 450 * Options without arguments 451 */ 452 453 if (optname == SO_BINDTODEVICE) 454 return sock_bindtodevice(sk, optval, optlen); 455 456 if (optlen < sizeof(int)) 457 return -EINVAL; 458 459 if (get_user(val, (int __user *)optval)) 460 return -EFAULT; 461 462 valbool = val?1:0; 463 464 lock_sock(sk); 465 466 switch(optname) { 467 case SO_DEBUG: 468 if (val && !capable(CAP_NET_ADMIN)) { 469 ret = -EACCES; 470 } else 471 sock_valbool_flag(sk, SOCK_DBG, valbool); 472 break; 473 case SO_REUSEADDR: 474 sk->sk_reuse = valbool; 475 break; 476 case SO_TYPE: 477 case SO_ERROR: 478 ret = -ENOPROTOOPT; 479 break; 480 case SO_DONTROUTE: 481 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); 482 break; 483 case SO_BROADCAST: 484 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); 485 break; 486 case SO_SNDBUF: 487 /* Don't error on this BSD doesn't and if you think 488 about it this is right. Otherwise apps have to 489 play 'guess the biggest size' games. RCVBUF/SNDBUF 490 are treated in BSD as hints */ 491 492 if (val > sysctl_wmem_max) 493 val = sysctl_wmem_max; 494 set_sndbuf: 495 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 496 if ((val * 2) < SOCK_MIN_SNDBUF) 497 sk->sk_sndbuf = SOCK_MIN_SNDBUF; 498 else 499 sk->sk_sndbuf = val * 2; 500 501 /* 502 * Wake up sending tasks if we 503 * upped the value. 504 */ 505 sk->sk_write_space(sk); 506 break; 507 508 case SO_SNDBUFFORCE: 509 if (!capable(CAP_NET_ADMIN)) { 510 ret = -EPERM; 511 break; 512 } 513 goto set_sndbuf; 514 515 case SO_RCVBUF: 516 /* Don't error on this BSD doesn't and if you think 517 about it this is right. Otherwise apps have to 518 play 'guess the biggest size' games. RCVBUF/SNDBUF 519 are treated in BSD as hints */ 520 521 if (val > sysctl_rmem_max) 522 val = sysctl_rmem_max; 523 set_rcvbuf: 524 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 525 /* 526 * We double it on the way in to account for 527 * "struct sk_buff" etc. overhead. Applications 528 * assume that the SO_RCVBUF setting they make will 529 * allow that much actual data to be received on that 530 * socket. 531 * 532 * Applications are unaware that "struct sk_buff" and 533 * other overheads allocate from the receive buffer 534 * during socket buffer allocation. 535 * 536 * And after considering the possible alternatives, 537 * returning the value we actually used in getsockopt 538 * is the most desirable behavior. 
539 */ 540 if ((val * 2) < SOCK_MIN_RCVBUF) 541 sk->sk_rcvbuf = SOCK_MIN_RCVBUF; 542 else 543 sk->sk_rcvbuf = val * 2; 544 break; 545 546 case SO_RCVBUFFORCE: 547 if (!capable(CAP_NET_ADMIN)) { 548 ret = -EPERM; 549 break; 550 } 551 goto set_rcvbuf; 552 553 case SO_KEEPALIVE: 554 #ifdef CONFIG_INET 555 if (sk->sk_protocol == IPPROTO_TCP) 556 tcp_set_keepalive(sk, valbool); 557 #endif 558 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 559 break; 560 561 case SO_OOBINLINE: 562 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); 563 break; 564 565 case SO_NO_CHECK: 566 sk->sk_no_check = valbool; 567 break; 568 569 case SO_PRIORITY: 570 if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) 571 sk->sk_priority = val; 572 else 573 ret = -EPERM; 574 break; 575 576 case SO_LINGER: 577 if (optlen < sizeof(ling)) { 578 ret = -EINVAL; /* 1003.1g */ 579 break; 580 } 581 if (copy_from_user(&ling,optval,sizeof(ling))) { 582 ret = -EFAULT; 583 break; 584 } 585 if (!ling.l_onoff) 586 sock_reset_flag(sk, SOCK_LINGER); 587 else { 588 #if (BITS_PER_LONG == 32) 589 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) 590 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; 591 else 592 #endif 593 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; 594 sock_set_flag(sk, SOCK_LINGER); 595 } 596 break; 597 598 case SO_BSDCOMPAT: 599 sock_warn_obsolete_bsdism("setsockopt"); 600 break; 601 602 case SO_PASSCRED: 603 if (valbool) 604 set_bit(SOCK_PASSCRED, &sock->flags); 605 else 606 clear_bit(SOCK_PASSCRED, &sock->flags); 607 break; 608 609 case SO_TIMESTAMP: 610 case SO_TIMESTAMPNS: 611 if (valbool) { 612 if (optname == SO_TIMESTAMP) 613 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); 614 else 615 sock_set_flag(sk, SOCK_RCVTSTAMPNS); 616 sock_set_flag(sk, SOCK_RCVTSTAMP); 617 sock_enable_timestamp(sk); 618 } else { 619 sock_reset_flag(sk, SOCK_RCVTSTAMP); 620 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); 621 } 622 break; 623 624 case SO_RCVLOWAT: 625 if (val < 0) 626 val = INT_MAX; 627 sk->sk_rcvlowat = val ? 

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement SO_SNDLOWAT etc. as not settable (1003.1g 5.3) */
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
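
/*
 * Illustrative userspace counterpart (a sketch): attaching a minimal
 * classic BPF program that accepts every packet. sk_attach_filter()
 * copies and validates the program before installing it on the socket.
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	// accept packet
 *	};
 *	struct sock_fprog prog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0)
 *		perror("setsockopt(SO_ATTACH_FILTER)");
 */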

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	unsigned int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
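
/*
 * Illustrative userspace counterpart (a sketch): the classic use of
 * SO_ERROR is harvesting the result of a non-blocking connect() once
 * poll() reports the socket writable; sock_error() above returns and
 * clears the pending error.
 *
 *	int err = 0;
 *	socklen_t len = sizeof(err);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) == 0 && err)
 *		fprintf(stderr, "connect: %s\n", strerror(err));
 */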

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif

	memcpy(nsk, osk, osk->sk_prot->obj_size);
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL)
		sk = kmem_cache_alloc(slab, priority);
	else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
	}

	return sk;
}

void sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

/*
 * The last sock_put should drop the reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping namespace
 * is not an option.
 * Take a reference to the socket to remove it from the hash while it is
 * still _alive_, and after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache	= NULL;
		newsk->sk_wmem_queued	= 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head	= NULL;
		newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err	   = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_sleep	 = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);
	}
out:
	return newsk;
}

EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (num_physpages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (num_physpages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	/* In case it might be waiting for more memory. */
	atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
		sk->sk_write_space(sk);
	sock_put(sk);
}

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_uncharge(skb->sk, skb->truesize);
}


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
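
/*
 * In-kernel usage sketch (illustrative): option memory is charged against
 * sk_omem_alloc, so every sock_kmalloc() must be paired with a
 * sock_kfree_s() quoting the same size to keep the accounting balanced.
 *
 *	struct my_opt_state *st;	// hypothetical per-socket state
 *
 *	st = sock_kmalloc(sk, sizeof(*st), GFP_KERNEL);
 *	if (st == NULL)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, st, sizeof(*st));	// uncharges the same size
 */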

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
					    unsigned long header_len,
					    unsigned long data_len,
					    int noblock, int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}

static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;
}

EXPORT_SYMBOL(sk_wait_data);

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	int allocated;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	allocated = atomic_add_return(amt, prot->memory_allocated);

	/* Under limit. */
	if (allocated <= prot->sysctl_mem[0]) {
		if (prot->memory_pressure && *prot->memory_pressure)
			*prot->memory_pressure = 0;
		return 1;
	}

	/* Under pressure. */
	if (allocated > prot->sysctl_mem[1])
		if (prot->enter_memory_pressure)
			prot->enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > prot->sysctl_mem[2])
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;
	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
				return 1;
	}

	if (prot->memory_pressure) {
		int alloc;

		if (!*prot->memory_pressure)
			return 1;
		alloc = percpu_counter_read_positive(prot->sockets_allocated);
		if (prot->sysctl_mem[2] > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
	atomic_sub(amt, prot->memory_allocated);
	return 0;
}

EXPORT_SYMBOL(__sk_mem_schedule);

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
		   prot->memory_allocated);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (prot->memory_pressure && *prot->memory_pressure &&
	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
		*prot->memory_pressure = 0;
}

EXPORT_SYMBOL(__sk_mem_reclaim);


/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);
	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
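
/*
 * Usage sketch (illustrative): a protocol family that supports only a
 * subset of operations can wire the gaps to the sock_no_*() stubs above
 * instead of open-coding -EOPNOTSUPP handlers. The names here are
 * hypothetical, not a real in-tree family.
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family     = PF_EXAMPLE,		// hypothetical
 *		.owner      = THIS_MODULE,
 *		.release    = example_release,		// hypothetical
 *		.bind       = example_bind,		// hypothetical
 *		.connect    = sock_no_connect,
 *		.socketpair = sock_no_socketpair,
 *		.accept     = sock_no_accept,
 *		.listen     = sock_no_listen,
 *		.shutdown   = sock_no_shutdown,
 *		.mmap       = sock_no_mmap,
 *		.sendpage   = sock_no_sendpage,
 *		...
 *	};
 */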

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_error_report(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_sync(sk->sk_sleep);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible_sync(sk->sk_sleep);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	read_unlock(&sk->sk_callback_lock);
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}

EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (timer_pending(timer) && del_timer(timer))
		__sock_put(sk);
}

EXPORT_SYMBOL(sk_stop_timer);

void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head	=	NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation	=	GFP_KERNEL;
	sk->sk_rcvbuf		=	sysctl_rmem_default;
	sk->sk_sndbuf		=	sysctl_wmem_default;
	sk->sk_state		=	TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type	=	sock->type;
		sk->sk_sleep	=	&sock->wait;
		sock->sk	=	sk;
	} else
		sk->sk_sleep	=	NULL;

	rwlock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
			af_callback_keys + sk->sk_family,
			af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change	=	sock_def_wakeup;
	sk->sk_data_ready	=	sock_def_readable;
	sk->sk_write_space	=	sock_def_write_space;
	sk->sk_error_report	=	sock_def_error_report;
	sk->sk_destruct		=	sock_def_destruct;

	sk->sk_sndmsg_page	=	NULL;
	sk->sk_sndmsg_off	=	0;

	sk->sk_peercred.pid 	=	0;
	sk->sk_peercred.uid	=	-1;
	sk->sk_peercred.gid	=	-1;
	sk->sk_write_pending	=	0;
	sk->sk_rcvlowat		=	1;
	sk->sk_rcvtimeo		=	MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo		=	MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}

EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);
	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
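
/*
 * Usage sketch (illustrative): the canonical pattern for process context.
 * lock_sock() marks the socket owned and may sleep; release_sock() runs
 * any backlog that piled up from softirq context while the lock was held.
 *
 *	lock_sock(sk);		// expands to lock_sock_nested(sk, 0)
 *	...			// mutate socket state safely here
 *	release_sock(sk);
 *
 * Softirq paths use bh_lock_sock()/bh_unlock_sock() on sk_lock.slock
 * instead, and queue to the backlog when the owner bit is set.
 */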

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;
	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_TIMESTAMP)) {
		sock_set_flag(sk, SOCK_TIMESTAMP);
		net_enable_timestamp();
	}
}
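
/*
 * Illustrative userspace counterpart (a sketch): SIOCGSTAMP reaches
 * sock_get_timestamp() above and reports when the most recently read
 * packet arrived; the first call also switches timestamping on for the
 * socket.
 *
 *	struct timeval tv;
 *
 *	recv(fd, buf, sizeof(buf), 0);
 *	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
 *		printf("packet at %ld.%06ld\n",
 *		       (long)tv.tv_sec, (long)tv.tv_usec);
 */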

/*
 *	Get a socket option on a socket.
 *
 *	FIX: POSIX 1003.1g is very ambiguous here. It states that
 *	asynchronous errors should be reported by getsockopt. We assume
 *	this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}

EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *	Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the network still does.
	 *
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and be purged by the
	 * socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and
	 * probably our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);
	sock_put(sk);
}

EXPORT_SYMBOL(sk_common_release);

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	int cpu = smp_processor_id();
	per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			static const char mask[] = "request_sock_%s";

			prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			sprintf(prot->rsk_prot->slab_name, mask, prot->name);
			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			static const char mask[] = "tw_sock_%s";

			prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	write_unlock(&proto_list_lock);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}

EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	release_proto_idx(prot);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}

EXPORT_SYMBOL(proto_unregister);
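
/*
 * Usage sketch (illustrative): the minimum a protocol supplies before
 * calling proto_register(). The names are hypothetical; with alloc_slab
 * set, sk_alloc() will carve sockets out of a dedicated kmem cache.
 *
 *	static struct proto example_proto = {
 *		.name		= "EXAMPLE",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct example_sock),	// hypothetical
 *	};
 *
 *	err = proto_register(&example_proto, 1);
 *	...
 *	proto_unregister(&example_proto);	// on module unload
 */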
int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
			       prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			static const char mask[] = "request_sock_%s";

			prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			sprintf(prot->rsk_prot->slab_name, mask, prot->name);
			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
				       prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			static const char mask[] = "tw_sock_%s";

			prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	write_lock(&proto_list_lock);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	write_unlock(&proto_list_lock);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}

EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	write_lock(&proto_list_lock);
	release_proto_idx(prot);
	list_del(&prot->node);
	write_unlock(&proto_list_lock);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}

EXPORT_SYMBOL(proto_unregister);
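
/*
 * Illustrative sketch, compiled out: the EXAMPLE protocol below is
 * hypothetical.  A module-backed protocol typically registers its
 * struct proto on load and unregisters it on unload; passing 1 as the
 * second argument asks proto_register() to create a dedicated slab
 * cache of obj_size bytes per socket.
 */
#if 0
static struct proto example_prot = {
	.name		= "EXAMPLE",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

static int __init example_init(void)
{
	return proto_register(&example_prot, 1);
}

static void __exit example_exit(void)
{
	proto_unregister(&example_prot);
}

module_init(example_init);
module_exit(example_exit);
#endif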
"no" : "yes", 2180 module_name(proto->owner), 2181 proto_method_implemented(proto->close), 2182 proto_method_implemented(proto->connect), 2183 proto_method_implemented(proto->disconnect), 2184 proto_method_implemented(proto->accept), 2185 proto_method_implemented(proto->ioctl), 2186 proto_method_implemented(proto->init), 2187 proto_method_implemented(proto->destroy), 2188 proto_method_implemented(proto->shutdown), 2189 proto_method_implemented(proto->setsockopt), 2190 proto_method_implemented(proto->getsockopt), 2191 proto_method_implemented(proto->sendmsg), 2192 proto_method_implemented(proto->recvmsg), 2193 proto_method_implemented(proto->sendpage), 2194 proto_method_implemented(proto->bind), 2195 proto_method_implemented(proto->backlog_rcv), 2196 proto_method_implemented(proto->hash), 2197 proto_method_implemented(proto->unhash), 2198 proto_method_implemented(proto->get_port), 2199 proto_method_implemented(proto->enter_memory_pressure)); 2200 } 2201 2202 static int proto_seq_show(struct seq_file *seq, void *v) 2203 { 2204 if (v == &proto_list) 2205 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", 2206 "protocol", 2207 "size", 2208 "sockets", 2209 "memory", 2210 "press", 2211 "maxhdr", 2212 "slab", 2213 "module", 2214 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); 2215 else 2216 proto_seq_printf(seq, list_entry(v, struct proto, node)); 2217 return 0; 2218 } 2219 2220 static const struct seq_operations proto_seq_ops = { 2221 .start = proto_seq_start, 2222 .next = proto_seq_next, 2223 .stop = proto_seq_stop, 2224 .show = proto_seq_show, 2225 }; 2226 2227 static int proto_seq_open(struct inode *inode, struct file *file) 2228 { 2229 return seq_open_net(inode, file, &proto_seq_ops, 2230 sizeof(struct seq_net_private)); 2231 } 2232 2233 static const struct file_operations proto_seq_fops = { 2234 .owner = THIS_MODULE, 2235 .open = proto_seq_open, 2236 .read = seq_read, 2237 .llseek = seq_lseek, 2238 .release = seq_release_net, 2239 }; 2240 2241 static __net_init int proto_init_net(struct net *net) 2242 { 2243 if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops)) 2244 return -ENOMEM; 2245 2246 return 0; 2247 } 2248 2249 static __net_exit void proto_exit_net(struct net *net) 2250 { 2251 proc_net_remove(net, "protocols"); 2252 } 2253 2254 2255 static __net_initdata struct pernet_operations proto_net_ops = { 2256 .init = proto_init_net, 2257 .exit = proto_exit_net, 2258 }; 2259 2260 static int __init proto_init(void) 2261 { 2262 return register_pernet_subsys(&proto_net_ops); 2263 } 2264 2265 subsys_initcall(proto_init); 2266 2267 #endif /* PROC_FS */ 2268 2269 EXPORT_SYMBOL(sk_alloc); 2270 EXPORT_SYMBOL(sk_free); 2271 EXPORT_SYMBOL(sk_send_sigurg); 2272 EXPORT_SYMBOL(sock_alloc_send_skb); 2273 EXPORT_SYMBOL(sock_init_data); 2274 EXPORT_SYMBOL(sock_kfree_s); 2275 EXPORT_SYMBOL(sock_kmalloc); 2276 EXPORT_SYMBOL(sock_no_accept); 2277 EXPORT_SYMBOL(sock_no_bind); 2278 EXPORT_SYMBOL(sock_no_connect); 2279 EXPORT_SYMBOL(sock_no_getname); 2280 EXPORT_SYMBOL(sock_no_getsockopt); 2281 EXPORT_SYMBOL(sock_no_ioctl); 2282 EXPORT_SYMBOL(sock_no_listen); 2283 EXPORT_SYMBOL(sock_no_mmap); 2284 EXPORT_SYMBOL(sock_no_poll); 2285 EXPORT_SYMBOL(sock_no_recvmsg); 2286 EXPORT_SYMBOL(sock_no_sendmsg); 2287 EXPORT_SYMBOL(sock_no_sendpage); 2288 EXPORT_SYMBOL(sock_no_setsockopt); 2289 EXPORT_SYMBOL(sock_no_shutdown); 2290 EXPORT_SYMBOL(sock_no_socketpair); 2291 EXPORT_SYMBOL(sock_rfree); 2292 EXPORT_SYMBOL(sock_setsockopt); 2293 EXPORT_SYMBOL(sock_wfree); 2294 
EXPORT_SYMBOL(sk_alloc);
EXPORT_SYMBOL(sk_free);
EXPORT_SYMBOL(sk_send_sigurg);
EXPORT_SYMBOL(sock_alloc_send_skb);
EXPORT_SYMBOL(sock_init_data);
EXPORT_SYMBOL(sock_kfree_s);
EXPORT_SYMBOL(sock_kmalloc);
EXPORT_SYMBOL(sock_no_accept);
EXPORT_SYMBOL(sock_no_bind);
EXPORT_SYMBOL(sock_no_connect);
EXPORT_SYMBOL(sock_no_getname);
EXPORT_SYMBOL(sock_no_getsockopt);
EXPORT_SYMBOL(sock_no_ioctl);
EXPORT_SYMBOL(sock_no_listen);
EXPORT_SYMBOL(sock_no_mmap);
EXPORT_SYMBOL(sock_no_poll);
EXPORT_SYMBOL(sock_no_recvmsg);
EXPORT_SYMBOL(sock_no_sendmsg);
EXPORT_SYMBOL(sock_no_sendpage);
EXPORT_SYMBOL(sock_no_setsockopt);
EXPORT_SYMBOL(sock_no_shutdown);
EXPORT_SYMBOL(sock_no_socketpair);
EXPORT_SYMBOL(sock_rfree);
EXPORT_SYMBOL(sock_setsockopt);
EXPORT_SYMBOL(sock_wfree);
EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max);