/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly,
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>

#include <asm/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>

#include <linux/filter.h>

#include <trace/events/sock.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

#include <net/busy_poll.h>

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

#ifdef CONFIG_MEMCG_KMEM
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	struct proto *proto;
	int ret = 0;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry(proto, &proto_list, node) {
		if (proto->init_cgroup) {
			ret = proto->init_cgroup(memcg, ss);
			if (ret)
				goto out;
		}
	}

	mutex_unlock(&proto_list_mutex);
	return ret;
out:
	list_for_each_entry_continue_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
	return ret;
}

void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
	struct proto *proto;

	mutex_lock(&proto_list_mutex);
	list_for_each_entry_reverse(proto, &proto_list, node)
		if (proto->destroy_cgroup)
			proto->destroy_cgroup(memcg);
	mutex_unlock(&proto_list_mutex);
}
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
#endif

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
	"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
	"sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
	"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
	"sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
	"sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
	"sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
	"sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
	"sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
	"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
	"sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
	"sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
	"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
	"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
	"sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
	"slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
	"slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
	"slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
	"slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
	"slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
	"slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
	"slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
	"slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
	"slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
	"slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
	"slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
	"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
	"slock-AF_NFC"   , "slock-AF_VSOCK"    , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
	"clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
	"clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
	"clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
	"clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
	"clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
	"clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
	"clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
	"clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
	"clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
	"clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
	"clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
	"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
	"clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms.  This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	SKB_TRUESIZE(256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
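/* A worked example of the sizing above (illustrative only; the exact
 * numbers depend on architecture and configuration): SKB_TRUESIZE(256)
 * is 256 bytes of payload plus the aligned sizes of struct sk_buff and
 * struct skb_shared_info.  If that works out to, say, 768 bytes on a
 * given 64-bit build, then:
 *
 *	SK_WMEM_MAX = 768 * 256 = 196608 bytes (192 KiB)
 *
 * so the default buffers admit roughly 256 small packets worth of true
 * size, independent of how large struct sk_buff happens to be.
 */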
/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL_GPL(memalloc_socks);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_key_slow_inc(&memalloc_socks);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_key_slow_dec(&memalloc_socks);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
	 * it has rmem allocations there is a risk that the user of the
	 * socket cannot make forward progress due to exceeding the rmem
	 * limits. By rights, sk_clear_memalloc() should only be called
	 * on sockets being torn down, but warn and reset the accounting if
	 * that assumption breaks.
	 */
	if (WARN_ON(sk->sk_forward_alloc))
		sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned long pflags = current->flags;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	current->flags |= PF_MEMALLOC;
	ret = sk->sk_backlog_rcv(sk, skb);
	tsk_restore_flags(current, pflags, PF_MEMALLOC);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
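/* A worked example of the jiffies conversion above (illustrative; HZ is
 * configuration dependent): with HZ = 1000, a timeout of
 * { .tv_sec = 2, .tv_usec = 500000 } becomes
 *
 *	2 * 1000 + (500000 + 999) / 1000 = 2500 jiffies
 *
 * while with HZ = 100 the same timeval becomes 2 * 100 + 50 = 250 jiffies.
 * The (1000000/HZ - 1) term rounds the microsecond part up, so a request
 * is never silently shortened to less than one tick.
 */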
static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
			warncomm, name);
		warned++;
	}
}

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (!(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	/* we escape from the rcu protected region, make sure we don't leak
	 * a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_setbindtodevice(struct sock *sk, char __user *optval,
				int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}
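/* Illustrative userspace counterpart (a sketch; the caller needs
 * CAP_NET_RAW, and "eth0" is just an assumed interface name):
 *
 *	const char ifname[] = "eth0";
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
 *		       ifname, sizeof(ifname)) < 0)
 *		perror("SO_BINDTODEVICE");
 *
 * Passing an empty string (or a zero option length) removes the binding
 * again, as described in the comment above.
 */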
static int sock_getbindtodevice(struct sock *sk, char __user *optval,
				int __user *optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (sk->sk_bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_user(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (put_user(len, optlen))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		sk->sk_reuseport = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, sysctl_rmem_max);
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead.  Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) ||
		    ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_PEEK_OFF:
		if (sock->ops->set_peek_off)
			sock->ops->set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		/* allow unprivileged users to decrease the value */
		if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else {
			if (val < 0)
				ret = -EINVAL;
			else
				sk->sk_ll_usec = val;
		}
		break;
#endif

	case SO_MAX_PACING_RATE:
		sk->sk_max_pacing_rate = val;
		sk->sk_pacing_rate = min(sk->sk_pacing_rate,
					 sk->sk_max_pacing_rate);
		break;

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
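/* What the SO_RCVBUF doubling above looks like from userspace (a sketch;
 * the stored value is also clamped by sysctl_rmem_max and SOCK_MIN_RCVBUF):
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &out, &len);
 *
 * getsockopt() now reports 131072: the kernel stored val * 2 and, as the
 * comment in sock_setsockopt() explains, returns the value it actually
 * used rather than the value that was requested.
 */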
void cred_to_ucred(struct pid *pid, const struct cred *cred,
		   struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}
EXPORT_SYMBOL_GPL(cred_to_ucred);

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		if (copy_to_user(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!sock->ops->set_peek_off)
			return -EOPNOTSUPP;

		v.val = sk->sk_peek_off;
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, (struct sock_filter __user *)optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = sk->sk_ll_usec;
		break;
#endif

	case SO_MAX_PACING_RATE:
		v.val = sk->sk_max_pacing_rate;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
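/* Illustrative userspace use of SO_PEERCRED (a sketch; only meaningful on
 * a connected AF_UNIX socket, where the peer's credentials were recorded
 * at connect/accept time):
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len) == 0)
 *		printf("peer pid=%d uid=%u gid=%u\n",
 *		       peer.pid, peer.uid, peer.gid);
 *
 * The pid is translated with pid_vnr() and the uid/gid into the caller's
 * user namespace by cred_to_ucred() above.
 */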
/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));

	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));

#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
{
	unsigned long nulls1, nulls2;

	nulls1 = offsetof(struct sock, __sk_common.skc_node.next);
	nulls2 = offsetof(struct sock, __sk_common.skc_portaddr_node.next);
	if (nulls1 > nulls2)
		swap(nulls1, nulls2);

	if (nulls1 != 0)
		memset((char *)sk, 0, nulls1);
	memset((char *)sk + nulls1 + sizeof(void *), 0,
	       nulls2 - nulls1 - sizeof(void *));
	memset((char *)sk + nulls2 + sizeof(void *), 0,
	       size - nulls2 - sizeof(void *));
}
EXPORT_SYMBOL(sk_prot_clear_portaddr_nulls);

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
				  int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			if (prot->clear_sk)
				prot->clear_sk(sk, prot->obj_size);
			else
				sk_prot_clear_nulls(sk, prot->obj_size);
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
void sock_update_classid(struct sock *sk)
{
	u32 classid;

	classid = task_cls_classid(current);
	if (classid != sk->sk_classid)
		sk->sk_classid = classid;
}
EXPORT_SYMBOL(sock_update_classid);
#endif

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
void sock_update_netprioidx(struct sock *sk)
{
	if (in_interrupt())
		return;

	sk->sk_cgrp_prioidx = task_netprioidx(current);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);

		sock_update_classid(sk);
		sock_update_netprioidx(sk);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);
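/* Illustrative kernel-side allocation path (a sketch; "my_proto" and the
 * surrounding create handler with its "sock" argument are assumed, not
 * part of this file):
 *
 *	struct sock *sk;
 *
 *	sk = sk_alloc(net, PF_INET, GFP_KERNEL, &my_proto);
 *	if (!sk)
 *		return -ENOBUFS;
 *	sock_init_data(sock, sk);
 *	...
 *	sk_free(sk);	(drops the initial sk_wmem_alloc reference)
 *
 * sk_alloc() zeroes the object (__GFP_ZERO), installs the per-family
 * lockdep keys via sock_lock_init(), and starts sk_wmem_alloc at 1 so
 * that sk_free() and sock_wfree() below can share the release logic.
 */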
static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		RCU_INIT_POINTER(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);

	if (atomic_read(&sk->sk_omem_alloc))
		pr_debug("%s: optmem leakage (%d bytes) detected\n",
			 __func__, atomic_read(&sk->sk_omem_alloc));

	if (sk->sk_peer_cred)
		put_cred(sk->sk_peer_cred);
	put_pid(sk->sk_peer_pid);
	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can know if
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * Last sock_put should drop a reference to sk->sk_net. It has already
 * been dropped in sk_change_net. Taking a reference to a stopping
 * namespace is not an option.
 * Take a reference to a socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

static void sk_update_clone(const struct sock *sk, struct sock *newsk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		sock_update_memcg(newsk);
}

/**
 *	sk_clone_lock - clone a socket, and lock its clone
 *	@sk: the socket to clone
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 *	Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		spin_lock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = rcu_dereference_protected(newsk->sk_filter, 1);
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			bh_unlock_sock(newsk);
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_wq = NULL;

		sk_update_clone(sk, newsk);

		if (newsk->sk_prot->sockets_allocated)
			sk_sockets_allocated_inc(newsk);

		if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone_lock);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	sk->sk_route_caps &= ~sk->sk_route_nocaps;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

/*
 *	Simple resource managers for sockets.
 */


/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc, this will be released
		 * after sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

void skb_orphan_partial(struct sk_buff *skb)
{
	/* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
	 * so we do not completely orphan the skb, but transfer all
	 * accounted bytes but one, to avoid unexpected reorders.
	 */
	if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
	    || skb->destructor == tcp_wfree
#endif
		) {
		atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
		skb->truesize = 1;
	} else {
		skb_orphan(skb);
	}
}
EXPORT_SYMBOL(skb_orphan_partial);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	atomic_sub(len, &sk->sk_rmem_alloc);
	sk_mem_uncharge(sk, len);
}
EXPORT_SYMBOL(sock_rfree);

void sock_edemux(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

#ifdef CONFIG_INET
	if (sk->sk_state == TCP_TIME_WAIT)
		inet_twsk_put(inet_twsk(sk));
	else
#endif
		sock_put(sk);
}
EXPORT_SYMBOL(sock_edemux);

kuid_t sock_i_uid(struct sock *sk)
{
	kuid_t uid;

	read_lock_bh(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
	read_unlock_bh(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock_bh(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock_bh(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);

/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned int)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
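/* Illustrative pairing (a sketch; "struct my_opt" is an assumed type):
 * option memory charged to sk_omem_alloc must be released with the same
 * size it was charged with, otherwise the accounting above leaks and
 * __sk_free() will report "optmem leakage":
 *
 *	struct my_opt *opt;
 *
 *	opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */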
/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
 * I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk_sleep(sk), &wait);
	return timeo;
}


/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode, int max_page_order)
{
	struct sk_buff *skb = NULL;
	unsigned long chunk;
	gfp_t gfp_mask;
	long timeo;
	int err;
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	struct page *page;
	int i;

	err = -EMSGSIZE;
	if (npages > MAX_SKB_FRAGS)
		goto failure;

	timeo = sock_sndtimeo(sk, noblock);
	while (!skb) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
			set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			err = -EAGAIN;
			if (!timeo)
				goto failure;
			if (signal_pending(current))
				goto interrupted;
			timeo = sock_wait_for_wmem(sk, timeo);
			continue;
		}

		err = -ENOBUFS;
		gfp_mask = sk->sk_allocation;
		if (gfp_mask & __GFP_WAIT)
			gfp_mask |= __GFP_REPEAT;

		skb = alloc_skb(header_len, gfp_mask);
		if (!skb)
			goto failure;

		skb->truesize += data_len;

		for (i = 0; npages > 0; i++) {
			int order = max_page_order;

			while (order) {
				if (npages >= 1 << order) {
					page = alloc_pages(sk->sk_allocation |
							   __GFP_COMP | __GFP_NOWARN,
							   order);
					if (page)
						goto fill_page;
				}
				order--;
			}
			page = alloc_page(sk->sk_allocation);
			if (!page)
				goto failure;
fill_page:
			chunk = min_t(unsigned long, data_len,
				      PAGE_SIZE << order);
			skb_fill_page_desc(skb, i, page, 0, chunk);
			data_len -= chunk;
			npages -= 1 << order;
		}
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	kfree_skb(skb);
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
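/* Illustrative sendmsg-path usage (a sketch; "len", "hlen" and the "msg"
 * context are assumed): a datagram protocol typically lets this helper do
 * the sndbuf accounting, blocking and signal handling in one call:
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	skb = sock_alloc_send_skb(sk, len + hlen,
 *				  msg->msg_flags & MSG_DONTWAIT, &err);
 *	if (!skb)
 *		return err;
 *
 * On failure *errcode is set (for example -EAGAIN for a non-blocking
 * socket whose send buffer is full), mirroring the failure paths above.
 */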
/* On 32bit arches, an skb frag is limited to 2^15 */
#define SKB_FRAG_PAGE_ORDER	get_order(32768)

bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	int order;

	if (pfrag->page) {
		if (atomic_read(&pfrag->page->_count) == 1) {
			pfrag->offset = 0;
			return true;
		}
		if (pfrag->offset < pfrag->size)
			return true;
		put_page(pfrag->page);
	}

	/* We restrict high order allocations to users that can afford to wait */
	order = (sk->sk_allocation & __GFP_WAIT) ? SKB_FRAG_PAGE_ORDER : 0;

	do {
		gfp_t gfp = sk->sk_allocation;

		if (order)
			gfp |= __GFP_COMP | __GFP_NOWARN;
		pfrag->page = alloc_pages(gfp, order);
		if (likely(pfrag->page)) {
			pfrag->offset = 0;
			pfrag->size = PAGE_SIZE << order;
			return true;
		}
	} while (--order >= 0);

	sk_enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}
EXPORT_SYMBOL(sk_page_frag_refill);
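/* A concrete reading of the fallback above (illustrative; assumes 4 KiB
 * pages): SKB_FRAG_PAGE_ORDER = get_order(32768) = 3, so a sleeping
 * (__GFP_WAIT) allocation first tries one 32 KiB compound page, then
 * 16 KiB, 8 KiB and finally a single 4 KiB page before giving up and
 * signalling memory pressure.  Atomic allocations skip straight to
 * order 0.
 */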
static void __lock_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
	__releases(&sk->sk_lock.slock)
	__acquires(&sk->sk_lock.slock)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			prefetch(next);
			WARN_ON_ONCE(skb_dst_is_noref(skb));
			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk: sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk_sleep(sk), &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	long allocated;
	int parent_status = UNDER_LIMIT;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;

	allocated = sk_memory_allocated_add(sk, amt, &parent_status);

	/* Under limit. */
	if (parent_status == UNDER_LIMIT &&
	    allocated <= sk_prot_mem_limits(sk, 0)) {
		sk_leave_memory_pressure(sk);
		return 1;
	}

	/* Under pressure. (we or our parents) */
	if ((parent_status > SOFT_LIMIT) ||
	    allocated > sk_prot_mem_limits(sk, 1))
		sk_enter_memory_pressure(sk);

	/* Over hard limit (we or our parents) */
	if ((parent_status == OVER_LIMIT) ||
	    (allocated > sk_prot_mem_limits(sk, 2)))
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;

	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
			return 1;
	}

	if (sk_has_memory_pressure(sk)) {
		int alloc;

		if (!sk_under_memory_pressure(sk))
			return 1;
		alloc = sk_sockets_allocated_read_positive(sk);
		if (sk_prot_mem_limits(sk, 2) > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	trace_sock_exceed_buf_limit(sk, prot, allocated);

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;

	sk_memory_allocated_sub(sk, amt);

	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	sk_memory_allocated_sub(sk,
				sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (sk_under_memory_pressure(sk) &&
	    (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
		sk_leave_memory_pressure(sk);
}
EXPORT_SYMBOL(__sk_mem_reclaim);
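/* A worked example of the accounting above (illustrative; assumes
 * SK_MEM_QUANTUM == PAGE_SIZE == 4096): charging a 3000-byte skb calls
 * __sk_mem_schedule(sk, 3000, SK_MEM_RECV), so
 *
 *	amt = sk_mem_pages(3000) = 1 page
 *
 * sk_forward_alloc grows by 4096 and memory_allocated by one page.  The
 * unused 1096 bytes remain in sk_forward_alloc as a prepaid quota for
 * later charges; __sk_mem_reclaim() returns only whole pages, keeping
 * the sub-page remainder (sk_forward_alloc & (SK_MEM_QUANTUM - 1)).
 */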
/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
		    int len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
		    int *len, int peer)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
	return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, unsigned int optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
		       char __user *optval, int __user *optlen)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
		    size_t len, int flags)
{
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	/* Mirror missing mmap method error code */
	return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct msghdr msg = {.msg_flags = flags};
	struct kvec iov;
	char *kaddr = kmap(page);

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	res = kernel_sendmsg(sock, &msg, &iov, 1, size);
	kunmap(page);
	return res;
}
EXPORT_SYMBOL(sock_no_sendpage);

/*
 *	Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static void sock_def_error_report(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_poll(&wq->wait, POLLERR);
	sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
	rcu_read_unlock();
}
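/*
 * Example (illustrative sketch, not part of this file): a protocol
 * family that does not support every operation can point the unused
 * slots of its proto_ops at the stubs above. "my_dgram_ops" and
 * PF_MYPROTO are hypothetical names:
 *
 *	static const struct proto_ops my_dgram_ops = {
 *		.family		= PF_MYPROTO,
 *		.owner		= THIS_MODULE,
 *		.socketpair	= sock_no_socketpair,
 *		.accept		= sock_no_accept,
 *		.listen		= sock_no_listen,
 *		.mmap		= sock_no_mmap,
 *		...
 *	};
 */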
static void sock_def_readable(struct sock *sk, int len)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI |
						POLLRDNORM | POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}

static void sock_def_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();

	/* Do not wake up a writer until he can make "significant"
	 * progress.  --DaveM
	 */
	if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
		wq = rcu_dereference(sk->sk_wq);
		if (wq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
						POLLWRNORM | POLLWRBAND);

		/* Should agree with poll, otherwise some programs break */
		if (sock_writeable(sk))
			sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}

	rcu_read_unlock();
}

static void sock_def_destruct(struct sock *sk)
{
	kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
	if (sk->sk_socket && sk->sk_socket->file)
		if (send_sigurg(&sk->sk_socket->file->f_owner))
			sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
		    unsigned long expires)
{
	if (!mod_timer(timer, expires))
		sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
	if (del_timer(timer))
		__sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
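/*
 * Example (illustrative sketch, not part of this file): the two helpers
 * above keep a socket reference while a timer is pending, so a protocol
 * arms and disarms its timers like this ("delay" is hypothetical):
 *
 *	sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
 *	...
 *	sk_stop_timer(sk, &sk->sk_timer);
 */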
void sock_init_data(struct socket *sock, struct sock *sk)
{
	skb_queue_head_init(&sk->sk_receive_queue);
	skb_queue_head_init(&sk->sk_write_queue);
	skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
	skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

	sk->sk_send_head = NULL;

	init_timer(&sk->sk_timer);

	sk->sk_allocation = GFP_KERNEL;
	sk->sk_rcvbuf = sysctl_rmem_default;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_state = TCP_CLOSE;
	sk_set_socket(sk, sock);

	sock_set_flag(sk, SOCK_ZAPPED);

	if (sock) {
		sk->sk_type = sock->type;
		sk->sk_wq = sock->wq;
		sock->sk = sk;
	} else
		sk->sk_wq = NULL;

	spin_lock_init(&sk->sk_dst_lock);
	rwlock_init(&sk->sk_callback_lock);
	lockdep_set_class_and_name(&sk->sk_callback_lock,
				   af_callback_keys + sk->sk_family,
				   af_family_clock_key_strings[sk->sk_family]);

	sk->sk_state_change = sock_def_wakeup;
	sk->sk_data_ready = sock_def_readable;
	sk->sk_write_space = sock_def_write_space;
	sk->sk_error_report = sock_def_error_report;
	sk->sk_destruct = sock_def_destruct;

	sk->sk_frag.page = NULL;
	sk->sk_frag.offset = 0;
	sk->sk_peek_off = -1;

	sk->sk_peer_pid = NULL;
	sk->sk_peer_cred = NULL;
	sk->sk_write_pending = 0;
	sk->sk_rcvlowat = 1;
	sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

	sk->sk_stamp = ktime_set(-1L, 0);

#ifdef CONFIG_NET_RX_BUSY_POLL
	sk->sk_napi_id = 0;
	sk->sk_ll_usec = sysctl_net_busy_read;
#endif

	sk->sk_max_pacing_rate = ~0U;
	/*
	 * Before updating sk_refcnt, we must commit prior changes to memory
	 * (see Documentation/RCU/rculist_nulls.txt for details).
	 */
	smp_wmb();
	atomic_set(&sk->sk_refcnt, 1);
	atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);

void lock_sock_nested(struct sock *sk, int subclass)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_lock.owned)
		__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
	local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
	/*
	 * The sk_lock has mutex_unlock() semantics:
	 */
	mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

	spin_lock_bh(&sk->sk_lock.slock);
	if (sk->sk_backlog.tail)
		__release_sock(sk);

	if (sk->sk_prot->release_cb)
		sk->sk_prot->release_cb(sk);

	sk->sk_lock.owned = 0;
	if (waitqueue_active(&sk->sk_lock.wq))
		wake_up(&sk->sk_lock.wq);
	spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);

/**
 * lock_sock_fast - fast version of lock_sock
 * @sk: socket
 *
 * This version should be used for very small sections, where the process
 * won't block. It returns false if the fast path was taken
 * (sk_lock.slock locked, owned = 0, BH disabled), and true if the slow
 * path was taken (sk_lock.slock unlocked, owned = 1, BH enabled).
 */
bool lock_sock_fast(struct sock *sk)
{
	might_sleep();
	spin_lock_bh(&sk->sk_lock.slock);

	if (!sk->sk_lock.owned)
		/*
		 * Note : We must disable BH
		 */
		return false;

	__lock_sock(sk);
	sk->sk_lock.owned = 1;
	spin_unlock(&sk->sk_lock.slock);
	/*
	 * The sk_lock has mutex_lock() semantics here:
	 */
	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
	local_bh_enable();
	return true;
}
EXPORT_SYMBOL(lock_sock_fast);
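/*
 * Example (illustrative sketch, not part of this file): lock_sock_fast()
 * must be paired with unlock_sock_fast(), passing back its return value
 * so the matching unlock path is taken:
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... short, non-blocking work on the socket ...
 *
 *	unlock_sock_fast(sk, slow);
 */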
int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
	struct timeval tv;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	tv = ktime_to_timeval(sk->sk_stamp);
	if (tv.tv_sec == -1)
		return -ENOENT;
	if (tv.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		tv = ktime_to_timeval(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
	struct timespec ts;

	if (!sock_flag(sk, SOCK_TIMESTAMP))
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	ts = ktime_to_timespec(sk->sk_stamp);
	if (ts.tv_sec == -1)
		return -ENOENT;
	if (ts.tv_sec == 0) {
		sk->sk_stamp = ktime_get_real();
		ts = ktime_to_timespec(sk->sk_stamp);
	}
	return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);

void sock_enable_timestamp(struct sock *sk, int flag)
{
	if (!sock_flag(sk, flag)) {
		unsigned long previous_flags = sk->sk_flags;

		sock_set_flag(sk, flag);
		/*
		 * We just set one of the two flags which require net
		 * time stamping, but time stamping might have been on
		 * already because of the other one.
		 */
		if (!(previous_flags & SK_FLAGS_TIMESTAMP))
			net_enable_timestamp();
	}
}

int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
		       int level, int type)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	int copied, err;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
EXPORT_SYMBOL(sock_recv_errqueue);

/*
 * Get a socket option on a socket.
 *
 * FIX: POSIX 1003.1g is very ambiguous here. It states that
 * asynchronous errors should be reported by getsockopt. We assume
 * this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_getsockopt != NULL)
		return sk->sk_prot->compat_getsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int addr_len = 0;
	int err;

	err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
				   flags & ~MSG_DONTWAIT, &addr_len);
	if (err >= 0)
		msg->msg_namelen = addr_len;
	return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);
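/*
 * Example (illustrative sketch, not part of this file): a protocol's
 * recvmsg() can service MSG_ERRQUEUE requests with the helper above,
 * e.g. for transmit timestamps on packet sockets:
 *
 *	if (flags & MSG_ERRQUEUE)
 *		return sock_recv_errqueue(sk, msg, len,
 *					  SOL_PACKET, PACKET_TX_TIMESTAMP);
 */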
/*
 * Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
			   char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;

	if (sk->sk_prot->compat_setsockopt != NULL)
		return sk->sk_prot->compat_setsockopt(sk, level, optname,
						      optval, optlen);
	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
	if (sk->sk_prot->destroy)
		sk->sk_prot->destroy(sk);

	/*
	 * Observation: when sk_common_release is called, processes have
	 * no access to the socket, but the net still has.
	 * Step one, detach it from networking:
	 *
	 * A. Remove from hash tables.
	 */

	sk->sk_prot->unhash(sk);

	/*
	 * At this point the socket cannot receive new packets, but it is
	 * possible that some packets are in flight because some CPU runs
	 * the receiver and did a hash table lookup before we unhashed the
	 * socket. They will reach the receive queue and will be purged by
	 * the socket destructor.
	 *
	 * Also we still have packets pending on the receive queue and,
	 * probably, our own packets waiting in device queues. sock_destroy
	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
	 */

	sock_orphan(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	if (sk->sk_frag.page) {
		put_page(sk->sk_frag.page);
		sk->sk_frag.page = NULL;
	}

	sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR	64	/* should be enough for the first time */
struct prot_inuse {
	int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
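/*
 * Example (illustrative sketch, not part of this file): protocols update
 * these counters from their hash/unhash callbacks:
 *
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);	(on hash)
 *	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);	(on unhash)
 */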
static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.inuse = alloc_percpu(struct prot_inuse);
	return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
	__this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
	int cpu, idx = prot->inuse_idx;
	int res = 0;

	for_each_possible_cpu(cpu)
		res += per_cpu(prot_inuse, cpu).val[idx];

	return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR - 1)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif

int proto_register(struct proto *prot, int alloc_slab)
{
	if (alloc_slab) {
		prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | prot->slab_flags,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (prot->rsk_prot != NULL) {
			prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
			if (prot->rsk_prot->slab_name == NULL)
				goto out_free_sock_slab;

			prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
								 prot->rsk_prot->obj_size, 0,
								 SLAB_HWCACHE_ALIGN, NULL);

			if (prot->rsk_prot->slab == NULL) {
				pr_crit("%s: Can't create request sock SLAB cache!\n",
					prot->name);
				goto out_free_request_sock_slab_name;
			}
		}

		if (prot->twsk_prot != NULL) {
			prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

			if (prot->twsk_prot->twsk_slab_name == NULL)
				goto out_free_request_sock_slab;

			prot->twsk_prot->twsk_slab =
				kmem_cache_create(prot->twsk_prot->twsk_slab_name,
						  prot->twsk_prot->twsk_obj_size,
						  0,
						  SLAB_HWCACHE_ALIGN |
							prot->slab_flags,
						  NULL);
			if (prot->twsk_prot->twsk_slab == NULL)
				goto out_free_timewait_sock_slab_name;
		}
	}

	mutex_lock(&proto_list_mutex);
	list_add(&prot->node, &proto_list);
	assign_proto_idx(prot);
	mutex_unlock(&proto_list_mutex);
	return 0;

out_free_timewait_sock_slab_name:
	kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
	if (prot->rsk_prot && prot->rsk_prot->slab) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		prot->rsk_prot->slab = NULL;
	}
out_free_request_sock_slab_name:
	if (prot->rsk_prot)
		kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;
out:
	return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
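/*
 * Example (illustrative sketch, not part of this file): a protocol
 * module registers its struct proto at init time and unregisters it on
 * exit. "my_proto" and "struct my_sock" are hypothetical:
 *
 *	static struct proto my_proto = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);	(1 => allocate a slab)
 *	...
 *	proto_unregister(&my_proto);
 */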
void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	if (prot->slab != NULL) {
		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}

	if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
		kmem_cache_destroy(prot->rsk_prot->slab);
		kfree(prot->rsk_prot->slab_name);
		prot->rsk_prot->slab = NULL;
	}

	if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
		kmem_cache_destroy(prot->twsk_prot->twsk_slab);
		kfree(prot->twsk_prot->twsk_slab_name);
		prot->twsk_prot->twsk_slab = NULL;
	}
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}
"no" : "yes", 2865 module_name(proto->owner), 2866 proto_method_implemented(proto->close), 2867 proto_method_implemented(proto->connect), 2868 proto_method_implemented(proto->disconnect), 2869 proto_method_implemented(proto->accept), 2870 proto_method_implemented(proto->ioctl), 2871 proto_method_implemented(proto->init), 2872 proto_method_implemented(proto->destroy), 2873 proto_method_implemented(proto->shutdown), 2874 proto_method_implemented(proto->setsockopt), 2875 proto_method_implemented(proto->getsockopt), 2876 proto_method_implemented(proto->sendmsg), 2877 proto_method_implemented(proto->recvmsg), 2878 proto_method_implemented(proto->sendpage), 2879 proto_method_implemented(proto->bind), 2880 proto_method_implemented(proto->backlog_rcv), 2881 proto_method_implemented(proto->hash), 2882 proto_method_implemented(proto->unhash), 2883 proto_method_implemented(proto->get_port), 2884 proto_method_implemented(proto->enter_memory_pressure)); 2885 } 2886 2887 static int proto_seq_show(struct seq_file *seq, void *v) 2888 { 2889 if (v == &proto_list) 2890 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", 2891 "protocol", 2892 "size", 2893 "sockets", 2894 "memory", 2895 "press", 2896 "maxhdr", 2897 "slab", 2898 "module", 2899 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); 2900 else 2901 proto_seq_printf(seq, list_entry(v, struct proto, node)); 2902 return 0; 2903 } 2904 2905 static const struct seq_operations proto_seq_ops = { 2906 .start = proto_seq_start, 2907 .next = proto_seq_next, 2908 .stop = proto_seq_stop, 2909 .show = proto_seq_show, 2910 }; 2911 2912 static int proto_seq_open(struct inode *inode, struct file *file) 2913 { 2914 return seq_open_net(inode, file, &proto_seq_ops, 2915 sizeof(struct seq_net_private)); 2916 } 2917 2918 static const struct file_operations proto_seq_fops = { 2919 .owner = THIS_MODULE, 2920 .open = proto_seq_open, 2921 .read = seq_read, 2922 .llseek = seq_lseek, 2923 .release = seq_release_net, 2924 }; 2925 2926 static __net_init int proto_init_net(struct net *net) 2927 { 2928 if (!proc_create("protocols", S_IRUGO, net->proc_net, &proto_seq_fops)) 2929 return -ENOMEM; 2930 2931 return 0; 2932 } 2933 2934 static __net_exit void proto_exit_net(struct net *net) 2935 { 2936 remove_proc_entry("protocols", net->proc_net); 2937 } 2938 2939 2940 static __net_initdata struct pernet_operations proto_net_ops = { 2941 .init = proto_init_net, 2942 .exit = proto_exit_net, 2943 }; 2944 2945 static int __init proto_init(void) 2946 { 2947 return register_pernet_subsys(&proto_net_ops); 2948 } 2949 2950 subsys_initcall(proto_init); 2951 2952 #endif /* PROC_FS */ 2953