1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Generic socket support routines. Memory allocators, socket lock/release 8 * handler for protocols to use and generic option handler. 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Alan Cox, <A.Cox@swansea.ac.uk> 14 * 15 * Fixes: 16 * Alan Cox : Numerous verify_area() problems 17 * Alan Cox : Connecting on a connecting socket 18 * now returns an error for tcp. 19 * Alan Cox : sock->protocol is set correctly. 20 * and is not sometimes left as 0. 21 * Alan Cox : connect handles icmp errors on a 22 * connect properly. Unfortunately there 23 * is a restart syscall nasty there. I 24 * can't match BSD without hacking the C 25 * library. Ideas urgently sought! 26 * Alan Cox : Disallow bind() to addresses that are 27 * not ours - especially broadcast ones!! 28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost) 29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets, 30 * instead they leave that for the DESTROY timer. 31 * Alan Cox : Clean up error flag in accept 32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer 33 * was buggy. Put a remove_sock() in the handler 34 * for memory when we hit 0. Also altered the timer 35 * code. The ACK stuff can wait and needs major 36 * TCP layer surgery. 37 * Alan Cox : Fixed TCP ack bug, removed remove sock 38 * and fixed timer/inet_bh race. 39 * Alan Cox : Added zapped flag for TCP 40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code 41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb 42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources 43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing. 44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so... 45 * Rick Sladkey : Relaxed UDP rules for matching packets. 46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support 47 * Pauline Middelink : identd support 48 * Alan Cox : Fixed connect() taking signals I think. 49 * Alan Cox : SO_LINGER supported 50 * Alan Cox : Error reporting fixes 51 * Anonymous : inet_create tidied up (sk->reuse setting) 52 * Alan Cox : inet sockets don't set sk->type! 53 * Alan Cox : Split socket option code 54 * Alan Cox : Callbacks 55 * Alan Cox : Nagle flag for Charles & Johannes stuff 56 * Alex : Removed restriction on inet fioctl 57 * Alan Cox : Splitting INET from NET core 58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt() 59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code 60 * Alan Cox : Split IP from generic code 61 * Alan Cox : New kfree_skbmem() 62 * Alan Cox : Make SO_DEBUG superuser only. 63 * Alan Cox : Allow anyone to clear SO_DEBUG 64 * (compatibility fix) 65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput. 66 * Alan Cox : Allocator for a socket is settable. 67 * Alan Cox : SO_ERROR includes soft errors. 68 * Alan Cox : Allow NULL arguments on some SO_ opts 69 * Alan Cox : Generic socket allocation to make hooks 70 * easier (suggested by Craig Metz). 71 * Michael Pall : SO_ERROR returns positive errno again 72 * Steve Whitehouse: Added default destructor to free 73 * protocol private data. 
74 * Steve Whitehouse: Added various other default routines 75 * common to several socket families. 76 * Chris Evans : Call suser() check last on F_SETOWN 77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER. 78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s() 79 * Andi Kleen : Fix write_space callback 80 * Chris Evans : Security fixes - signedness again 81 * Arnaldo C. Melo : cleanups, use skb_queue_purge 82 * 83 * To Fix: 84 */ 85 86 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 87 88 #include <asm/unaligned.h> 89 #include <linux/capability.h> 90 #include <linux/errno.h> 91 #include <linux/errqueue.h> 92 #include <linux/types.h> 93 #include <linux/socket.h> 94 #include <linux/in.h> 95 #include <linux/kernel.h> 96 #include <linux/module.h> 97 #include <linux/proc_fs.h> 98 #include <linux/seq_file.h> 99 #include <linux/sched.h> 100 #include <linux/sched/mm.h> 101 #include <linux/timer.h> 102 #include <linux/string.h> 103 #include <linux/sockios.h> 104 #include <linux/net.h> 105 #include <linux/mm.h> 106 #include <linux/slab.h> 107 #include <linux/interrupt.h> 108 #include <linux/poll.h> 109 #include <linux/tcp.h> 110 #include <linux/init.h> 111 #include <linux/highmem.h> 112 #include <linux/user_namespace.h> 113 #include <linux/static_key.h> 114 #include <linux/memcontrol.h> 115 #include <linux/prefetch.h> 116 #include <linux/compat.h> 117 118 #include <linux/uaccess.h> 119 120 #include <linux/netdevice.h> 121 #include <net/protocol.h> 122 #include <linux/skbuff.h> 123 #include <net/net_namespace.h> 124 #include <net/request_sock.h> 125 #include <net/sock.h> 126 #include <linux/net_tstamp.h> 127 #include <net/xfrm.h> 128 #include <linux/ipsec.h> 129 #include <net/cls_cgroup.h> 130 #include <net/netprio_cgroup.h> 131 #include <linux/sock_diag.h> 132 133 #include <linux/filter.h> 134 #include <net/sock_reuseport.h> 135 #include <net/bpf_sk_storage.h> 136 137 #include <trace/events/sock.h> 138 139 #include <net/tcp.h> 140 #include <net/busy_poll.h> 141 142 static DEFINE_MUTEX(proto_list_mutex); 143 static LIST_HEAD(proto_list); 144 145 static void sock_inuse_add(struct net *net, int val); 146 147 /** 148 * sk_ns_capable - General socket capability test 149 * @sk: Socket to use a capability on or through 150 * @user_ns: The user namespace of the capability to use 151 * @cap: The capability to use 152 * 153 * Test to see if the opener of the socket had when the socket was 154 * created and the current process has the capability @cap in the user 155 * namespace @user_ns. 156 */ 157 bool sk_ns_capable(const struct sock *sk, 158 struct user_namespace *user_ns, int cap) 159 { 160 return file_ns_capable(sk->sk_socket->file, user_ns, cap) && 161 ns_capable(user_ns, cap); 162 } 163 EXPORT_SYMBOL(sk_ns_capable); 164 165 /** 166 * sk_capable - Socket global capability test 167 * @sk: Socket to use a capability on or through 168 * @cap: The global capability to use 169 * 170 * Test to see if the opener of the socket had when the socket was 171 * created and the current process has the capability @cap in all user 172 * namespaces. 
173 */ 174 bool sk_capable(const struct sock *sk, int cap) 175 { 176 return sk_ns_capable(sk, &init_user_ns, cap); 177 } 178 EXPORT_SYMBOL(sk_capable); 179 180 /** 181 * sk_net_capable - Network namespace socket capability test 182 * @sk: Socket to use a capability on or through 183 * @cap: The capability to use 184 * 185 * Test to see if the opener of the socket had when the socket was created 186 * and the current process has the capability @cap over the network namespace 187 * the socket is a member of. 188 */ 189 bool sk_net_capable(const struct sock *sk, int cap) 190 { 191 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); 192 } 193 EXPORT_SYMBOL(sk_net_capable); 194 195 /* 196 * Each address family might have different locking rules, so we have 197 * one slock key per address family and separate keys for internal and 198 * userspace sockets. 199 */ 200 static struct lock_class_key af_family_keys[AF_MAX]; 201 static struct lock_class_key af_family_kern_keys[AF_MAX]; 202 static struct lock_class_key af_family_slock_keys[AF_MAX]; 203 static struct lock_class_key af_family_kern_slock_keys[AF_MAX]; 204 205 /* 206 * Make lock validator output more readable. (we pre-construct these 207 * strings build-time, so that runtime initialization of socket 208 * locks is fast): 209 */ 210 211 #define _sock_locks(x) \ 212 x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \ 213 x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \ 214 x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \ 215 x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \ 216 x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \ 217 x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \ 218 x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \ 219 x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \ 220 x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \ 221 x "27" , x "28" , x "AF_CAN" , \ 222 x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \ 223 x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \ 224 x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \ 225 x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \ 226 x "AF_QIPCRTR", x "AF_SMC" , x "AF_XDP" , \ 227 x "AF_MAX" 228 229 static const char *const af_family_key_strings[AF_MAX+1] = { 230 _sock_locks("sk_lock-") 231 }; 232 static const char *const af_family_slock_key_strings[AF_MAX+1] = { 233 _sock_locks("slock-") 234 }; 235 static const char *const af_family_clock_key_strings[AF_MAX+1] = { 236 _sock_locks("clock-") 237 }; 238 239 static const char *const af_family_kern_key_strings[AF_MAX+1] = { 240 _sock_locks("k-sk_lock-") 241 }; 242 static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = { 243 _sock_locks("k-slock-") 244 }; 245 static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = { 246 _sock_locks("k-clock-") 247 }; 248 static const char *const af_family_rlock_key_strings[AF_MAX+1] = { 249 _sock_locks("rlock-") 250 }; 251 static const char *const af_family_wlock_key_strings[AF_MAX+1] = { 252 _sock_locks("wlock-") 253 }; 254 static const char *const af_family_elock_key_strings[AF_MAX+1] = { 255 _sock_locks("elock-") 256 }; 257 258 /* 259 * sk_callback_lock and sk queues locking rules are per-address-family, 260 * so split the lock classes by using a per-AF key: 261 */ 262 static struct lock_class_key af_callback_keys[AF_MAX]; 263 static struct lock_class_key af_rlock_keys[AF_MAX]; 264 static struct lock_class_key af_wlock_keys[AF_MAX]; 265 static struct lock_class_key af_elock_keys[AF_MAX]; 266 static struct lock_class_key af_kern_callback_keys[AF_MAX]; 267 268 /* Run time adjustable parameters. 
*/ 269 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX; 270 EXPORT_SYMBOL(sysctl_wmem_max); 271 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX; 272 EXPORT_SYMBOL(sysctl_rmem_max); 273 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; 274 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; 275 276 /* Maximal space eaten by iovec or ancillary data plus some space */ 277 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 278 EXPORT_SYMBOL(sysctl_optmem_max); 279 280 int sysctl_tstamp_allow_data __read_mostly = 1; 281 282 DEFINE_STATIC_KEY_FALSE(memalloc_socks_key); 283 EXPORT_SYMBOL_GPL(memalloc_socks_key); 284 285 /** 286 * sk_set_memalloc - sets %SOCK_MEMALLOC 287 * @sk: socket to set it on 288 * 289 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves. 290 * It's the responsibility of the admin to adjust min_free_kbytes 291 * to meet the requirements 292 */ 293 void sk_set_memalloc(struct sock *sk) 294 { 295 sock_set_flag(sk, SOCK_MEMALLOC); 296 sk->sk_allocation |= __GFP_MEMALLOC; 297 static_branch_inc(&memalloc_socks_key); 298 } 299 EXPORT_SYMBOL_GPL(sk_set_memalloc); 300 301 void sk_clear_memalloc(struct sock *sk) 302 { 303 sock_reset_flag(sk, SOCK_MEMALLOC); 304 sk->sk_allocation &= ~__GFP_MEMALLOC; 305 static_branch_dec(&memalloc_socks_key); 306 307 /* 308 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward 309 * progress of swapping. SOCK_MEMALLOC may be cleared while 310 * it has rmem allocations due to the last swapfile being deactivated 311 * but there is a risk that the socket is unusable due to exceeding 312 * the rmem limits. Reclaim the reserves and obey rmem limits again. 313 */ 314 sk_mem_reclaim(sk); 315 } 316 EXPORT_SYMBOL_GPL(sk_clear_memalloc); 317 318 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 319 { 320 int ret; 321 unsigned int noreclaim_flag; 322 323 /* these should have been dropped before queueing */ 324 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); 325 326 noreclaim_flag = memalloc_noreclaim_save(); 327 ret = sk->sk_backlog_rcv(sk, skb); 328 memalloc_noreclaim_restore(noreclaim_flag); 329 330 return ret; 331 } 332 EXPORT_SYMBOL(__sk_backlog_rcv); 333 334 static int sock_get_timeout(long timeo, void *optval, bool old_timeval) 335 { 336 struct __kernel_sock_timeval tv; 337 338 if (timeo == MAX_SCHEDULE_TIMEOUT) { 339 tv.tv_sec = 0; 340 tv.tv_usec = 0; 341 } else { 342 tv.tv_sec = timeo / HZ; 343 tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ; 344 } 345 346 if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { 347 struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec }; 348 *(struct old_timeval32 *)optval = tv32; 349 return sizeof(tv32); 350 } 351 352 if (old_timeval) { 353 struct __kernel_old_timeval old_tv; 354 old_tv.tv_sec = tv.tv_sec; 355 old_tv.tv_usec = tv.tv_usec; 356 *(struct __kernel_old_timeval *)optval = old_tv; 357 return sizeof(old_tv); 358 } 359 360 *(struct __kernel_sock_timeval *)optval = tv; 361 return sizeof(tv); 362 } 363 364 static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, 365 bool old_timeval) 366 { 367 struct __kernel_sock_timeval tv; 368 369 if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { 370 struct old_timeval32 tv32; 371 372 if (optlen < sizeof(tv32)) 373 return -EINVAL; 374 375 if (copy_from_sockptr(&tv32, optval, sizeof(tv32))) 376 return -EFAULT; 377 tv.tv_sec = tv32.tv_sec; 378 tv.tv_usec = tv32.tv_usec; 379 } else if (old_timeval) { 380 struct __kernel_old_timeval old_tv; 381 382 if (optlen < 
sizeof(old_tv)) 383 return -EINVAL; 384 if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv))) 385 return -EFAULT; 386 tv.tv_sec = old_tv.tv_sec; 387 tv.tv_usec = old_tv.tv_usec; 388 } else { 389 if (optlen < sizeof(tv)) 390 return -EINVAL; 391 if (copy_from_sockptr(&tv, optval, sizeof(tv))) 392 return -EFAULT; 393 } 394 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC) 395 return -EDOM; 396 397 if (tv.tv_sec < 0) { 398 static int warned __read_mostly; 399 400 *timeo_p = 0; 401 if (warned < 10 && net_ratelimit()) { 402 warned++; 403 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n", 404 __func__, current->comm, task_pid_nr(current)); 405 } 406 return 0; 407 } 408 *timeo_p = MAX_SCHEDULE_TIMEOUT; 409 if (tv.tv_sec == 0 && tv.tv_usec == 0) 410 return 0; 411 if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) 412 *timeo_p = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, USEC_PER_SEC / HZ); 413 return 0; 414 } 415 416 static bool sock_needs_netstamp(const struct sock *sk) 417 { 418 switch (sk->sk_family) { 419 case AF_UNSPEC: 420 case AF_UNIX: 421 return false; 422 default: 423 return true; 424 } 425 } 426 427 static void sock_disable_timestamp(struct sock *sk, unsigned long flags) 428 { 429 if (sk->sk_flags & flags) { 430 sk->sk_flags &= ~flags; 431 if (sock_needs_netstamp(sk) && 432 !(sk->sk_flags & SK_FLAGS_TIMESTAMP)) 433 net_disable_timestamp(); 434 } 435 } 436 437 438 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 439 { 440 unsigned long flags; 441 struct sk_buff_head *list = &sk->sk_receive_queue; 442 443 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { 444 atomic_inc(&sk->sk_drops); 445 trace_sock_rcvqueue_full(sk, skb); 446 return -ENOMEM; 447 } 448 449 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { 450 atomic_inc(&sk->sk_drops); 451 return -ENOBUFS; 452 } 453 454 skb->dev = NULL; 455 skb_set_owner_r(skb, sk); 456 457 /* we escape from rcu protected region, make sure we dont leak 458 * a norefcounted dst 459 */ 460 skb_dst_force(skb); 461 462 spin_lock_irqsave(&list->lock, flags); 463 sock_skb_set_dropcount(sk, skb); 464 __skb_queue_tail(list, skb); 465 spin_unlock_irqrestore(&list->lock, flags); 466 467 if (!sock_flag(sk, SOCK_DEAD)) 468 sk->sk_data_ready(sk); 469 return 0; 470 } 471 EXPORT_SYMBOL(__sock_queue_rcv_skb); 472 473 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 474 { 475 int err; 476 477 err = sk_filter(sk, skb); 478 if (err) 479 return err; 480 481 return __sock_queue_rcv_skb(sk, skb); 482 } 483 EXPORT_SYMBOL(sock_queue_rcv_skb); 484 485 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, 486 const int nested, unsigned int trim_cap, bool refcounted) 487 { 488 int rc = NET_RX_SUCCESS; 489 490 if (sk_filter_trim_cap(sk, skb, trim_cap)) 491 goto discard_and_relse; 492 493 skb->dev = NULL; 494 495 if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { 496 atomic_inc(&sk->sk_drops); 497 goto discard_and_relse; 498 } 499 if (nested) 500 bh_lock_sock_nested(sk); 501 else 502 bh_lock_sock(sk); 503 if (!sock_owned_by_user(sk)) { 504 /* 505 * trylock + unlock semantics: 506 */ 507 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); 508 509 rc = sk_backlog_rcv(sk, skb); 510 511 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); 512 } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) { 513 bh_unlock_sock(sk); 514 atomic_inc(&sk->sk_drops); 515 goto discard_and_relse; 516 } 517 518 bh_unlock_sock(sk); 519 out: 520 if (refcounted) 521 sock_put(sk); 522 return rc; 523 discard_and_relse: 524 kfree_skb(skb); 525 goto out; 526 } 
527 EXPORT_SYMBOL(__sk_receive_skb); 528 529 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 530 { 531 struct dst_entry *dst = __sk_dst_get(sk); 532 533 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 534 sk_tx_queue_clear(sk); 535 sk->sk_dst_pending_confirm = 0; 536 RCU_INIT_POINTER(sk->sk_dst_cache, NULL); 537 dst_release(dst); 538 return NULL; 539 } 540 541 return dst; 542 } 543 EXPORT_SYMBOL(__sk_dst_check); 544 545 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) 546 { 547 struct dst_entry *dst = sk_dst_get(sk); 548 549 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 550 sk_dst_reset(sk); 551 dst_release(dst); 552 return NULL; 553 } 554 555 return dst; 556 } 557 EXPORT_SYMBOL(sk_dst_check); 558 559 static int sock_bindtoindex_locked(struct sock *sk, int ifindex) 560 { 561 int ret = -ENOPROTOOPT; 562 #ifdef CONFIG_NETDEVICES 563 struct net *net = sock_net(sk); 564 565 /* Sorry... */ 566 ret = -EPERM; 567 if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW)) 568 goto out; 569 570 ret = -EINVAL; 571 if (ifindex < 0) 572 goto out; 573 574 sk->sk_bound_dev_if = ifindex; 575 if (sk->sk_prot->rehash) 576 sk->sk_prot->rehash(sk); 577 sk_dst_reset(sk); 578 579 ret = 0; 580 581 out: 582 #endif 583 584 return ret; 585 } 586 587 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk) 588 { 589 int ret; 590 591 if (lock_sk) 592 lock_sock(sk); 593 ret = sock_bindtoindex_locked(sk, ifindex); 594 if (lock_sk) 595 release_sock(sk); 596 597 return ret; 598 } 599 EXPORT_SYMBOL(sock_bindtoindex); 600 601 static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen) 602 { 603 int ret = -ENOPROTOOPT; 604 #ifdef CONFIG_NETDEVICES 605 struct net *net = sock_net(sk); 606 char devname[IFNAMSIZ]; 607 int index; 608 609 ret = -EINVAL; 610 if (optlen < 0) 611 goto out; 612 613 /* Bind this socket to a particular device like "eth0", 614 * as specified in the passed interface name. If the 615 * name is "" or the option length is zero the socket 616 * is not bound. 
617 */ 618 if (optlen > IFNAMSIZ - 1) 619 optlen = IFNAMSIZ - 1; 620 memset(devname, 0, sizeof(devname)); 621 622 ret = -EFAULT; 623 if (copy_from_sockptr(devname, optval, optlen)) 624 goto out; 625 626 index = 0; 627 if (devname[0] != '\0') { 628 struct net_device *dev; 629 630 rcu_read_lock(); 631 dev = dev_get_by_name_rcu(net, devname); 632 if (dev) 633 index = dev->ifindex; 634 rcu_read_unlock(); 635 ret = -ENODEV; 636 if (!dev) 637 goto out; 638 } 639 640 return sock_bindtoindex(sk, index, true); 641 out: 642 #endif 643 644 return ret; 645 } 646 647 static int sock_getbindtodevice(struct sock *sk, char __user *optval, 648 int __user *optlen, int len) 649 { 650 int ret = -ENOPROTOOPT; 651 #ifdef CONFIG_NETDEVICES 652 struct net *net = sock_net(sk); 653 char devname[IFNAMSIZ]; 654 655 if (sk->sk_bound_dev_if == 0) { 656 len = 0; 657 goto zero; 658 } 659 660 ret = -EINVAL; 661 if (len < IFNAMSIZ) 662 goto out; 663 664 ret = netdev_get_name(net, devname, sk->sk_bound_dev_if); 665 if (ret) 666 goto out; 667 668 len = strlen(devname) + 1; 669 670 ret = -EFAULT; 671 if (copy_to_user(optval, devname, len)) 672 goto out; 673 674 zero: 675 ret = -EFAULT; 676 if (put_user(len, optlen)) 677 goto out; 678 679 ret = 0; 680 681 out: 682 #endif 683 684 return ret; 685 } 686 687 bool sk_mc_loop(struct sock *sk) 688 { 689 if (dev_recursion_level()) 690 return false; 691 if (!sk) 692 return true; 693 switch (sk->sk_family) { 694 case AF_INET: 695 return inet_sk(sk)->mc_loop; 696 #if IS_ENABLED(CONFIG_IPV6) 697 case AF_INET6: 698 return inet6_sk(sk)->mc_loop; 699 #endif 700 } 701 WARN_ON_ONCE(1); 702 return true; 703 } 704 EXPORT_SYMBOL(sk_mc_loop); 705 706 void sock_set_reuseaddr(struct sock *sk) 707 { 708 lock_sock(sk); 709 sk->sk_reuse = SK_CAN_REUSE; 710 release_sock(sk); 711 } 712 EXPORT_SYMBOL(sock_set_reuseaddr); 713 714 void sock_set_reuseport(struct sock *sk) 715 { 716 lock_sock(sk); 717 sk->sk_reuseport = true; 718 release_sock(sk); 719 } 720 EXPORT_SYMBOL(sock_set_reuseport); 721 722 void sock_no_linger(struct sock *sk) 723 { 724 lock_sock(sk); 725 sk->sk_lingertime = 0; 726 sock_set_flag(sk, SOCK_LINGER); 727 release_sock(sk); 728 } 729 EXPORT_SYMBOL(sock_no_linger); 730 731 void sock_set_priority(struct sock *sk, u32 priority) 732 { 733 lock_sock(sk); 734 sk->sk_priority = priority; 735 release_sock(sk); 736 } 737 EXPORT_SYMBOL(sock_set_priority); 738 739 void sock_set_sndtimeo(struct sock *sk, s64 secs) 740 { 741 lock_sock(sk); 742 if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1) 743 sk->sk_sndtimeo = secs * HZ; 744 else 745 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 746 release_sock(sk); 747 } 748 EXPORT_SYMBOL(sock_set_sndtimeo); 749 750 static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns) 751 { 752 if (val) { 753 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new); 754 sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, ns); 755 sock_set_flag(sk, SOCK_RCVTSTAMP); 756 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 757 } else { 758 sock_reset_flag(sk, SOCK_RCVTSTAMP); 759 sock_reset_flag(sk, SOCK_RCVTSTAMPNS); 760 } 761 } 762 763 void sock_enable_timestamps(struct sock *sk) 764 { 765 lock_sock(sk); 766 __sock_set_timestamps(sk, true, false, true); 767 release_sock(sk); 768 } 769 EXPORT_SYMBOL(sock_enable_timestamps); 770 771 void sock_set_keepalive(struct sock *sk) 772 { 773 lock_sock(sk); 774 if (sk->sk_prot->keepalive) 775 sk->sk_prot->keepalive(sk, true); 776 sock_valbool_flag(sk, SOCK_KEEPOPEN, true); 777 release_sock(sk); 778 } 779 EXPORT_SYMBOL(sock_set_keepalive); 780 781 static 
void __sock_set_rcvbuf(struct sock *sk, int val) 782 { 783 /* Ensure val * 2 fits into an int, to prevent max_t() from treating it 784 * as a negative value. 785 */ 786 val = min_t(int, val, INT_MAX / 2); 787 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 788 789 /* We double it on the way in to account for "struct sk_buff" etc. 790 * overhead. Applications assume that the SO_RCVBUF setting they make 791 * will allow that much actual data to be received on that socket. 792 * 793 * Applications are unaware that "struct sk_buff" and other overheads 794 * allocate from the receive buffer during socket buffer allocation. 795 * 796 * And after considering the possible alternatives, returning the value 797 * we actually used in getsockopt is the most desirable behavior. 798 */ 799 WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF)); 800 } 801 802 void sock_set_rcvbuf(struct sock *sk, int val) 803 { 804 lock_sock(sk); 805 __sock_set_rcvbuf(sk, val); 806 release_sock(sk); 807 } 808 EXPORT_SYMBOL(sock_set_rcvbuf); 809 810 void sock_set_mark(struct sock *sk, u32 val) 811 { 812 lock_sock(sk); 813 sk->sk_mark = val; 814 release_sock(sk); 815 } 816 EXPORT_SYMBOL(sock_set_mark); 817 818 /* 819 * This is meant for all protocols to use and covers goings on 820 * at the socket level. Everything here is generic. 821 */ 822 823 int sock_setsockopt(struct socket *sock, int level, int optname, 824 sockptr_t optval, unsigned int optlen) 825 { 826 struct sock_txtime sk_txtime; 827 struct sock *sk = sock->sk; 828 int val; 829 int valbool; 830 struct linger ling; 831 int ret = 0; 832 833 /* 834 * Options without arguments 835 */ 836 837 if (optname == SO_BINDTODEVICE) 838 return sock_setbindtodevice(sk, optval, optlen); 839 840 if (optlen < sizeof(int)) 841 return -EINVAL; 842 843 if (copy_from_sockptr(&val, optval, sizeof(val))) 844 return -EFAULT; 845 846 valbool = val ? 1 : 0; 847 848 lock_sock(sk); 849 850 switch (optname) { 851 case SO_DEBUG: 852 if (val && !capable(CAP_NET_ADMIN)) 853 ret = -EACCES; 854 else 855 sock_valbool_flag(sk, SOCK_DBG, valbool); 856 break; 857 case SO_REUSEADDR: 858 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); 859 break; 860 case SO_REUSEPORT: 861 sk->sk_reuseport = valbool; 862 break; 863 case SO_TYPE: 864 case SO_PROTOCOL: 865 case SO_DOMAIN: 866 case SO_ERROR: 867 ret = -ENOPROTOOPT; 868 break; 869 case SO_DONTROUTE: 870 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); 871 sk_dst_reset(sk); 872 break; 873 case SO_BROADCAST: 874 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); 875 break; 876 case SO_SNDBUF: 877 /* Don't error on this BSD doesn't and if you think 878 * about it this is right. Otherwise apps have to 879 * play 'guess the biggest size' games. RCVBUF/SNDBUF 880 * are treated in BSD as hints 881 */ 882 val = min_t(u32, val, sysctl_wmem_max); 883 set_sndbuf: 884 /* Ensure val * 2 fits into an int, to prevent max_t() 885 * from treating it as a negative value. 886 */ 887 val = min_t(int, val, INT_MAX / 2); 888 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 889 WRITE_ONCE(sk->sk_sndbuf, 890 max_t(int, val * 2, SOCK_MIN_SNDBUF)); 891 /* Wake up sending tasks if we upped the value. */ 892 sk->sk_write_space(sk); 893 break; 894 895 case SO_SNDBUFFORCE: 896 if (!capable(CAP_NET_ADMIN)) { 897 ret = -EPERM; 898 break; 899 } 900 901 /* No negative values (to prevent underflow, as val will be 902 * multiplied by 2). 
903 */ 904 if (val < 0) 905 val = 0; 906 goto set_sndbuf; 907 908 case SO_RCVBUF: 909 /* Don't error on this BSD doesn't and if you think 910 * about it this is right. Otherwise apps have to 911 * play 'guess the biggest size' games. RCVBUF/SNDBUF 912 * are treated in BSD as hints 913 */ 914 __sock_set_rcvbuf(sk, min_t(u32, val, sysctl_rmem_max)); 915 break; 916 917 case SO_RCVBUFFORCE: 918 if (!capable(CAP_NET_ADMIN)) { 919 ret = -EPERM; 920 break; 921 } 922 923 /* No negative values (to prevent underflow, as val will be 924 * multiplied by 2). 925 */ 926 __sock_set_rcvbuf(sk, max(val, 0)); 927 break; 928 929 case SO_KEEPALIVE: 930 if (sk->sk_prot->keepalive) 931 sk->sk_prot->keepalive(sk, valbool); 932 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 933 break; 934 935 case SO_OOBINLINE: 936 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); 937 break; 938 939 case SO_NO_CHECK: 940 sk->sk_no_check_tx = valbool; 941 break; 942 943 case SO_PRIORITY: 944 if ((val >= 0 && val <= 6) || 945 ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 946 sk->sk_priority = val; 947 else 948 ret = -EPERM; 949 break; 950 951 case SO_LINGER: 952 if (optlen < sizeof(ling)) { 953 ret = -EINVAL; /* 1003.1g */ 954 break; 955 } 956 if (copy_from_sockptr(&ling, optval, sizeof(ling))) { 957 ret = -EFAULT; 958 break; 959 } 960 if (!ling.l_onoff) 961 sock_reset_flag(sk, SOCK_LINGER); 962 else { 963 #if (BITS_PER_LONG == 32) 964 if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) 965 sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT; 966 else 967 #endif 968 sk->sk_lingertime = (unsigned int)ling.l_linger * HZ; 969 sock_set_flag(sk, SOCK_LINGER); 970 } 971 break; 972 973 case SO_BSDCOMPAT: 974 break; 975 976 case SO_PASSCRED: 977 if (valbool) 978 set_bit(SOCK_PASSCRED, &sock->flags); 979 else 980 clear_bit(SOCK_PASSCRED, &sock->flags); 981 break; 982 983 case SO_TIMESTAMP_OLD: 984 __sock_set_timestamps(sk, valbool, false, false); 985 break; 986 case SO_TIMESTAMP_NEW: 987 __sock_set_timestamps(sk, valbool, true, false); 988 break; 989 case SO_TIMESTAMPNS_OLD: 990 __sock_set_timestamps(sk, valbool, false, true); 991 break; 992 case SO_TIMESTAMPNS_NEW: 993 __sock_set_timestamps(sk, valbool, true, true); 994 break; 995 case SO_TIMESTAMPING_NEW: 996 case SO_TIMESTAMPING_OLD: 997 if (val & ~SOF_TIMESTAMPING_MASK) { 998 ret = -EINVAL; 999 break; 1000 } 1001 1002 if (val & SOF_TIMESTAMPING_OPT_ID && 1003 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { 1004 if (sk->sk_protocol == IPPROTO_TCP && 1005 sk->sk_type == SOCK_STREAM) { 1006 if ((1 << sk->sk_state) & 1007 (TCPF_CLOSE | TCPF_LISTEN)) { 1008 ret = -EINVAL; 1009 break; 1010 } 1011 sk->sk_tskey = tcp_sk(sk)->snd_una; 1012 } else { 1013 sk->sk_tskey = 0; 1014 } 1015 } 1016 1017 if (val & SOF_TIMESTAMPING_OPT_STATS && 1018 !(val & SOF_TIMESTAMPING_OPT_TSONLY)) { 1019 ret = -EINVAL; 1020 break; 1021 } 1022 1023 sk->sk_tsflags = val; 1024 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW); 1025 1026 if (val & SOF_TIMESTAMPING_RX_SOFTWARE) 1027 sock_enable_timestamp(sk, 1028 SOCK_TIMESTAMPING_RX_SOFTWARE); 1029 else 1030 sock_disable_timestamp(sk, 1031 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)); 1032 break; 1033 1034 case SO_RCVLOWAT: 1035 if (val < 0) 1036 val = INT_MAX; 1037 if (sock->ops->set_rcvlowat) 1038 ret = sock->ops->set_rcvlowat(sk, val); 1039 else 1040 WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); 1041 break; 1042 1043 case SO_RCVTIMEO_OLD: 1044 case SO_RCVTIMEO_NEW: 1045 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, 1046 optlen, optname == SO_RCVTIMEO_OLD); 1047 break; 1048 1049 case SO_SNDTIMEO_OLD: 1050 case SO_SNDTIMEO_NEW: 1051 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, 1052 optlen, optname == SO_SNDTIMEO_OLD); 1053 break; 1054 1055 case SO_ATTACH_FILTER: { 1056 struct sock_fprog fprog; 1057 1058 ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); 1059 if (!ret) 1060 ret = sk_attach_filter(&fprog, sk); 1061 break; 1062 } 1063 case SO_ATTACH_BPF: 1064 ret = -EINVAL; 1065 if (optlen == sizeof(u32)) { 1066 u32 ufd; 1067 1068 ret = -EFAULT; 1069 if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) 1070 break; 1071 1072 ret = sk_attach_bpf(ufd, sk); 1073 } 1074 break; 1075 1076 case SO_ATTACH_REUSEPORT_CBPF: { 1077 struct sock_fprog fprog; 1078 1079 ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); 1080 if (!ret) 1081 ret = sk_reuseport_attach_filter(&fprog, sk); 1082 break; 1083 } 1084 case SO_ATTACH_REUSEPORT_EBPF: 1085 ret = -EINVAL; 1086 if (optlen == sizeof(u32)) { 1087 u32 ufd; 1088 1089 ret = -EFAULT; 1090 if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) 1091 break; 1092 1093 ret = sk_reuseport_attach_bpf(ufd, sk); 1094 } 1095 break; 1096 1097 case SO_DETACH_REUSEPORT_BPF: 1098 ret = reuseport_detach_prog(sk); 1099 break; 1100 1101 case SO_DETACH_FILTER: 1102 ret = sk_detach_filter(sk); 1103 break; 1104 1105 case SO_LOCK_FILTER: 1106 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool) 1107 ret = -EPERM; 1108 else 1109 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool); 1110 break; 1111 1112 case SO_PASSSEC: 1113 if (valbool) 1114 set_bit(SOCK_PASSSEC, &sock->flags); 1115 else 1116 clear_bit(SOCK_PASSSEC, &sock->flags); 1117 break; 1118 case SO_MARK: 1119 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { 1120 ret = -EPERM; 1121 } else if (val != sk->sk_mark) { 1122 sk->sk_mark = val; 1123 sk_dst_reset(sk); 1124 } 1125 break; 1126 1127 case SO_RXQ_OVFL: 1128 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); 1129 break; 1130 1131 case SO_WIFI_STATUS: 1132 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); 1133 break; 1134 1135 case SO_PEEK_OFF: 1136 if (sock->ops->set_peek_off) 1137 ret = sock->ops->set_peek_off(sk, val); 1138 else 1139 ret = -EOPNOTSUPP; 1140 break; 1141 1142 case SO_NOFCS: 1143 sock_valbool_flag(sk, SOCK_NOFCS, valbool); 1144 break; 1145 1146 case SO_SELECT_ERR_QUEUE: 1147 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); 1148 break; 1149 1150 #ifdef CONFIG_NET_RX_BUSY_POLL 1151 case SO_BUSY_POLL: 1152 /* allow unprivileged users to decrease the value */ 1153 if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN)) 1154 ret = -EPERM; 1155 else { 1156 if (val < 0) 1157 ret = -EINVAL; 1158 else 1159 sk->sk_ll_usec = val; 1160 } 1161 break; 1162 case SO_PREFER_BUSY_POLL: 1163 if (valbool && !capable(CAP_NET_ADMIN)) 1164 ret = -EPERM; 1165 else 1166 WRITE_ONCE(sk->sk_prefer_busy_poll, valbool); 1167 break; 1168 case SO_BUSY_POLL_BUDGET: 1169 if (val > READ_ONCE(sk->sk_busy_poll_budget) && !capable(CAP_NET_ADMIN)) { 1170 ret = -EPERM; 1171 } else { 1172 if (val < 0 || val > U16_MAX) 1173 ret = -EINVAL; 1174 else 1175 WRITE_ONCE(sk->sk_busy_poll_budget, val); 1176 } 1177 break; 1178 #endif 1179 1180 case SO_MAX_PACING_RATE: 1181 { 1182 unsigned long ulval = (val == ~0U) ? 
~0UL : (unsigned int)val; 1183 1184 if (sizeof(ulval) != sizeof(val) && 1185 optlen >= sizeof(ulval) && 1186 copy_from_sockptr(&ulval, optval, sizeof(ulval))) { 1187 ret = -EFAULT; 1188 break; 1189 } 1190 if (ulval != ~0UL) 1191 cmpxchg(&sk->sk_pacing_status, 1192 SK_PACING_NONE, 1193 SK_PACING_NEEDED); 1194 sk->sk_max_pacing_rate = ulval; 1195 sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval); 1196 break; 1197 } 1198 case SO_INCOMING_CPU: 1199 WRITE_ONCE(sk->sk_incoming_cpu, val); 1200 break; 1201 1202 case SO_CNX_ADVICE: 1203 if (val == 1) 1204 dst_negative_advice(sk); 1205 break; 1206 1207 case SO_ZEROCOPY: 1208 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) { 1209 if (!((sk->sk_type == SOCK_STREAM && 1210 sk->sk_protocol == IPPROTO_TCP) || 1211 (sk->sk_type == SOCK_DGRAM && 1212 sk->sk_protocol == IPPROTO_UDP))) 1213 ret = -ENOTSUPP; 1214 } else if (sk->sk_family != PF_RDS) { 1215 ret = -ENOTSUPP; 1216 } 1217 if (!ret) { 1218 if (val < 0 || val > 1) 1219 ret = -EINVAL; 1220 else 1221 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool); 1222 } 1223 break; 1224 1225 case SO_TXTIME: 1226 if (optlen != sizeof(struct sock_txtime)) { 1227 ret = -EINVAL; 1228 break; 1229 } else if (copy_from_sockptr(&sk_txtime, optval, 1230 sizeof(struct sock_txtime))) { 1231 ret = -EFAULT; 1232 break; 1233 } else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) { 1234 ret = -EINVAL; 1235 break; 1236 } 1237 /* CLOCK_MONOTONIC is only used by sch_fq, and this packet 1238 * scheduler has enough safe guards. 1239 */ 1240 if (sk_txtime.clockid != CLOCK_MONOTONIC && 1241 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { 1242 ret = -EPERM; 1243 break; 1244 } 1245 sock_valbool_flag(sk, SOCK_TXTIME, true); 1246 sk->sk_clockid = sk_txtime.clockid; 1247 sk->sk_txtime_deadline_mode = 1248 !!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE); 1249 sk->sk_txtime_report_errors = 1250 !!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS); 1251 break; 1252 1253 case SO_BINDTOIFINDEX: 1254 ret = sock_bindtoindex_locked(sk, val); 1255 break; 1256 1257 default: 1258 ret = -ENOPROTOOPT; 1259 break; 1260 } 1261 release_sock(sk); 1262 return ret; 1263 } 1264 EXPORT_SYMBOL(sock_setsockopt); 1265 1266 1267 static void cred_to_ucred(struct pid *pid, const struct cred *cred, 1268 struct ucred *ucred) 1269 { 1270 ucred->pid = pid_vnr(pid); 1271 ucred->uid = ucred->gid = -1; 1272 if (cred) { 1273 struct user_namespace *current_ns = current_user_ns(); 1274 1275 ucred->uid = from_kuid_munged(current_ns, cred->euid); 1276 ucred->gid = from_kgid_munged(current_ns, cred->egid); 1277 } 1278 } 1279 1280 static int groups_to_user(gid_t __user *dst, const struct group_info *src) 1281 { 1282 struct user_namespace *user_ns = current_user_ns(); 1283 int i; 1284 1285 for (i = 0; i < src->ngroups; i++) 1286 if (put_user(from_kgid_munged(user_ns, src->gid[i]), dst + i)) 1287 return -EFAULT; 1288 1289 return 0; 1290 } 1291 1292 int sock_getsockopt(struct socket *sock, int level, int optname, 1293 char __user *optval, int __user *optlen) 1294 { 1295 struct sock *sk = sock->sk; 1296 1297 union { 1298 int val; 1299 u64 val64; 1300 unsigned long ulval; 1301 struct linger ling; 1302 struct old_timeval32 tm32; 1303 struct __kernel_old_timeval tm; 1304 struct __kernel_sock_timeval stm; 1305 struct sock_txtime txtime; 1306 } v; 1307 1308 int lv = sizeof(int); 1309 int len; 1310 1311 if (get_user(len, optlen)) 1312 return -EFAULT; 1313 if (len < 0) 1314 return -EINVAL; 1315 1316 memset(&v, 0, sizeof(v)); 1317 1318 switch (optname) { 1319 case SO_DEBUG: 1320 v.val = 
sock_flag(sk, SOCK_DBG); 1321 break; 1322 1323 case SO_DONTROUTE: 1324 v.val = sock_flag(sk, SOCK_LOCALROUTE); 1325 break; 1326 1327 case SO_BROADCAST: 1328 v.val = sock_flag(sk, SOCK_BROADCAST); 1329 break; 1330 1331 case SO_SNDBUF: 1332 v.val = sk->sk_sndbuf; 1333 break; 1334 1335 case SO_RCVBUF: 1336 v.val = sk->sk_rcvbuf; 1337 break; 1338 1339 case SO_REUSEADDR: 1340 v.val = sk->sk_reuse; 1341 break; 1342 1343 case SO_REUSEPORT: 1344 v.val = sk->sk_reuseport; 1345 break; 1346 1347 case SO_KEEPALIVE: 1348 v.val = sock_flag(sk, SOCK_KEEPOPEN); 1349 break; 1350 1351 case SO_TYPE: 1352 v.val = sk->sk_type; 1353 break; 1354 1355 case SO_PROTOCOL: 1356 v.val = sk->sk_protocol; 1357 break; 1358 1359 case SO_DOMAIN: 1360 v.val = sk->sk_family; 1361 break; 1362 1363 case SO_ERROR: 1364 v.val = -sock_error(sk); 1365 if (v.val == 0) 1366 v.val = xchg(&sk->sk_err_soft, 0); 1367 break; 1368 1369 case SO_OOBINLINE: 1370 v.val = sock_flag(sk, SOCK_URGINLINE); 1371 break; 1372 1373 case SO_NO_CHECK: 1374 v.val = sk->sk_no_check_tx; 1375 break; 1376 1377 case SO_PRIORITY: 1378 v.val = sk->sk_priority; 1379 break; 1380 1381 case SO_LINGER: 1382 lv = sizeof(v.ling); 1383 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); 1384 v.ling.l_linger = sk->sk_lingertime / HZ; 1385 break; 1386 1387 case SO_BSDCOMPAT: 1388 break; 1389 1390 case SO_TIMESTAMP_OLD: 1391 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && 1392 !sock_flag(sk, SOCK_TSTAMP_NEW) && 1393 !sock_flag(sk, SOCK_RCVTSTAMPNS); 1394 break; 1395 1396 case SO_TIMESTAMPNS_OLD: 1397 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW); 1398 break; 1399 1400 case SO_TIMESTAMP_NEW: 1401 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW); 1402 break; 1403 1404 case SO_TIMESTAMPNS_NEW: 1405 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW); 1406 break; 1407 1408 case SO_TIMESTAMPING_OLD: 1409 v.val = sk->sk_tsflags; 1410 break; 1411 1412 case SO_RCVTIMEO_OLD: 1413 case SO_RCVTIMEO_NEW: 1414 lv = sock_get_timeout(sk->sk_rcvtimeo, &v, SO_RCVTIMEO_OLD == optname); 1415 break; 1416 1417 case SO_SNDTIMEO_OLD: 1418 case SO_SNDTIMEO_NEW: 1419 lv = sock_get_timeout(sk->sk_sndtimeo, &v, SO_SNDTIMEO_OLD == optname); 1420 break; 1421 1422 case SO_RCVLOWAT: 1423 v.val = sk->sk_rcvlowat; 1424 break; 1425 1426 case SO_SNDLOWAT: 1427 v.val = 1; 1428 break; 1429 1430 case SO_PASSCRED: 1431 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags); 1432 break; 1433 1434 case SO_PEERCRED: 1435 { 1436 struct ucred peercred; 1437 if (len > sizeof(peercred)) 1438 len = sizeof(peercred); 1439 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); 1440 if (copy_to_user(optval, &peercred, len)) 1441 return -EFAULT; 1442 goto lenout; 1443 } 1444 1445 case SO_PEERGROUPS: 1446 { 1447 int ret, n; 1448 1449 if (!sk->sk_peer_cred) 1450 return -ENODATA; 1451 1452 n = sk->sk_peer_cred->group_info->ngroups; 1453 if (len < n * sizeof(gid_t)) { 1454 len = n * sizeof(gid_t); 1455 return put_user(len, optlen) ? -EFAULT : -ERANGE; 1456 } 1457 len = n * sizeof(gid_t); 1458 1459 ret = groups_to_user((gid_t __user *)optval, 1460 sk->sk_peer_cred->group_info); 1461 if (ret) 1462 return ret; 1463 goto lenout; 1464 } 1465 1466 case SO_PEERNAME: 1467 { 1468 char address[128]; 1469 1470 lv = sock->ops->getname(sock, (struct sockaddr *)address, 2); 1471 if (lv < 0) 1472 return -ENOTCONN; 1473 if (lv < len) 1474 return -EINVAL; 1475 if (copy_to_user(optval, address, len)) 1476 return -EFAULT; 1477 goto lenout; 1478 } 1479 1480 /* Dubious BSD thing... 
Probably nobody even uses it, but 1481 * the UNIX standard wants it for whatever reason... -DaveM 1482 */ 1483 case SO_ACCEPTCONN: 1484 v.val = sk->sk_state == TCP_LISTEN; 1485 break; 1486 1487 case SO_PASSSEC: 1488 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags); 1489 break; 1490 1491 case SO_PEERSEC: 1492 return security_socket_getpeersec_stream(sock, optval, optlen, len); 1493 1494 case SO_MARK: 1495 v.val = sk->sk_mark; 1496 break; 1497 1498 case SO_RXQ_OVFL: 1499 v.val = sock_flag(sk, SOCK_RXQ_OVFL); 1500 break; 1501 1502 case SO_WIFI_STATUS: 1503 v.val = sock_flag(sk, SOCK_WIFI_STATUS); 1504 break; 1505 1506 case SO_PEEK_OFF: 1507 if (!sock->ops->set_peek_off) 1508 return -EOPNOTSUPP; 1509 1510 v.val = sk->sk_peek_off; 1511 break; 1512 case SO_NOFCS: 1513 v.val = sock_flag(sk, SOCK_NOFCS); 1514 break; 1515 1516 case SO_BINDTODEVICE: 1517 return sock_getbindtodevice(sk, optval, optlen, len); 1518 1519 case SO_GET_FILTER: 1520 len = sk_get_filter(sk, (struct sock_filter __user *)optval, len); 1521 if (len < 0) 1522 return len; 1523 1524 goto lenout; 1525 1526 case SO_LOCK_FILTER: 1527 v.val = sock_flag(sk, SOCK_FILTER_LOCKED); 1528 break; 1529 1530 case SO_BPF_EXTENSIONS: 1531 v.val = bpf_tell_extensions(); 1532 break; 1533 1534 case SO_SELECT_ERR_QUEUE: 1535 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); 1536 break; 1537 1538 #ifdef CONFIG_NET_RX_BUSY_POLL 1539 case SO_BUSY_POLL: 1540 v.val = sk->sk_ll_usec; 1541 break; 1542 case SO_PREFER_BUSY_POLL: 1543 v.val = READ_ONCE(sk->sk_prefer_busy_poll); 1544 break; 1545 #endif 1546 1547 case SO_MAX_PACING_RATE: 1548 if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) { 1549 lv = sizeof(v.ulval); 1550 v.ulval = sk->sk_max_pacing_rate; 1551 } else { 1552 /* 32bit version */ 1553 v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U); 1554 } 1555 break; 1556 1557 case SO_INCOMING_CPU: 1558 v.val = READ_ONCE(sk->sk_incoming_cpu); 1559 break; 1560 1561 case SO_MEMINFO: 1562 { 1563 u32 meminfo[SK_MEMINFO_VARS]; 1564 1565 sk_get_meminfo(sk, meminfo); 1566 1567 len = min_t(unsigned int, len, sizeof(meminfo)); 1568 if (copy_to_user(optval, &meminfo, len)) 1569 return -EFAULT; 1570 1571 goto lenout; 1572 } 1573 1574 #ifdef CONFIG_NET_RX_BUSY_POLL 1575 case SO_INCOMING_NAPI_ID: 1576 v.val = READ_ONCE(sk->sk_napi_id); 1577 1578 /* aggregate non-NAPI IDs down to 0 */ 1579 if (v.val < MIN_NAPI_ID) 1580 v.val = 0; 1581 1582 break; 1583 #endif 1584 1585 case SO_COOKIE: 1586 lv = sizeof(u64); 1587 if (len < lv) 1588 return -EINVAL; 1589 v.val64 = sock_gen_cookie(sk); 1590 break; 1591 1592 case SO_ZEROCOPY: 1593 v.val = sock_flag(sk, SOCK_ZEROCOPY); 1594 break; 1595 1596 case SO_TXTIME: 1597 lv = sizeof(v.txtime); 1598 v.txtime.clockid = sk->sk_clockid; 1599 v.txtime.flags |= sk->sk_txtime_deadline_mode ? 1600 SOF_TXTIME_DEADLINE_MODE : 0; 1601 v.txtime.flags |= sk->sk_txtime_report_errors ? 1602 SOF_TXTIME_REPORT_ERRORS : 0; 1603 break; 1604 1605 case SO_BINDTOIFINDEX: 1606 v.val = sk->sk_bound_dev_if; 1607 break; 1608 1609 default: 1610 /* We implement the SO_SNDLOWAT etc to not be settable 1611 * (1003.1g 7). 1612 */ 1613 return -ENOPROTOOPT; 1614 } 1615 1616 if (len > lv) 1617 len = lv; 1618 if (copy_to_user(optval, &v, len)) 1619 return -EFAULT; 1620 lenout: 1621 if (put_user(len, optlen)) 1622 return -EFAULT; 1623 return 0; 1624 } 1625 1626 /* 1627 * Initialize an sk_lock. 1628 * 1629 * (We also register the sk_lock with the lock validator.) 
1630 */ 1631 static inline void sock_lock_init(struct sock *sk) 1632 { 1633 if (sk->sk_kern_sock) 1634 sock_lock_init_class_and_name( 1635 sk, 1636 af_family_kern_slock_key_strings[sk->sk_family], 1637 af_family_kern_slock_keys + sk->sk_family, 1638 af_family_kern_key_strings[sk->sk_family], 1639 af_family_kern_keys + sk->sk_family); 1640 else 1641 sock_lock_init_class_and_name( 1642 sk, 1643 af_family_slock_key_strings[sk->sk_family], 1644 af_family_slock_keys + sk->sk_family, 1645 af_family_key_strings[sk->sk_family], 1646 af_family_keys + sk->sk_family); 1647 } 1648 1649 /* 1650 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet, 1651 * even temporarly, because of RCU lookups. sk_node should also be left as is. 1652 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end 1653 */ 1654 static void sock_copy(struct sock *nsk, const struct sock *osk) 1655 { 1656 const struct proto *prot = READ_ONCE(osk->sk_prot); 1657 #ifdef CONFIG_SECURITY_NETWORK 1658 void *sptr = nsk->sk_security; 1659 #endif 1660 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin)); 1661 1662 memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, 1663 prot->obj_size - offsetof(struct sock, sk_dontcopy_end)); 1664 1665 #ifdef CONFIG_SECURITY_NETWORK 1666 nsk->sk_security = sptr; 1667 security_sk_clone(osk, nsk); 1668 #endif 1669 } 1670 1671 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, 1672 int family) 1673 { 1674 struct sock *sk; 1675 struct kmem_cache *slab; 1676 1677 slab = prot->slab; 1678 if (slab != NULL) { 1679 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); 1680 if (!sk) 1681 return sk; 1682 if (want_init_on_alloc(priority)) 1683 sk_prot_clear_nulls(sk, prot->obj_size); 1684 } else 1685 sk = kmalloc(prot->obj_size, priority); 1686 1687 if (sk != NULL) { 1688 if (security_sk_alloc(sk, family, priority)) 1689 goto out_free; 1690 1691 if (!try_module_get(prot->owner)) 1692 goto out_free_sec; 1693 sk_tx_queue_clear(sk); 1694 } 1695 1696 return sk; 1697 1698 out_free_sec: 1699 security_sk_free(sk); 1700 out_free: 1701 if (slab != NULL) 1702 kmem_cache_free(slab, sk); 1703 else 1704 kfree(sk); 1705 return NULL; 1706 } 1707 1708 static void sk_prot_free(struct proto *prot, struct sock *sk) 1709 { 1710 struct kmem_cache *slab; 1711 struct module *owner; 1712 1713 owner = prot->owner; 1714 slab = prot->slab; 1715 1716 cgroup_sk_free(&sk->sk_cgrp_data); 1717 mem_cgroup_sk_free(sk); 1718 security_sk_free(sk); 1719 if (slab != NULL) 1720 kmem_cache_free(slab, sk); 1721 else 1722 kfree(sk); 1723 module_put(owner); 1724 } 1725 1726 /** 1727 * sk_alloc - All socket objects are allocated here 1728 * @net: the applicable net namespace 1729 * @family: protocol family 1730 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 1731 * @prot: struct proto associated with this new sock instance 1732 * @kern: is this to be a kernel socket? 1733 */ 1734 struct sock *sk_alloc(struct net *net, int family, gfp_t priority, 1735 struct proto *prot, int kern) 1736 { 1737 struct sock *sk; 1738 1739 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); 1740 if (sk) { 1741 sk->sk_family = family; 1742 /* 1743 * See comment in struct sock definition to understand 1744 * why we need sk_prot_creator -acme 1745 */ 1746 sk->sk_prot = sk->sk_prot_creator = prot; 1747 sk->sk_kern_sock = kern; 1748 sock_lock_init(sk); 1749 sk->sk_net_refcnt = kern ? 
0 : 1; 1750 if (likely(sk->sk_net_refcnt)) { 1751 get_net(net); 1752 sock_inuse_add(net, 1); 1753 } 1754 1755 sock_net_set(sk, net); 1756 refcount_set(&sk->sk_wmem_alloc, 1); 1757 1758 mem_cgroup_sk_alloc(sk); 1759 cgroup_sk_alloc(&sk->sk_cgrp_data); 1760 sock_update_classid(&sk->sk_cgrp_data); 1761 sock_update_netprioidx(&sk->sk_cgrp_data); 1762 sk_tx_queue_clear(sk); 1763 } 1764 1765 return sk; 1766 } 1767 EXPORT_SYMBOL(sk_alloc); 1768 1769 /* Sockets having SOCK_RCU_FREE will call this function after one RCU 1770 * grace period. This is the case for UDP sockets and TCP listeners. 1771 */ 1772 static void __sk_destruct(struct rcu_head *head) 1773 { 1774 struct sock *sk = container_of(head, struct sock, sk_rcu); 1775 struct sk_filter *filter; 1776 1777 if (sk->sk_destruct) 1778 sk->sk_destruct(sk); 1779 1780 filter = rcu_dereference_check(sk->sk_filter, 1781 refcount_read(&sk->sk_wmem_alloc) == 0); 1782 if (filter) { 1783 sk_filter_uncharge(sk, filter); 1784 RCU_INIT_POINTER(sk->sk_filter, NULL); 1785 } 1786 1787 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); 1788 1789 #ifdef CONFIG_BPF_SYSCALL 1790 bpf_sk_storage_free(sk); 1791 #endif 1792 1793 if (atomic_read(&sk->sk_omem_alloc)) 1794 pr_debug("%s: optmem leakage (%d bytes) detected\n", 1795 __func__, atomic_read(&sk->sk_omem_alloc)); 1796 1797 if (sk->sk_frag.page) { 1798 put_page(sk->sk_frag.page); 1799 sk->sk_frag.page = NULL; 1800 } 1801 1802 if (sk->sk_peer_cred) 1803 put_cred(sk->sk_peer_cred); 1804 put_pid(sk->sk_peer_pid); 1805 if (likely(sk->sk_net_refcnt)) 1806 put_net(sock_net(sk)); 1807 sk_prot_free(sk->sk_prot_creator, sk); 1808 } 1809 1810 void sk_destruct(struct sock *sk) 1811 { 1812 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); 1813 1814 if (rcu_access_pointer(sk->sk_reuseport_cb)) { 1815 reuseport_detach_sock(sk); 1816 use_call_rcu = true; 1817 } 1818 1819 if (use_call_rcu) 1820 call_rcu(&sk->sk_rcu, __sk_destruct); 1821 else 1822 __sk_destruct(&sk->sk_rcu); 1823 } 1824 1825 static void __sk_free(struct sock *sk) 1826 { 1827 if (likely(sk->sk_net_refcnt)) 1828 sock_inuse_add(sock_net(sk), -1); 1829 1830 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk))) 1831 sock_diag_broadcast_destroy(sk); 1832 else 1833 sk_destruct(sk); 1834 } 1835 1836 void sk_free(struct sock *sk) 1837 { 1838 /* 1839 * We subtract one from sk_wmem_alloc and can know if 1840 * some packets are still in some tx queue. 
1841 * If not null, sock_wfree() will call __sk_free(sk) later 1842 */ 1843 if (refcount_dec_and_test(&sk->sk_wmem_alloc)) 1844 __sk_free(sk); 1845 } 1846 EXPORT_SYMBOL(sk_free); 1847 1848 static void sk_init_common(struct sock *sk) 1849 { 1850 skb_queue_head_init(&sk->sk_receive_queue); 1851 skb_queue_head_init(&sk->sk_write_queue); 1852 skb_queue_head_init(&sk->sk_error_queue); 1853 1854 rwlock_init(&sk->sk_callback_lock); 1855 lockdep_set_class_and_name(&sk->sk_receive_queue.lock, 1856 af_rlock_keys + sk->sk_family, 1857 af_family_rlock_key_strings[sk->sk_family]); 1858 lockdep_set_class_and_name(&sk->sk_write_queue.lock, 1859 af_wlock_keys + sk->sk_family, 1860 af_family_wlock_key_strings[sk->sk_family]); 1861 lockdep_set_class_and_name(&sk->sk_error_queue.lock, 1862 af_elock_keys + sk->sk_family, 1863 af_family_elock_key_strings[sk->sk_family]); 1864 lockdep_set_class_and_name(&sk->sk_callback_lock, 1865 af_callback_keys + sk->sk_family, 1866 af_family_clock_key_strings[sk->sk_family]); 1867 } 1868 1869 /** 1870 * sk_clone_lock - clone a socket, and lock its clone 1871 * @sk: the socket to clone 1872 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 1873 * 1874 * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) 1875 */ 1876 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) 1877 { 1878 struct proto *prot = READ_ONCE(sk->sk_prot); 1879 struct sk_filter *filter; 1880 bool is_charged = true; 1881 struct sock *newsk; 1882 1883 newsk = sk_prot_alloc(prot, priority, sk->sk_family); 1884 if (!newsk) 1885 goto out; 1886 1887 sock_copy(newsk, sk); 1888 1889 newsk->sk_prot_creator = prot; 1890 1891 /* SANITY */ 1892 if (likely(newsk->sk_net_refcnt)) 1893 get_net(sock_net(newsk)); 1894 sk_node_init(&newsk->sk_node); 1895 sock_lock_init(newsk); 1896 bh_lock_sock(newsk); 1897 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 1898 newsk->sk_backlog.len = 0; 1899 1900 atomic_set(&newsk->sk_rmem_alloc, 0); 1901 1902 /* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */ 1903 refcount_set(&newsk->sk_wmem_alloc, 1); 1904 1905 atomic_set(&newsk->sk_omem_alloc, 0); 1906 sk_init_common(newsk); 1907 1908 newsk->sk_dst_cache = NULL; 1909 newsk->sk_dst_pending_confirm = 0; 1910 newsk->sk_wmem_queued = 0; 1911 newsk->sk_forward_alloc = 0; 1912 atomic_set(&newsk->sk_drops, 0); 1913 newsk->sk_send_head = NULL; 1914 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 1915 atomic_set(&newsk->sk_zckey, 0); 1916 1917 sock_reset_flag(newsk, SOCK_DONE); 1918 1919 /* sk->sk_memcg will be populated at accept() time */ 1920 newsk->sk_memcg = NULL; 1921 1922 cgroup_sk_clone(&newsk->sk_cgrp_data); 1923 1924 rcu_read_lock(); 1925 filter = rcu_dereference(sk->sk_filter); 1926 if (filter != NULL) 1927 /* though it's an empty new sock, the charging may fail 1928 * if sysctl_optmem_max was changed between creation of 1929 * original socket and cloning 1930 */ 1931 is_charged = sk_filter_charge(newsk, filter); 1932 RCU_INIT_POINTER(newsk->sk_filter, filter); 1933 rcu_read_unlock(); 1934 1935 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 1936 /* We need to make sure that we don't uncharge the new 1937 * socket if we couldn't charge it in the first place 1938 * as otherwise we uncharge the parent's filter. 
1939 */ 1940 if (!is_charged) 1941 RCU_INIT_POINTER(newsk->sk_filter, NULL); 1942 sk_free_unlock_clone(newsk); 1943 newsk = NULL; 1944 goto out; 1945 } 1946 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); 1947 1948 if (bpf_sk_storage_clone(sk, newsk)) { 1949 sk_free_unlock_clone(newsk); 1950 newsk = NULL; 1951 goto out; 1952 } 1953 1954 /* Clear sk_user_data if parent had the pointer tagged 1955 * as not suitable for copying when cloning. 1956 */ 1957 if (sk_user_data_is_nocopy(newsk)) 1958 newsk->sk_user_data = NULL; 1959 1960 newsk->sk_err = 0; 1961 newsk->sk_err_soft = 0; 1962 newsk->sk_priority = 0; 1963 newsk->sk_incoming_cpu = raw_smp_processor_id(); 1964 if (likely(newsk->sk_net_refcnt)) 1965 sock_inuse_add(sock_net(newsk), 1); 1966 1967 /* Before updating sk_refcnt, we must commit prior changes to memory 1968 * (Documentation/RCU/rculist_nulls.rst for details) 1969 */ 1970 smp_wmb(); 1971 refcount_set(&newsk->sk_refcnt, 2); 1972 1973 /* Increment the counter in the same struct proto as the master 1974 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that 1975 * is the same as sk->sk_prot->socks, as this field was copied 1976 * with memcpy). 1977 * 1978 * This _changes_ the previous behaviour, where 1979 * tcp_create_openreq_child always was incrementing the 1980 * equivalent to tcp_prot->socks (inet_sock_nr), so this have 1981 * to be taken into account in all callers. -acme 1982 */ 1983 sk_refcnt_debug_inc(newsk); 1984 sk_set_socket(newsk, NULL); 1985 sk_tx_queue_clear(newsk); 1986 RCU_INIT_POINTER(newsk->sk_wq, NULL); 1987 1988 if (newsk->sk_prot->sockets_allocated) 1989 sk_sockets_allocated_inc(newsk); 1990 1991 if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP) 1992 net_enable_timestamp(); 1993 out: 1994 return newsk; 1995 } 1996 EXPORT_SYMBOL_GPL(sk_clone_lock); 1997 1998 void sk_free_unlock_clone(struct sock *sk) 1999 { 2000 /* It is still raw copy of parent, so invalidate 2001 * destructor and make plain sk_free() */ 2002 sk->sk_destruct = NULL; 2003 bh_unlock_sock(sk); 2004 sk_free(sk); 2005 } 2006 EXPORT_SYMBOL_GPL(sk_free_unlock_clone); 2007 2008 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) 2009 { 2010 u32 max_segs = 1; 2011 2012 sk_dst_set(sk, dst); 2013 sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps; 2014 if (sk->sk_route_caps & NETIF_F_GSO) 2015 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; 2016 sk->sk_route_caps &= ~sk->sk_route_nocaps; 2017 if (sk_can_gso(sk)) { 2018 if (dst->header_len && !xfrm_dst_offload_ok(dst)) { 2019 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 2020 } else { 2021 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 2022 sk->sk_gso_max_size = dst->dev->gso_max_size; 2023 max_segs = max_t(u32, dst->dev->gso_max_segs, 1); 2024 } 2025 } 2026 sk->sk_gso_max_segs = max_segs; 2027 } 2028 EXPORT_SYMBOL_GPL(sk_setup_caps); 2029 2030 /* 2031 * Simple resource managers for sockets. 2032 */ 2033 2034 2035 /* 2036 * Write buffer destructor automatically called from kfree_skb. 
2037 */ 2038 void sock_wfree(struct sk_buff *skb) 2039 { 2040 struct sock *sk = skb->sk; 2041 unsigned int len = skb->truesize; 2042 2043 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { 2044 /* 2045 * Keep a reference on sk_wmem_alloc, this will be released 2046 * after sk_write_space() call 2047 */ 2048 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc)); 2049 sk->sk_write_space(sk); 2050 len = 1; 2051 } 2052 /* 2053 * if sk_wmem_alloc reaches 0, we must finish what sk_free() 2054 * could not do because of in-flight packets 2055 */ 2056 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc)) 2057 __sk_free(sk); 2058 } 2059 EXPORT_SYMBOL(sock_wfree); 2060 2061 /* This variant of sock_wfree() is used by TCP, 2062 * since it sets SOCK_USE_WRITE_QUEUE. 2063 */ 2064 void __sock_wfree(struct sk_buff *skb) 2065 { 2066 struct sock *sk = skb->sk; 2067 2068 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)) 2069 __sk_free(sk); 2070 } 2071 2072 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) 2073 { 2074 skb_orphan(skb); 2075 skb->sk = sk; 2076 #ifdef CONFIG_INET 2077 if (unlikely(!sk_fullsock(sk))) { 2078 skb->destructor = sock_edemux; 2079 sock_hold(sk); 2080 return; 2081 } 2082 #endif 2083 skb->destructor = sock_wfree; 2084 skb_set_hash_from_sk(skb, sk); 2085 /* 2086 * We used to take a refcount on sk, but following operation 2087 * is enough to guarantee sk_free() wont free this sock until 2088 * all in-flight packets are completed 2089 */ 2090 refcount_add(skb->truesize, &sk->sk_wmem_alloc); 2091 } 2092 EXPORT_SYMBOL(skb_set_owner_w); 2093 2094 static bool can_skb_orphan_partial(const struct sk_buff *skb) 2095 { 2096 #ifdef CONFIG_TLS_DEVICE 2097 /* Drivers depend on in-order delivery for crypto offload, 2098 * partial orphan breaks out-of-order-OK logic. 2099 */ 2100 if (skb->decrypted) 2101 return false; 2102 #endif 2103 return (skb->destructor == sock_wfree || 2104 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree)); 2105 } 2106 2107 /* This helper is used by netem, as it can hold packets in its 2108 * delay queue. We want to allow the owner socket to send more 2109 * packets, as if they were already TX completed by a typical driver. 2110 * But we also want to keep skb->sk set because some packet schedulers 2111 * rely on it (sch_fq for example). 2112 */ 2113 void skb_orphan_partial(struct sk_buff *skb) 2114 { 2115 if (skb_is_tcp_pure_ack(skb)) 2116 return; 2117 2118 if (can_skb_orphan_partial(skb)) { 2119 struct sock *sk = skb->sk; 2120 2121 if (refcount_inc_not_zero(&sk->sk_refcnt)) { 2122 WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)); 2123 skb->destructor = sock_efree; 2124 } 2125 } else { 2126 skb_orphan(skb); 2127 } 2128 } 2129 EXPORT_SYMBOL(skb_orphan_partial); 2130 2131 /* 2132 * Read buffer destructor automatically called from kfree_skb. 2133 */ 2134 void sock_rfree(struct sk_buff *skb) 2135 { 2136 struct sock *sk = skb->sk; 2137 unsigned int len = skb->truesize; 2138 2139 atomic_sub(len, &sk->sk_rmem_alloc); 2140 sk_mem_uncharge(sk, len); 2141 } 2142 EXPORT_SYMBOL(sock_rfree); 2143 2144 /* 2145 * Buffer destructor for skbs that are not used directly in read or write 2146 * path, e.g. for error handler skbs. Automatically called from kfree_skb. 2147 */ 2148 void sock_efree(struct sk_buff *skb) 2149 { 2150 sock_put(skb->sk); 2151 } 2152 EXPORT_SYMBOL(sock_efree); 2153 2154 /* Buffer destructor for prefetch/receive path where reference count may 2155 * not be held, e.g. for listen sockets. 
2154 /* Buffer destructor for the prefetch/receive path, where the reference
2155  * count may not be held, e.g. for listen sockets.
2156  */
2157 #ifdef CONFIG_INET
2158 void sock_pfree(struct sk_buff *skb)
2159 {
2160	if (sk_is_refcounted(skb->sk))
2161		sock_gen_put(skb->sk);
2162 }
2163 EXPORT_SYMBOL(sock_pfree);
2164 #endif /* CONFIG_INET */
2165
2166 kuid_t sock_i_uid(struct sock *sk)
2167 {
2168	kuid_t uid;
2169
2170	read_lock_bh(&sk->sk_callback_lock);
2171	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID;
2172	read_unlock_bh(&sk->sk_callback_lock);
2173	return uid;
2174 }
2175 EXPORT_SYMBOL(sock_i_uid);
2176
2177 unsigned long sock_i_ino(struct sock *sk)
2178 {
2179	unsigned long ino;
2180
2181	read_lock_bh(&sk->sk_callback_lock);
2182	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
2183	read_unlock_bh(&sk->sk_callback_lock);
2184	return ino;
2185 }
2186 EXPORT_SYMBOL(sock_i_ino);
2187
2188 /*
2189  * Allocate an skb from the socket's send buffer.
2190  */
2191 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
2192			     gfp_t priority)
2193 {
2194	if (force ||
2195	    refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
2196		struct sk_buff *skb = alloc_skb(size, priority);
2197
2198		if (skb) {
2199			skb_set_owner_w(skb, sk);
2200			return skb;
2201		}
2202	}
2203	return NULL;
2204 }
2205 EXPORT_SYMBOL(sock_wmalloc);
2206
2207 static void sock_ofree(struct sk_buff *skb)
2208 {
2209	struct sock *sk = skb->sk;
2210
2211	atomic_sub(skb->truesize, &sk->sk_omem_alloc);
2212 }
2213
2214 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size,
2215			     gfp_t priority)
2216 {
2217	struct sk_buff *skb;
2218
2219	/* small safe race: SKB_TRUESIZE may differ from final skb->truesize */
2220	if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) >
2221	    sysctl_optmem_max)
2222		return NULL;
2223
2224	skb = alloc_skb(size, priority);
2225	if (!skb)
2226		return NULL;
2227
2228	atomic_add(skb->truesize, &sk->sk_omem_alloc);
2229	skb->sk = sk;
2230	skb->destructor = sock_ofree;
2231	return skb;
2232 }
2233
2234 /*
2235  * Allocate a memory block from the socket's option memory buffer.
2236  */
2237 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
2238 {
2239	if ((unsigned int)size <= sysctl_optmem_max &&
2240	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
2241		void *mem;
2242		/* First do the add, to avoid the race if kmalloc
2243		 * might sleep.
2244		 */
2245		atomic_add(size, &sk->sk_omem_alloc);
2246		mem = kmalloc(size, priority);
2247		if (mem)
2248			return mem;
2249		atomic_sub(size, &sk->sk_omem_alloc);
2250	}
2251	return NULL;
2252 }
2253 EXPORT_SYMBOL(sock_kmalloc);
2254
2255 /* Free an option memory block. Note: we actually want the inline
2256  * here, as it allows gcc to detect the nullify and fold away the
2257  * condition entirely.
2258  */
2259 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
2260				   const bool nullify)
2261 {
2262	if (WARN_ON_ONCE(!mem))
2263		return;
2264	if (nullify)
2265		kfree_sensitive(mem);
2266	else
2267		kfree(mem);
2268	atomic_sub(size, &sk->sk_omem_alloc);
2269 }
2270
2271 void sock_kfree_s(struct sock *sk, void *mem, int size)
2272 {
2273	__sock_kfree_s(sk, mem, size, false);
2274 }
2275 EXPORT_SYMBOL(sock_kfree_s);
2276
2277 void sock_kzfree_s(struct sock *sk, void *mem, int size)
2278 {
2279	__sock_kfree_s(sk, mem, size, true);
2280 }
2281 EXPORT_SYMBOL(sock_kzfree_s);
2282
2283 /* It is almost wait_for_tcp_memory() minus release_sock/lock_sock;
2284    I think these locks should be removed for datagram sockets.
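 */

/*
 * A minimal sketch (demo_blocking_alloc() is hypothetical): writers do not
 * normally call the helper below directly, they go through
 * sock_alloc_send_skb(), which sleeps in sock_wait_for_wmem() until
 * sk_wmem_alloc drops below sk_sndbuf, the timeout elapses, or a signal
 * arrives.
 */
static inline struct sk_buff *demo_blocking_alloc(struct sock *sk, size_t len)
{
	int err;
	struct sk_buff *skb = sock_alloc_send_skb(sk, len, 0, &err);

	return skb ? skb : ERR_PTR(err);
}

/*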
2285 */ 2286 static long sock_wait_for_wmem(struct sock *sk, long timeo) 2287 { 2288 DEFINE_WAIT(wait); 2289 2290 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2291 for (;;) { 2292 if (!timeo) 2293 break; 2294 if (signal_pending(current)) 2295 break; 2296 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 2297 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2298 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) 2299 break; 2300 if (sk->sk_shutdown & SEND_SHUTDOWN) 2301 break; 2302 if (sk->sk_err) 2303 break; 2304 timeo = schedule_timeout(timeo); 2305 } 2306 finish_wait(sk_sleep(sk), &wait); 2307 return timeo; 2308 } 2309 2310 2311 /* 2312 * Generic send/receive buffer handlers 2313 */ 2314 2315 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, 2316 unsigned long data_len, int noblock, 2317 int *errcode, int max_page_order) 2318 { 2319 struct sk_buff *skb; 2320 long timeo; 2321 int err; 2322 2323 timeo = sock_sndtimeo(sk, noblock); 2324 for (;;) { 2325 err = sock_error(sk); 2326 if (err != 0) 2327 goto failure; 2328 2329 err = -EPIPE; 2330 if (sk->sk_shutdown & SEND_SHUTDOWN) 2331 goto failure; 2332 2333 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf)) 2334 break; 2335 2336 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2337 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 2338 err = -EAGAIN; 2339 if (!timeo) 2340 goto failure; 2341 if (signal_pending(current)) 2342 goto interrupted; 2343 timeo = sock_wait_for_wmem(sk, timeo); 2344 } 2345 skb = alloc_skb_with_frags(header_len, data_len, max_page_order, 2346 errcode, sk->sk_allocation); 2347 if (skb) 2348 skb_set_owner_w(skb, sk); 2349 return skb; 2350 2351 interrupted: 2352 err = sock_intr_errno(timeo); 2353 failure: 2354 *errcode = err; 2355 return NULL; 2356 } 2357 EXPORT_SYMBOL(sock_alloc_send_pskb); 2358 2359 struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, 2360 int noblock, int *errcode) 2361 { 2362 return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0); 2363 } 2364 EXPORT_SYMBOL(sock_alloc_send_skb); 2365 2366 int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, 2367 struct sockcm_cookie *sockc) 2368 { 2369 u32 tsflags; 2370 2371 switch (cmsg->cmsg_type) { 2372 case SO_MARK: 2373 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 2374 return -EPERM; 2375 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 2376 return -EINVAL; 2377 sockc->mark = *(u32 *)CMSG_DATA(cmsg); 2378 break; 2379 case SO_TIMESTAMPING_OLD: 2380 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 2381 return -EINVAL; 2382 2383 tsflags = *(u32 *)CMSG_DATA(cmsg); 2384 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK) 2385 return -EINVAL; 2386 2387 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; 2388 sockc->tsflags |= tsflags; 2389 break; 2390 case SCM_TXTIME: 2391 if (!sock_flag(sk, SOCK_TXTIME)) 2392 return -EINVAL; 2393 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64))) 2394 return -EINVAL; 2395 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg)); 2396 break; 2397 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. 
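 * Accepting them here as no-ops avoids returning -EINVAL to callers
 * that mix them with other SOL_SOCKET control messages.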
 */
2398	case SCM_RIGHTS:
2399	case SCM_CREDENTIALS:
2400		break;
2401	default:
2402		return -EINVAL;
2403	}
2404	return 0;
2405 }
2406 EXPORT_SYMBOL(__sock_cmsg_send);
2407
2408 int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
2409		    struct sockcm_cookie *sockc)
2410 {
2411	struct cmsghdr *cmsg;
2412	int ret;
2413
2414	for_each_cmsghdr(cmsg, msg) {
2415		if (!CMSG_OK(msg, cmsg))
2416			return -EINVAL;
2417		if (cmsg->cmsg_level != SOL_SOCKET)
2418			continue;
2419		ret = __sock_cmsg_send(sk, msg, cmsg, sockc);
2420		if (ret)
2421			return ret;
2422	}
2423	return 0;
2424 }
2425 EXPORT_SYMBOL(sock_cmsg_send);
2426
2427 static void sk_enter_memory_pressure(struct sock *sk)
2428 {
2429	if (!sk->sk_prot->enter_memory_pressure)
2430		return;
2431
2432	sk->sk_prot->enter_memory_pressure(sk);
2433 }
2434
2435 static void sk_leave_memory_pressure(struct sock *sk)
2436 {
2437	if (sk->sk_prot->leave_memory_pressure) {
2438		sk->sk_prot->leave_memory_pressure(sk);
2439	} else {
2440		unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
2441
2442		if (memory_pressure && READ_ONCE(*memory_pressure))
2443			WRITE_ONCE(*memory_pressure, 0);
2444	}
2445 }
2446
2447 #define SKB_FRAG_PAGE_ORDER	get_order(32768)
2448 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key);
2449
2450 /**
2451  * skb_page_frag_refill - check that a page_frag contains enough room
2452  * @sz: minimum size of the fragment we want to get
2453  * @pfrag: pointer to page_frag
2454  * @gfp: priority for memory allocation
2455  *
2456  * Note: While this allocator tries to use high order pages, there is
2457  * no guarantee that allocations succeed. Therefore, @sz MUST be
2458  * less than or equal to PAGE_SIZE.
2459  */
2460 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
2461 {
2462	if (pfrag->page) {
2463		if (page_ref_count(pfrag->page) == 1) {
2464			pfrag->offset = 0;
2465			return true;
2466		}
2467		if (pfrag->offset + sz <= pfrag->size)
2468			return true;
2469		put_page(pfrag->page);
2470	}
2471
2472	pfrag->offset = 0;
2473	if (SKB_FRAG_PAGE_ORDER &&
2474	    !static_branch_unlikely(&net_high_order_alloc_disable_key)) {
2475		/* Avoid direct reclaim but allow kswapd to wake */
2476		pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) |
2477					  __GFP_COMP | __GFP_NOWARN |
2478					  __GFP_NORETRY,
2479					  SKB_FRAG_PAGE_ORDER);
2480		if (likely(pfrag->page)) {
2481			pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER;
2482			return true;
2483		}
2484	}
2485	pfrag->page = alloc_page(gfp);
2486	if (likely(pfrag->page)) {
2487		pfrag->size = PAGE_SIZE;
2488		return true;
2489	}
2490	return false;
2491 }
2492 EXPORT_SYMBOL(skb_page_frag_refill);
2493
2494 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
2495 {
2496	if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation)))
2497		return true;
2498
2499	sk_enter_memory_pressure(sk);
2500	sk_stream_moderate_sndbuf(sk);
2501	return false;
2502 }
2503 EXPORT_SYMBOL(sk_page_frag_refill);
2504
2505 void __lock_sock(struct sock *sk)
2506	__releases(&sk->sk_lock.slock)
2507	__acquires(&sk->sk_lock.slock)
2508 {
2509	DEFINE_WAIT(wait);
2510
2511	for (;;) {
2512		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
2513					  TASK_UNINTERRUPTIBLE);
2514		spin_unlock_bh(&sk->sk_lock.slock);
2515		schedule();
2516		spin_lock_bh(&sk->sk_lock.slock);
2517		if (!sock_owned_by_user(sk))
2518			break;
2519	}
2520	finish_wait(&sk->sk_lock.wq, &wait);
2521 }
2522
2523 void __release_sock(struct sock *sk)
2524	__releases(&sk->sk_lock.slock)
2525	__acquires(&sk->sk_lock.slock)
2526 {
2527	struct sk_buff *skb,
 *next;
2528
2529	while ((skb = sk->sk_backlog.head) != NULL) {
2530		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
2531
2532		spin_unlock_bh(&sk->sk_lock.slock);
2533
2534		do {
2535			next = skb->next;
2536			prefetch(next);
2537			WARN_ON_ONCE(skb_dst_is_noref(skb));
2538			skb_mark_not_on_list(skb);
2539			sk_backlog_rcv(sk, skb);
2540
2541			cond_resched();
2542
2543			skb = next;
2544		} while (skb != NULL);
2545
2546		spin_lock_bh(&sk->sk_lock.slock);
2547	}
2548
2549	/*
2550	 * Doing the zeroing here guarantees we cannot loop forever
2551	 * while a wild producer attempts to flood us.
2552	 */
2553	sk->sk_backlog.len = 0;
2554 }
2555
2556 void __sk_flush_backlog(struct sock *sk)
2557 {
2558	spin_lock_bh(&sk->sk_lock.slock);
2559	__release_sock(sk);
2560	spin_unlock_bh(&sk->sk_lock.slock);
2561 }
2562
2563 /**
2564  * sk_wait_data - wait for data to arrive at sk_receive_queue
2565  * @sk: sock to wait on
2566  * @timeo: for how long
2567  * @skb: last skb seen on sk_receive_queue
2568  *
2569  * Now socket state including sk->sk_err is changed only under the lock,
2570  * hence we may omit checks after joining the wait queue.
2571  * We check the receive queue before schedule() only as an optimization;
2572  * it is very likely that release_sock() added new data.
2573  */
2574 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
2575 {
2576	DEFINE_WAIT_FUNC(wait, woken_wake_function);
2577	int rc;
2578
2579	add_wait_queue(sk_sleep(sk), &wait);
2580	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2581	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
2582	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
2583	remove_wait_queue(sk_sleep(sk), &wait);
2584	return rc;
2585 }
2586 EXPORT_SYMBOL(sk_wait_data);
2587
2588 /**
2589  * __sk_mem_raise_allocated - increase memory_allocated
2590  * @sk: socket
2591  * @size: memory size to allocate
2592  * @amt: pages to allocate
2593  * @kind: allocation type
2594  *
2595  * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc
2596  */
2597 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
2598 {
2599	struct proto *prot = sk->sk_prot;
2600	long allocated = sk_memory_allocated_add(sk, amt);
2601	bool charged = true;
2602
2603	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
2604	    !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
2605		goto suppress_allocation;
2606
2607	/* Under limit. */
2608	if (allocated <= sk_prot_mem_limits(sk, 0)) {
2609		sk_leave_memory_pressure(sk);
2610		return 1;
2611	}
2612
2613	/* Under pressure. */
2614	if (allocated > sk_prot_mem_limits(sk, 1))
2615		sk_enter_memory_pressure(sk);
2616
2617	/* Over hard limit.
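	 * Past the hard limit the charge is normally suppressed; the one
	 * exception below lets a SOCK_STREAM sender that is already at or
	 * over its (freshly moderated) sndbuf proceed, since sndbuf will
	 * throttle it anyway.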
	 */
2618	if (allocated > sk_prot_mem_limits(sk, 2))
2619		goto suppress_allocation;
2620
2621	/* guarantee minimum buffer size under pressure */
2622	if (kind == SK_MEM_RECV) {
2623		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
2624			return 1;
2625
2626	} else { /* SK_MEM_SEND */
2627		int wmem0 = sk_get_wmem0(sk, prot);
2628
2629		if (sk->sk_type == SOCK_STREAM) {
2630			if (sk->sk_wmem_queued < wmem0)
2631				return 1;
2632		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
2633			return 1;
2634		}
2635	}
2636
2637	if (sk_has_memory_pressure(sk)) {
2638		u64 alloc;
2639
2640		if (!sk_under_memory_pressure(sk))
2641			return 1;
2642		alloc = sk_sockets_allocated_read_positive(sk);
2643		if (sk_prot_mem_limits(sk, 2) > alloc *
2644		    sk_mem_pages(sk->sk_wmem_queued +
2645				 atomic_read(&sk->sk_rmem_alloc) +
2646				 sk->sk_forward_alloc))
2647			return 1;
2648	}
2649
2650 suppress_allocation:
2651
2652	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
2653		sk_stream_moderate_sndbuf(sk);
2654
2655		/* Fail only if the socket is _under_ its sndbuf.
2656		 * In this case we cannot block, so we have to fail.
2657		 */
2658		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
2659			return 1;
2660	}
2661
2662	if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
2663		trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
2664
2665	sk_memory_allocated_sub(sk, amt);
2666
2667	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2668		mem_cgroup_uncharge_skmem(sk->sk_memcg, amt);
2669
2670	return 0;
2671 }
2672 EXPORT_SYMBOL(__sk_mem_raise_allocated);
2673
2674 /**
2675  * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
2676  * @sk: socket
2677  * @size: memory size to allocate
2678  * @kind: allocation type
2679  *
2680  * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
2681  * rmem allocation. This function assumes that protocols which have
2682  * memory_pressure use sk_wmem_queued as write buffer accounting.
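 *
 * Callers normally use the sk_wmem_schedule()/sk_rmem_schedule() wrappers
 * from net/sock.h rather than this function, e.g. (sketch):
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		return -ENOBUFS;
 *
 * Return: 1 if the charge was admitted, 0 if it must be failed.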
2683 */ 2684 int __sk_mem_schedule(struct sock *sk, int size, int kind) 2685 { 2686 int ret, amt = sk_mem_pages(size); 2687 2688 sk->sk_forward_alloc += amt << SK_MEM_QUANTUM_SHIFT; 2689 ret = __sk_mem_raise_allocated(sk, size, amt, kind); 2690 if (!ret) 2691 sk->sk_forward_alloc -= amt << SK_MEM_QUANTUM_SHIFT; 2692 return ret; 2693 } 2694 EXPORT_SYMBOL(__sk_mem_schedule); 2695 2696 /** 2697 * __sk_mem_reduce_allocated - reclaim memory_allocated 2698 * @sk: socket 2699 * @amount: number of quanta 2700 * 2701 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc 2702 */ 2703 void __sk_mem_reduce_allocated(struct sock *sk, int amount) 2704 { 2705 sk_memory_allocated_sub(sk, amount); 2706 2707 if (mem_cgroup_sockets_enabled && sk->sk_memcg) 2708 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); 2709 2710 if (sk_under_memory_pressure(sk) && 2711 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) 2712 sk_leave_memory_pressure(sk); 2713 } 2714 EXPORT_SYMBOL(__sk_mem_reduce_allocated); 2715 2716 /** 2717 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated 2718 * @sk: socket 2719 * @amount: number of bytes (rounded down to a SK_MEM_QUANTUM multiple) 2720 */ 2721 void __sk_mem_reclaim(struct sock *sk, int amount) 2722 { 2723 amount >>= SK_MEM_QUANTUM_SHIFT; 2724 sk->sk_forward_alloc -= amount << SK_MEM_QUANTUM_SHIFT; 2725 __sk_mem_reduce_allocated(sk, amount); 2726 } 2727 EXPORT_SYMBOL(__sk_mem_reclaim); 2728 2729 int sk_set_peek_off(struct sock *sk, int val) 2730 { 2731 sk->sk_peek_off = val; 2732 return 0; 2733 } 2734 EXPORT_SYMBOL_GPL(sk_set_peek_off); 2735 2736 /* 2737 * Set of default routines for initialising struct proto_ops when 2738 * the protocol does not support a particular function. In certain 2739 * cases where it makes no sense for a protocol to have a "do nothing" 2740 * function, some default processing is provided. 
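 *
 * For example (a sketch; demo_ops and PF_DEMO are hypothetical), a family
 * typically wires these stubs into its struct proto_ops for the operations
 * it does not support:
 *
 *	static const struct proto_ops demo_ops = {
 *		.family   = PF_DEMO,
 *		.owner    = THIS_MODULE,
 *		.bind     = sock_no_bind,
 *		.accept   = sock_no_accept,
 *		.mmap     = sock_no_mmap,
 *		.sendpage = sock_no_sendpage,
 *	};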
2741 */ 2742 2743 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) 2744 { 2745 return -EOPNOTSUPP; 2746 } 2747 EXPORT_SYMBOL(sock_no_bind); 2748 2749 int sock_no_connect(struct socket *sock, struct sockaddr *saddr, 2750 int len, int flags) 2751 { 2752 return -EOPNOTSUPP; 2753 } 2754 EXPORT_SYMBOL(sock_no_connect); 2755 2756 int sock_no_socketpair(struct socket *sock1, struct socket *sock2) 2757 { 2758 return -EOPNOTSUPP; 2759 } 2760 EXPORT_SYMBOL(sock_no_socketpair); 2761 2762 int sock_no_accept(struct socket *sock, struct socket *newsock, int flags, 2763 bool kern) 2764 { 2765 return -EOPNOTSUPP; 2766 } 2767 EXPORT_SYMBOL(sock_no_accept); 2768 2769 int sock_no_getname(struct socket *sock, struct sockaddr *saddr, 2770 int peer) 2771 { 2772 return -EOPNOTSUPP; 2773 } 2774 EXPORT_SYMBOL(sock_no_getname); 2775 2776 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 2777 { 2778 return -EOPNOTSUPP; 2779 } 2780 EXPORT_SYMBOL(sock_no_ioctl); 2781 2782 int sock_no_listen(struct socket *sock, int backlog) 2783 { 2784 return -EOPNOTSUPP; 2785 } 2786 EXPORT_SYMBOL(sock_no_listen); 2787 2788 int sock_no_shutdown(struct socket *sock, int how) 2789 { 2790 return -EOPNOTSUPP; 2791 } 2792 EXPORT_SYMBOL(sock_no_shutdown); 2793 2794 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len) 2795 { 2796 return -EOPNOTSUPP; 2797 } 2798 EXPORT_SYMBOL(sock_no_sendmsg); 2799 2800 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len) 2801 { 2802 return -EOPNOTSUPP; 2803 } 2804 EXPORT_SYMBOL(sock_no_sendmsg_locked); 2805 2806 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len, 2807 int flags) 2808 { 2809 return -EOPNOTSUPP; 2810 } 2811 EXPORT_SYMBOL(sock_no_recvmsg); 2812 2813 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) 2814 { 2815 /* Mirror missing mmap method error code */ 2816 return -ENODEV; 2817 } 2818 EXPORT_SYMBOL(sock_no_mmap); 2819 2820 /* 2821 * When a file is received (via SCM_RIGHTS, etc), we must bump the 2822 * various sock-based usage counts. 
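 * Currently that means refreshing the netprio and classid cgroup state,
 * so that the receiving task's cgroup configuration applies to the socket.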
2823 */ 2824 void __receive_sock(struct file *file) 2825 { 2826 struct socket *sock; 2827 2828 sock = sock_from_file(file); 2829 if (sock) { 2830 sock_update_netprioidx(&sock->sk->sk_cgrp_data); 2831 sock_update_classid(&sock->sk->sk_cgrp_data); 2832 } 2833 } 2834 2835 ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags) 2836 { 2837 ssize_t res; 2838 struct msghdr msg = {.msg_flags = flags}; 2839 struct kvec iov; 2840 char *kaddr = kmap(page); 2841 iov.iov_base = kaddr + offset; 2842 iov.iov_len = size; 2843 res = kernel_sendmsg(sock, &msg, &iov, 1, size); 2844 kunmap(page); 2845 return res; 2846 } 2847 EXPORT_SYMBOL(sock_no_sendpage); 2848 2849 ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page, 2850 int offset, size_t size, int flags) 2851 { 2852 ssize_t res; 2853 struct msghdr msg = {.msg_flags = flags}; 2854 struct kvec iov; 2855 char *kaddr = kmap(page); 2856 2857 iov.iov_base = kaddr + offset; 2858 iov.iov_len = size; 2859 res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size); 2860 kunmap(page); 2861 return res; 2862 } 2863 EXPORT_SYMBOL(sock_no_sendpage_locked); 2864 2865 /* 2866 * Default Socket Callbacks 2867 */ 2868 2869 static void sock_def_wakeup(struct sock *sk) 2870 { 2871 struct socket_wq *wq; 2872 2873 rcu_read_lock(); 2874 wq = rcu_dereference(sk->sk_wq); 2875 if (skwq_has_sleeper(wq)) 2876 wake_up_interruptible_all(&wq->wait); 2877 rcu_read_unlock(); 2878 } 2879 2880 static void sock_def_error_report(struct sock *sk) 2881 { 2882 struct socket_wq *wq; 2883 2884 rcu_read_lock(); 2885 wq = rcu_dereference(sk->sk_wq); 2886 if (skwq_has_sleeper(wq)) 2887 wake_up_interruptible_poll(&wq->wait, EPOLLERR); 2888 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); 2889 rcu_read_unlock(); 2890 } 2891 2892 void sock_def_readable(struct sock *sk) 2893 { 2894 struct socket_wq *wq; 2895 2896 rcu_read_lock(); 2897 wq = rcu_dereference(sk->sk_wq); 2898 if (skwq_has_sleeper(wq)) 2899 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI | 2900 EPOLLRDNORM | EPOLLRDBAND); 2901 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 2902 rcu_read_unlock(); 2903 } 2904 2905 static void sock_def_write_space(struct sock *sk) 2906 { 2907 struct socket_wq *wq; 2908 2909 rcu_read_lock(); 2910 2911 /* Do not wake up a writer until he can make "significant" 2912 * progress. 
--DaveM 2913 */ 2914 if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) { 2915 wq = rcu_dereference(sk->sk_wq); 2916 if (skwq_has_sleeper(wq)) 2917 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 2918 EPOLLWRNORM | EPOLLWRBAND); 2919 2920 /* Should agree with poll, otherwise some programs break */ 2921 if (sock_writeable(sk)) 2922 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 2923 } 2924 2925 rcu_read_unlock(); 2926 } 2927 2928 static void sock_def_destruct(struct sock *sk) 2929 { 2930 } 2931 2932 void sk_send_sigurg(struct sock *sk) 2933 { 2934 if (sk->sk_socket && sk->sk_socket->file) 2935 if (send_sigurg(&sk->sk_socket->file->f_owner)) 2936 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); 2937 } 2938 EXPORT_SYMBOL(sk_send_sigurg); 2939 2940 void sk_reset_timer(struct sock *sk, struct timer_list* timer, 2941 unsigned long expires) 2942 { 2943 if (!mod_timer(timer, expires)) 2944 sock_hold(sk); 2945 } 2946 EXPORT_SYMBOL(sk_reset_timer); 2947 2948 void sk_stop_timer(struct sock *sk, struct timer_list* timer) 2949 { 2950 if (del_timer(timer)) 2951 __sock_put(sk); 2952 } 2953 EXPORT_SYMBOL(sk_stop_timer); 2954 2955 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer) 2956 { 2957 if (del_timer_sync(timer)) 2958 __sock_put(sk); 2959 } 2960 EXPORT_SYMBOL(sk_stop_timer_sync); 2961 2962 void sock_init_data(struct socket *sock, struct sock *sk) 2963 { 2964 sk_init_common(sk); 2965 sk->sk_send_head = NULL; 2966 2967 timer_setup(&sk->sk_timer, NULL, 0); 2968 2969 sk->sk_allocation = GFP_KERNEL; 2970 sk->sk_rcvbuf = sysctl_rmem_default; 2971 sk->sk_sndbuf = sysctl_wmem_default; 2972 sk->sk_state = TCP_CLOSE; 2973 sk_set_socket(sk, sock); 2974 2975 sock_set_flag(sk, SOCK_ZAPPED); 2976 2977 if (sock) { 2978 sk->sk_type = sock->type; 2979 RCU_INIT_POINTER(sk->sk_wq, &sock->wq); 2980 sock->sk = sk; 2981 sk->sk_uid = SOCK_INODE(sock)->i_uid; 2982 } else { 2983 RCU_INIT_POINTER(sk->sk_wq, NULL); 2984 sk->sk_uid = make_kuid(sock_net(sk)->user_ns, 0); 2985 } 2986 2987 rwlock_init(&sk->sk_callback_lock); 2988 if (sk->sk_kern_sock) 2989 lockdep_set_class_and_name( 2990 &sk->sk_callback_lock, 2991 af_kern_callback_keys + sk->sk_family, 2992 af_family_kern_clock_key_strings[sk->sk_family]); 2993 else 2994 lockdep_set_class_and_name( 2995 &sk->sk_callback_lock, 2996 af_callback_keys + sk->sk_family, 2997 af_family_clock_key_strings[sk->sk_family]); 2998 2999 sk->sk_state_change = sock_def_wakeup; 3000 sk->sk_data_ready = sock_def_readable; 3001 sk->sk_write_space = sock_def_write_space; 3002 sk->sk_error_report = sock_def_error_report; 3003 sk->sk_destruct = sock_def_destruct; 3004 3005 sk->sk_frag.page = NULL; 3006 sk->sk_frag.offset = 0; 3007 sk->sk_peek_off = -1; 3008 3009 sk->sk_peer_pid = NULL; 3010 sk->sk_peer_cred = NULL; 3011 sk->sk_write_pending = 0; 3012 sk->sk_rcvlowat = 1; 3013 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 3014 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 3015 3016 sk->sk_stamp = SK_DEFAULT_STAMP; 3017 #if BITS_PER_LONG==32 3018 seqlock_init(&sk->sk_stamp_seq); 3019 #endif 3020 atomic_set(&sk->sk_zckey, 0); 3021 3022 #ifdef CONFIG_NET_RX_BUSY_POLL 3023 sk->sk_napi_id = 0; 3024 sk->sk_ll_usec = sysctl_net_busy_read; 3025 #endif 3026 3027 sk->sk_max_pacing_rate = ~0UL; 3028 sk->sk_pacing_rate = ~0UL; 3029 WRITE_ONCE(sk->sk_pacing_shift, 10); 3030 sk->sk_incoming_cpu = -1; 3031 3032 sk_rx_queue_clear(sk); 3033 /* 3034 * Before updating sk_refcnt, we must commit prior changes to memory 3035 * (Documentation/RCU/rculist_nulls.rst for details) 3036 */ 3037 
	smp_wmb();
3038	refcount_set(&sk->sk_refcnt, 1);
3039	atomic_set(&sk->sk_drops, 0);
3040 }
3041 EXPORT_SYMBOL(sock_init_data);
3042
3043 void lock_sock_nested(struct sock *sk, int subclass)
3044 {
3045	might_sleep();
3046	spin_lock_bh(&sk->sk_lock.slock);
3047	if (sk->sk_lock.owned)
3048		__lock_sock(sk);
3049	sk->sk_lock.owned = 1;
3050	spin_unlock(&sk->sk_lock.slock);
3051	/*
3052	 * The sk_lock has mutex_lock() semantics here:
3053	 */
3054	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
3055	local_bh_enable();
3056 }
3057 EXPORT_SYMBOL(lock_sock_nested);
3058
3059 void release_sock(struct sock *sk)
3060 {
3061	spin_lock_bh(&sk->sk_lock.slock);
3062	if (sk->sk_backlog.tail)
3063		__release_sock(sk);
3064
3065	/* Warning: release_cb() might need to release sk ownership,
3066	 * i.e. call sock_release_ownership(sk) before us.
3067	 */
3068	if (sk->sk_prot->release_cb)
3069		sk->sk_prot->release_cb(sk);
3070
3071	sock_release_ownership(sk);
3072	if (waitqueue_active(&sk->sk_lock.wq))
3073		wake_up(&sk->sk_lock.wq);
3074	spin_unlock_bh(&sk->sk_lock.slock);
3075 }
3076 EXPORT_SYMBOL(release_sock);
3077
3078 /**
3079  * lock_sock_fast - fast version of lock_sock
3080  * @sk: socket
3081  *
3082  * This version should be used for very small sections, where the process
3083  * won't block. It returns false if the fast path is taken:
3084  *
3085  *   sk_lock.slock locked, owned = 0, BH disabled
3086  *
3087  * and true if the slow path is taken:
3088  *
3089  *   sk_lock.slock unlocked, owned = 1, BH enabled
3090  */
3091 bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
3092 {
3093	might_sleep();
3094	spin_lock_bh(&sk->sk_lock.slock);
3095
3096	if (!sk->sk_lock.owned)
3097		/*
3098		 * Note: BH must remain disabled here.
3099		 */
3100		return false;
3101
3102	__lock_sock(sk);
3103	sk->sk_lock.owned = 1;
3104	spin_unlock(&sk->sk_lock.slock);
3105	/*
3106	 * The sk_lock has mutex_lock() semantics here:
3107	 */
3108	mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
3109	__acquire(&sk->sk_lock.slock);
3110	local_bh_enable();
3111	return true;
3112 }
3113 EXPORT_SYMBOL(lock_sock_fast);
3114
3115 int sock_gettstamp(struct socket *sock, void __user *userstamp,
3116		    bool timeval, bool time32)
3117 {
3118	struct sock *sk = sock->sk;
3119	struct timespec64 ts;
3120
3121	sock_enable_timestamp(sk, SOCK_TIMESTAMP);
3122	ts = ktime_to_timespec64(sock_read_timestamp(sk));
3123	if (ts.tv_sec == -1)
3124		return -ENOENT;
3125	if (ts.tv_sec == 0) {
3126		ktime_t kt = ktime_get_real();
3127		sock_write_timestamp(sk, kt);
3128		ts = ktime_to_timespec64(kt);
3129	}
3130
3131	if (timeval)
3132		ts.tv_nsec /= 1000;
3133
3134 #ifdef CONFIG_COMPAT_32BIT_TIME
3135	if (time32)
3136		return put_old_timespec32(&ts, userstamp);
3137 #endif
3138 #ifdef CONFIG_SPARC64
3139	/* beware of padding in sparc64 timeval */
3140	if (timeval && !in_compat_syscall()) {
3141		struct __kernel_old_timeval __user tv = {
3142			.tv_sec = ts.tv_sec,
3143			.tv_usec = ts.tv_nsec,
3144		};
3145		if (copy_to_user(userstamp, &tv, sizeof(tv)))
3146			return -EFAULT;
3147		return 0;
3148	}
3149 #endif
3150	return put_timespec64(&ts, userstamp);
3151 }
3152 EXPORT_SYMBOL(sock_gettstamp);
3153
3154 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag)
3155 {
3156	if (!sock_flag(sk, flag)) {
3157		unsigned long previous_flags = sk->sk_flags;
3158
3159		sock_set_flag(sk, flag);
3160		/*
3161		 * We just set one of the two flags which require net
3162		 * time stamping, but time stamping might have been on
3163		 * already because of the other one.
3164		 */
3165		if (sock_needs_netstamp(sk)
 &&
3166		    !(previous_flags & SK_FLAGS_TIMESTAMP))
3167			net_enable_timestamp();
3168	}
3169 }
3170
3171 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
3172		       int level, int type)
3173 {
3174	struct sock_exterr_skb *serr;
3175	struct sk_buff *skb;
3176	int copied, err;
3177
3178	err = -EAGAIN;
3179	skb = sock_dequeue_err_skb(sk);
3180	if (skb == NULL)
3181		goto out;
3182
3183	copied = skb->len;
3184	if (copied > len) {
3185		msg->msg_flags |= MSG_TRUNC;
3186		copied = len;
3187	}
3188	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3189	if (err)
3190		goto out_free_skb;
3191
3192	sock_recv_timestamp(msg, sk, skb);
3193
3194	serr = SKB_EXT_ERR(skb);
3195	put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
3196
3197	msg->msg_flags |= MSG_ERRQUEUE;
3198	err = copied;
3199
3200 out_free_skb:
3201	kfree_skb(skb);
3202 out:
3203	return err;
3204 }
3205 EXPORT_SYMBOL(sock_recv_errqueue);
3206
3207 /*
3208  * Get a socket option on a socket.
3209  *
3210  * FIX: POSIX 1003.1g is very ambiguous here. It states that
3211  * asynchronous errors should be reported by getsockopt. We assume
3212  * this means if you specify SO_ERROR (otherwise what's the point of it).
3213  */
3214 int sock_common_getsockopt(struct socket *sock, int level, int optname,
3215			    char __user *optval, int __user *optlen)
3216 {
3217	struct sock *sk = sock->sk;
3218
3219	return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
3220 }
3221 EXPORT_SYMBOL(sock_common_getsockopt);
3222
3223 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3224			 int flags)
3225 {
3226	struct sock *sk = sock->sk;
3227	int addr_len = 0;
3228	int err;
3229
3230	err = sk->sk_prot->recvmsg(sk, msg, size, flags & MSG_DONTWAIT,
3231				   flags & ~MSG_DONTWAIT, &addr_len);
3232	if (err >= 0)
3233		msg->msg_namelen = addr_len;
3234	return err;
3235 }
3236 EXPORT_SYMBOL(sock_common_recvmsg);
3237
3238 /*
3239  * Set socket options on an inet socket.
3240  */
3241 int sock_common_setsockopt(struct socket *sock, int level, int optname,
3242			    sockptr_t optval, unsigned int optlen)
3243 {
3244	struct sock *sk = sock->sk;
3245
3246	return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
3247 }
3248 EXPORT_SYMBOL(sock_common_setsockopt);
3249
3250 void sk_common_release(struct sock *sk)
3251 {
3252	if (sk->sk_prot->destroy)
3253		sk->sk_prot->destroy(sk);
3254
3255	/*
3256	 * Observation: when sk_common_release is called, processes have
3257	 * no access to the socket, but the network stack still does.
3258	 * Step one, detach it from networking:
3259	 *
3260	 * A. Remove it from the hash tables.
3261	 */
3262
3263	sk->sk_prot->unhash(sk);
3264
3265	/*
3266	 * At this point the socket cannot receive new packets, but it is
3267	 * possible that some packets are in flight, because some CPU runs
3268	 * the receiver and did the hash table lookup before we unhashed
3269	 * the socket. They will reach the receive queue and be purged by
3270	 * the socket destructor.
3271	 *
3272	 * Also, we still have packets pending on the receive queue and,
3273	 * probably, our own packets waiting in device queues. sock_destroy
3274	 * will drain the receive queue, but transmitted packets will delay
	 * socket destruction until the last reference is released.
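 *
 * Step two, sock_orphan() below detaches the socket from its file and
 * wait queue, so no new process-side references can appear.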
3275 */ 3276 3277 sock_orphan(sk); 3278 3279 xfrm_sk_free_policy(sk); 3280 3281 sk_refcnt_debug_release(sk); 3282 3283 sock_put(sk); 3284 } 3285 EXPORT_SYMBOL(sk_common_release); 3286 3287 void sk_get_meminfo(const struct sock *sk, u32 *mem) 3288 { 3289 memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS); 3290 3291 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); 3292 mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf); 3293 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); 3294 mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf); 3295 mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc; 3296 mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued); 3297 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); 3298 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); 3299 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); 3300 } 3301 3302 #ifdef CONFIG_PROC_FS 3303 #define PROTO_INUSE_NR 64 /* should be enough for the first time */ 3304 struct prot_inuse { 3305 int val[PROTO_INUSE_NR]; 3306 }; 3307 3308 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); 3309 3310 void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) 3311 { 3312 __this_cpu_add(net->core.prot_inuse->val[prot->inuse_idx], val); 3313 } 3314 EXPORT_SYMBOL_GPL(sock_prot_inuse_add); 3315 3316 int sock_prot_inuse_get(struct net *net, struct proto *prot) 3317 { 3318 int cpu, idx = prot->inuse_idx; 3319 int res = 0; 3320 3321 for_each_possible_cpu(cpu) 3322 res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx]; 3323 3324 return res >= 0 ? res : 0; 3325 } 3326 EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 3327 3328 static void sock_inuse_add(struct net *net, int val) 3329 { 3330 this_cpu_add(*net->core.sock_inuse, val); 3331 } 3332 3333 int sock_inuse_get(struct net *net) 3334 { 3335 int cpu, res = 0; 3336 3337 for_each_possible_cpu(cpu) 3338 res += *per_cpu_ptr(net->core.sock_inuse, cpu); 3339 3340 return res; 3341 } 3342 3343 EXPORT_SYMBOL_GPL(sock_inuse_get); 3344 3345 static int __net_init sock_inuse_init_net(struct net *net) 3346 { 3347 net->core.prot_inuse = alloc_percpu(struct prot_inuse); 3348 if (net->core.prot_inuse == NULL) 3349 return -ENOMEM; 3350 3351 net->core.sock_inuse = alloc_percpu(int); 3352 if (net->core.sock_inuse == NULL) 3353 goto out; 3354 3355 return 0; 3356 3357 out: 3358 free_percpu(net->core.prot_inuse); 3359 return -ENOMEM; 3360 } 3361 3362 static void __net_exit sock_inuse_exit_net(struct net *net) 3363 { 3364 free_percpu(net->core.prot_inuse); 3365 free_percpu(net->core.sock_inuse); 3366 } 3367 3368 static struct pernet_operations net_inuse_ops = { 3369 .init = sock_inuse_init_net, 3370 .exit = sock_inuse_exit_net, 3371 }; 3372 3373 static __init int net_inuse_init(void) 3374 { 3375 if (register_pernet_subsys(&net_inuse_ops)) 3376 panic("Cannot initialize net inuse counters"); 3377 3378 return 0; 3379 } 3380 3381 core_initcall(net_inuse_init); 3382 3383 static int assign_proto_idx(struct proto *prot) 3384 { 3385 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 3386 3387 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { 3388 pr_err("PROTO_INUSE_NR exhausted\n"); 3389 return -ENOSPC; 3390 } 3391 3392 set_bit(prot->inuse_idx, proto_inuse_idx); 3393 return 0; 3394 } 3395 3396 static void release_proto_idx(struct proto *prot) 3397 { 3398 if (prot->inuse_idx != PROTO_INUSE_NR - 1) 3399 clear_bit(prot->inuse_idx, proto_inuse_idx); 3400 } 3401 #else 3402 static inline int assign_proto_idx(struct proto *prot) 3403 { 3404 return 0; 3405 } 3406 3407 static inline 
void release_proto_idx(struct proto *prot) 3408 { 3409 } 3410 3411 static void sock_inuse_add(struct net *net, int val) 3412 { 3413 } 3414 #endif 3415 3416 static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot) 3417 { 3418 if (!twsk_prot) 3419 return; 3420 kfree(twsk_prot->twsk_slab_name); 3421 twsk_prot->twsk_slab_name = NULL; 3422 kmem_cache_destroy(twsk_prot->twsk_slab); 3423 twsk_prot->twsk_slab = NULL; 3424 } 3425 3426 static void req_prot_cleanup(struct request_sock_ops *rsk_prot) 3427 { 3428 if (!rsk_prot) 3429 return; 3430 kfree(rsk_prot->slab_name); 3431 rsk_prot->slab_name = NULL; 3432 kmem_cache_destroy(rsk_prot->slab); 3433 rsk_prot->slab = NULL; 3434 } 3435 3436 static int req_prot_init(const struct proto *prot) 3437 { 3438 struct request_sock_ops *rsk_prot = prot->rsk_prot; 3439 3440 if (!rsk_prot) 3441 return 0; 3442 3443 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", 3444 prot->name); 3445 if (!rsk_prot->slab_name) 3446 return -ENOMEM; 3447 3448 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, 3449 rsk_prot->obj_size, 0, 3450 SLAB_ACCOUNT | prot->slab_flags, 3451 NULL); 3452 3453 if (!rsk_prot->slab) { 3454 pr_crit("%s: Can't create request sock SLAB cache!\n", 3455 prot->name); 3456 return -ENOMEM; 3457 } 3458 return 0; 3459 } 3460 3461 int proto_register(struct proto *prot, int alloc_slab) 3462 { 3463 int ret = -ENOBUFS; 3464 3465 if (alloc_slab) { 3466 prot->slab = kmem_cache_create_usercopy(prot->name, 3467 prot->obj_size, 0, 3468 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | 3469 prot->slab_flags, 3470 prot->useroffset, prot->usersize, 3471 NULL); 3472 3473 if (prot->slab == NULL) { 3474 pr_crit("%s: Can't create sock SLAB cache!\n", 3475 prot->name); 3476 goto out; 3477 } 3478 3479 if (req_prot_init(prot)) 3480 goto out_free_request_sock_slab; 3481 3482 if (prot->twsk_prot != NULL) { 3483 prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name); 3484 3485 if (prot->twsk_prot->twsk_slab_name == NULL) 3486 goto out_free_request_sock_slab; 3487 3488 prot->twsk_prot->twsk_slab = 3489 kmem_cache_create(prot->twsk_prot->twsk_slab_name, 3490 prot->twsk_prot->twsk_obj_size, 3491 0, 3492 SLAB_ACCOUNT | 3493 prot->slab_flags, 3494 NULL); 3495 if (prot->twsk_prot->twsk_slab == NULL) 3496 goto out_free_timewait_sock_slab; 3497 } 3498 } 3499 3500 mutex_lock(&proto_list_mutex); 3501 ret = assign_proto_idx(prot); 3502 if (ret) { 3503 mutex_unlock(&proto_list_mutex); 3504 goto out_free_timewait_sock_slab; 3505 } 3506 list_add(&prot->node, &proto_list); 3507 mutex_unlock(&proto_list_mutex); 3508 return ret; 3509 3510 out_free_timewait_sock_slab: 3511 if (alloc_slab && prot->twsk_prot) 3512 tw_prot_cleanup(prot->twsk_prot); 3513 out_free_request_sock_slab: 3514 if (alloc_slab) { 3515 req_prot_cleanup(prot->rsk_prot); 3516 3517 kmem_cache_destroy(prot->slab); 3518 prot->slab = NULL; 3519 } 3520 out: 3521 return ret; 3522 } 3523 EXPORT_SYMBOL(proto_register); 3524 3525 void proto_unregister(struct proto *prot) 3526 { 3527 mutex_lock(&proto_list_mutex); 3528 release_proto_idx(prot); 3529 list_del(&prot->node); 3530 mutex_unlock(&proto_list_mutex); 3531 3532 kmem_cache_destroy(prot->slab); 3533 prot->slab = NULL; 3534 3535 req_prot_cleanup(prot->rsk_prot); 3536 tw_prot_cleanup(prot->twsk_prot); 3537 } 3538 EXPORT_SYMBOL(proto_unregister); 3539 3540 int sock_load_diag_module(int family, int protocol) 3541 { 3542 if (!protocol) { 3543 if (!sock_is_registered(family)) 3544 return -ENOENT; 3545 3546 return 
request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 3547 NETLINK_SOCK_DIAG, family); 3548 } 3549 3550 #ifdef CONFIG_INET 3551 if (family == AF_INET && 3552 protocol != IPPROTO_RAW && 3553 protocol < MAX_INET_PROTOS && 3554 !rcu_access_pointer(inet_protos[protocol])) 3555 return -ENOENT; 3556 #endif 3557 3558 return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK, 3559 NETLINK_SOCK_DIAG, family, protocol); 3560 } 3561 EXPORT_SYMBOL(sock_load_diag_module); 3562 3563 #ifdef CONFIG_PROC_FS 3564 static void *proto_seq_start(struct seq_file *seq, loff_t *pos) 3565 __acquires(proto_list_mutex) 3566 { 3567 mutex_lock(&proto_list_mutex); 3568 return seq_list_start_head(&proto_list, *pos); 3569 } 3570 3571 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos) 3572 { 3573 return seq_list_next(v, &proto_list, pos); 3574 } 3575 3576 static void proto_seq_stop(struct seq_file *seq, void *v) 3577 __releases(proto_list_mutex) 3578 { 3579 mutex_unlock(&proto_list_mutex); 3580 } 3581 3582 static char proto_method_implemented(const void *method) 3583 { 3584 return method == NULL ? 'n' : 'y'; 3585 } 3586 static long sock_prot_memory_allocated(struct proto *proto) 3587 { 3588 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L; 3589 } 3590 3591 static const char *sock_prot_memory_pressure(struct proto *proto) 3592 { 3593 return proto->memory_pressure != NULL ? 3594 proto_memory_pressure(proto) ? "yes" : "no" : "NI"; 3595 } 3596 3597 static void proto_seq_printf(struct seq_file *seq, struct proto *proto) 3598 { 3599 3600 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " 3601 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", 3602 proto->name, 3603 proto->obj_size, 3604 sock_prot_inuse_get(seq_file_net(seq), proto), 3605 sock_prot_memory_allocated(proto), 3606 sock_prot_memory_pressure(proto), 3607 proto->max_header, 3608 proto->slab == NULL ? 
"no" : "yes", 3609 module_name(proto->owner), 3610 proto_method_implemented(proto->close), 3611 proto_method_implemented(proto->connect), 3612 proto_method_implemented(proto->disconnect), 3613 proto_method_implemented(proto->accept), 3614 proto_method_implemented(proto->ioctl), 3615 proto_method_implemented(proto->init), 3616 proto_method_implemented(proto->destroy), 3617 proto_method_implemented(proto->shutdown), 3618 proto_method_implemented(proto->setsockopt), 3619 proto_method_implemented(proto->getsockopt), 3620 proto_method_implemented(proto->sendmsg), 3621 proto_method_implemented(proto->recvmsg), 3622 proto_method_implemented(proto->sendpage), 3623 proto_method_implemented(proto->bind), 3624 proto_method_implemented(proto->backlog_rcv), 3625 proto_method_implemented(proto->hash), 3626 proto_method_implemented(proto->unhash), 3627 proto_method_implemented(proto->get_port), 3628 proto_method_implemented(proto->enter_memory_pressure)); 3629 } 3630 3631 static int proto_seq_show(struct seq_file *seq, void *v) 3632 { 3633 if (v == &proto_list) 3634 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", 3635 "protocol", 3636 "size", 3637 "sockets", 3638 "memory", 3639 "press", 3640 "maxhdr", 3641 "slab", 3642 "module", 3643 "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n"); 3644 else 3645 proto_seq_printf(seq, list_entry(v, struct proto, node)); 3646 return 0; 3647 } 3648 3649 static const struct seq_operations proto_seq_ops = { 3650 .start = proto_seq_start, 3651 .next = proto_seq_next, 3652 .stop = proto_seq_stop, 3653 .show = proto_seq_show, 3654 }; 3655 3656 static __net_init int proto_init_net(struct net *net) 3657 { 3658 if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops, 3659 sizeof(struct seq_net_private))) 3660 return -ENOMEM; 3661 3662 return 0; 3663 } 3664 3665 static __net_exit void proto_exit_net(struct net *net) 3666 { 3667 remove_proc_entry("protocols", net->proc_net); 3668 } 3669 3670 3671 static __net_initdata struct pernet_operations proto_net_ops = { 3672 .init = proto_init_net, 3673 .exit = proto_exit_net, 3674 }; 3675 3676 static int __init proto_init(void) 3677 { 3678 return register_pernet_subsys(&proto_net_ops); 3679 } 3680 3681 subsys_initcall(proto_init); 3682 3683 #endif /* PROC_FS */ 3684 3685 #ifdef CONFIG_NET_RX_BUSY_POLL 3686 bool sk_busy_loop_end(void *p, unsigned long start_time) 3687 { 3688 struct sock *sk = p; 3689 3690 return !skb_queue_empty_lockless(&sk->sk_receive_queue) || 3691 sk_busy_loop_timeout(sk, start_time); 3692 } 3693 EXPORT_SYMBOL(sk_busy_loop_end); 3694 #endif /* CONFIG_NET_RX_BUSY_POLL */ 3695 3696 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len) 3697 { 3698 if (!sk->sk_prot->bind_add) 3699 return -EOPNOTSUPP; 3700 return sk->sk_prot->bind_add(sk, addr, addr_len); 3701 } 3702 EXPORT_SYMBOL(sock_bind_add); 3703