// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/compat.h>
#include <linux/mroute.h>
#include <linux/mroute6.h>
#include <linux/icmpv6.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/proto_memory.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>
#include <net/bpf_sk_storage.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>
#include <net/phonet/phonet.h>

#include <linux/ethtool.h>

#include "dev.h"

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_def_write_space_wfree(struct sock *sk);
static void sock_def_write_space(struct sock *sk);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has the
 * capability @cap in the user namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has the
 * capability @cap in all user namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and that the current process has the
 * capability @cap over the network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */

#define _sock_locks(x)						  \
  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
  x "AF_IEEE802154",	x "AF_CAIF" ,	x "AF_ALG"      , \
  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
  x "AF_QIPCRTR",	x "AF_SMC"     ,	x "AF_XDP"      , \
  x "AF_MCTP"  , \
  x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	_sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	_sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	_sock_locks("elock-")
};

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_branch_inc(&memalloc_socks_key);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_branch_dec(&memalloc_socks_key);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
				 tcp_v6_do_rcv,
				 tcp_v4_do_rcv,
				 sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

void sk_error_report(struct sock *sk)
{
	sk->sk_error_report(sk);

	switch (sk->sk_family) {
	case AF_INET:
		fallthrough;
	case AF_INET6:
		trace_inet_sk_error_report(sk);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(sk_error_report);

int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
	struct __kernel_sock_timeval tv;

	if (timeo == MAX_SCHEDULE_TIMEOUT) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
	} else {
		tv.tv_sec = timeo / HZ;
		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
	}

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
		*(struct old_timeval32 *)optval = tv32;
		return sizeof(tv32);
	}

	if (old_timeval) {
		struct __kernel_old_timeval old_tv;
		old_tv.tv_sec = tv.tv_sec;
		old_tv.tv_usec = tv.tv_usec;
		*(struct __kernel_old_timeval *)optval = old_tv;
		return sizeof(old_tv);
	}

	*(struct __kernel_sock_timeval *)optval = tv;
	return sizeof(tv);
}
EXPORT_SYMBOL(sock_get_timeout);
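
/*
 * Illustrative example of the conversion above (a sketch, not a contract):
 * with HZ == 1000, a 2500-jiffy timeout is reported by sock_get_timeout()
 * as { .tv_sec = 2, .tv_usec = 500000 }, since
 *
 *	tv.tv_sec  = timeo / HZ;
 *	tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
 *
 * MAX_SCHEDULE_TIMEOUT (the "no timeout" sentinel) is reported as {0, 0},
 * matching what SO_RCVTIMEO/SO_SNDTIMEO return when no timeout is set.
 */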
int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
			   sockptr_t optval, int optlen, bool old_timeval)
{
	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32;

		if (optlen < sizeof(tv32))
			return -EINVAL;

		if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
			return -EFAULT;
		tv->tv_sec = tv32.tv_sec;
		tv->tv_usec = tv32.tv_usec;
	} else if (old_timeval) {
		struct __kernel_old_timeval old_tv;

		if (optlen < sizeof(old_tv))
			return -EINVAL;
		if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
			return -EFAULT;
		tv->tv_sec = old_tv.tv_sec;
		tv->tv_usec = old_tv.tv_usec;
	} else {
		if (optlen < sizeof(*tv))
			return -EINVAL;
		if (copy_from_sockptr(tv, optval, sizeof(*tv)))
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL(sock_copy_user_timeval);

static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
			    bool old_timeval)
{
	struct __kernel_sock_timeval tv;
	int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
	long val;

	if (err)
		return err;

	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		WRITE_ONCE(*timeo_p, 0);
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	val = MAX_SCHEDULE_TIMEOUT;
	if ((tv.tv_sec || tv.tv_usec) &&
	    (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)))
		val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec,
						    USEC_PER_SEC / HZ);
	WRITE_ONCE(*timeo_p, val);
	return 0;
}

static bool sk_set_prio_allowed(const struct sock *sk, int val)
{
	return ((val >= TC_PRIO_BESTEFFORT && val <= TC_PRIO_INTERACTIVE) ||
		sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
		sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN));
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from the RCU protected region, make sure we don't leak
	 * a non-refcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);
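
/*
 * Typical caller pattern (an illustrative sketch modeled on the ping
 * socket code, not a contract): protocols usually go through
 * sock_queue_rcv_skb_reason() below, which runs the socket filter and
 * the accounting in __sock_queue_rcv_skb(), and free the skb with the
 * reported reason on failure:
 *
 *	enum skb_drop_reason reason;
 *
 *	if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
 *		kfree_skb_reason(skb, reason);
 */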
int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
			      enum skb_drop_reason *reason)
{
	enum skb_drop_reason drop_reason;
	int err;

	err = sk_filter(sk, skb);
	if (err) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto out;
	}
	err = __sock_queue_rcv_skb(sk, skb);
	switch (err) {
	case -ENOMEM:
		drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		break;
	case -ENOBUFS:
		drop_reason = SKB_DROP_REASON_PROTO_MEM;
		break;
	default:
		drop_reason = SKB_NOT_DROPPED_YET;
		break;
	}
out:
	if (reason)
		*reason = drop_reason;
	return err;
}
EXPORT_SYMBOL(sock_queue_rcv_skb_reason);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);

INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
							   u32));
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
							    u32));
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete &&
	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
			       dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete &&
	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
			       dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);

	/* Sorry... */
	ret = -EPERM;
	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (ifindex < 0)
		goto out;

	/* Paired with all READ_ONCE() done locklessly. */
	WRITE_ONCE(sk->sk_bound_dev_if, ifindex);

	if (sk->sk_prot->rehash)
		sk->sk_prot->rehash(sk);
	sk_dst_reset(sk);

	ret = 0;

out:
#endif

	return ret;
}

int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
{
	int ret;

	if (lock_sk)
		lock_sock(sk);
	ret = sock_bindtoindex_locked(sk, ifindex);
	if (lock_sk)
		release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(sock_bindtoindex);

static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_sockptr(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	sockopt_lock_sock(sk);
	ret = sock_bindtoindex_locked(sk, index);
	sockopt_release_sock(sk);
out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, sockptr_t optval,
				sockptr_t optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_sockptr(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (copy_to_sockptr(optlen, &len, sizeof(int)))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}
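
/*
 * Illustrative sketch: in-kernel users bind a socket to a device with
 * sock_bindtoindex() above rather than via setsockopt(); "ifindex" here
 * is a hypothetical index obtained elsewhere (e.g. from dev->ifindex):
 *
 *	err = sock_bindtoindex(sock->sk, ifindex, true);
 *	if (err)
 *		goto fail;
 */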
bool sk_mc_loop(const struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	/* IPV6_ADDRFORM can change sk->sk_family under us. */
	switch (READ_ONCE(sk->sk_family)) {
	case AF_INET:
		return inet_test_bit(MC_LOOP, sk);
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_test_bit(MC6_LOOP, sk);
#endif
	}
	WARN_ON_ONCE(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

void sock_set_reuseaddr(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_reuse = SK_CAN_REUSE;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_reuseaddr);

void sock_set_reuseport(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_reuseport = true;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_reuseport);

void sock_no_linger(struct sock *sk)
{
	lock_sock(sk);
	WRITE_ONCE(sk->sk_lingertime, 0);
	sock_set_flag(sk, SOCK_LINGER);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_no_linger);

void sock_set_priority(struct sock *sk, u32 priority)
{
	WRITE_ONCE(sk->sk_priority, priority);
}
EXPORT_SYMBOL(sock_set_priority);

void sock_set_sndtimeo(struct sock *sk, s64 secs)
{
	lock_sock(sk);
	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
		WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
	else
		WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_sndtimeo);

static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
{
	sock_valbool_flag(sk, SOCK_RCVTSTAMP, val);
	sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, val && ns);
	if (val) {
		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	}
}

void sock_enable_timestamps(struct sock *sk)
{
	lock_sock(sk);
	__sock_set_timestamps(sk, true, false, true);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_enable_timestamps);

void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
{
	switch (optname) {
	case SO_TIMESTAMP_OLD:
		__sock_set_timestamps(sk, valbool, false, false);
		break;
	case SO_TIMESTAMP_NEW:
		__sock_set_timestamps(sk, valbool, true, false);
		break;
	case SO_TIMESTAMPNS_OLD:
		__sock_set_timestamps(sk, valbool, false, true);
		break;
	case SO_TIMESTAMPNS_NEW:
		__sock_set_timestamps(sk, valbool, true, true);
		break;
	}
}

static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
{
	struct net *net = sock_net(sk);
	struct net_device *dev = NULL;
	bool match = false;
	int *vclock_index;
	int i, num;

	if (sk->sk_bound_dev_if)
		dev = dev_get_by_index(net, sk->sk_bound_dev_if);

	if (!dev) {
		pr_err("%s: sock not bound to a device\n", __func__);
		return -EOPNOTSUPP;
	}

	num = ethtool_get_phc_vclocks(dev, &vclock_index);
	dev_put(dev);

	for (i = 0; i < num; i++) {
		if (*(vclock_index + i) == phc_index) {
			match = true;
			break;
		}
	}

	if (num > 0)
		kfree(vclock_index);

	if (!match)
		return -EINVAL;

	WRITE_ONCE(sk->sk_bind_phc, phc_index);

	return 0;
}

int sock_set_timestamping(struct sock *sk, int optname,
			  struct so_timestamping timestamping)
{
	int val = timestamping.flags;
	int ret;

	if (val & ~SOF_TIMESTAMPING_MASK)
		return -EINVAL;

	if (val & SOF_TIMESTAMPING_OPT_ID_TCP &&
	    !(val & SOF_TIMESTAMPING_OPT_ID))
		return -EINVAL;

	if (val & SOF_TIMESTAMPING_OPT_ID &&
	    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
		if (sk_is_tcp(sk)) {
			if ((1 << sk->sk_state) &
			    (TCPF_CLOSE | TCPF_LISTEN))
				return -EINVAL;
			if (val & SOF_TIMESTAMPING_OPT_ID_TCP)
				atomic_set(&sk->sk_tskey, tcp_sk(sk)->write_seq);
			else
				atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
		} else {
			atomic_set(&sk->sk_tskey, 0);
		}
	}

	if (val & SOF_TIMESTAMPING_OPT_STATS &&
	    !(val & SOF_TIMESTAMPING_OPT_TSONLY))
		return -EINVAL;

	if (val & SOF_TIMESTAMPING_BIND_PHC) {
		ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
		if (ret)
			return ret;
	}

	WRITE_ONCE(sk->sk_tsflags, val);
	sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
	sock_valbool_flag(sk, SOCK_TIMESTAMPING_ANY, !!(val & TSFLAGS_ANY));

	if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
		sock_enable_timestamp(sk,
				      SOCK_TIMESTAMPING_RX_SOFTWARE);
	else
		sock_disable_timestamp(sk,
				       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
	return 0;
}

#if defined(CONFIG_CGROUP_BPF)
void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op)
{
	struct bpf_sock_ops_kern sock_ops;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	sock_ops.op = op;
	sock_ops.is_fullsock = 1;
	sock_ops.sk = sk;
	bpf_skops_init_skb(&sock_ops, skb, 0);
	__cgroup_bpf_run_filter_sock_ops(sk, &sock_ops, CGROUP_SOCK_OPS);
}
#endif

void sock_set_keepalive(struct sock *sk)
{
	lock_sock(sk);
	if (sk->sk_prot->keepalive)
		sk->sk_prot->keepalive(sk, true);
	sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_keepalive);
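
/*
 * Illustrative sketch, assuming a kernel TCP socket created with
 * sock_create_kern(): helpers like sock_set_keepalive() give in-kernel
 * users setsockopt()-equivalent knobs without sockptr_t plumbing:
 *
 *	sock_set_keepalive(sock->sk);
 *	tcp_sock_set_keepidle(sock->sk, 60);
 *
 * (tcp_sock_set_keepidle() is the TCP-specific companion helper.)
 */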
static void __sock_set_rcvbuf(struct sock *sk, int val)
{
	/* Ensure val * 2 fits into an int, to prevent max_t() from treating it
	 * as a negative value.
	 */
	val = min_t(int, val, INT_MAX / 2);
	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;

	/* We double it on the way in to account for "struct sk_buff" etc.
	 * overhead. Applications assume that the SO_RCVBUF setting they make
	 * will allow that much actual data to be received on that socket.
	 *
	 * Applications are unaware that "struct sk_buff" and other overheads
	 * allocate from the receive buffer during socket buffer allocation.
	 *
	 * And after considering the possible alternatives, returning the value
	 * we actually used in getsockopt is the most desirable behavior.
	 */
	WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF));
}

void sock_set_rcvbuf(struct sock *sk, int val)
{
	lock_sock(sk);
	__sock_set_rcvbuf(sk, val);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_rcvbuf);

static void __sock_set_mark(struct sock *sk, u32 val)
{
	if (val != sk->sk_mark) {
		WRITE_ONCE(sk->sk_mark, val);
		sk_dst_reset(sk);
	}
}

void sock_set_mark(struct sock *sk, u32 val)
{
	lock_sock(sk);
	__sock_set_mark(sk, val);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_mark);

static void sock_release_reserved_memory(struct sock *sk, int bytes)
{
	/* Round down bytes to multiple of pages */
	bytes = round_down(bytes, PAGE_SIZE);

	WARN_ON(bytes > sk->sk_reserved_mem);
	WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes);
	sk_mem_reclaim(sk);
}

static int sock_reserve_memory(struct sock *sk, int bytes)
{
	long allocated;
	bool charged;
	int pages;

	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk))
		return -EOPNOTSUPP;

	if (!bytes)
		return 0;

	pages = sk_mem_pages(bytes);

	/* pre-charge to memcg */
	charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
					  GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!charged)
		return -ENOMEM;

	/* pre-charge to forward_alloc */
	sk_memory_allocated_add(sk, pages);
	allocated = sk_memory_allocated(sk);
	/* If the system goes into memory pressure with this
	 * precharge, give up and return error.
	 */
	if (allocated > sk_prot_mem_limits(sk, 1)) {
		sk_memory_allocated_sub(sk, pages);
		mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
		return -ENOMEM;
	}
	sk_forward_alloc_add(sk, pages << PAGE_SHIFT);

	WRITE_ONCE(sk->sk_reserved_mem,
		   sk->sk_reserved_mem + (pages << PAGE_SHIFT));

	return 0;
}
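
/*
 * Illustrative arithmetic for the SO_RESERVE_MEM path above (a sketch,
 * not a contract): a request to reserve 100000 bytes is rounded up to
 * whole pages for the memcg and protocol-memory precharge, then
 * credited to forward_alloc:
 *
 *	pages = sk_mem_pages(100000);	(25 pages with 4 KiB pages)
 *	sk_forward_alloc_add(sk, pages << PAGE_SHIFT);
 *
 * Releasing rounds down to whole pages instead, see
 * sock_release_reserved_memory().
 */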
#ifdef CONFIG_PAGE_POOL

/* This is the number of tokens and frags that the user can SO_DEVMEM_DONTNEED
 * in 1 syscall. The limit exists to limit the amount of memory the kernel
 * allocates to copy these tokens, and to prevent looping over the frags for
 * too long.
 */
#define MAX_DONTNEED_TOKENS 128
#define MAX_DONTNEED_FRAGS 1024

static noinline_for_stack int
sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen)
{
	unsigned int num_tokens, i, j, k, netmem_num = 0;
	struct dmabuf_token *tokens;
	int ret = 0, num_frags = 0;
	netmem_ref netmems[16];

	if (!sk_is_tcp(sk))
		return -EBADF;

	if (optlen % sizeof(*tokens) ||
	    optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS)
		return -EINVAL;

	num_tokens = optlen / sizeof(*tokens);
	tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL);
	if (!tokens)
		return -ENOMEM;

	if (copy_from_sockptr(tokens, optval, optlen)) {
		kvfree(tokens);
		return -EFAULT;
	}

	xa_lock_bh(&sk->sk_user_frags);
	for (i = 0; i < num_tokens; i++) {
		for (j = 0; j < tokens[i].token_count; j++) {
			if (++num_frags > MAX_DONTNEED_FRAGS)
				goto frag_limit_reached;

			netmem_ref netmem = (__force netmem_ref)__xa_erase(
				&sk->sk_user_frags, tokens[i].token_start + j);

			if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem)))
				continue;

			netmems[netmem_num++] = netmem;
			if (netmem_num == ARRAY_SIZE(netmems)) {
				xa_unlock_bh(&sk->sk_user_frags);
				for (k = 0; k < netmem_num; k++)
					WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));
				netmem_num = 0;
				xa_lock_bh(&sk->sk_user_frags);
			}
			ret++;
		}
	}

frag_limit_reached:
	xa_unlock_bh(&sk->sk_user_frags);
	for (k = 0; k < netmem_num; k++)
		WARN_ON_ONCE(!napi_pp_put_page(netmems[k]));

	kvfree(tokens);
	return ret;
}
#endif

void sockopt_lock_sock(struct sock *sk)
{
	/* When current->bpf_ctx is set, the setsockopt is called from
	 * a bpf prog.  bpf has ensured the sk lock has been
	 * acquired before calling setsockopt().
	 */
	if (has_current_bpf_ctx())
		return;

	lock_sock(sk);
}
EXPORT_SYMBOL(sockopt_lock_sock);

void sockopt_release_sock(struct sock *sk)
{
	if (has_current_bpf_ctx())
		return;

	release_sock(sk);
}
EXPORT_SYMBOL(sockopt_release_sock);

bool sockopt_ns_capable(struct user_namespace *ns, int cap)
{
	return has_current_bpf_ctx() || ns_capable(ns, cap);
}
EXPORT_SYMBOL(sockopt_ns_capable);

bool sockopt_capable(int cap)
{
	return has_current_bpf_ctx() || capable(cap);
}
EXPORT_SYMBOL(sockopt_capable);

static int sockopt_validate_clockid(__kernel_clockid_t value)
{
	switch (value) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_TAI:
		return 0;
	}
	return -EINVAL;
}
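
/*
 * Illustrative sketch of how the helpers above are meant to be paired,
 * so one implementation can serve both setsockopt(2) and
 * bpf_setsockopt() (where the BPF program already holds the socket
 * lock):
 *
 *	sockopt_lock_sock(sk);		(no-op under a BPF context)
 *	...modify sk state...
 *	sockopt_release_sock(sk);
 */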
/*
 * This is meant for all protocols to use and covers goings on
 * at the socket level. Everything here is generic.
 */

int sk_setsockopt(struct sock *sk, int level, int optname,
		  sockptr_t optval, unsigned int optlen)
{
	struct so_timestamping timestamping;
	struct socket *sock = sk->sk_socket;
	struct sock_txtime sk_txtime;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_setbindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(val)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	/* handle options which do not require locking the socket. */
	switch (optname) {
	case SO_PRIORITY:
		if (sk_set_prio_allowed(sk, val)) {
			sock_set_priority(sk, val);
			return 0;
		}
		return -EPERM;
	case SO_PASSSEC:
		assign_bit(SOCK_PASSSEC, &sock->flags, valbool);
		return 0;
	case SO_PASSCRED:
		assign_bit(SOCK_PASSCRED, &sock->flags, valbool);
		return 0;
	case SO_PASSPIDFD:
		assign_bit(SOCK_PASSPIDFD, &sock->flags, valbool);
		return 0;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		return -ENOPROTOOPT;
#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		if (val < 0)
			return -EINVAL;
		WRITE_ONCE(sk->sk_ll_usec, val);
		return 0;
	case SO_PREFER_BUSY_POLL:
		if (valbool && !sockopt_capable(CAP_NET_ADMIN))
			return -EPERM;
		WRITE_ONCE(sk->sk_prefer_busy_poll, valbool);
		return 0;
	case SO_BUSY_POLL_BUDGET:
		if (val > READ_ONCE(sk->sk_busy_poll_budget) &&
		    !sockopt_capable(CAP_NET_ADMIN))
			return -EPERM;
		if (val < 0 || val > U16_MAX)
			return -EINVAL;
		WRITE_ONCE(sk->sk_busy_poll_budget, val);
		return 0;
#endif
	case SO_MAX_PACING_RATE:
	{
		unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val;
		unsigned long pacing_rate;

		if (sizeof(ulval) != sizeof(val) &&
		    optlen >= sizeof(ulval) &&
		    copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
			return -EFAULT;
		}
		if (ulval != ~0UL)
			cmpxchg(&sk->sk_pacing_status,
				SK_PACING_NONE,
				SK_PACING_NEEDED);
		/* Pairs with READ_ONCE() from sk_getsockopt() */
		WRITE_ONCE(sk->sk_max_pacing_rate, ulval);
		pacing_rate = READ_ONCE(sk->sk_pacing_rate);
		if (ulval < pacing_rate)
			WRITE_ONCE(sk->sk_pacing_rate, ulval);
		return 0;
	}
	case SO_TXREHASH:
		if (val < -1 || val > 1)
			return -EINVAL;
		if ((u8)val == SOCK_TXREHASH_DEFAULT)
			val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
		/* Paired with READ_ONCE() in tcp_rtx_synack()
		 * and sk_getsockopt().
		 */
		WRITE_ONCE(sk->sk_txrehash, (u8)val);
		return 0;
	case SO_PEEK_OFF:
	{
		int (*set_peek_off)(struct sock *sk, int val);

		set_peek_off = READ_ONCE(sock->ops)->set_peek_off;
		if (set_peek_off)
			ret = set_peek_off(sk, val);
		else
			ret = -EOPNOTSUPP;
		return ret;
	}
#ifdef CONFIG_PAGE_POOL
	case SO_DEVMEM_DONTNEED:
		return sock_devmem_dontneed(sk, optval, optlen);
#endif
	}

	sockopt_lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !sockopt_capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		if (valbool && !sk_is_inet(sk))
			ret = -EOPNOTSUPP;
		else
			sk->sk_reuseport = valbool;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		sk_dst_reset(sk);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this: BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
set_sndbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		WRITE_ONCE(sk->sk_sndbuf,
			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!sockopt_capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_sndbuf;
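
	/* Illustrative sketch (userspace view) of the doubling semantics
	 * above: the kernel stores twice the requested value to cover
	 * struct sk_buff overhead, and getsockopt() reports the doubled
	 * value, e.g.:
	 *
	 *	int val = 65536;
	 *	socklen_t len = sizeof(val);
	 *
	 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
	 *	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len);
	 *	(val is now 131072, assuming sysctl_wmem_max allowed 65536)
	 */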
	case SO_RCVBUF:
		/* Don't error on this: BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		__sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
		break;

	case SO_RCVBUFFORCE:
		if (!sockopt_capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		__sock_set_rcvbuf(sk, max(val, 0));
		break;

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff) {
			sock_reset_flag(sk, SOCK_LINGER);
		} else {
			unsigned long t_sec = ling.l_linger;

			if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ)
				WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT);
			else
				WRITE_ONCE(sk->sk_lingertime, t_sec * HZ);
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		break;

	case SO_TIMESTAMP_OLD:
	case SO_TIMESTAMP_NEW:
	case SO_TIMESTAMPNS_OLD:
	case SO_TIMESTAMPNS_NEW:
		sock_set_timestamp(sk, optname, valbool);
		break;

	case SO_TIMESTAMPING_NEW:
	case SO_TIMESTAMPING_OLD:
		if (optlen == sizeof(timestamping)) {
			if (copy_from_sockptr(&timestamping, optval,
					      sizeof(timestamping))) {
				ret = -EFAULT;
				break;
			}
		} else {
			memset(&timestamping, 0, sizeof(timestamping));
			timestamping.flags = val;
		}
		ret = sock_set_timestamping(sk, optname, timestamping);
		break;

	case SO_RCVLOWAT:
	{
		int (*set_rcvlowat)(struct sock *sk, int val) = NULL;

		if (val < 0)
			val = INT_MAX;
		if (sock)
			set_rcvlowat = READ_ONCE(sock->ops)->set_rcvlowat;
		if (set_rcvlowat)
			ret = set_rcvlowat(sk, val);
		else
			WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
		break;
	}
	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
				       optlen, optname == SO_RCVTIMEO_OLD);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
				       optlen, optname == SO_SNDTIMEO_OLD);
		break;

	case SO_ATTACH_FILTER: {
		struct sock_fprog fprog;

		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
		if (!ret)
			ret = sk_attach_filter(&fprog, sk);
		break;
	}
	case SO_ATTACH_BPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_attach_bpf(ufd, sk);
		}
		break;

	case SO_ATTACH_REUSEPORT_CBPF: {
		struct sock_fprog fprog;

		ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
		if (!ret)
			ret = sk_reuseport_attach_filter(&fprog, sk);
		break;
	}
	case SO_ATTACH_REUSEPORT_EBPF:
		ret = -EINVAL;
		if (optlen == sizeof(u32)) {
			u32 ufd;

			ret = -EFAULT;
			if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
				break;

			ret = sk_reuseport_attach_bpf(ufd, sk);
		}
		break;

	case SO_DETACH_REUSEPORT_BPF:
		ret = reuseport_detach_prog(sk);
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_LOCK_FILTER:
		if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool)
			ret = -EPERM;
		else
			sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool);
		break;

	case SO_MARK:
		if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		__sock_set_mark(sk, val);
		break;
	case SO_RCVMARK:
		sock_valbool_flag(sk, SOCK_RCVMARK, valbool);
		break;

	case SO_RCVPRIORITY:
		sock_valbool_flag(sk, SOCK_RCVPRIORITY, valbool);
		break;

	case SO_RXQ_OVFL:
		sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool);
		break;

	case SO_WIFI_STATUS:
		sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool);
		break;

	case SO_NOFCS:
		sock_valbool_flag(sk, SOCK_NOFCS, valbool);
		break;

	case SO_SELECT_ERR_QUEUE:
		sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
		break;

	case SO_INCOMING_CPU:
		reuseport_update_incoming_cpu(sk, val);
		break;

	case SO_CNX_ADVICE:
		if (val == 1)
			dst_negative_advice(sk);
		break;

	case SO_ZEROCOPY:
		if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) {
			if (!(sk_is_tcp(sk) ||
			      (sk->sk_type == SOCK_DGRAM &&
			       sk->sk_protocol == IPPROTO_UDP)))
				ret = -EOPNOTSUPP;
		} else if (sk->sk_family != PF_RDS) {
			ret = -EOPNOTSUPP;
		}
		if (!ret) {
			if (val < 0 || val > 1)
				ret = -EINVAL;
			else
				sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool);
		}
		break;

	case SO_TXTIME:
		if (optlen != sizeof(struct sock_txtime)) {
			ret = -EINVAL;
			break;
		} else if (copy_from_sockptr(&sk_txtime, optval,
					     sizeof(struct sock_txtime))) {
			ret = -EFAULT;
			break;
		} else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) {
			ret = -EINVAL;
			break;
		}
		/* CLOCK_MONOTONIC is only used by sch_fq, and this packet
		 * scheduler has enough safeguards.
		 */
		if (sk_txtime.clockid != CLOCK_MONOTONIC &&
		    !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		ret = sockopt_validate_clockid(sk_txtime.clockid);
		if (ret)
			break;

		sock_valbool_flag(sk, SOCK_TXTIME, true);
		sk->sk_clockid = sk_txtime.clockid;
		sk->sk_txtime_deadline_mode =
			!!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE);
		sk->sk_txtime_report_errors =
			!!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS);
		break;

	case SO_BINDTOIFINDEX:
		ret = sock_bindtoindex_locked(sk, val);
		break;

	case SO_BUF_LOCK:
		if (val & ~SOCK_BUF_LOCK_MASK) {
			ret = -EINVAL;
			break;
		}
		sk->sk_userlocks = val | (sk->sk_userlocks &
					  ~SOCK_BUF_LOCK_MASK);
		break;

	case SO_RESERVE_MEM:
	{
		int delta;

		if (val < 0) {
			ret = -EINVAL;
			break;
		}

		delta = val - sk->sk_reserved_mem;
		if (delta < 0)
			sock_release_reserved_memory(sk, -delta);
		else
			ret = sock_reserve_memory(sk, delta);
		break;
	}

	default:
		ret = -ENOPROTOOPT;
		break;
	}
	sockopt_release_sock(sk);
	return ret;
}

int sock_setsockopt(struct socket *sock, int level, int optname,
		    sockptr_t optval, unsigned int optlen)
{
	return sk_setsockopt(sock->sk, level, optname,
			     optval, optlen);
}
EXPORT_SYMBOL(sock_setsockopt);

static const struct cred *sk_get_peer_cred(struct sock *sk)
{
	const struct cred *cred;

	spin_lock(&sk->sk_peer_lock);
	cred = get_cred(sk->sk_peer_cred);
	spin_unlock(&sk->sk_peer_lock);

	return cred;
}

static void cred_to_ucred(struct pid *pid, const struct cred *cred,
			  struct ucred *ucred)
{
	ucred->pid = pid_vnr(pid);
	ucred->uid = ucred->gid = -1;
	if (cred) {
		struct user_namespace *current_ns = current_user_ns();

		ucred->uid = from_kuid_munged(current_ns, cred->euid);
		ucred->gid = from_kgid_munged(current_ns, cred->egid);
	}
}

static int groups_to_user(sockptr_t dst, const struct group_info *src)
{
	struct user_namespace *user_ns = current_user_ns();
	int i;

	for (i = 0; i < src->ngroups; i++) {
		gid_t gid = from_kgid_munged(user_ns, src->gid[i]);

		if (copy_to_sockptr_offset(dst, i * sizeof(gid), &gid, sizeof(gid)))
			return -EFAULT;
	}

	return 0;
}
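
/*
 * Illustrative sketch (userspace side): cred_to_ucred() above backs
 * SO_PEERCRED, e.g. on AF_UNIX sockets; the euid/egid are translated
 * with from_kuid_munged()/from_kgid_munged() into the caller's
 * namespace, and a missing peer credential reads back as -1:
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len);
 */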
int sk_getsockopt(struct sock *sk, int level, int optname,
		  sockptr_t optval, sockptr_t optlen)
{
	struct socket *sock = sk->sk_socket;

	union {
		int val;
		u64 val64;
		unsigned long ulval;
		struct linger ling;
		struct old_timeval32 tm32;
		struct __kernel_old_timeval tm;
		struct __kernel_sock_timeval stm;
		struct sock_txtime txtime;
		struct so_timestamping timestamping;
	} v;

	int lv = sizeof(int);
	int len;

	if (copy_from_sockptr(&len, optlen, sizeof(int)))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = READ_ONCE(sk->sk_sndbuf);
		break;

	case SO_RCVBUF:
		v.val = READ_ONCE(sk->sk_rcvbuf);
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = READ_ONCE(sk->sk_priority);
		break;

	case SO_LINGER:
		lv		= sizeof(v.ling);
		v.ling.l_onoff	= sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger	= READ_ONCE(sk->sk_lingertime) / HZ;
		break;

	case SO_BSDCOMPAT:
		break;

	case SO_TIMESTAMP_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_TSTAMP_NEW) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMP_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPNS_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPING_OLD:
	case SO_TIMESTAMPING_NEW:
		lv = sizeof(v.timestamping);
		/* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only
		 * returning the flags when they were set through the same option.
		 * Don't change the behaviour for the old case SO_TIMESTAMPING_OLD.
		 */
		if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) {
			v.timestamping.flags = READ_ONCE(sk->sk_tsflags);
			v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc);
		}
		break;

	case SO_RCVTIMEO_OLD:
	case SO_RCVTIMEO_NEW:
		lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v,
				      SO_RCVTIMEO_OLD == optname);
		break;

	case SO_SNDTIMEO_OLD:
	case SO_SNDTIMEO_NEW:
		lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v,
				      SO_SNDTIMEO_OLD == optname);
		break;

	case SO_RCVLOWAT:
		v.val = READ_ONCE(sk->sk_rcvlowat);
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_PASSPIDFD:
		v.val = !!test_bit(SOCK_PASSPIDFD, &sock->flags);
		break;

	case SO_PEERCRED:
	{
		struct ucred peercred;
		if (len > sizeof(peercred))
			len = sizeof(peercred);

		spin_lock(&sk->sk_peer_lock);
		cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
		spin_unlock(&sk->sk_peer_lock);

		if (copy_to_sockptr(optval, &peercred, len))
			return -EFAULT;
		goto lenout;
	}

	case SO_PEERPIDFD:
	{
		struct pid *peer_pid;
		struct file *pidfd_file = NULL;
		int pidfd;

		if (len > sizeof(pidfd))
			len = sizeof(pidfd);

		spin_lock(&sk->sk_peer_lock);
		peer_pid = get_pid(sk->sk_peer_pid);
		spin_unlock(&sk->sk_peer_lock);

		if (!peer_pid)
			return -ENODATA;

		pidfd = pidfd_prepare(peer_pid, 0, &pidfd_file);
		put_pid(peer_pid);
		if (pidfd < 0)
			return pidfd;

		if (copy_to_sockptr(optval, &pidfd, len) ||
		    copy_to_sockptr(optlen, &len, sizeof(int))) {
			put_unused_fd(pidfd);
			fput(pidfd_file);

			return -EFAULT;
		}

		fd_install(pidfd, pidfd_file);
		return 0;
	}

	case SO_PEERGROUPS:
	{
		const struct cred *cred;
		int ret, n;

		cred = sk_get_peer_cred(sk);
		if (!cred)
			return -ENODATA;

		n = cred->group_info->ngroups;
		if (len < n * sizeof(gid_t)) {
			len = n * sizeof(gid_t);
			put_cred(cred);
			return copy_to_sockptr(optlen, &len, sizeof(int)) ? -EFAULT : -ERANGE;
		}
		len = n * sizeof(gid_t);

		ret = groups_to_user(optval, cred->group_info);
		put_cred(cred);
		if (ret)
			return ret;
		goto lenout;
	}

	case SO_PEERNAME:
	{
		struct sockaddr_storage address;

		lv = READ_ONCE(sock->ops)->getname(sock, (struct sockaddr *)&address, 2);
		if (lv < 0)
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_sockptr(optval, &address, len))
			return -EFAULT;
		goto lenout;
	}
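
	/* Illustrative sketch (userspace side) of the SO_PEERGROUPS length
	 * negotiation above: a call with a too-small buffer fails with
	 * ERANGE and reports the required length via optlen:
	 *
	 *	socklen_t len = 0;
	 *	gid_t *groups;
	 *
	 *	if (getsockopt(fd, SOL_SOCKET, SO_PEERGROUPS, NULL, &len) < 0 &&
	 *	    errno == ERANGE) {
	 *		groups = malloc(len);
	 *		getsockopt(fd, SOL_SOCKET, SO_PEERGROUPS, groups, &len);
	 *	}
	 */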
	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock,
							 optval, optlen, len);

	case SO_MARK:
		v.val = READ_ONCE(sk->sk_mark);
		break;

	case SO_RCVMARK:
		v.val = sock_flag(sk, SOCK_RCVMARK);
		break;

	case SO_RCVPRIORITY:
		v.val = sock_flag(sk, SOCK_RCVPRIORITY);
		break;

	case SO_RXQ_OVFL:
		v.val = sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	case SO_WIFI_STATUS:
		v.val = sock_flag(sk, SOCK_WIFI_STATUS);
		break;

	case SO_PEEK_OFF:
		if (!READ_ONCE(sock->ops)->set_peek_off)
			return -EOPNOTSUPP;

		v.val = READ_ONCE(sk->sk_peek_off);
		break;
	case SO_NOFCS:
		v.val = sock_flag(sk, SOCK_NOFCS);
		break;

	case SO_BINDTODEVICE:
		return sock_getbindtodevice(sk, optval, optlen, len);

	case SO_GET_FILTER:
		len = sk_get_filter(sk, optval, len);
		if (len < 0)
			return len;

		goto lenout;

	case SO_LOCK_FILTER:
		v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
		break;

	case SO_BPF_EXTENSIONS:
		v.val = bpf_tell_extensions();
		break;

	case SO_SELECT_ERR_QUEUE:
		v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
		break;

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_BUSY_POLL:
		v.val = READ_ONCE(sk->sk_ll_usec);
		break;
	case SO_PREFER_BUSY_POLL:
		v.val = READ_ONCE(sk->sk_prefer_busy_poll);
		break;
#endif

	case SO_MAX_PACING_RATE:
		/* The READ_ONCE() pairs with the WRITE_ONCE() in sk_setsockopt() */
		if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) {
			lv = sizeof(v.ulval);
			v.ulval = READ_ONCE(sk->sk_max_pacing_rate);
		} else {
			/* 32bit version */
			v.val = min_t(unsigned long, ~0U,
				      READ_ONCE(sk->sk_max_pacing_rate));
		}
		break;

	case SO_INCOMING_CPU:
		v.val = READ_ONCE(sk->sk_incoming_cpu);
		break;

	case SO_MEMINFO:
	{
		u32 meminfo[SK_MEMINFO_VARS];

		sk_get_meminfo(sk, meminfo);

		len = min_t(unsigned int, len, sizeof(meminfo));
		if (copy_to_sockptr(optval, &meminfo, len))
			return -EFAULT;

		goto lenout;
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	case SO_INCOMING_NAPI_ID:
		v.val = READ_ONCE(sk->sk_napi_id);

		/* aggregate non-NAPI IDs down to 0 */
		if (!napi_id_valid(v.val))
			v.val = 0;

		break;
#endif

	case SO_COOKIE:
		lv = sizeof(u64);
		if (len < lv)
			return -EINVAL;
		v.val64 = sock_gen_cookie(sk);
		break;

	case SO_ZEROCOPY:
		v.val = sock_flag(sk, SOCK_ZEROCOPY);
		break;

	case SO_TXTIME:
		lv = sizeof(v.txtime);
		v.txtime.clockid = sk->sk_clockid;
		v.txtime.flags |= sk->sk_txtime_deadline_mode ?
				  SOF_TXTIME_DEADLINE_MODE : 0;
		v.txtime.flags |= sk->sk_txtime_report_errors ?
				  SOF_TXTIME_REPORT_ERRORS : 0;
		break;

	case SO_BINDTOIFINDEX:
		v.val = READ_ONCE(sk->sk_bound_dev_if);
		break;

	case SO_NETNS_COOKIE:
		lv = sizeof(u64);
		if (len != lv)
			return -EINVAL;
		v.val64 = sock_net(sk)->net_cookie;
		break;

	case SO_BUF_LOCK:
		v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK;
		break;

	case SO_RESERVE_MEM:
		v.val = READ_ONCE(sk->sk_reserved_mem);
		break;

	case SO_TXREHASH:
		/* Paired with WRITE_ONCE() in sk_setsockopt() */
		v.val = READ_ONCE(sk->sk_txrehash);
		break;

	default:
		/* We implement the SO_SNDLOWAT etc to not be settable
		 * (1003.1g 7).
		 */
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_sockptr(optval, &v, len))
		return -EFAULT;
lenout:
	if (copy_to_sockptr(optlen, &len, sizeof(int)))
		return -EFAULT;
	return 0;
}

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sk_owner_clear(sk);

	if (sk->sk_kern_sock)
		sock_lock_init_class_and_name(
			sk,
			af_family_kern_slock_key_strings[sk->sk_family],
			af_family_kern_slock_keys + sk->sk_family,
			af_family_kern_key_strings[sk->sk_family],
			af_family_kern_keys + sk->sk_family);
	else
		sock_lock_init_class_and_name(
			sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}
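
/*
 * With lockdep enabled, the class split done above makes reports
 * self-describing: a TCP socket's spinlock and owner mutex appear as
 * "slock-AF_INET" and "sk_lock-AF_INET", while kernel sockets get the
 * "k-" prefixed classes so user and kernel lock orders are validated
 * independently.
 */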
2166 */ 2167 BUILD_BUG_ON(offsetof(struct sock, sk_tx_queue_mapping) < 2168 offsetof(struct sock, sk_dontcopy_begin) || 2169 offsetof(struct sock, sk_tx_queue_mapping) >= 2170 offsetof(struct sock, sk_dontcopy_end)); 2171 2172 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin)); 2173 2174 unsafe_memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, 2175 prot->obj_size - offsetof(struct sock, sk_dontcopy_end), 2176 /* alloc is larger than struct, see sk_prot_alloc() */); 2177 2178 #ifdef CONFIG_SECURITY_NETWORK 2179 nsk->sk_security = sptr; 2180 security_sk_clone(osk, nsk); 2181 #endif 2182 } 2183 2184 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, 2185 int family) 2186 { 2187 struct sock *sk; 2188 struct kmem_cache *slab; 2189 2190 slab = prot->slab; 2191 if (slab != NULL) { 2192 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); 2193 if (!sk) 2194 return sk; 2195 if (want_init_on_alloc(priority)) 2196 sk_prot_clear_nulls(sk, prot->obj_size); 2197 } else 2198 sk = kmalloc(prot->obj_size, priority); 2199 2200 if (sk != NULL) { 2201 if (security_sk_alloc(sk, family, priority)) 2202 goto out_free; 2203 2204 if (!try_module_get(prot->owner)) 2205 goto out_free_sec; 2206 } 2207 2208 return sk; 2209 2210 out_free_sec: 2211 security_sk_free(sk); 2212 out_free: 2213 if (slab != NULL) 2214 kmem_cache_free(slab, sk); 2215 else 2216 kfree(sk); 2217 return NULL; 2218 } 2219 2220 static void sk_prot_free(struct proto *prot, struct sock *sk) 2221 { 2222 struct kmem_cache *slab; 2223 struct module *owner; 2224 2225 owner = prot->owner; 2226 slab = prot->slab; 2227 2228 cgroup_sk_free(&sk->sk_cgrp_data); 2229 mem_cgroup_sk_free(sk); 2230 security_sk_free(sk); 2231 2232 sk_owner_put(sk); 2233 2234 if (slab != NULL) 2235 kmem_cache_free(slab, sk); 2236 else 2237 kfree(sk); 2238 module_put(owner); 2239 } 2240 2241 /** 2242 * sk_alloc - All socket objects are allocated here 2243 * @net: the applicable net namespace 2244 * @family: protocol family 2245 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 2246 * @prot: struct proto associated with this new sock instance 2247 * @kern: is this to be a kernel socket? 2248 */ 2249 struct sock *sk_alloc(struct net *net, int family, gfp_t priority, 2250 struct proto *prot, int kern) 2251 { 2252 struct sock *sk; 2253 2254 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); 2255 if (sk) { 2256 sk->sk_family = family; 2257 /* 2258 * See comment in struct sock definition to understand 2259 * why we need sk_prot_creator -acme 2260 */ 2261 sk->sk_prot = sk->sk_prot_creator = prot; 2262 sk->sk_kern_sock = kern; 2263 sock_lock_init(sk); 2264 sk->sk_net_refcnt = kern ? 0 : 1; 2265 if (likely(sk->sk_net_refcnt)) { 2266 get_net_track(net, &sk->ns_tracker, priority); 2267 sock_inuse_add(net, 1); 2268 } else { 2269 net_passive_inc(net); 2270 __netns_tracker_alloc(net, &sk->ns_tracker, 2271 false, priority); 2272 } 2273 2274 sock_net_set(sk, net); 2275 refcount_set(&sk->sk_wmem_alloc, 1); 2276 2277 mem_cgroup_sk_alloc(sk); 2278 cgroup_sk_alloc(&sk->sk_cgrp_data); 2279 sock_update_classid(&sk->sk_cgrp_data); 2280 sock_update_netprioidx(&sk->sk_cgrp_data); 2281 sk_tx_queue_clear(sk); 2282 } 2283 2284 return sk; 2285 } 2286 EXPORT_SYMBOL(sk_alloc); 2287 2288 /* Sockets having SOCK_RCU_FREE will call this function after one RCU 2289 * grace period. This is the case for UDP sockets and TCP listeners. 
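 * Sockets without SOCK_RCU_FREE are freed synchronously, unless they
 * are attached to a reuseport group (see sk_destruct() below).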
2290 */ 2291 static void __sk_destruct(struct rcu_head *head) 2292 { 2293 struct sock *sk = container_of(head, struct sock, sk_rcu); 2294 struct net *net = sock_net(sk); 2295 struct sk_filter *filter; 2296 2297 if (sk->sk_destruct) 2298 sk->sk_destruct(sk); 2299 2300 filter = rcu_dereference_check(sk->sk_filter, 2301 refcount_read(&sk->sk_wmem_alloc) == 0); 2302 if (filter) { 2303 sk_filter_uncharge(sk, filter); 2304 RCU_INIT_POINTER(sk->sk_filter, NULL); 2305 } 2306 2307 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); 2308 2309 #ifdef CONFIG_BPF_SYSCALL 2310 bpf_sk_storage_free(sk); 2311 #endif 2312 2313 if (atomic_read(&sk->sk_omem_alloc)) 2314 pr_debug("%s: optmem leakage (%d bytes) detected\n", 2315 __func__, atomic_read(&sk->sk_omem_alloc)); 2316 2317 if (sk->sk_frag.page) { 2318 put_page(sk->sk_frag.page); 2319 sk->sk_frag.page = NULL; 2320 } 2321 2322 /* We do not need to acquire sk->sk_peer_lock, we are the last user. */ 2323 put_cred(sk->sk_peer_cred); 2324 put_pid(sk->sk_peer_pid); 2325 2326 if (likely(sk->sk_net_refcnt)) { 2327 put_net_track(net, &sk->ns_tracker); 2328 } else { 2329 __netns_tracker_free(net, &sk->ns_tracker, false); 2330 net_passive_dec(net); 2331 } 2332 sk_prot_free(sk->sk_prot_creator, sk); 2333 } 2334 2335 void sk_net_refcnt_upgrade(struct sock *sk) 2336 { 2337 struct net *net = sock_net(sk); 2338 2339 WARN_ON_ONCE(sk->sk_net_refcnt); 2340 __netns_tracker_free(net, &sk->ns_tracker, false); 2341 net_passive_dec(net); 2342 sk->sk_net_refcnt = 1; 2343 get_net_track(net, &sk->ns_tracker, GFP_KERNEL); 2344 sock_inuse_add(net, 1); 2345 } 2346 EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade); 2347 2348 void sk_destruct(struct sock *sk) 2349 { 2350 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); 2351 2352 if (rcu_access_pointer(sk->sk_reuseport_cb)) { 2353 reuseport_detach_sock(sk); 2354 use_call_rcu = true; 2355 } 2356 2357 if (use_call_rcu) 2358 call_rcu(&sk->sk_rcu, __sk_destruct); 2359 else 2360 __sk_destruct(&sk->sk_rcu); 2361 } 2362 2363 static void __sk_free(struct sock *sk) 2364 { 2365 if (likely(sk->sk_net_refcnt)) 2366 sock_inuse_add(sock_net(sk), -1); 2367 2368 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk))) 2369 sock_diag_broadcast_destroy(sk); 2370 else 2371 sk_destruct(sk); 2372 } 2373 2374 void sk_free(struct sock *sk) 2375 { 2376 /* 2377 * We subtract one from sk_wmem_alloc and can know if 2378 * some packets are still in some tx queue. 
2379 * If not null, sock_wfree() will call __sk_free(sk) later 2380 */ 2381 if (refcount_dec_and_test(&sk->sk_wmem_alloc)) 2382 __sk_free(sk); 2383 } 2384 EXPORT_SYMBOL(sk_free); 2385 2386 static void sk_init_common(struct sock *sk) 2387 { 2388 skb_queue_head_init(&sk->sk_receive_queue); 2389 skb_queue_head_init(&sk->sk_write_queue); 2390 skb_queue_head_init(&sk->sk_error_queue); 2391 2392 rwlock_init(&sk->sk_callback_lock); 2393 lockdep_set_class_and_name(&sk->sk_receive_queue.lock, 2394 af_rlock_keys + sk->sk_family, 2395 af_family_rlock_key_strings[sk->sk_family]); 2396 lockdep_set_class_and_name(&sk->sk_write_queue.lock, 2397 af_wlock_keys + sk->sk_family, 2398 af_family_wlock_key_strings[sk->sk_family]); 2399 lockdep_set_class_and_name(&sk->sk_error_queue.lock, 2400 af_elock_keys + sk->sk_family, 2401 af_family_elock_key_strings[sk->sk_family]); 2402 if (sk->sk_kern_sock) 2403 lockdep_set_class_and_name(&sk->sk_callback_lock, 2404 af_kern_callback_keys + sk->sk_family, 2405 af_family_kern_clock_key_strings[sk->sk_family]); 2406 else 2407 lockdep_set_class_and_name(&sk->sk_callback_lock, 2408 af_callback_keys + sk->sk_family, 2409 af_family_clock_key_strings[sk->sk_family]); 2410 } 2411 2412 /** 2413 * sk_clone_lock - clone a socket, and lock its clone 2414 * @sk: the socket to clone 2415 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 2416 * 2417 * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) 2418 */ 2419 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) 2420 { 2421 struct proto *prot = READ_ONCE(sk->sk_prot); 2422 struct sk_filter *filter; 2423 bool is_charged = true; 2424 struct sock *newsk; 2425 2426 newsk = sk_prot_alloc(prot, priority, sk->sk_family); 2427 if (!newsk) 2428 goto out; 2429 2430 sock_copy(newsk, sk); 2431 2432 newsk->sk_prot_creator = prot; 2433 2434 /* SANITY */ 2435 if (likely(newsk->sk_net_refcnt)) { 2436 get_net_track(sock_net(newsk), &newsk->ns_tracker, priority); 2437 sock_inuse_add(sock_net(newsk), 1); 2438 } else { 2439 /* Kernel sockets are not elevating the struct net refcount. 2440 * Instead, use a tracker to more easily detect if a layer 2441 * is not properly dismantling its kernel sockets at netns 2442 * destroy time. 
2443 */ 2444 net_passive_inc(sock_net(newsk)); 2445 __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker, 2446 false, priority); 2447 } 2448 sk_node_init(&newsk->sk_node); 2449 sock_lock_init(newsk); 2450 bh_lock_sock(newsk); 2451 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 2452 newsk->sk_backlog.len = 0; 2453 2454 atomic_set(&newsk->sk_rmem_alloc, 0); 2455 2456 /* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */ 2457 refcount_set(&newsk->sk_wmem_alloc, 1); 2458 2459 atomic_set(&newsk->sk_omem_alloc, 0); 2460 sk_init_common(newsk); 2461 2462 newsk->sk_dst_cache = NULL; 2463 newsk->sk_dst_pending_confirm = 0; 2464 newsk->sk_wmem_queued = 0; 2465 newsk->sk_forward_alloc = 0; 2466 newsk->sk_reserved_mem = 0; 2467 atomic_set(&newsk->sk_drops, 0); 2468 newsk->sk_send_head = NULL; 2469 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 2470 atomic_set(&newsk->sk_zckey, 0); 2471 2472 sock_reset_flag(newsk, SOCK_DONE); 2473 2474 /* sk->sk_memcg will be populated at accept() time */ 2475 newsk->sk_memcg = NULL; 2476 2477 cgroup_sk_clone(&newsk->sk_cgrp_data); 2478 2479 rcu_read_lock(); 2480 filter = rcu_dereference(sk->sk_filter); 2481 if (filter != NULL) 2482 /* though it's an empty new sock, the charging may fail 2483 * if sysctl_optmem_max was changed between creation of 2484 * original socket and cloning 2485 */ 2486 is_charged = sk_filter_charge(newsk, filter); 2487 RCU_INIT_POINTER(newsk->sk_filter, filter); 2488 rcu_read_unlock(); 2489 2490 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 2491 /* We need to make sure that we don't uncharge the new 2492 * socket if we couldn't charge it in the first place 2493 * as otherwise we uncharge the parent's filter. 2494 */ 2495 if (!is_charged) 2496 RCU_INIT_POINTER(newsk->sk_filter, NULL); 2497 2498 goto free; 2499 } 2500 2501 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); 2502 2503 if (bpf_sk_storage_clone(sk, newsk)) 2504 goto free; 2505 2506 /* Clear sk_user_data if parent had the pointer tagged 2507 * as not suitable for copying when cloning. 2508 */ 2509 if (sk_user_data_is_nocopy(newsk)) 2510 newsk->sk_user_data = NULL; 2511 2512 newsk->sk_err = 0; 2513 newsk->sk_err_soft = 0; 2514 newsk->sk_priority = 0; 2515 newsk->sk_incoming_cpu = raw_smp_processor_id(); 2516 2517 /* Before updating sk_refcnt, we must commit prior changes to memory 2518 * (Documentation/RCU/rculist_nulls.rst for details) 2519 */ 2520 smp_wmb(); 2521 refcount_set(&newsk->sk_refcnt, 2); 2522 2523 sk_set_socket(newsk, NULL); 2524 sk_tx_queue_clear(newsk); 2525 RCU_INIT_POINTER(newsk->sk_wq, NULL); 2526 2527 if (newsk->sk_prot->sockets_allocated) 2528 sk_sockets_allocated_inc(newsk); 2529 2530 if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP) 2531 net_enable_timestamp(); 2532 out: 2533 return newsk; 2534 free: 2535 /* It is still raw copy of parent, so invalidate 2536 * destructor and make plain sk_free() 2537 */ 2538 newsk->sk_destruct = NULL; 2539 bh_unlock_sock(newsk); 2540 sk_free(newsk); 2541 newsk = NULL; 2542 goto out; 2543 } 2544 EXPORT_SYMBOL_GPL(sk_clone_lock); 2545 2546 static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst) 2547 { 2548 bool is_ipv6 = false; 2549 u32 max_size; 2550 2551 #if IS_ENABLED(CONFIG_IPV6) 2552 is_ipv6 = (sk->sk_family == AF_INET6 && 2553 !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)); 2554 #endif 2555 /* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */ 2556 max_size = is_ipv6 ? 
READ_ONCE(dst->dev->gso_max_size) : 2557 READ_ONCE(dst->dev->gso_ipv4_max_size); 2558 if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk)) 2559 max_size = GSO_LEGACY_MAX_SIZE; 2560 2561 return max_size - (MAX_TCP_HEADER + 1); 2562 } 2563 2564 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) 2565 { 2566 u32 max_segs = 1; 2567 2568 sk->sk_route_caps = dst->dev->features; 2569 if (sk_is_tcp(sk)) { 2570 struct inet_connection_sock *icsk = inet_csk(sk); 2571 2572 sk->sk_route_caps |= NETIF_F_GSO; 2573 icsk->icsk_ack.dst_quick_ack = dst_metric(dst, RTAX_QUICKACK); 2574 } 2575 if (sk->sk_route_caps & NETIF_F_GSO) 2576 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; 2577 if (unlikely(sk->sk_gso_disabled)) 2578 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 2579 if (sk_can_gso(sk)) { 2580 if (dst->header_len && !xfrm_dst_offload_ok(dst)) { 2581 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 2582 } else { 2583 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 2584 sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst); 2585 /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */ 2586 max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1); 2587 } 2588 } 2589 sk->sk_gso_max_segs = max_segs; 2590 sk_dst_set(sk, dst); 2591 } 2592 EXPORT_SYMBOL_GPL(sk_setup_caps); 2593 2594 /* 2595 * Simple resource managers for sockets. 2596 */ 2597 2598 2599 /* 2600 * Write buffer destructor automatically called from kfree_skb. 2601 */ 2602 void sock_wfree(struct sk_buff *skb) 2603 { 2604 struct sock *sk = skb->sk; 2605 unsigned int len = skb->truesize; 2606 bool free; 2607 2608 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { 2609 if (sock_flag(sk, SOCK_RCU_FREE) && 2610 sk->sk_write_space == sock_def_write_space) { 2611 rcu_read_lock(); 2612 free = refcount_sub_and_test(len, &sk->sk_wmem_alloc); 2613 sock_def_write_space_wfree(sk); 2614 rcu_read_unlock(); 2615 if (unlikely(free)) 2616 __sk_free(sk); 2617 return; 2618 } 2619 2620 /* 2621 * Keep a reference on sk_wmem_alloc, this will be released 2622 * after sk_write_space() call 2623 */ 2624 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc)); 2625 sk->sk_write_space(sk); 2626 len = 1; 2627 } 2628 /* 2629 * if sk_wmem_alloc reaches 0, we must finish what sk_free() 2630 * could not do because of in-flight packets 2631 */ 2632 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc)) 2633 __sk_free(sk); 2634 } 2635 EXPORT_SYMBOL(sock_wfree); 2636 2637 /* This variant of sock_wfree() is used by TCP, 2638 * since it sets SOCK_USE_WRITE_QUEUE. 2639 */ 2640 void __sock_wfree(struct sk_buff *skb) 2641 { 2642 struct sock *sk = skb->sk; 2643 2644 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)) 2645 __sk_free(sk); 2646 } 2647 2648 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) 2649 { 2650 skb_orphan(skb); 2651 #ifdef CONFIG_INET 2652 if (unlikely(!sk_fullsock(sk))) 2653 return skb_set_owner_edemux(skb, sk); 2654 #endif 2655 skb->sk = sk; 2656 skb->destructor = sock_wfree; 2657 skb_set_hash_from_sk(skb, sk); 2658 /* 2659 * We used to take a refcount on sk, but following operation 2660 * is enough to guarantee sk_free() won't free this sock until 2661 * all in-flight packets are completed 2662 */ 2663 refcount_add(skb->truesize, &sk->sk_wmem_alloc); 2664 } 2665 EXPORT_SYMBOL(skb_set_owner_w); 2666 2667 static bool can_skb_orphan_partial(const struct sk_buff *skb) 2668 { 2669 /* Drivers depend on in-order delivery for crypto offload, 2670 * partial orphan breaks out-of-order-OK logic. 
2671 */ 2672 if (skb_is_decrypted(skb)) 2673 return false; 2674 2675 return (skb->destructor == sock_wfree || 2676 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree)); 2677 } 2678 2679 /* This helper is used by netem, as it can hold packets in its 2680 * delay queue. We want to allow the owner socket to send more 2681 * packets, as if they were already TX completed by a typical driver. 2682 * But we also want to keep skb->sk set because some packet schedulers 2683 * rely on it (sch_fq for example). 2684 */ 2685 void skb_orphan_partial(struct sk_buff *skb) 2686 { 2687 if (skb_is_tcp_pure_ack(skb)) 2688 return; 2689 2690 if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk)) 2691 return; 2692 2693 skb_orphan(skb); 2694 } 2695 EXPORT_SYMBOL(skb_orphan_partial); 2696 2697 /* 2698 * Read buffer destructor automatically called from kfree_skb. 2699 */ 2700 void sock_rfree(struct sk_buff *skb) 2701 { 2702 struct sock *sk = skb->sk; 2703 unsigned int len = skb->truesize; 2704 2705 atomic_sub(len, &sk->sk_rmem_alloc); 2706 sk_mem_uncharge(sk, len); 2707 } 2708 EXPORT_SYMBOL(sock_rfree); 2709 2710 /* 2711 * Buffer destructor for skbs that are not used directly in read or write 2712 * path, e.g. for error handler skbs. Automatically called from kfree_skb. 2713 */ 2714 void sock_efree(struct sk_buff *skb) 2715 { 2716 sock_put(skb->sk); 2717 } 2718 EXPORT_SYMBOL(sock_efree); 2719 2720 /* Buffer destructor for prefetch/receive path where reference count may 2721 * not be held, e.g. for listen sockets. 2722 */ 2723 #ifdef CONFIG_INET 2724 void sock_pfree(struct sk_buff *skb) 2725 { 2726 struct sock *sk = skb->sk; 2727 2728 if (!sk_is_refcounted(sk)) 2729 return; 2730 2731 if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) { 2732 inet_reqsk(sk)->rsk_listener = NULL; 2733 reqsk_free(inet_reqsk(sk)); 2734 return; 2735 } 2736 2737 sock_gen_put(sk); 2738 } 2739 EXPORT_SYMBOL(sock_pfree); 2740 #endif /* CONFIG_INET */ 2741 2742 kuid_t sock_i_uid(struct sock *sk) 2743 { 2744 kuid_t uid; 2745 2746 read_lock_bh(&sk->sk_callback_lock); 2747 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; 2748 read_unlock_bh(&sk->sk_callback_lock); 2749 return uid; 2750 } 2751 EXPORT_SYMBOL(sock_i_uid); 2752 2753 unsigned long __sock_i_ino(struct sock *sk) 2754 { 2755 unsigned long ino; 2756 2757 read_lock(&sk->sk_callback_lock); 2758 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; 2759 read_unlock(&sk->sk_callback_lock); 2760 return ino; 2761 } 2762 EXPORT_SYMBOL(__sock_i_ino); 2763 2764 unsigned long sock_i_ino(struct sock *sk) 2765 { 2766 unsigned long ino; 2767 2768 local_bh_disable(); 2769 ino = __sock_i_ino(sk); 2770 local_bh_enable(); 2771 return ino; 2772 } 2773 EXPORT_SYMBOL(sock_i_ino); 2774 2775 /* 2776 * Allocate a skb from the socket's send buffer. 
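 * With @force the sndbuf limit is ignored: the skb is allocated even
 * if sk_wmem_alloc has already reached sk_sndbuf.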
2777 */ 2778 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 2779 gfp_t priority) 2780 { 2781 if (force || 2782 refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) { 2783 struct sk_buff *skb = alloc_skb(size, priority); 2784 2785 if (skb) { 2786 skb_set_owner_w(skb, sk); 2787 return skb; 2788 } 2789 } 2790 return NULL; 2791 } 2792 EXPORT_SYMBOL(sock_wmalloc); 2793 2794 static void sock_ofree(struct sk_buff *skb) 2795 { 2796 struct sock *sk = skb->sk; 2797 2798 atomic_sub(skb->truesize, &sk->sk_omem_alloc); 2799 } 2800 2801 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, 2802 gfp_t priority) 2803 { 2804 struct sk_buff *skb; 2805 2806 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */ 2807 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) > 2808 READ_ONCE(sock_net(sk)->core.sysctl_optmem_max)) 2809 return NULL; 2810 2811 skb = alloc_skb(size, priority); 2812 if (!skb) 2813 return NULL; 2814 2815 atomic_add(skb->truesize, &sk->sk_omem_alloc); 2816 skb->sk = sk; 2817 skb->destructor = sock_ofree; 2818 return skb; 2819 } 2820 2821 /* 2822 * Allocate a memory block from the socket's option memory buffer. 2823 */ 2824 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 2825 { 2826 int optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max); 2827 2828 if ((unsigned int)size <= optmem_max && 2829 atomic_read(&sk->sk_omem_alloc) + size < optmem_max) { 2830 void *mem; 2831 /* First do the add, to avoid the race if kmalloc 2832 * might sleep. 2833 */ 2834 atomic_add(size, &sk->sk_omem_alloc); 2835 mem = kmalloc(size, priority); 2836 if (mem) 2837 return mem; 2838 atomic_sub(size, &sk->sk_omem_alloc); 2839 } 2840 return NULL; 2841 } 2842 EXPORT_SYMBOL(sock_kmalloc); 2843 2844 /* 2845 * Duplicate the input "src" memory block using the socket's 2846 * option memory buffer. 2847 */ 2848 void *sock_kmemdup(struct sock *sk, const void *src, 2849 int size, gfp_t priority) 2850 { 2851 void *mem; 2852 2853 mem = sock_kmalloc(sk, size, priority); 2854 if (mem) 2855 memcpy(mem, src, size); 2856 return mem; 2857 } 2858 EXPORT_SYMBOL(sock_kmemdup); 2859 2860 /* Free an option memory block. Note, we actually want the inline 2861 * here as this allows gcc to detect the nullify and fold away the 2862 * condition entirely. 2863 */ 2864 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, 2865 const bool nullify) 2866 { 2867 if (WARN_ON_ONCE(!mem)) 2868 return; 2869 if (nullify) 2870 kfree_sensitive(mem); 2871 else 2872 kfree(mem); 2873 atomic_sub(size, &sk->sk_omem_alloc); 2874 } 2875 2876 void sock_kfree_s(struct sock *sk, void *mem, int size) 2877 { 2878 __sock_kfree_s(sk, mem, size, false); 2879 } 2880 EXPORT_SYMBOL(sock_kfree_s); 2881 2882 void sock_kzfree_s(struct sock *sk, void *mem, int size) 2883 { 2884 __sock_kfree_s(sk, mem, size, true); 2885 } 2886 EXPORT_SYMBOL(sock_kzfree_s); 2887 2888 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock. 2889 I think, these locks should be removed for datagram sockets. 
2890 */ 2891 static long sock_wait_for_wmem(struct sock *sk, long timeo) 2892 { 2893 DEFINE_WAIT(wait); 2894 2895 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2896 for (;;) { 2897 if (!timeo) 2898 break; 2899 if (signal_pending(current)) 2900 break; 2901 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 2902 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2903 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) 2904 break; 2905 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) 2906 break; 2907 if (READ_ONCE(sk->sk_err)) 2908 break; 2909 timeo = schedule_timeout(timeo); 2910 } 2911 finish_wait(sk_sleep(sk), &wait); 2912 return timeo; 2913 } 2914 2915 2916 /* 2917 * Generic send/receive buffer handlers 2918 */ 2919 2920 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, 2921 unsigned long data_len, int noblock, 2922 int *errcode, int max_page_order) 2923 { 2924 struct sk_buff *skb; 2925 long timeo; 2926 int err; 2927 2928 timeo = sock_sndtimeo(sk, noblock); 2929 for (;;) { 2930 err = sock_error(sk); 2931 if (err != 0) 2932 goto failure; 2933 2934 err = -EPIPE; 2935 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) 2936 goto failure; 2937 2938 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf)) 2939 break; 2940 2941 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2942 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 2943 err = -EAGAIN; 2944 if (!timeo) 2945 goto failure; 2946 if (signal_pending(current)) 2947 goto interrupted; 2948 timeo = sock_wait_for_wmem(sk, timeo); 2949 } 2950 skb = alloc_skb_with_frags(header_len, data_len, max_page_order, 2951 errcode, sk->sk_allocation); 2952 if (skb) 2953 skb_set_owner_w(skb, sk); 2954 return skb; 2955 2956 interrupted: 2957 err = sock_intr_errno(timeo); 2958 failure: 2959 *errcode = err; 2960 return NULL; 2961 } 2962 EXPORT_SYMBOL(sock_alloc_send_pskb); 2963 2964 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, 2965 struct sockcm_cookie *sockc) 2966 { 2967 u32 tsflags; 2968 2969 BUILD_BUG_ON(SOF_TIMESTAMPING_LAST == (1 << 31)); 2970 2971 switch (cmsg->cmsg_type) { 2972 case SO_MARK: 2973 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && 2974 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 2975 return -EPERM; 2976 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 2977 return -EINVAL; 2978 sockc->mark = *(u32 *)CMSG_DATA(cmsg); 2979 break; 2980 case SO_TIMESTAMPING_OLD: 2981 case SO_TIMESTAMPING_NEW: 2982 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 2983 return -EINVAL; 2984 2985 tsflags = *(u32 *)CMSG_DATA(cmsg); 2986 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK) 2987 return -EINVAL; 2988 2989 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; 2990 sockc->tsflags |= tsflags; 2991 break; 2992 case SCM_TXTIME: 2993 if (!sock_flag(sk, SOCK_TXTIME)) 2994 return -EINVAL; 2995 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64))) 2996 return -EINVAL; 2997 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg)); 2998 break; 2999 case SCM_TS_OPT_ID: 3000 if (sk_is_tcp(sk)) 3001 return -EINVAL; 3002 tsflags = READ_ONCE(sk->sk_tsflags); 3003 if (!(tsflags & SOF_TIMESTAMPING_OPT_ID)) 3004 return -EINVAL; 3005 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3006 return -EINVAL; 3007 sockc->ts_opt_id = *(u32 *)CMSG_DATA(cmsg); 3008 sockc->tsflags |= SOCKCM_FLAG_TS_OPT_ID; 3009 break; 3010 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. 
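 * Accept and silently ignore them here, so that a sendmsg() mixing
 * them with other SOL_SOCKET cmsgs does not fail with -EINVAL.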
*/ 3011 case SCM_RIGHTS: 3012 case SCM_CREDENTIALS: 3013 break; 3014 case SO_PRIORITY: 3015 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3016 return -EINVAL; 3017 if (!sk_set_prio_allowed(sk, *(u32 *)CMSG_DATA(cmsg))) 3018 return -EPERM; 3019 sockc->priority = *(u32 *)CMSG_DATA(cmsg); 3020 break; 3021 default: 3022 return -EINVAL; 3023 } 3024 return 0; 3025 } 3026 EXPORT_SYMBOL(__sock_cmsg_send); 3027 3028 int sock_cmsg_send(struct sock *sk, struct msghdr *msg, 3029 struct sockcm_cookie *sockc) 3030 { 3031 struct cmsghdr *cmsg; 3032 int ret; 3033 3034 for_each_cmsghdr(cmsg, msg) { 3035 if (!CMSG_OK(msg, cmsg)) 3036 return -EINVAL; 3037 if (cmsg->cmsg_level != SOL_SOCKET) 3038 continue; 3039 ret = __sock_cmsg_send(sk, cmsg, sockc); 3040 if (ret) 3041 return ret; 3042 } 3043 return 0; 3044 } 3045 EXPORT_SYMBOL(sock_cmsg_send); 3046 3047 static void sk_enter_memory_pressure(struct sock *sk) 3048 { 3049 if (!sk->sk_prot->enter_memory_pressure) 3050 return; 3051 3052 sk->sk_prot->enter_memory_pressure(sk); 3053 } 3054 3055 static void sk_leave_memory_pressure(struct sock *sk) 3056 { 3057 if (sk->sk_prot->leave_memory_pressure) { 3058 INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure, 3059 tcp_leave_memory_pressure, sk); 3060 } else { 3061 unsigned long *memory_pressure = sk->sk_prot->memory_pressure; 3062 3063 if (memory_pressure && READ_ONCE(*memory_pressure)) 3064 WRITE_ONCE(*memory_pressure, 0); 3065 } 3066 } 3067 3068 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); 3069 3070 /** 3071 * skb_page_frag_refill - check that a page_frag contains enough room 3072 * @sz: minimum size of the fragment we want to get 3073 * @pfrag: pointer to page_frag 3074 * @gfp: priority for memory allocation 3075 * 3076 * Note: While this allocator tries to use high order pages, there is 3077 * no guarantee that allocations succeed. Therefore, @sz MUST be 3078 * less than or equal to PAGE_SIZE.
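 *
 * A minimal usage sketch (illustrative only, not a real caller):
 * refill, copy at the current offset, then advance the offset:
 *
 *	if (!skb_page_frag_refill(copy, pfrag, sk->sk_allocation))
 *		return -ENOMEM;
 *	memcpy(page_address(pfrag->page) + pfrag->offset, data, copy);
 *	pfrag->offset += copy;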
3079 */ 3080 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) 3081 { 3082 if (pfrag->page) { 3083 if (page_ref_count(pfrag->page) == 1) { 3084 pfrag->offset = 0; 3085 return true; 3086 } 3087 if (pfrag->offset + sz <= pfrag->size) 3088 return true; 3089 put_page(pfrag->page); 3090 } 3091 3092 pfrag->offset = 0; 3093 if (SKB_FRAG_PAGE_ORDER && 3094 !static_branch_unlikely(&net_high_order_alloc_disable_key)) { 3095 /* Avoid direct reclaim but allow kswapd to wake */ 3096 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) | 3097 __GFP_COMP | __GFP_NOWARN | 3098 __GFP_NORETRY, 3099 SKB_FRAG_PAGE_ORDER); 3100 if (likely(pfrag->page)) { 3101 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER; 3102 return true; 3103 } 3104 } 3105 pfrag->page = alloc_page(gfp); 3106 if (likely(pfrag->page)) { 3107 pfrag->size = PAGE_SIZE; 3108 return true; 3109 } 3110 return false; 3111 } 3112 EXPORT_SYMBOL(skb_page_frag_refill); 3113 3114 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) 3115 { 3116 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) 3117 return true; 3118 3119 sk_enter_memory_pressure(sk); 3120 sk_stream_moderate_sndbuf(sk); 3121 return false; 3122 } 3123 EXPORT_SYMBOL(sk_page_frag_refill); 3124 3125 void __lock_sock(struct sock *sk) 3126 __releases(&sk->sk_lock.slock) 3127 __acquires(&sk->sk_lock.slock) 3128 { 3129 DEFINE_WAIT(wait); 3130 3131 for (;;) { 3132 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, 3133 TASK_UNINTERRUPTIBLE); 3134 spin_unlock_bh(&sk->sk_lock.slock); 3135 schedule(); 3136 spin_lock_bh(&sk->sk_lock.slock); 3137 if (!sock_owned_by_user(sk)) 3138 break; 3139 } 3140 finish_wait(&sk->sk_lock.wq, &wait); 3141 } 3142 3143 void __release_sock(struct sock *sk) 3144 __releases(&sk->sk_lock.slock) 3145 __acquires(&sk->sk_lock.slock) 3146 { 3147 struct sk_buff *skb, *next; 3148 3149 while ((skb = sk->sk_backlog.head) != NULL) { 3150 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; 3151 3152 spin_unlock_bh(&sk->sk_lock.slock); 3153 3154 do { 3155 next = skb->next; 3156 prefetch(next); 3157 DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb)); 3158 skb_mark_not_on_list(skb); 3159 sk_backlog_rcv(sk, skb); 3160 3161 cond_resched(); 3162 3163 skb = next; 3164 } while (skb != NULL); 3165 3166 spin_lock_bh(&sk->sk_lock.slock); 3167 } 3168 3169 /* 3170 * Doing the zeroing here guarantees we cannot loop forever 3171 * while a wild producer attempts to flood us. 3172 */ 3173 sk->sk_backlog.len = 0; 3174 } 3175 3176 void __sk_flush_backlog(struct sock *sk) 3177 { 3178 spin_lock_bh(&sk->sk_lock.slock); 3179 __release_sock(sk); 3180 3181 if (sk->sk_prot->release_cb) 3182 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb, 3183 tcp_release_cb, sk); 3184 3185 spin_unlock_bh(&sk->sk_lock.slock); 3186 } 3187 EXPORT_SYMBOL_GPL(__sk_flush_backlog); 3188 3189 /** 3190 * sk_wait_data - wait for data to arrive at sk_receive_queue 3191 * @sk: sock to wait on 3192 * @timeo: for how long 3193 * @skb: last skb seen on sk_receive_queue 3194 * 3195 * Now socket state including sk->sk_err is changed only under lock, 3196 * hence we may omit checks after joining wait queue. 3197 * We check receive queue before schedule() only as an optimization; 3198 * it is very likely that release_sock() added new data.
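 *
 * Typical use in a blocking receive path, under lock_sock() (an
 * illustrative sketch, not taken from a real protocol):
 *
 *	while (!(skb = skb_peek_tail(&sk->sk_receive_queue))) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo, NULL);
 *	}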
3199 */ 3200 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) 3201 { 3202 DEFINE_WAIT_FUNC(wait, woken_wake_function); 3203 int rc; 3204 3205 add_wait_queue(sk_sleep(sk), &wait); 3206 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 3207 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait); 3208 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 3209 remove_wait_queue(sk_sleep(sk), &wait); 3210 return rc; 3211 } 3212 EXPORT_SYMBOL(sk_wait_data); 3213 3214 /** 3215 * __sk_mem_raise_allocated - increase memory_allocated 3216 * @sk: socket 3217 * @size: memory size to allocate 3218 * @amt: pages to allocate 3219 * @kind: allocation type 3220 * 3221 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc. 3222 * 3223 * Unlike the globally shared limits among the sockets under the same protocol, 3224 * consuming the budget of a memcg won't have a direct effect on other ones. 3225 * So be optimistic about memcg's tolerance, and leave the callers to decide 3226 * whether or not to raise allocated through sk_under_memory_pressure() or 3227 * its variants. 3228 */ 3229 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) 3230 { 3231 struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL; 3232 struct proto *prot = sk->sk_prot; 3233 bool charged = false; 3234 long allocated; 3235 3236 sk_memory_allocated_add(sk, amt); 3237 allocated = sk_memory_allocated(sk); 3238 3239 if (memcg) { 3240 if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge())) 3241 goto suppress_allocation; 3242 charged = true; 3243 } 3244 3245 /* Under limit. */ 3246 if (allocated <= sk_prot_mem_limits(sk, 0)) { 3247 sk_leave_memory_pressure(sk); 3248 return 1; 3249 } 3250 3251 /* Under pressure. */ 3252 if (allocated > sk_prot_mem_limits(sk, 1)) 3253 sk_enter_memory_pressure(sk); 3254 3255 /* Over hard limit. */ 3256 if (allocated > sk_prot_mem_limits(sk, 2)) 3257 goto suppress_allocation; 3258 3259 /* Guarantee minimum buffer size under pressure (either global 3260 * or memcg) to make sure features described in RFC 7323 (TCP 3261 * Extensions for High Performance) work properly. 3262 * 3263 * This rule does NOT apply when the usage exceeds the global or 3264 * the memcg hard limit, or else a DoS attack could take place by 3265 * spawning lots of sockets whose usage is under the minimum buffer size. 3266 */ 3267 if (kind == SK_MEM_RECV) { 3268 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot)) 3269 return 1; 3270 3271 } else { /* SK_MEM_SEND */ 3272 int wmem0 = sk_get_wmem0(sk, prot); 3273 3274 if (sk->sk_type == SOCK_STREAM) { 3275 if (sk->sk_wmem_queued < wmem0) 3276 return 1; 3277 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) { 3278 return 1; 3279 } 3280 } 3281 3282 if (sk_has_memory_pressure(sk)) { 3283 u64 alloc; 3284 3285 /* The following 'average' heuristic is within the 3286 * scope of global accounting, so it only makes 3287 * sense for global memory pressure. 3288 */ 3289 if (!sk_under_global_memory_pressure(sk)) 3290 return 1; 3291 3292 /* Try to be fair among all the sockets under global 3293 * pressure by allowing the ones whose usage is below 3294 * average to raise it.
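 * In other words, raising is allowed as long as
 *
 *	hard_limit > nr_sockets * pages(rmem_alloc + wmem_queued +
 *					forward_alloc)
 *
 * holds, i.e. while this socket consumes no more than an average
 * socket's share of the hard limit.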
3295 */ 3296 alloc = sk_sockets_allocated_read_positive(sk); 3297 if (sk_prot_mem_limits(sk, 2) > alloc * 3298 sk_mem_pages(sk->sk_wmem_queued + 3299 atomic_read(&sk->sk_rmem_alloc) + 3300 sk->sk_forward_alloc)) 3301 return 1; 3302 } 3303 3304 suppress_allocation: 3305 3306 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { 3307 sk_stream_moderate_sndbuf(sk); 3308 3309 /* Fail only if socket is _under_ its sndbuf. 3310 * In this case we cannot block, so we have to fail. 3311 */ 3312 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) { 3313 /* Force charge with __GFP_NOFAIL */ 3314 if (memcg && !charged) { 3315 mem_cgroup_charge_skmem(memcg, amt, 3316 gfp_memcg_charge() | __GFP_NOFAIL); 3317 } 3318 return 1; 3319 } 3320 } 3321 3322 if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged)) 3323 trace_sock_exceed_buf_limit(sk, prot, allocated, kind); 3324 3325 sk_memory_allocated_sub(sk, amt); 3326 3327 if (charged) 3328 mem_cgroup_uncharge_skmem(memcg, amt); 3329 3330 return 0; 3331 } 3332 3333 /** 3334 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated 3335 * @sk: socket 3336 * @size: memory size to allocate 3337 * @kind: allocation type 3338 * 3339 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means 3340 * rmem allocation. This function assumes that protocols which have 3341 * memory_pressure use sk_wmem_queued as write buffer accounting. 3342 */ 3343 int __sk_mem_schedule(struct sock *sk, int size, int kind) 3344 { 3345 int ret, amt = sk_mem_pages(size); 3346 3347 sk_forward_alloc_add(sk, amt << PAGE_SHIFT); 3348 ret = __sk_mem_raise_allocated(sk, size, amt, kind); 3349 if (!ret) 3350 sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT)); 3351 return ret; 3352 } 3353 EXPORT_SYMBOL(__sk_mem_schedule); 3354 3355 /** 3356 * __sk_mem_reduce_allocated - reclaim memory_allocated 3357 * @sk: socket 3358 * @amount: number of quanta 3359 * 3360 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc 3361 */ 3362 void __sk_mem_reduce_allocated(struct sock *sk, int amount) 3363 { 3364 sk_memory_allocated_sub(sk, amount); 3365 3366 if (mem_cgroup_sockets_enabled && sk->sk_memcg) 3367 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); 3368 3369 if (sk_under_global_memory_pressure(sk) && 3370 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) 3371 sk_leave_memory_pressure(sk); 3372 } 3373 3374 /** 3375 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated 3376 * @sk: socket 3377 * @amount: number of bytes (rounded down to a PAGE_SIZE multiple) 3378 */ 3379 void __sk_mem_reclaim(struct sock *sk, int amount) 3380 { 3381 amount >>= PAGE_SHIFT; 3382 sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT)); 3383 __sk_mem_reduce_allocated(sk, amount); 3384 } 3385 EXPORT_SYMBOL(__sk_mem_reclaim); 3386 3387 int sk_set_peek_off(struct sock *sk, int val) 3388 { 3389 WRITE_ONCE(sk->sk_peek_off, val); 3390 return 0; 3391 } 3392 EXPORT_SYMBOL_GPL(sk_set_peek_off); 3393 3394 /* 3395 * Set of default routines for initialising struct proto_ops when 3396 * the protocol does not support a particular function. In certain 3397 * cases where it makes no sense for a protocol to have a "do nothing" 3398 * function, some default processing is provided.
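 *
 * A protocol lacking, say, listen() and accept() would typically plug
 * the stubs below into its proto_ops (an illustrative sketch; PF_FOO
 * and foo_proto_ops are made-up names):
 *
 *	static const struct proto_ops foo_proto_ops = {
 *		.family	= PF_FOO,
 *		.listen	= sock_no_listen,
 *		.accept	= sock_no_accept,
 *		.mmap	= sock_no_mmap,
 *		...
 *	};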
3399 */ 3400 3401 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) 3402 { 3403 return -EOPNOTSUPP; 3404 } 3405 EXPORT_SYMBOL(sock_no_bind); 3406 3407 int sock_no_connect(struct socket *sock, struct sockaddr *saddr, 3408 int len, int flags) 3409 { 3410 return -EOPNOTSUPP; 3411 } 3412 EXPORT_SYMBOL(sock_no_connect); 3413 3414 int sock_no_socketpair(struct socket *sock1, struct socket *sock2) 3415 { 3416 return -EOPNOTSUPP; 3417 } 3418 EXPORT_SYMBOL(sock_no_socketpair); 3419 3420 int sock_no_accept(struct socket *sock, struct socket *newsock, 3421 struct proto_accept_arg *arg) 3422 { 3423 return -EOPNOTSUPP; 3424 } 3425 EXPORT_SYMBOL(sock_no_accept); 3426 3427 int sock_no_getname(struct socket *sock, struct sockaddr *saddr, 3428 int peer) 3429 { 3430 return -EOPNOTSUPP; 3431 } 3432 EXPORT_SYMBOL(sock_no_getname); 3433 3434 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3435 { 3436 return -EOPNOTSUPP; 3437 } 3438 EXPORT_SYMBOL(sock_no_ioctl); 3439 3440 int sock_no_listen(struct socket *sock, int backlog) 3441 { 3442 return -EOPNOTSUPP; 3443 } 3444 EXPORT_SYMBOL(sock_no_listen); 3445 3446 int sock_no_shutdown(struct socket *sock, int how) 3447 { 3448 return -EOPNOTSUPP; 3449 } 3450 EXPORT_SYMBOL(sock_no_shutdown); 3451 3452 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len) 3453 { 3454 return -EOPNOTSUPP; 3455 } 3456 EXPORT_SYMBOL(sock_no_sendmsg); 3457 3458 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len) 3459 { 3460 return -EOPNOTSUPP; 3461 } 3462 EXPORT_SYMBOL(sock_no_sendmsg_locked); 3463 3464 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len, 3465 int flags) 3466 { 3467 return -EOPNOTSUPP; 3468 } 3469 EXPORT_SYMBOL(sock_no_recvmsg); 3470 3471 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) 3472 { 3473 /* Mirror missing mmap method error code */ 3474 return -ENODEV; 3475 } 3476 EXPORT_SYMBOL(sock_no_mmap); 3477 3478 /* 3479 * When a file is received (via SCM_RIGHTS, etc), we must bump the 3480 * various sock-based usage counts. 
3481 */ 3482 void __receive_sock(struct file *file) 3483 { 3484 struct socket *sock; 3485 3486 sock = sock_from_file(file); 3487 if (sock) { 3488 sock_update_netprioidx(&sock->sk->sk_cgrp_data); 3489 sock_update_classid(&sock->sk->sk_cgrp_data); 3490 } 3491 } 3492 3493 /* 3494 * Default Socket Callbacks 3495 */ 3496 3497 static void sock_def_wakeup(struct sock *sk) 3498 { 3499 struct socket_wq *wq; 3500 3501 rcu_read_lock(); 3502 wq = rcu_dereference(sk->sk_wq); 3503 if (skwq_has_sleeper(wq)) 3504 wake_up_interruptible_all(&wq->wait); 3505 rcu_read_unlock(); 3506 } 3507 3508 static void sock_def_error_report(struct sock *sk) 3509 { 3510 struct socket_wq *wq; 3511 3512 rcu_read_lock(); 3513 wq = rcu_dereference(sk->sk_wq); 3514 if (skwq_has_sleeper(wq)) 3515 wake_up_interruptible_poll(&wq->wait, EPOLLERR); 3516 sk_wake_async_rcu(sk, SOCK_WAKE_IO, POLL_ERR); 3517 rcu_read_unlock(); 3518 } 3519 3520 void sock_def_readable(struct sock *sk) 3521 { 3522 struct socket_wq *wq; 3523 3524 trace_sk_data_ready(sk); 3525 3526 rcu_read_lock(); 3527 wq = rcu_dereference(sk->sk_wq); 3528 if (skwq_has_sleeper(wq)) 3529 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI | 3530 EPOLLRDNORM | EPOLLRDBAND); 3531 sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN); 3532 rcu_read_unlock(); 3533 } 3534 3535 static void sock_def_write_space(struct sock *sk) 3536 { 3537 struct socket_wq *wq; 3538 3539 rcu_read_lock(); 3540 3541 /* Do not wake up a writer until he can make "significant" 3542 * progress. --DaveM 3543 */ 3544 if (sock_writeable(sk)) { 3545 wq = rcu_dereference(sk->sk_wq); 3546 if (skwq_has_sleeper(wq)) 3547 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 3548 EPOLLWRNORM | EPOLLWRBAND); 3549 3550 /* Should agree with poll, otherwise some programs break */ 3551 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); 3552 } 3553 3554 rcu_read_unlock(); 3555 } 3556 3557 /* An optimised version of sock_def_write_space(), should only be called 3558 * for SOCK_RCU_FREE sockets under RCU read section and after putting 3559 * ->sk_wmem_alloc. 3560 */ 3561 static void sock_def_write_space_wfree(struct sock *sk) 3562 { 3563 /* Do not wake up a writer until he can make "significant" 3564 * progress. 
--DaveM 3565 */ 3566 if (sock_writeable(sk)) { 3567 struct socket_wq *wq = rcu_dereference(sk->sk_wq); 3568 3569 /* rely on refcount_sub from sock_wfree() */ 3570 smp_mb__after_atomic(); 3571 if (wq && waitqueue_active(&wq->wait)) 3572 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 3573 EPOLLWRNORM | EPOLLWRBAND); 3574 3575 /* Should agree with poll, otherwise some programs break */ 3576 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); 3577 } 3578 } 3579 3580 static void sock_def_destruct(struct sock *sk) 3581 { 3582 } 3583 3584 void sk_send_sigurg(struct sock *sk) 3585 { 3586 if (sk->sk_socket && sk->sk_socket->file) 3587 if (send_sigurg(sk->sk_socket->file)) 3588 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); 3589 } 3590 EXPORT_SYMBOL(sk_send_sigurg); 3591 3592 void sk_reset_timer(struct sock *sk, struct timer_list* timer, 3593 unsigned long expires) 3594 { 3595 if (!mod_timer(timer, expires)) 3596 sock_hold(sk); 3597 } 3598 EXPORT_SYMBOL(sk_reset_timer); 3599 3600 void sk_stop_timer(struct sock *sk, struct timer_list* timer) 3601 { 3602 if (timer_delete(timer)) 3603 __sock_put(sk); 3604 } 3605 EXPORT_SYMBOL(sk_stop_timer); 3606 3607 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer) 3608 { 3609 if (timer_delete_sync(timer)) 3610 __sock_put(sk); 3611 } 3612 EXPORT_SYMBOL(sk_stop_timer_sync); 3613 3614 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid) 3615 { 3616 sk_init_common(sk); 3617 sk->sk_send_head = NULL; 3618 3619 timer_setup(&sk->sk_timer, NULL, 0); 3620 3621 sk->sk_allocation = GFP_KERNEL; 3622 sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default); 3623 sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); 3624 sk->sk_state = TCP_CLOSE; 3625 sk->sk_use_task_frag = true; 3626 sk_set_socket(sk, sock); 3627 3628 sock_set_flag(sk, SOCK_ZAPPED); 3629 3630 if (sock) { 3631 sk->sk_type = sock->type; 3632 RCU_INIT_POINTER(sk->sk_wq, &sock->wq); 3633 sock->sk = sk; 3634 } else { 3635 RCU_INIT_POINTER(sk->sk_wq, NULL); 3636 } 3637 sk->sk_uid = uid; 3638 3639 sk->sk_state_change = sock_def_wakeup; 3640 sk->sk_data_ready = sock_def_readable; 3641 sk->sk_write_space = sock_def_write_space; 3642 sk->sk_error_report = sock_def_error_report; 3643 sk->sk_destruct = sock_def_destruct; 3644 3645 sk->sk_frag.page = NULL; 3646 sk->sk_frag.offset = 0; 3647 sk->sk_peek_off = -1; 3648 3649 sk->sk_peer_pid = NULL; 3650 sk->sk_peer_cred = NULL; 3651 spin_lock_init(&sk->sk_peer_lock); 3652 3653 sk->sk_write_pending = 0; 3654 sk->sk_rcvlowat = 1; 3655 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 3656 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 3657 3658 sk->sk_stamp = SK_DEFAULT_STAMP; 3659 #if BITS_PER_LONG==32 3660 seqlock_init(&sk->sk_stamp_seq); 3661 #endif 3662 atomic_set(&sk->sk_zckey, 0); 3663 3664 #ifdef CONFIG_NET_RX_BUSY_POLL 3665 sk->sk_napi_id = 0; 3666 sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read); 3667 #endif 3668 3669 sk->sk_max_pacing_rate = ~0UL; 3670 sk->sk_pacing_rate = ~0UL; 3671 WRITE_ONCE(sk->sk_pacing_shift, 10); 3672 sk->sk_incoming_cpu = -1; 3673 3674 sk_rx_queue_clear(sk); 3675 /* 3676 * Before updating sk_refcnt, we must commit prior changes to memory 3677 * (Documentation/RCU/rculist_nulls.rst for details) 3678 */ 3679 smp_wmb(); 3680 refcount_set(&sk->sk_refcnt, 1); 3681 atomic_set(&sk->sk_drops, 0); 3682 } 3683 EXPORT_SYMBOL(sock_init_data_uid); 3684 3685 void sock_init_data(struct socket *sock, struct sock *sk) 3686 { 3687 kuid_t uid = sock ? 
3688 SOCK_INODE(sock)->i_uid : 3689 make_kuid(sock_net(sk)->user_ns, 0); 3690 3691 sock_init_data_uid(sock, sk, uid); 3692 } 3693 EXPORT_SYMBOL(sock_init_data); 3694 3695 void lock_sock_nested(struct sock *sk, int subclass) 3696 { 3697 /* The sk_lock has mutex_lock() semantics here. */ 3698 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); 3699 3700 might_sleep(); 3701 spin_lock_bh(&sk->sk_lock.slock); 3702 if (sock_owned_by_user_nocheck(sk)) 3703 __lock_sock(sk); 3704 sk->sk_lock.owned = 1; 3705 spin_unlock_bh(&sk->sk_lock.slock); 3706 } 3707 EXPORT_SYMBOL(lock_sock_nested); 3708 3709 void release_sock(struct sock *sk) 3710 { 3711 spin_lock_bh(&sk->sk_lock.slock); 3712 if (sk->sk_backlog.tail) 3713 __release_sock(sk); 3714 3715 if (sk->sk_prot->release_cb) 3716 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb, 3717 tcp_release_cb, sk); 3718 3719 sock_release_ownership(sk); 3720 if (waitqueue_active(&sk->sk_lock.wq)) 3721 wake_up(&sk->sk_lock.wq); 3722 spin_unlock_bh(&sk->sk_lock.slock); 3723 } 3724 EXPORT_SYMBOL(release_sock); 3725 3726 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) 3727 { 3728 might_sleep(); 3729 spin_lock_bh(&sk->sk_lock.slock); 3730 3731 if (!sock_owned_by_user_nocheck(sk)) { 3732 /* 3733 * Fast path return with bottom halves disabled and 3734 * sock::sk_lock.slock held. 3735 * 3736 * The 'mutex' is not contended and holding 3737 * sock::sk_lock.slock prevents all other lockers from 3738 * proceeding, so the corresponding unlock_sock_fast() can 3739 * avoid the slow path of release_sock() completely and 3740 * just release slock. 3741 * 3742 * From a semantic POV this is equivalent to 'acquiring' 3743 * the 'mutex', hence the corresponding lockdep 3744 * mutex_release() has to happen in the fast path of 3745 * unlock_sock_fast().
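 *
 * The intended pairing is (illustrative sketch):
 *
 *	bool slow = lock_sock_fast(sk);
 *	... touch protected state ...
 *	unlock_sock_fast(sk, slow);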
3746 */ 3747 return false; 3748 } 3749 3750 __lock_sock(sk); 3751 sk->sk_lock.owned = 1; 3752 __acquire(&sk->sk_lock.slock); 3753 spin_unlock_bh(&sk->sk_lock.slock); 3754 return true; 3755 } 3756 EXPORT_SYMBOL(__lock_sock_fast); 3757 3758 int sock_gettstamp(struct socket *sock, void __user *userstamp, 3759 bool timeval, bool time32) 3760 { 3761 struct sock *sk = sock->sk; 3762 struct timespec64 ts; 3763 3764 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 3765 ts = ktime_to_timespec64(sock_read_timestamp(sk)); 3766 if (ts.tv_sec == -1) 3767 return -ENOENT; 3768 if (ts.tv_sec == 0) { 3769 ktime_t kt = ktime_get_real(); 3770 sock_write_timestamp(sk, kt); 3771 ts = ktime_to_timespec64(kt); 3772 } 3773 3774 if (timeval) 3775 ts.tv_nsec /= 1000; 3776 3777 #ifdef CONFIG_COMPAT_32BIT_TIME 3778 if (time32) 3779 return put_old_timespec32(&ts, userstamp); 3780 #endif 3781 #ifdef CONFIG_SPARC64 3782 /* beware of padding in sparc64 timeval */ 3783 if (timeval && !in_compat_syscall()) { 3784 struct __kernel_old_timeval __user tv = { 3785 .tv_sec = ts.tv_sec, 3786 .tv_usec = ts.tv_nsec, 3787 }; 3788 if (copy_to_user(userstamp, &tv, sizeof(tv))) 3789 return -EFAULT; 3790 return 0; 3791 } 3792 #endif 3793 return put_timespec64(&ts, userstamp); 3794 } 3795 EXPORT_SYMBOL(sock_gettstamp); 3796 3797 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag) 3798 { 3799 if (!sock_flag(sk, flag)) { 3800 unsigned long previous_flags = sk->sk_flags; 3801 3802 sock_set_flag(sk, flag); 3803 /* 3804 * we just set one of the two flags which require net 3805 * time stamping, but time stamping might have been on 3806 * already because of the other one 3807 */ 3808 if (sock_needs_netstamp(sk) && 3809 !(previous_flags & SK_FLAGS_TIMESTAMP)) 3810 net_enable_timestamp(); 3811 } 3812 } 3813 3814 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, 3815 int level, int type) 3816 { 3817 struct sock_exterr_skb *serr; 3818 struct sk_buff *skb; 3819 int copied, err; 3820 3821 err = -EAGAIN; 3822 skb = sock_dequeue_err_skb(sk); 3823 if (skb == NULL) 3824 goto out; 3825 3826 copied = skb->len; 3827 if (copied > len) { 3828 msg->msg_flags |= MSG_TRUNC; 3829 copied = len; 3830 } 3831 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3832 if (err) 3833 goto out_free_skb; 3834 3835 sock_recv_timestamp(msg, sk, skb); 3836 3837 serr = SKB_EXT_ERR(skb); 3838 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee); 3839 3840 msg->msg_flags |= MSG_ERRQUEUE; 3841 err = copied; 3842 3843 out_free_skb: 3844 kfree_skb(skb); 3845 out: 3846 return err; 3847 } 3848 EXPORT_SYMBOL(sock_recv_errqueue); 3849 3850 /* 3851 * Get a socket option on a socket. 3852 * 3853 * FIX: POSIX 1003.1g is very ambiguous here. It states that 3854 * asynchronous errors should be reported by getsockopt. We assume 3855 * this means if you specify SO_ERROR (otherwise what is the point of it). 3856 */ 3857 int sock_common_getsockopt(struct socket *sock, int level, int optname, 3858 char __user *optval, int __user *optlen) 3859 { 3860 struct sock *sk = sock->sk; 3861 3862 /* IPV6_ADDRFORM can change sk->sk_prot under us.
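 * Read it once with READ_ONCE() for the indirect call below.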
*/ 3863 return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen); 3864 } 3865 EXPORT_SYMBOL(sock_common_getsockopt); 3866 3867 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, 3868 int flags) 3869 { 3870 struct sock *sk = sock->sk; 3871 int addr_len = 0; 3872 int err; 3873 3874 err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len); 3875 if (err >= 0) 3876 msg->msg_namelen = addr_len; 3877 return err; 3878 } 3879 EXPORT_SYMBOL(sock_common_recvmsg); 3880 3881 /* 3882 * Set socket options on an inet socket. 3883 */ 3884 int sock_common_setsockopt(struct socket *sock, int level, int optname, 3885 sockptr_t optval, unsigned int optlen) 3886 { 3887 struct sock *sk = sock->sk; 3888 3889 /* IPV6_ADDRFORM can change sk->sk_prot under us. */ 3890 return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen); 3891 } 3892 EXPORT_SYMBOL(sock_common_setsockopt); 3893 3894 void sk_common_release(struct sock *sk) 3895 { 3896 if (sk->sk_prot->destroy) 3897 sk->sk_prot->destroy(sk); 3898 3899 /* 3900 * Observation: when sk_common_release is called, processes have 3901 * no access to the socket, but the net still has. 3902 * Step one, detach it from networking: 3903 * 3904 * A. Remove from hash tables. 3905 */ 3906 3907 sk->sk_prot->unhash(sk); 3908 3909 /* 3910 * At this point the socket cannot receive new packets, but it is possible 3911 * that some packets are in flight because some CPU runs the receiver and 3912 * did a hash table lookup before we unhashed the socket. They will reach 3913 * the receive queue and will be purged by the socket destructor. 3914 * 3915 * Also we still have packets pending on the receive queue and probably 3916 * our own packets waiting in device queues. sock_destroy will drain the 3917 * receive queue, but transmitted packets will delay socket destruction 3918 * until the last reference is released. 3919 */ 3920 3921 sock_orphan(sk); 3922 3923 xfrm_sk_free_policy(sk); 3924 3925 sock_put(sk); 3926 } 3927 EXPORT_SYMBOL(sk_common_release); 3928 3929 void sk_get_meminfo(const struct sock *sk, u32 *mem) 3930 { 3931 memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS); 3932 3933 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); 3934 mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf); 3935 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); 3936 mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf); 3937 mem[SK_MEMINFO_FWD_ALLOC] = READ_ONCE(sk->sk_forward_alloc); 3938 mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued); 3939 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); 3940 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); 3941 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); 3942 } 3943 3944 #ifdef CONFIG_PROC_FS 3945 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); 3946 3947 int sock_prot_inuse_get(struct net *net, struct proto *prot) 3948 { 3949 int cpu, idx = prot->inuse_idx; 3950 int res = 0; 3951 3952 for_each_possible_cpu(cpu) 3953 res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx]; 3954 3955 return res >= 0 ?
res : 0; 3956 } 3957 EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 3958 3959 int sock_inuse_get(struct net *net) 3960 { 3961 int cpu, res = 0; 3962 3963 for_each_possible_cpu(cpu) 3964 res += per_cpu_ptr(net->core.prot_inuse, cpu)->all; 3965 3966 return res; 3967 } 3968 3969 EXPORT_SYMBOL_GPL(sock_inuse_get); 3970 3971 static int __net_init sock_inuse_init_net(struct net *net) 3972 { 3973 net->core.prot_inuse = alloc_percpu(struct prot_inuse); 3974 if (net->core.prot_inuse == NULL) 3975 return -ENOMEM; 3976 return 0; 3977 } 3978 3979 static void __net_exit sock_inuse_exit_net(struct net *net) 3980 { 3981 free_percpu(net->core.prot_inuse); 3982 } 3983 3984 static struct pernet_operations net_inuse_ops = { 3985 .init = sock_inuse_init_net, 3986 .exit = sock_inuse_exit_net, 3987 }; 3988 3989 static __init int net_inuse_init(void) 3990 { 3991 if (register_pernet_subsys(&net_inuse_ops)) 3992 panic("Cannot initialize net inuse counters"); 3993 3994 return 0; 3995 } 3996 3997 core_initcall(net_inuse_init); 3998 3999 static int assign_proto_idx(struct proto *prot) 4000 { 4001 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 4002 4003 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR)) { 4004 pr_err("PROTO_INUSE_NR exhausted\n"); 4005 return -ENOSPC; 4006 } 4007 4008 set_bit(prot->inuse_idx, proto_inuse_idx); 4009 return 0; 4010 } 4011 4012 static void release_proto_idx(struct proto *prot) 4013 { 4014 if (prot->inuse_idx != PROTO_INUSE_NR) 4015 clear_bit(prot->inuse_idx, proto_inuse_idx); 4016 } 4017 #else 4018 static inline int assign_proto_idx(struct proto *prot) 4019 { 4020 return 0; 4021 } 4022 4023 static inline void release_proto_idx(struct proto *prot) 4024 { 4025 } 4026 4027 #endif 4028 4029 static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot) 4030 { 4031 if (!twsk_prot) 4032 return; 4033 kfree(twsk_prot->twsk_slab_name); 4034 twsk_prot->twsk_slab_name = NULL; 4035 kmem_cache_destroy(twsk_prot->twsk_slab); 4036 twsk_prot->twsk_slab = NULL; 4037 } 4038 4039 static int tw_prot_init(const struct proto *prot) 4040 { 4041 struct timewait_sock_ops *twsk_prot = prot->twsk_prot; 4042 4043 if (!twsk_prot) 4044 return 0; 4045 4046 twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", 4047 prot->name); 4048 if (!twsk_prot->twsk_slab_name) 4049 return -ENOMEM; 4050 4051 twsk_prot->twsk_slab = 4052 kmem_cache_create(twsk_prot->twsk_slab_name, 4053 twsk_prot->twsk_obj_size, 0, 4054 SLAB_ACCOUNT | prot->slab_flags, 4055 NULL); 4056 if (!twsk_prot->twsk_slab) { 4057 pr_crit("%s: Can't create timewait sock SLAB cache!\n", 4058 prot->name); 4059 return -ENOMEM; 4060 } 4061 4062 return 0; 4063 } 4064 4065 static void req_prot_cleanup(struct request_sock_ops *rsk_prot) 4066 { 4067 if (!rsk_prot) 4068 return; 4069 kfree(rsk_prot->slab_name); 4070 rsk_prot->slab_name = NULL; 4071 kmem_cache_destroy(rsk_prot->slab); 4072 rsk_prot->slab = NULL; 4073 } 4074 4075 static int req_prot_init(const struct proto *prot) 4076 { 4077 struct request_sock_ops *rsk_prot = prot->rsk_prot; 4078 4079 if (!rsk_prot) 4080 return 0; 4081 4082 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", 4083 prot->name); 4084 if (!rsk_prot->slab_name) 4085 return -ENOMEM; 4086 4087 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, 4088 rsk_prot->obj_size, 0, 4089 SLAB_ACCOUNT | prot->slab_flags, 4090 NULL); 4091 4092 if (!rsk_prot->slab) { 4093 pr_crit("%s: Can't create request sock SLAB cache!\n", 4094 prot->name); 4095 return -ENOMEM; 4096 } 4097 return 0; 4098 } 4099 4100 int 
proto_register(struct proto *prot, int alloc_slab) 4101 { 4102 int ret = -ENOBUFS; 4103 4104 if (prot->memory_allocated && !prot->sysctl_mem) { 4105 pr_err("%s: missing sysctl_mem\n", prot->name); 4106 return -EINVAL; 4107 } 4108 if (prot->memory_allocated && !prot->per_cpu_fw_alloc) { 4109 pr_err("%s: missing per_cpu_fw_alloc\n", prot->name); 4110 return -EINVAL; 4111 } 4112 if (alloc_slab) { 4113 prot->slab = kmem_cache_create_usercopy(prot->name, 4114 prot->obj_size, 0, 4115 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | 4116 prot->slab_flags, 4117 prot->useroffset, prot->usersize, 4118 NULL); 4119 4120 if (prot->slab == NULL) { 4121 pr_crit("%s: Can't create sock SLAB cache!\n", 4122 prot->name); 4123 goto out; 4124 } 4125 4126 if (req_prot_init(prot)) 4127 goto out_free_request_sock_slab; 4128 4129 if (tw_prot_init(prot)) 4130 goto out_free_timewait_sock_slab; 4131 } 4132 4133 mutex_lock(&proto_list_mutex); 4134 ret = assign_proto_idx(prot); 4135 if (ret) { 4136 mutex_unlock(&proto_list_mutex); 4137 goto out_free_timewait_sock_slab; 4138 } 4139 list_add(&prot->node, &proto_list); 4140 mutex_unlock(&proto_list_mutex); 4141 return ret; 4142 4143 out_free_timewait_sock_slab: 4144 if (alloc_slab) 4145 tw_prot_cleanup(prot->twsk_prot); 4146 out_free_request_sock_slab: 4147 if (alloc_slab) { 4148 req_prot_cleanup(prot->rsk_prot); 4149 4150 kmem_cache_destroy(prot->slab); 4151 prot->slab = NULL; 4152 } 4153 out: 4154 return ret; 4155 } 4156 EXPORT_SYMBOL(proto_register); 4157 4158 void proto_unregister(struct proto *prot) 4159 { 4160 mutex_lock(&proto_list_mutex); 4161 release_proto_idx(prot); 4162 list_del(&prot->node); 4163 mutex_unlock(&proto_list_mutex); 4164 4165 kmem_cache_destroy(prot->slab); 4166 prot->slab = NULL; 4167 4168 req_prot_cleanup(prot->rsk_prot); 4169 tw_prot_cleanup(prot->twsk_prot); 4170 } 4171 EXPORT_SYMBOL(proto_unregister); 4172 4173 int sock_load_diag_module(int family, int protocol) 4174 { 4175 if (!protocol) { 4176 if (!sock_is_registered(family)) 4177 return -ENOENT; 4178 4179 return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 4180 NETLINK_SOCK_DIAG, family); 4181 } 4182 4183 #ifdef CONFIG_INET 4184 if (family == AF_INET && 4185 protocol != IPPROTO_RAW && 4186 protocol < MAX_INET_PROTOS && 4187 !rcu_access_pointer(inet_protos[protocol])) 4188 return -ENOENT; 4189 #endif 4190 4191 return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK, 4192 NETLINK_SOCK_DIAG, family, protocol); 4193 } 4194 EXPORT_SYMBOL(sock_load_diag_module); 4195 4196 #ifdef CONFIG_PROC_FS 4197 static void *proto_seq_start(struct seq_file *seq, loff_t *pos) 4198 __acquires(proto_list_mutex) 4199 { 4200 mutex_lock(&proto_list_mutex); 4201 return seq_list_start_head(&proto_list, *pos); 4202 } 4203 4204 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4205 { 4206 return seq_list_next(v, &proto_list, pos); 4207 } 4208 4209 static void proto_seq_stop(struct seq_file *seq, void *v) 4210 __releases(proto_list_mutex) 4211 { 4212 mutex_unlock(&proto_list_mutex); 4213 } 4214 4215 static char proto_method_implemented(const void *method) 4216 { 4217 return method == NULL ? 'n' : 'y'; 4218 } 4219 static long sock_prot_memory_allocated(struct proto *proto) 4220 { 4221 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L; 4222 } 4223 4224 static const char *sock_prot_memory_pressure(struct proto *proto) 4225 { 4226 return proto->memory_pressure != NULL ? 4227 proto_memory_pressure(proto) ? 
"yes" : "no" : "NI"; 4228 } 4229 4230 static void proto_seq_printf(struct seq_file *seq, struct proto *proto) 4231 { 4232 4233 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " 4234 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", 4235 proto->name, 4236 proto->obj_size, 4237 sock_prot_inuse_get(seq_file_net(seq), proto), 4238 sock_prot_memory_allocated(proto), 4239 sock_prot_memory_pressure(proto), 4240 proto->max_header, 4241 proto->slab == NULL ? "no" : "yes", 4242 module_name(proto->owner), 4243 proto_method_implemented(proto->close), 4244 proto_method_implemented(proto->connect), 4245 proto_method_implemented(proto->disconnect), 4246 proto_method_implemented(proto->accept), 4247 proto_method_implemented(proto->ioctl), 4248 proto_method_implemented(proto->init), 4249 proto_method_implemented(proto->destroy), 4250 proto_method_implemented(proto->shutdown), 4251 proto_method_implemented(proto->setsockopt), 4252 proto_method_implemented(proto->getsockopt), 4253 proto_method_implemented(proto->sendmsg), 4254 proto_method_implemented(proto->recvmsg), 4255 proto_method_implemented(proto->bind), 4256 proto_method_implemented(proto->backlog_rcv), 4257 proto_method_implemented(proto->hash), 4258 proto_method_implemented(proto->unhash), 4259 proto_method_implemented(proto->get_port), 4260 proto_method_implemented(proto->enter_memory_pressure)); 4261 } 4262 4263 static int proto_seq_show(struct seq_file *seq, void *v) 4264 { 4265 if (v == &proto_list) 4266 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", 4267 "protocol", 4268 "size", 4269 "sockets", 4270 "memory", 4271 "press", 4272 "maxhdr", 4273 "slab", 4274 "module", 4275 "cl co di ac io in de sh ss gs se re bi br ha uh gp em\n"); 4276 else 4277 proto_seq_printf(seq, list_entry(v, struct proto, node)); 4278 return 0; 4279 } 4280 4281 static const struct seq_operations proto_seq_ops = { 4282 .start = proto_seq_start, 4283 .next = proto_seq_next, 4284 .stop = proto_seq_stop, 4285 .show = proto_seq_show, 4286 }; 4287 4288 static __net_init int proto_init_net(struct net *net) 4289 { 4290 if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops, 4291 sizeof(struct seq_net_private))) 4292 return -ENOMEM; 4293 4294 return 0; 4295 } 4296 4297 static __net_exit void proto_exit_net(struct net *net) 4298 { 4299 remove_proc_entry("protocols", net->proc_net); 4300 } 4301 4302 4303 static __net_initdata struct pernet_operations proto_net_ops = { 4304 .init = proto_init_net, 4305 .exit = proto_exit_net, 4306 }; 4307 4308 static int __init proto_init(void) 4309 { 4310 return register_pernet_subsys(&proto_net_ops); 4311 } 4312 4313 subsys_initcall(proto_init); 4314 4315 #endif /* PROC_FS */ 4316 4317 #ifdef CONFIG_NET_RX_BUSY_POLL 4318 bool sk_busy_loop_end(void *p, unsigned long start_time) 4319 { 4320 struct sock *sk = p; 4321 4322 if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) 4323 return true; 4324 4325 if (sk_is_udp(sk) && 4326 !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue)) 4327 return true; 4328 4329 return sk_busy_loop_timeout(sk, start_time); 4330 } 4331 EXPORT_SYMBOL(sk_busy_loop_end); 4332 #endif /* CONFIG_NET_RX_BUSY_POLL */ 4333 4334 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len) 4335 { 4336 if (!sk->sk_prot->bind_add) 4337 return -EOPNOTSUPP; 4338 return sk->sk_prot->bind_add(sk, addr, addr_len); 4339 } 4340 EXPORT_SYMBOL(sock_bind_add); 4341 4342 /* Copy 'size' bytes from userspace and return `size` back to userspace */ 4343 int 
/* Copy 'size' bytes in from userspace, run the protocol ioctl handler on
 * the kernel copy, then copy the same 'size' bytes of result back out.
 */
int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
		     void __user *arg, void *karg, size_t size)
{
	int ret;

	if (copy_from_user(karg, arg, size))
		return -EFAULT;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, karg);
	if (ret)
		return ret;

	if (copy_to_user(arg, karg, size))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL(sock_ioctl_inout);

/* The most common ioctl prep helper: nothing is copied in from userspace,
 * and the 4-byte result is copied back out only if the ioctl() succeeds.
 */
static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg)
{
	int ret, karg = 0;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg);
	if (ret)
		return ret;

	return put_user(karg, (int __user *)arg);
}

/* A wrapper around sock ioctls which, depending on the protocol/ioctl,
 * copies the argument in from userspace and the result back out. Its main
 * purpose is to hand kernel memory, rather than userspace memory, to the
 * protocol ioctl callbacks.
 */
int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	int rc = 1;	/* > 0: not handled, fall through to the default helper */

	if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET)
		rc = ipmr_sk_ioctl(sk, cmd, arg);
	else if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET6)
		rc = ip6mr_sk_ioctl(sk, cmd, arg);
	else if (sk_is_phonet(sk))
		rc = phonet_sk_ioctl(sk, cmd, arg);

	/* If the ioctl was handled above, return its result */
	if (rc <= 0)
		return rc;

	/* Otherwise call the default handler */
	return sock_ioctl_out(sk, cmd, arg);
}
EXPORT_SYMBOL(sk_ioctl);
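/* sock_struct_check() pairs with the __cacheline_group_begin()/
 * __cacheline_group_end() markers placed in struct sock (include/net/sock.h);
 * each assert breaks the build if a field drifts out of the cacheline group
 * it was assigned to. Schematically (a simplified sketch, not the real
 * layout):
 *
 *	struct sock {
 *		...
 *		__cacheline_group_begin(sock_write_rx);
 *		atomic_t	sk_drops;
 *		...
 *		__cacheline_group_end(sock_write_rx);
 *		...
 *	};
 */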
static int __init sock_struct_check(void)
{
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
	return 0;
}

core_initcall(sock_struct_check);