// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/unaligned.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/user_namespace.h>
#include <linux/static_key.h>
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/compat.h>
#include <linux/mroute.h>
#include <linux/mroute6.h>
#include <linux/icmpv6.h>

#include <linux/uaccess.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/skbuff_ref.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/proto_memory.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>
#include <net/cls_cgroup.h>
#include <net/netprio_cgroup.h>
#include <linux/sock_diag.h>

#include <linux/filter.h>
#include <net/sock_reuseport.h>
#include <net/bpf_sk_storage.h>

#include <trace/events/sock.h>

#include <net/tcp.h>
#include <net/busy_poll.h>
#include <net/phonet/phonet.h>

#include <linux/ethtool.h>

#include "dev.h"

static DEFINE_MUTEX(proto_list_mutex);
static LIST_HEAD(proto_list);

static void sock_def_write_space_wfree(struct sock *sk);
static void sock_def_write_space(struct sock *sk);

/**
 * sk_ns_capable - General socket capability test
 * @sk: Socket to use a capability on or through
 * @user_ns: The user namespace of the capability to use
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and if the current process has it in the user
 * namespace @user_ns.
 */
bool sk_ns_capable(const struct sock *sk,
		   struct user_namespace *user_ns, int cap)
{
	return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
		ns_capable(user_ns, cap);
}
EXPORT_SYMBOL(sk_ns_capable);

/**
 * sk_capable - Socket global capability test
 * @sk: Socket to use a capability on or through
 * @cap: The global capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and if the current process has it in all user
 * namespaces.
 */
bool sk_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, &init_user_ns, cap);
}
EXPORT_SYMBOL(sk_capable);

/**
 * sk_net_capable - Network namespace socket capability test
 * @sk: Socket to use a capability on or through
 * @cap: The capability to use
 *
 * Test to see if the opener of the socket had the capability @cap when
 * the socket was created and if the current process has it over the
 * network namespace the socket is a member of.
 */
bool sk_net_capable(const struct sock *sk, int cap)
{
	return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
}
EXPORT_SYMBOL(sk_net_capable);

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family and separate keys for internal and
 * userspace sockets.
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_kern_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
static struct lock_class_key af_family_kern_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */

#define _sock_locks(x)						  \
  x "AF_UNSPEC",	x "AF_UNIX"     ,	x "AF_INET"     , \
  x "AF_AX25"  ,	x "AF_IPX"      ,	x "AF_APPLETALK", \
  x "AF_NETROM",	x "AF_BRIDGE"   ,	x "AF_ATMPVC"   , \
  x "AF_X25"   ,	x "AF_INET6"    ,	x "AF_ROSE"     , \
  x "AF_DECnet",	x "AF_NETBEUI"  ,	x "AF_SECURITY" , \
  x "AF_KEY"   ,	x "AF_NETLINK"  ,	x "AF_PACKET"   , \
  x "AF_ASH"   ,	x "AF_ECONET"   ,	x "AF_ATMSVC"   , \
  x "AF_RDS"   ,	x "AF_SNA"      ,	x "AF_IRDA"     , \
  x "AF_PPPOX" ,	x "AF_WANPIPE"  ,	x "AF_LLC"      , \
  x "27"       ,	x "28"          ,	x "AF_CAN"      , \
  x "AF_TIPC"  ,	x "AF_BLUETOOTH",	x "IUCV"        , \
  x "AF_RXRPC" ,	x "AF_ISDN"     ,	x "AF_PHONET"   , \
  x "AF_IEEE802154",	x "AF_CAIF"     ,	x "AF_ALG"      , \
  x "AF_NFC"   ,	x "AF_VSOCK"    ,	x "AF_KCM"      , \
  x "AF_QIPCRTR",	x "AF_SMC"      ,	x "AF_XDP"      , \
  x "AF_MCTP"  , \
  x "AF_MAX"

static const char *const af_family_key_strings[AF_MAX+1] = {
	_sock_locks("sk_lock-")
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
	_sock_locks("slock-")
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
	_sock_locks("clock-")
};

static const char *const af_family_kern_key_strings[AF_MAX+1] = {
	_sock_locks("k-sk_lock-")
};
static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = {
	_sock_locks("k-slock-")
};
static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = {
	_sock_locks("k-clock-")
};
static const char *const af_family_rlock_key_strings[AF_MAX+1] = {
	_sock_locks("rlock-")
};
static const char *const af_family_wlock_key_strings[AF_MAX+1] = {
	_sock_locks("wlock-")
};
static const char *const af_family_elock_key_strings[AF_MAX+1] = {
	_sock_locks("elock-")
};

/*
 * sk_callback_lock and sk queues locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];
static struct lock_class_key af_rlock_keys[AF_MAX];
static struct lock_class_key af_wlock_keys[AF_MAX];
static struct lock_class_key af_elock_keys[AF_MAX];
static struct lock_class_key af_kern_callback_keys[AF_MAX];
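
/* With the strings above, lockdep reports per-family class names such as
 * "sk_lock-AF_INET" and "slock-AF_INET" for a user IPv4 socket, or
 * "k-sk_lock-AF_UNIX" for a kernel-internal AF_UNIX socket.
 */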

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
EXPORT_SYMBOL(sysctl_wmem_max);
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
EXPORT_SYMBOL(sysctl_rmem_max);
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

DEFINE_STATIC_KEY_FALSE(memalloc_socks_key);
EXPORT_SYMBOL_GPL(memalloc_socks_key);

/**
 * sk_set_memalloc - sets %SOCK_MEMALLOC
 * @sk: socket to set it on
 *
 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
 * It's the responsibility of the admin to adjust min_free_kbytes
 * to meet the requirements.
 */
void sk_set_memalloc(struct sock *sk)
{
	sock_set_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation |= __GFP_MEMALLOC;
	static_branch_inc(&memalloc_socks_key);
}
EXPORT_SYMBOL_GPL(sk_set_memalloc);

void sk_clear_memalloc(struct sock *sk)
{
	sock_reset_flag(sk, SOCK_MEMALLOC);
	sk->sk_allocation &= ~__GFP_MEMALLOC;
	static_branch_dec(&memalloc_socks_key);

	/*
	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
	 * progress of swapping. SOCK_MEMALLOC may be cleared while
	 * it has rmem allocations due to the last swapfile being deactivated
	 * but there is a risk that the socket is unusable due to exceeding
	 * the rmem limits. Reclaim the reserves and obey rmem limits again.
	 */
	sk_mem_reclaim(sk);
}
EXPORT_SYMBOL_GPL(sk_clear_memalloc);

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret;
	unsigned int noreclaim_flag;

	/* these should have been dropped before queueing */
	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));

	noreclaim_flag = memalloc_noreclaim_save();
	ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv,
				 tcp_v6_do_rcv,
				 tcp_v4_do_rcv,
				 sk, skb);
	memalloc_noreclaim_restore(noreclaim_flag);

	return ret;
}
EXPORT_SYMBOL(__sk_backlog_rcv);

void sk_error_report(struct sock *sk)
{
	sk->sk_error_report(sk);

	switch (sk->sk_family) {
	case AF_INET:
		fallthrough;
	case AF_INET6:
		trace_inet_sk_error_report(sk);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(sk_error_report);

int sock_get_timeout(long timeo, void *optval, bool old_timeval)
{
	struct __kernel_sock_timeval tv;

	if (timeo == MAX_SCHEDULE_TIMEOUT) {
		tv.tv_sec = 0;
		tv.tv_usec = 0;
	} else {
		tv.tv_sec = timeo / HZ;
		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
	}

	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
		*(struct old_timeval32 *)optval = tv32;
		return sizeof(tv32);
	}

	if (old_timeval) {
		struct __kernel_old_timeval old_tv;
		old_tv.tv_sec = tv.tv_sec;
		old_tv.tv_usec = tv.tv_usec;
		*(struct __kernel_old_timeval *)optval = old_tv;
		return sizeof(old_tv);
	}

	*(struct __kernel_sock_timeval *)optval = tv;
	return sizeof(tv);
}
EXPORT_SYMBOL(sock_get_timeout);
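
/* For example, with HZ == 1000 a user-supplied timeout of
 * { .tv_sec = 2, .tv_usec = 500000 } is stored by sock_set_timeout() below
 * as 2500 jiffies and read back unchanged by sock_get_timeout() above; an
 * infinite timeout (MAX_SCHEDULE_TIMEOUT) reads back as { 0, 0 }.
 */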

int sock_copy_user_timeval(struct __kernel_sock_timeval *tv,
			   sockptr_t optval, int optlen, bool old_timeval)
{
	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
		struct old_timeval32 tv32;

		if (optlen < sizeof(tv32))
			return -EINVAL;

		if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
			return -EFAULT;
		tv->tv_sec = tv32.tv_sec;
		tv->tv_usec = tv32.tv_usec;
	} else if (old_timeval) {
		struct __kernel_old_timeval old_tv;

		if (optlen < sizeof(old_tv))
			return -EINVAL;
		if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
			return -EFAULT;
		tv->tv_sec = old_tv.tv_sec;
		tv->tv_usec = old_tv.tv_usec;
	} else {
		if (optlen < sizeof(*tv))
			return -EINVAL;
		if (copy_from_sockptr(tv, optval, sizeof(*tv)))
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL(sock_copy_user_timeval);

static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
			    bool old_timeval)
{
	struct __kernel_sock_timeval tv;
	int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval);
	long val;

	if (err)
		return err;

	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		WRITE_ONCE(*timeo_p, 0);
		if (warned < 10 && net_ratelimit()) {
			warned++;
			pr_info("%s: `%s' (pid %d) tries to set negative timeout\n",
				__func__, current->comm, task_pid_nr(current));
		}
		return 0;
	}
	val = MAX_SCHEDULE_TIMEOUT;
	if ((tv.tv_sec || tv.tv_usec) &&
	    (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)))
		val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec,
						    USEC_PER_SEC / HZ);
	WRITE_ONCE(*timeo_p, val);
	return 0;
}

static bool sk_set_prio_allowed(const struct sock *sk, int val)
{
	return ((val >= TC_PRIO_BESTEFFORT && val <= TC_PRIO_INTERACTIVE) ||
		sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) ||
		sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN));
}

static bool sock_needs_netstamp(const struct sock *sk)
{
	switch (sk->sk_family) {
	case AF_UNSPEC:
	case AF_UNIX:
		return false;
	default:
		return true;
	}
}

static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
{
	if (sk->sk_flags & flags) {
		sk->sk_flags &= ~flags;
		if (sock_needs_netstamp(sk) &&
		    !(sk->sk_flags & SK_FLAGS_TIMESTAMP))
			net_disable_timestamp();
	}
}


int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) {
		atomic_inc(&sk->sk_drops);
		trace_sock_rcvqueue_full(sk, skb);
		return -ENOMEM;
	}

	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* we escape from rcu protected region, make sure we don't leak
	 * a norefcounted dst
	 */
	skb_dst_force(skb);

	spin_lock_irqsave(&list->lock, flags);
	sock_skb_set_dropcount(sk, skb);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}
EXPORT_SYMBOL(__sock_queue_rcv_skb);
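
/* A protocol receive path can, for example, use the _reason variant below
 * and feed the result to its drop accounting:
 *
 *	enum skb_drop_reason reason;
 *
 *	if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0)
 *		kfree_skb_reason(skb, reason);
 */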

int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb,
			      enum skb_drop_reason *reason)
{
	enum skb_drop_reason drop_reason;
	int err;

	err = sk_filter(sk, skb);
	if (err) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto out;
	}
	err = __sock_queue_rcv_skb(sk, skb);
	switch (err) {
	case -ENOMEM:
		drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF;
		break;
	case -ENOBUFS:
		drop_reason = SKB_DROP_REASON_PROTO_MEM;
		break;
	default:
		drop_reason = SKB_NOT_DROPPED_YET;
		break;
	}
out:
	if (reason)
		*reason = drop_reason;
	return err;
}
EXPORT_SYMBOL(sock_queue_rcv_skb_reason);

int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
		     const int nested, unsigned int trim_cap, bool refcounted)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter_trim_cap(sk, skb, trim_cap))
		goto discard_and_relse;

	skb->dev = NULL;

	if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) {
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}
	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
	} else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	if (refcounted)
		sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(__sk_receive_skb);

INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *,
							   u32));
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
							    u32));
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst && dst->obsolete &&
	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
			       dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete &&
	    INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check,
			       dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtoindex_locked(struct sock *sk, int ifindex)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);

	/* Sorry... */
	ret = -EPERM;
	if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (ifindex < 0)
		goto out;

	/* Paired with all READ_ONCE() done locklessly. */
	WRITE_ONCE(sk->sk_bound_dev_if, ifindex);

	if (sk->sk_prot->rehash)
		sk->sk_prot->rehash(sk);
	sk_dst_reset(sk);

	ret = 0;

out:
#endif

	return ret;
}

int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
{
	int ret;

	if (lock_sk)
		lock_sock(sk);
	ret = sock_bindtoindex_locked(sk, ifindex);
	if (lock_sk)
		release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(sock_bindtoindex);

static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_sockptr(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	sockopt_lock_sock(sk);
	ret = sock_bindtoindex_locked(sk, index);
	sockopt_release_sock(sk);
out:
#endif

	return ret;
}

static int sock_getbindtodevice(struct sock *sk, sockptr_t optval,
				sockptr_t optlen, int len)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];

	if (bound_dev_if == 0) {
		len = 0;
		goto zero;
	}

	ret = -EINVAL;
	if (len < IFNAMSIZ)
		goto out;

	ret = netdev_get_name(net, devname, bound_dev_if);
	if (ret)
		goto out;

	len = strlen(devname) + 1;

	ret = -EFAULT;
	if (copy_to_sockptr(optval, devname, len))
		goto out;

zero:
	ret = -EFAULT;
	if (copy_to_sockptr(optlen, &len, sizeof(int)))
		goto out;

	ret = 0;

out:
#endif

	return ret;
}
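
/* Kernel users that already know the interface index can skip the name
 * lookup above and bind directly, for example:
 *
 *	err = sock_bindtoindex(sk, ifindex, true);
 */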

bool sk_mc_loop(const struct sock *sk)
{
	if (dev_recursion_level())
		return false;
	if (!sk)
		return true;
	/* IPV6_ADDRFORM can change sk->sk_family under us. */
	switch (READ_ONCE(sk->sk_family)) {
	case AF_INET:
		return inet_test_bit(MC_LOOP, sk);
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		return inet6_test_bit(MC6_LOOP, sk);
#endif
	}
	WARN_ON_ONCE(1);
	return true;
}
EXPORT_SYMBOL(sk_mc_loop);

void sock_set_reuseaddr(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_reuse = SK_CAN_REUSE;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_reuseaddr);

void sock_set_reuseport(struct sock *sk)
{
	lock_sock(sk);
	sk->sk_reuseport = true;
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_reuseport);

void sock_no_linger(struct sock *sk)
{
	lock_sock(sk);
	WRITE_ONCE(sk->sk_lingertime, 0);
	sock_set_flag(sk, SOCK_LINGER);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_no_linger);

void sock_set_priority(struct sock *sk, u32 priority)
{
	WRITE_ONCE(sk->sk_priority, priority);
}
EXPORT_SYMBOL(sock_set_priority);

void sock_set_sndtimeo(struct sock *sk, s64 secs)
{
	lock_sock(sk);
	if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
		WRITE_ONCE(sk->sk_sndtimeo, secs * HZ);
	else
		WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_sndtimeo);

static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
{
	sock_valbool_flag(sk, SOCK_RCVTSTAMP, val);
	sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, val && ns);
	if (val) {
		sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new);
		sock_enable_timestamp(sk, SOCK_TIMESTAMP);
	}
}

void sock_enable_timestamps(struct sock *sk)
{
	lock_sock(sk);
	__sock_set_timestamps(sk, true, false, true);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_enable_timestamps);

void sock_set_timestamp(struct sock *sk, int optname, bool valbool)
{
	switch (optname) {
	case SO_TIMESTAMP_OLD:
		__sock_set_timestamps(sk, valbool, false, false);
		break;
	case SO_TIMESTAMP_NEW:
		__sock_set_timestamps(sk, valbool, true, false);
		break;
	case SO_TIMESTAMPNS_OLD:
		__sock_set_timestamps(sk, valbool, false, true);
		break;
	case SO_TIMESTAMPNS_NEW:
		__sock_set_timestamps(sk, valbool, true, true);
		break;
	}
}

static int sock_timestamping_bind_phc(struct sock *sk, int phc_index)
{
	struct net *net = sock_net(sk);
	struct net_device *dev = NULL;
	bool match = false;
	int *vclock_index;
	int i, num;

	if (sk->sk_bound_dev_if)
		dev = dev_get_by_index(net, sk->sk_bound_dev_if);

	if (!dev) {
		pr_err("%s: socket not bound to a device\n", __func__);
		return -EOPNOTSUPP;
	}

	num = ethtool_get_phc_vclocks(dev, &vclock_index);
	dev_put(dev);

	for (i = 0; i < num; i++) {
		if (*(vclock_index + i) == phc_index) {
			match = true;
			break;
		}
	}

	if (num > 0)
		kfree(vclock_index);

	if (!match)
		return -EINVAL;

	WRITE_ONCE(sk->sk_bind_phc, phc_index);

	return 0;
}
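
/* From userspace, binding hardware timestamps to a PHC vclock might look
 * like the following, which reaches sock_set_timestamping() below with the
 * bind_phc index validated by sock_timestamping_bind_phc() above:
 *
 *	struct so_timestamping ts = {
 *		.flags = SOF_TIMESTAMPING_TX_HARDWARE |
 *			 SOF_TIMESTAMPING_RAW_HARDWARE |
 *			 SOF_TIMESTAMPING_BIND_PHC,
 *		.bind_phc = phc_index,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &ts, sizeof(ts));
 */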

int sock_set_timestamping(struct sock *sk, int optname,
			  struct so_timestamping timestamping)
{
	int val = timestamping.flags;
	int ret;

	if (val & ~SOF_TIMESTAMPING_MASK)
		return -EINVAL;

	if (val & SOF_TIMESTAMPING_OPT_ID_TCP &&
	    !(val & SOF_TIMESTAMPING_OPT_ID))
		return -EINVAL;

	if (val & SOF_TIMESTAMPING_OPT_ID &&
	    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
		if (sk_is_tcp(sk)) {
			if ((1 << sk->sk_state) &
			    (TCPF_CLOSE | TCPF_LISTEN))
				return -EINVAL;
			if (val & SOF_TIMESTAMPING_OPT_ID_TCP)
				atomic_set(&sk->sk_tskey, tcp_sk(sk)->write_seq);
			else
				atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una);
		} else {
			atomic_set(&sk->sk_tskey, 0);
		}
	}

	if (val & SOF_TIMESTAMPING_OPT_STATS &&
	    !(val & SOF_TIMESTAMPING_OPT_TSONLY))
		return -EINVAL;

	if (val & SOF_TIMESTAMPING_BIND_PHC) {
		ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc);
		if (ret)
			return ret;
	}

	WRITE_ONCE(sk->sk_tsflags, val);
	sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
	sock_valbool_flag(sk, SOCK_TIMESTAMPING_ANY, !!(val & TSFLAGS_ANY));

	if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
		sock_enable_timestamp(sk,
				      SOCK_TIMESTAMPING_RX_SOFTWARE);
	else
		sock_disable_timestamp(sk,
				       (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
	return 0;
}

#if defined(CONFIG_CGROUP_BPF)
void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op)
{
	struct bpf_sock_ops_kern sock_ops;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	sock_ops.op = op;
	sock_ops.is_fullsock = 1;
	sock_ops.sk = sk;
	bpf_skops_init_skb(&sock_ops, skb, 0);
	__cgroup_bpf_run_filter_sock_ops(sk, &sock_ops, CGROUP_SOCK_OPS);
}
#endif

void sock_set_keepalive(struct sock *sk)
{
	lock_sock(sk);
	if (sk->sk_prot->keepalive)
		sk->sk_prot->keepalive(sk, true);
	sock_valbool_flag(sk, SOCK_KEEPOPEN, true);
	release_sock(sk);
}
EXPORT_SYMBOL(sock_set_keepalive);

static void __sock_set_rcvbuf(struct sock *sk, int val)
{
	/* Ensure val * 2 fits into an int, to prevent max_t() from treating it
	 * as a negative value.
	 */
	val = min_t(int, val, INT_MAX / 2);
	sk->sk_userlocks |= SOCK_RCVBUF_LOCK;

	/* We double it on the way in to account for "struct sk_buff" etc.
	 * overhead.  Applications assume that the SO_RCVBUF setting they make
	 * will allow that much actual data to be received on that socket.
	 *
	 * Applications are unaware that "struct sk_buff" and other overheads
	 * allocate from the receive buffer during socket buffer allocation.
	 *
	 * And after considering the possible alternatives, returning the value
	 * we actually used in getsockopt is the most desirable behavior.
993 */ 994 WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF)); 995 } 996 997 void sock_set_rcvbuf(struct sock *sk, int val) 998 { 999 lock_sock(sk); 1000 __sock_set_rcvbuf(sk, val); 1001 release_sock(sk); 1002 } 1003 EXPORT_SYMBOL(sock_set_rcvbuf); 1004 1005 static void __sock_set_mark(struct sock *sk, u32 val) 1006 { 1007 if (val != sk->sk_mark) { 1008 WRITE_ONCE(sk->sk_mark, val); 1009 sk_dst_reset(sk); 1010 } 1011 } 1012 1013 void sock_set_mark(struct sock *sk, u32 val) 1014 { 1015 lock_sock(sk); 1016 __sock_set_mark(sk, val); 1017 release_sock(sk); 1018 } 1019 EXPORT_SYMBOL(sock_set_mark); 1020 1021 static void sock_release_reserved_memory(struct sock *sk, int bytes) 1022 { 1023 /* Round down bytes to multiple of pages */ 1024 bytes = round_down(bytes, PAGE_SIZE); 1025 1026 WARN_ON(bytes > sk->sk_reserved_mem); 1027 WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes); 1028 sk_mem_reclaim(sk); 1029 } 1030 1031 static int sock_reserve_memory(struct sock *sk, int bytes) 1032 { 1033 long allocated; 1034 bool charged; 1035 int pages; 1036 1037 if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk)) 1038 return -EOPNOTSUPP; 1039 1040 if (!bytes) 1041 return 0; 1042 1043 pages = sk_mem_pages(bytes); 1044 1045 /* pre-charge to memcg */ 1046 charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages, 1047 GFP_KERNEL | __GFP_RETRY_MAYFAIL); 1048 if (!charged) 1049 return -ENOMEM; 1050 1051 /* pre-charge to forward_alloc */ 1052 sk_memory_allocated_add(sk, pages); 1053 allocated = sk_memory_allocated(sk); 1054 /* If the system goes into memory pressure with this 1055 * precharge, give up and return error. 1056 */ 1057 if (allocated > sk_prot_mem_limits(sk, 1)) { 1058 sk_memory_allocated_sub(sk, pages); 1059 mem_cgroup_uncharge_skmem(sk->sk_memcg, pages); 1060 return -ENOMEM; 1061 } 1062 sk_forward_alloc_add(sk, pages << PAGE_SHIFT); 1063 1064 WRITE_ONCE(sk->sk_reserved_mem, 1065 sk->sk_reserved_mem + (pages << PAGE_SHIFT)); 1066 1067 return 0; 1068 } 1069 1070 #ifdef CONFIG_PAGE_POOL 1071 1072 /* This is the number of tokens and frags that the user can SO_DEVMEM_DONTNEED 1073 * in 1 syscall. The limit exists to limit the amount of memory the kernel 1074 * allocates to copy these tokens, and to prevent looping over the frags for 1075 * too long. 
1076 */ 1077 #define MAX_DONTNEED_TOKENS 128 1078 #define MAX_DONTNEED_FRAGS 1024 1079 1080 static noinline_for_stack int 1081 sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen) 1082 { 1083 unsigned int num_tokens, i, j, k, netmem_num = 0; 1084 struct dmabuf_token *tokens; 1085 int ret = 0, num_frags = 0; 1086 netmem_ref netmems[16]; 1087 1088 if (!sk_is_tcp(sk)) 1089 return -EBADF; 1090 1091 if (optlen % sizeof(*tokens) || 1092 optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS) 1093 return -EINVAL; 1094 1095 num_tokens = optlen / sizeof(*tokens); 1096 tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL); 1097 if (!tokens) 1098 return -ENOMEM; 1099 1100 if (copy_from_sockptr(tokens, optval, optlen)) { 1101 kvfree(tokens); 1102 return -EFAULT; 1103 } 1104 1105 xa_lock_bh(&sk->sk_user_frags); 1106 for (i = 0; i < num_tokens; i++) { 1107 for (j = 0; j < tokens[i].token_count; j++) { 1108 if (++num_frags > MAX_DONTNEED_FRAGS) 1109 goto frag_limit_reached; 1110 1111 netmem_ref netmem = (__force netmem_ref)__xa_erase( 1112 &sk->sk_user_frags, tokens[i].token_start + j); 1113 1114 if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem))) 1115 continue; 1116 1117 netmems[netmem_num++] = netmem; 1118 if (netmem_num == ARRAY_SIZE(netmems)) { 1119 xa_unlock_bh(&sk->sk_user_frags); 1120 for (k = 0; k < netmem_num; k++) 1121 WARN_ON_ONCE(!napi_pp_put_page(netmems[k])); 1122 netmem_num = 0; 1123 xa_lock_bh(&sk->sk_user_frags); 1124 } 1125 ret++; 1126 } 1127 } 1128 1129 frag_limit_reached: 1130 xa_unlock_bh(&sk->sk_user_frags); 1131 for (k = 0; k < netmem_num; k++) 1132 WARN_ON_ONCE(!napi_pp_put_page(netmems[k])); 1133 1134 kvfree(tokens); 1135 return ret; 1136 } 1137 #endif 1138 1139 void sockopt_lock_sock(struct sock *sk) 1140 { 1141 /* When current->bpf_ctx is set, the setsockopt is called from 1142 * a bpf prog. bpf has ensured the sk lock has been 1143 * acquired before calling setsockopt(). 1144 */ 1145 if (has_current_bpf_ctx()) 1146 return; 1147 1148 lock_sock(sk); 1149 } 1150 EXPORT_SYMBOL(sockopt_lock_sock); 1151 1152 void sockopt_release_sock(struct sock *sk) 1153 { 1154 if (has_current_bpf_ctx()) 1155 return; 1156 1157 release_sock(sk); 1158 } 1159 EXPORT_SYMBOL(sockopt_release_sock); 1160 1161 bool sockopt_ns_capable(struct user_namespace *ns, int cap) 1162 { 1163 return has_current_bpf_ctx() || ns_capable(ns, cap); 1164 } 1165 EXPORT_SYMBOL(sockopt_ns_capable); 1166 1167 bool sockopt_capable(int cap) 1168 { 1169 return has_current_bpf_ctx() || capable(cap); 1170 } 1171 EXPORT_SYMBOL(sockopt_capable); 1172 1173 static int sockopt_validate_clockid(__kernel_clockid_t value) 1174 { 1175 switch (value) { 1176 case CLOCK_REALTIME: 1177 case CLOCK_MONOTONIC: 1178 case CLOCK_TAI: 1179 return 0; 1180 } 1181 return -EINVAL; 1182 } 1183 1184 /* 1185 * This is meant for all protocols to use and covers goings on 1186 * at the socket level. Everything here is generic. 
1187 */ 1188 1189 int sk_setsockopt(struct sock *sk, int level, int optname, 1190 sockptr_t optval, unsigned int optlen) 1191 { 1192 struct so_timestamping timestamping; 1193 struct socket *sock = sk->sk_socket; 1194 struct sock_txtime sk_txtime; 1195 int val; 1196 int valbool; 1197 struct linger ling; 1198 int ret = 0; 1199 1200 /* 1201 * Options without arguments 1202 */ 1203 1204 if (optname == SO_BINDTODEVICE) 1205 return sock_setbindtodevice(sk, optval, optlen); 1206 1207 if (optlen < sizeof(int)) 1208 return -EINVAL; 1209 1210 if (copy_from_sockptr(&val, optval, sizeof(val))) 1211 return -EFAULT; 1212 1213 valbool = val ? 1 : 0; 1214 1215 /* handle options which do not require locking the socket. */ 1216 switch (optname) { 1217 case SO_PRIORITY: 1218 if (sk_set_prio_allowed(sk, val)) { 1219 sock_set_priority(sk, val); 1220 return 0; 1221 } 1222 return -EPERM; 1223 case SO_PASSSEC: 1224 assign_bit(SOCK_PASSSEC, &sock->flags, valbool); 1225 return 0; 1226 case SO_PASSCRED: 1227 assign_bit(SOCK_PASSCRED, &sock->flags, valbool); 1228 return 0; 1229 case SO_PASSPIDFD: 1230 assign_bit(SOCK_PASSPIDFD, &sock->flags, valbool); 1231 return 0; 1232 case SO_TYPE: 1233 case SO_PROTOCOL: 1234 case SO_DOMAIN: 1235 case SO_ERROR: 1236 return -ENOPROTOOPT; 1237 #ifdef CONFIG_NET_RX_BUSY_POLL 1238 case SO_BUSY_POLL: 1239 if (val < 0) 1240 return -EINVAL; 1241 WRITE_ONCE(sk->sk_ll_usec, val); 1242 return 0; 1243 case SO_PREFER_BUSY_POLL: 1244 if (valbool && !sockopt_capable(CAP_NET_ADMIN)) 1245 return -EPERM; 1246 WRITE_ONCE(sk->sk_prefer_busy_poll, valbool); 1247 return 0; 1248 case SO_BUSY_POLL_BUDGET: 1249 if (val > READ_ONCE(sk->sk_busy_poll_budget) && 1250 !sockopt_capable(CAP_NET_ADMIN)) 1251 return -EPERM; 1252 if (val < 0 || val > U16_MAX) 1253 return -EINVAL; 1254 WRITE_ONCE(sk->sk_busy_poll_budget, val); 1255 return 0; 1256 #endif 1257 case SO_MAX_PACING_RATE: 1258 { 1259 unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val; 1260 unsigned long pacing_rate; 1261 1262 if (sizeof(ulval) != sizeof(val) && 1263 optlen >= sizeof(ulval) && 1264 copy_from_sockptr(&ulval, optval, sizeof(ulval))) { 1265 return -EFAULT; 1266 } 1267 if (ulval != ~0UL) 1268 cmpxchg(&sk->sk_pacing_status, 1269 SK_PACING_NONE, 1270 SK_PACING_NEEDED); 1271 /* Pairs with READ_ONCE() from sk_getsockopt() */ 1272 WRITE_ONCE(sk->sk_max_pacing_rate, ulval); 1273 pacing_rate = READ_ONCE(sk->sk_pacing_rate); 1274 if (ulval < pacing_rate) 1275 WRITE_ONCE(sk->sk_pacing_rate, ulval); 1276 return 0; 1277 } 1278 case SO_TXREHASH: 1279 if (val < -1 || val > 1) 1280 return -EINVAL; 1281 if ((u8)val == SOCK_TXREHASH_DEFAULT) 1282 val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash); 1283 /* Paired with READ_ONCE() in tcp_rtx_synack() 1284 * and sk_getsockopt(). 1285 */ 1286 WRITE_ONCE(sk->sk_txrehash, (u8)val); 1287 return 0; 1288 case SO_PEEK_OFF: 1289 { 1290 int (*set_peek_off)(struct sock *sk, int val); 1291 1292 set_peek_off = READ_ONCE(sock->ops)->set_peek_off; 1293 if (set_peek_off) 1294 ret = set_peek_off(sk, val); 1295 else 1296 ret = -EOPNOTSUPP; 1297 return ret; 1298 } 1299 #ifdef CONFIG_PAGE_POOL 1300 case SO_DEVMEM_DONTNEED: 1301 return sock_devmem_dontneed(sk, optval, optlen); 1302 #endif 1303 } 1304 1305 sockopt_lock_sock(sk); 1306 1307 switch (optname) { 1308 case SO_DEBUG: 1309 if (val && !sockopt_capable(CAP_NET_ADMIN)) 1310 ret = -EACCES; 1311 else 1312 sock_valbool_flag(sk, SOCK_DBG, valbool); 1313 break; 1314 case SO_REUSEADDR: 1315 sk->sk_reuse = (valbool ? 
				   SK_CAN_REUSE : SK_NO_REUSE);
		break;
	case SO_REUSEPORT:
		if (valbool && !sk_is_inet(sk))
			ret = -EOPNOTSUPP;
		else
			sk->sk_reuseport = valbool;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		sk_dst_reset(sk);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this - BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		val = min_t(u32, val, READ_ONCE(sysctl_wmem_max));
set_sndbuf:
		/* Ensure val * 2 fits into an int, to prevent max_t()
		 * from treating it as a negative value.
		 */
		val = min_t(int, val, INT_MAX / 2);
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		WRITE_ONCE(sk->sk_sndbuf,
			   max_t(int, val * 2, SOCK_MIN_SNDBUF));
		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!sockopt_capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
		 */
		if (val < 0)
			val = 0;
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this - BSD doesn't, and if you think
		 * about it this is right. Otherwise apps have to
		 * play 'guess the biggest size' games. RCVBUF/SNDBUF
		 * are treated in BSD as hints
		 */
		__sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max)));
		break;

	case SO_RCVBUFFORCE:
		if (!sockopt_capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}

		/* No negative values (to prevent underflow, as val will be
		 * multiplied by 2).
1379 */ 1380 __sock_set_rcvbuf(sk, max(val, 0)); 1381 break; 1382 1383 case SO_KEEPALIVE: 1384 if (sk->sk_prot->keepalive) 1385 sk->sk_prot->keepalive(sk, valbool); 1386 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 1387 break; 1388 1389 case SO_OOBINLINE: 1390 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); 1391 break; 1392 1393 case SO_NO_CHECK: 1394 sk->sk_no_check_tx = valbool; 1395 break; 1396 1397 case SO_LINGER: 1398 if (optlen < sizeof(ling)) { 1399 ret = -EINVAL; /* 1003.1g */ 1400 break; 1401 } 1402 if (copy_from_sockptr(&ling, optval, sizeof(ling))) { 1403 ret = -EFAULT; 1404 break; 1405 } 1406 if (!ling.l_onoff) { 1407 sock_reset_flag(sk, SOCK_LINGER); 1408 } else { 1409 unsigned long t_sec = ling.l_linger; 1410 1411 if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ) 1412 WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT); 1413 else 1414 WRITE_ONCE(sk->sk_lingertime, t_sec * HZ); 1415 sock_set_flag(sk, SOCK_LINGER); 1416 } 1417 break; 1418 1419 case SO_BSDCOMPAT: 1420 break; 1421 1422 case SO_TIMESTAMP_OLD: 1423 case SO_TIMESTAMP_NEW: 1424 case SO_TIMESTAMPNS_OLD: 1425 case SO_TIMESTAMPNS_NEW: 1426 sock_set_timestamp(sk, optname, valbool); 1427 break; 1428 1429 case SO_TIMESTAMPING_NEW: 1430 case SO_TIMESTAMPING_OLD: 1431 if (optlen == sizeof(timestamping)) { 1432 if (copy_from_sockptr(×tamping, optval, 1433 sizeof(timestamping))) { 1434 ret = -EFAULT; 1435 break; 1436 } 1437 } else { 1438 memset(×tamping, 0, sizeof(timestamping)); 1439 timestamping.flags = val; 1440 } 1441 ret = sock_set_timestamping(sk, optname, timestamping); 1442 break; 1443 1444 case SO_RCVLOWAT: 1445 { 1446 int (*set_rcvlowat)(struct sock *sk, int val) = NULL; 1447 1448 if (val < 0) 1449 val = INT_MAX; 1450 if (sock) 1451 set_rcvlowat = READ_ONCE(sock->ops)->set_rcvlowat; 1452 if (set_rcvlowat) 1453 ret = set_rcvlowat(sk, val); 1454 else 1455 WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); 1456 break; 1457 } 1458 case SO_RCVTIMEO_OLD: 1459 case SO_RCVTIMEO_NEW: 1460 ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, 1461 optlen, optname == SO_RCVTIMEO_OLD); 1462 break; 1463 1464 case SO_SNDTIMEO_OLD: 1465 case SO_SNDTIMEO_NEW: 1466 ret = sock_set_timeout(&sk->sk_sndtimeo, optval, 1467 optlen, optname == SO_SNDTIMEO_OLD); 1468 break; 1469 1470 case SO_ATTACH_FILTER: { 1471 struct sock_fprog fprog; 1472 1473 ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); 1474 if (!ret) 1475 ret = sk_attach_filter(&fprog, sk); 1476 break; 1477 } 1478 case SO_ATTACH_BPF: 1479 ret = -EINVAL; 1480 if (optlen == sizeof(u32)) { 1481 u32 ufd; 1482 1483 ret = -EFAULT; 1484 if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) 1485 break; 1486 1487 ret = sk_attach_bpf(ufd, sk); 1488 } 1489 break; 1490 1491 case SO_ATTACH_REUSEPORT_CBPF: { 1492 struct sock_fprog fprog; 1493 1494 ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); 1495 if (!ret) 1496 ret = sk_reuseport_attach_filter(&fprog, sk); 1497 break; 1498 } 1499 case SO_ATTACH_REUSEPORT_EBPF: 1500 ret = -EINVAL; 1501 if (optlen == sizeof(u32)) { 1502 u32 ufd; 1503 1504 ret = -EFAULT; 1505 if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) 1506 break; 1507 1508 ret = sk_reuseport_attach_bpf(ufd, sk); 1509 } 1510 break; 1511 1512 case SO_DETACH_REUSEPORT_BPF: 1513 ret = reuseport_detach_prog(sk); 1514 break; 1515 1516 case SO_DETACH_FILTER: 1517 ret = sk_detach_filter(sk); 1518 break; 1519 1520 case SO_LOCK_FILTER: 1521 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool) 1522 ret = -EPERM; 1523 else 1524 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool); 1525 break; 1526 1527 case SO_MARK: 1528 if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && 1529 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { 1530 ret = -EPERM; 1531 break; 1532 } 1533 1534 __sock_set_mark(sk, val); 1535 break; 1536 case SO_RCVMARK: 1537 sock_valbool_flag(sk, SOCK_RCVMARK, valbool); 1538 break; 1539 1540 case SO_RCVPRIORITY: 1541 sock_valbool_flag(sk, SOCK_RCVPRIORITY, valbool); 1542 break; 1543 1544 case SO_RXQ_OVFL: 1545 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); 1546 break; 1547 1548 case SO_WIFI_STATUS: 1549 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); 1550 break; 1551 1552 case SO_NOFCS: 1553 sock_valbool_flag(sk, SOCK_NOFCS, valbool); 1554 break; 1555 1556 case SO_SELECT_ERR_QUEUE: 1557 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); 1558 break; 1559 1560 1561 case SO_INCOMING_CPU: 1562 reuseport_update_incoming_cpu(sk, val); 1563 break; 1564 1565 case SO_CNX_ADVICE: 1566 if (val == 1) 1567 dst_negative_advice(sk); 1568 break; 1569 1570 case SO_ZEROCOPY: 1571 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) { 1572 if (!(sk_is_tcp(sk) || 1573 (sk->sk_type == SOCK_DGRAM && 1574 sk->sk_protocol == IPPROTO_UDP))) 1575 ret = -EOPNOTSUPP; 1576 } else if (sk->sk_family != PF_RDS) { 1577 ret = -EOPNOTSUPP; 1578 } 1579 if (!ret) { 1580 if (val < 0 || val > 1) 1581 ret = -EINVAL; 1582 else 1583 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool); 1584 } 1585 break; 1586 1587 case SO_TXTIME: 1588 if (optlen != sizeof(struct sock_txtime)) { 1589 ret = -EINVAL; 1590 break; 1591 } else if (copy_from_sockptr(&sk_txtime, optval, 1592 sizeof(struct sock_txtime))) { 1593 ret = -EFAULT; 1594 break; 1595 } else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) { 1596 ret = -EINVAL; 1597 break; 1598 } 1599 /* CLOCK_MONOTONIC is only used by sch_fq, and this packet 1600 * scheduler has enough safe guards. 
1601 */ 1602 if (sk_txtime.clockid != CLOCK_MONOTONIC && 1603 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { 1604 ret = -EPERM; 1605 break; 1606 } 1607 1608 ret = sockopt_validate_clockid(sk_txtime.clockid); 1609 if (ret) 1610 break; 1611 1612 sock_valbool_flag(sk, SOCK_TXTIME, true); 1613 sk->sk_clockid = sk_txtime.clockid; 1614 sk->sk_txtime_deadline_mode = 1615 !!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE); 1616 sk->sk_txtime_report_errors = 1617 !!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS); 1618 break; 1619 1620 case SO_BINDTOIFINDEX: 1621 ret = sock_bindtoindex_locked(sk, val); 1622 break; 1623 1624 case SO_BUF_LOCK: 1625 if (val & ~SOCK_BUF_LOCK_MASK) { 1626 ret = -EINVAL; 1627 break; 1628 } 1629 sk->sk_userlocks = val | (sk->sk_userlocks & 1630 ~SOCK_BUF_LOCK_MASK); 1631 break; 1632 1633 case SO_RESERVE_MEM: 1634 { 1635 int delta; 1636 1637 if (val < 0) { 1638 ret = -EINVAL; 1639 break; 1640 } 1641 1642 delta = val - sk->sk_reserved_mem; 1643 if (delta < 0) 1644 sock_release_reserved_memory(sk, -delta); 1645 else 1646 ret = sock_reserve_memory(sk, delta); 1647 break; 1648 } 1649 1650 default: 1651 ret = -ENOPROTOOPT; 1652 break; 1653 } 1654 sockopt_release_sock(sk); 1655 return ret; 1656 } 1657 1658 int sock_setsockopt(struct socket *sock, int level, int optname, 1659 sockptr_t optval, unsigned int optlen) 1660 { 1661 return sk_setsockopt(sock->sk, level, optname, 1662 optval, optlen); 1663 } 1664 EXPORT_SYMBOL(sock_setsockopt); 1665 1666 static const struct cred *sk_get_peer_cred(struct sock *sk) 1667 { 1668 const struct cred *cred; 1669 1670 spin_lock(&sk->sk_peer_lock); 1671 cred = get_cred(sk->sk_peer_cred); 1672 spin_unlock(&sk->sk_peer_lock); 1673 1674 return cred; 1675 } 1676 1677 static void cred_to_ucred(struct pid *pid, const struct cred *cred, 1678 struct ucred *ucred) 1679 { 1680 ucred->pid = pid_vnr(pid); 1681 ucred->uid = ucred->gid = -1; 1682 if (cred) { 1683 struct user_namespace *current_ns = current_user_ns(); 1684 1685 ucred->uid = from_kuid_munged(current_ns, cred->euid); 1686 ucred->gid = from_kgid_munged(current_ns, cred->egid); 1687 } 1688 } 1689 1690 static int groups_to_user(sockptr_t dst, const struct group_info *src) 1691 { 1692 struct user_namespace *user_ns = current_user_ns(); 1693 int i; 1694 1695 for (i = 0; i < src->ngroups; i++) { 1696 gid_t gid = from_kgid_munged(user_ns, src->gid[i]); 1697 1698 if (copy_to_sockptr_offset(dst, i * sizeof(gid), &gid, sizeof(gid))) 1699 return -EFAULT; 1700 } 1701 1702 return 0; 1703 } 1704 1705 int sk_getsockopt(struct sock *sk, int level, int optname, 1706 sockptr_t optval, sockptr_t optlen) 1707 { 1708 struct socket *sock = sk->sk_socket; 1709 1710 union { 1711 int val; 1712 u64 val64; 1713 unsigned long ulval; 1714 struct linger ling; 1715 struct old_timeval32 tm32; 1716 struct __kernel_old_timeval tm; 1717 struct __kernel_sock_timeval stm; 1718 struct sock_txtime txtime; 1719 struct so_timestamping timestamping; 1720 } v; 1721 1722 int lv = sizeof(int); 1723 int len; 1724 1725 if (copy_from_sockptr(&len, optlen, sizeof(int))) 1726 return -EFAULT; 1727 if (len < 0) 1728 return -EINVAL; 1729 1730 memset(&v, 0, sizeof(v)); 1731 1732 switch (optname) { 1733 case SO_DEBUG: 1734 v.val = sock_flag(sk, SOCK_DBG); 1735 break; 1736 1737 case SO_DONTROUTE: 1738 v.val = sock_flag(sk, SOCK_LOCALROUTE); 1739 break; 1740 1741 case SO_BROADCAST: 1742 v.val = sock_flag(sk, SOCK_BROADCAST); 1743 break; 1744 1745 case SO_SNDBUF: 1746 v.val = READ_ONCE(sk->sk_sndbuf); 1747 break; 1748 1749 case SO_RCVBUF: 1750 
		v.val = READ_ONCE(sk->sk_rcvbuf);
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = READ_ONCE(sk->sk_priority);
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = READ_ONCE(sk->sk_lingertime) / HZ;
		break;

	case SO_BSDCOMPAT:
		break;

	case SO_TIMESTAMP_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
			!sock_flag(sk, SOCK_TSTAMP_NEW) &&
			!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMP_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPNS_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPING_OLD:
	case SO_TIMESTAMPING_NEW:
		lv = sizeof(v.timestamping);
		/* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only
		 * returning the flags when they were set through the same option.
		 * Don't change the behaviour for the old case SO_TIMESTAMPING_OLD.
1828 */ 1829 if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) { 1830 v.timestamping.flags = READ_ONCE(sk->sk_tsflags); 1831 v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc); 1832 } 1833 break; 1834 1835 case SO_RCVTIMEO_OLD: 1836 case SO_RCVTIMEO_NEW: 1837 lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v, 1838 SO_RCVTIMEO_OLD == optname); 1839 break; 1840 1841 case SO_SNDTIMEO_OLD: 1842 case SO_SNDTIMEO_NEW: 1843 lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v, 1844 SO_SNDTIMEO_OLD == optname); 1845 break; 1846 1847 case SO_RCVLOWAT: 1848 v.val = READ_ONCE(sk->sk_rcvlowat); 1849 break; 1850 1851 case SO_SNDLOWAT: 1852 v.val = 1; 1853 break; 1854 1855 case SO_PASSCRED: 1856 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags); 1857 break; 1858 1859 case SO_PASSPIDFD: 1860 v.val = !!test_bit(SOCK_PASSPIDFD, &sock->flags); 1861 break; 1862 1863 case SO_PEERCRED: 1864 { 1865 struct ucred peercred; 1866 if (len > sizeof(peercred)) 1867 len = sizeof(peercred); 1868 1869 spin_lock(&sk->sk_peer_lock); 1870 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); 1871 spin_unlock(&sk->sk_peer_lock); 1872 1873 if (copy_to_sockptr(optval, &peercred, len)) 1874 return -EFAULT; 1875 goto lenout; 1876 } 1877 1878 case SO_PEERPIDFD: 1879 { 1880 struct pid *peer_pid; 1881 struct file *pidfd_file = NULL; 1882 int pidfd; 1883 1884 if (len > sizeof(pidfd)) 1885 len = sizeof(pidfd); 1886 1887 spin_lock(&sk->sk_peer_lock); 1888 peer_pid = get_pid(sk->sk_peer_pid); 1889 spin_unlock(&sk->sk_peer_lock); 1890 1891 if (!peer_pid) 1892 return -ENODATA; 1893 1894 pidfd = pidfd_prepare(peer_pid, 0, &pidfd_file); 1895 put_pid(peer_pid); 1896 if (pidfd < 0) 1897 return pidfd; 1898 1899 if (copy_to_sockptr(optval, &pidfd, len) || 1900 copy_to_sockptr(optlen, &len, sizeof(int))) { 1901 put_unused_fd(pidfd); 1902 fput(pidfd_file); 1903 1904 return -EFAULT; 1905 } 1906 1907 fd_install(pidfd, pidfd_file); 1908 return 0; 1909 } 1910 1911 case SO_PEERGROUPS: 1912 { 1913 const struct cred *cred; 1914 int ret, n; 1915 1916 cred = sk_get_peer_cred(sk); 1917 if (!cred) 1918 return -ENODATA; 1919 1920 n = cred->group_info->ngroups; 1921 if (len < n * sizeof(gid_t)) { 1922 len = n * sizeof(gid_t); 1923 put_cred(cred); 1924 return copy_to_sockptr(optlen, &len, sizeof(int)) ? -EFAULT : -ERANGE; 1925 } 1926 len = n * sizeof(gid_t); 1927 1928 ret = groups_to_user(optval, cred->group_info); 1929 put_cred(cred); 1930 if (ret) 1931 return ret; 1932 goto lenout; 1933 } 1934 1935 case SO_PEERNAME: 1936 { 1937 struct sockaddr_storage address; 1938 1939 lv = READ_ONCE(sock->ops)->getname(sock, (struct sockaddr *)&address, 2); 1940 if (lv < 0) 1941 return -ENOTCONN; 1942 if (lv < len) 1943 return -EINVAL; 1944 if (copy_to_sockptr(optval, &address, len)) 1945 return -EFAULT; 1946 goto lenout; 1947 } 1948 1949 /* Dubious BSD thing... Probably nobody even uses it, but 1950 * the UNIX standard wants it for whatever reason... 
-DaveM 1951 */ 1952 case SO_ACCEPTCONN: 1953 v.val = sk->sk_state == TCP_LISTEN; 1954 break; 1955 1956 case SO_PASSSEC: 1957 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags); 1958 break; 1959 1960 case SO_PEERSEC: 1961 return security_socket_getpeersec_stream(sock, 1962 optval, optlen, len); 1963 1964 case SO_MARK: 1965 v.val = READ_ONCE(sk->sk_mark); 1966 break; 1967 1968 case SO_RCVMARK: 1969 v.val = sock_flag(sk, SOCK_RCVMARK); 1970 break; 1971 1972 case SO_RCVPRIORITY: 1973 v.val = sock_flag(sk, SOCK_RCVPRIORITY); 1974 break; 1975 1976 case SO_RXQ_OVFL: 1977 v.val = sock_flag(sk, SOCK_RXQ_OVFL); 1978 break; 1979 1980 case SO_WIFI_STATUS: 1981 v.val = sock_flag(sk, SOCK_WIFI_STATUS); 1982 break; 1983 1984 case SO_PEEK_OFF: 1985 if (!READ_ONCE(sock->ops)->set_peek_off) 1986 return -EOPNOTSUPP; 1987 1988 v.val = READ_ONCE(sk->sk_peek_off); 1989 break; 1990 case SO_NOFCS: 1991 v.val = sock_flag(sk, SOCK_NOFCS); 1992 break; 1993 1994 case SO_BINDTODEVICE: 1995 return sock_getbindtodevice(sk, optval, optlen, len); 1996 1997 case SO_GET_FILTER: 1998 len = sk_get_filter(sk, optval, len); 1999 if (len < 0) 2000 return len; 2001 2002 goto lenout; 2003 2004 case SO_LOCK_FILTER: 2005 v.val = sock_flag(sk, SOCK_FILTER_LOCKED); 2006 break; 2007 2008 case SO_BPF_EXTENSIONS: 2009 v.val = bpf_tell_extensions(); 2010 break; 2011 2012 case SO_SELECT_ERR_QUEUE: 2013 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); 2014 break; 2015 2016 #ifdef CONFIG_NET_RX_BUSY_POLL 2017 case SO_BUSY_POLL: 2018 v.val = READ_ONCE(sk->sk_ll_usec); 2019 break; 2020 case SO_PREFER_BUSY_POLL: 2021 v.val = READ_ONCE(sk->sk_prefer_busy_poll); 2022 break; 2023 #endif 2024 2025 case SO_MAX_PACING_RATE: 2026 /* The READ_ONCE() pair with the WRITE_ONCE() in sk_setsockopt() */ 2027 if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) { 2028 lv = sizeof(v.ulval); 2029 v.ulval = READ_ONCE(sk->sk_max_pacing_rate); 2030 } else { 2031 /* 32bit version */ 2032 v.val = min_t(unsigned long, ~0U, 2033 READ_ONCE(sk->sk_max_pacing_rate)); 2034 } 2035 break; 2036 2037 case SO_INCOMING_CPU: 2038 v.val = READ_ONCE(sk->sk_incoming_cpu); 2039 break; 2040 2041 case SO_MEMINFO: 2042 { 2043 u32 meminfo[SK_MEMINFO_VARS]; 2044 2045 sk_get_meminfo(sk, meminfo); 2046 2047 len = min_t(unsigned int, len, sizeof(meminfo)); 2048 if (copy_to_sockptr(optval, &meminfo, len)) 2049 return -EFAULT; 2050 2051 goto lenout; 2052 } 2053 2054 #ifdef CONFIG_NET_RX_BUSY_POLL 2055 case SO_INCOMING_NAPI_ID: 2056 v.val = READ_ONCE(sk->sk_napi_id); 2057 2058 /* aggregate non-NAPI IDs down to 0 */ 2059 if (!napi_id_valid(v.val)) 2060 v.val = 0; 2061 2062 break; 2063 #endif 2064 2065 case SO_COOKIE: 2066 lv = sizeof(u64); 2067 if (len < lv) 2068 return -EINVAL; 2069 v.val64 = sock_gen_cookie(sk); 2070 break; 2071 2072 case SO_ZEROCOPY: 2073 v.val = sock_flag(sk, SOCK_ZEROCOPY); 2074 break; 2075 2076 case SO_TXTIME: 2077 lv = sizeof(v.txtime); 2078 v.txtime.clockid = sk->sk_clockid; 2079 v.txtime.flags |= sk->sk_txtime_deadline_mode ? 2080 SOF_TXTIME_DEADLINE_MODE : 0; 2081 v.txtime.flags |= sk->sk_txtime_report_errors ? 
2082 SOF_TXTIME_REPORT_ERRORS : 0; 2083 break; 2084 2085 case SO_BINDTOIFINDEX: 2086 v.val = READ_ONCE(sk->sk_bound_dev_if); 2087 break; 2088 2089 case SO_NETNS_COOKIE: 2090 lv = sizeof(u64); 2091 if (len != lv) 2092 return -EINVAL; 2093 v.val64 = sock_net(sk)->net_cookie; 2094 break; 2095 2096 case SO_BUF_LOCK: 2097 v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK; 2098 break; 2099 2100 case SO_RESERVE_MEM: 2101 v.val = READ_ONCE(sk->sk_reserved_mem); 2102 break; 2103 2104 case SO_TXREHASH: 2105 /* Paired with WRITE_ONCE() in sk_setsockopt() */ 2106 v.val = READ_ONCE(sk->sk_txrehash); 2107 break; 2108 2109 default: 2110 /* We implement the SO_SNDLOWAT etc to not be settable 2111 * (1003.1g 7). 2112 */ 2113 return -ENOPROTOOPT; 2114 } 2115 2116 if (len > lv) 2117 len = lv; 2118 if (copy_to_sockptr(optval, &v, len)) 2119 return -EFAULT; 2120 lenout: 2121 if (copy_to_sockptr(optlen, &len, sizeof(int))) 2122 return -EFAULT; 2123 return 0; 2124 } 2125 2126 /* 2127 * Initialize an sk_lock. 2128 * 2129 * (We also register the sk_lock with the lock validator.) 2130 */ 2131 static inline void sock_lock_init(struct sock *sk) 2132 { 2133 if (sk->sk_kern_sock) 2134 sock_lock_init_class_and_name( 2135 sk, 2136 af_family_kern_slock_key_strings[sk->sk_family], 2137 af_family_kern_slock_keys + sk->sk_family, 2138 af_family_kern_key_strings[sk->sk_family], 2139 af_family_kern_keys + sk->sk_family); 2140 else 2141 sock_lock_init_class_and_name( 2142 sk, 2143 af_family_slock_key_strings[sk->sk_family], 2144 af_family_slock_keys + sk->sk_family, 2145 af_family_key_strings[sk->sk_family], 2146 af_family_keys + sk->sk_family); 2147 } 2148 2149 /* 2150 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet, 2151 * even temporarily, because of RCU lookups. sk_node should also be left as is. 2152 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end 2153 */ 2154 static void sock_copy(struct sock *nsk, const struct sock *osk) 2155 { 2156 const struct proto *prot = READ_ONCE(osk->sk_prot); 2157 #ifdef CONFIG_SECURITY_NETWORK 2158 void *sptr = nsk->sk_security; 2159 #endif 2160 2161 /* If we move sk_tx_queue_mapping out of the private section, 2162 * we must check if sk_tx_queue_clear() is called after 2163 * sock_copy() in sk_clone_lock(). 
2164 */ 2165 BUILD_BUG_ON(offsetof(struct sock, sk_tx_queue_mapping) < 2166 offsetof(struct sock, sk_dontcopy_begin) || 2167 offsetof(struct sock, sk_tx_queue_mapping) >= 2168 offsetof(struct sock, sk_dontcopy_end)); 2169 2170 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin)); 2171 2172 unsafe_memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, 2173 prot->obj_size - offsetof(struct sock, sk_dontcopy_end), 2174 /* alloc is larger than struct, see sk_prot_alloc() */); 2175 2176 #ifdef CONFIG_SECURITY_NETWORK 2177 nsk->sk_security = sptr; 2178 security_sk_clone(osk, nsk); 2179 #endif 2180 } 2181 2182 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, 2183 int family) 2184 { 2185 struct sock *sk; 2186 struct kmem_cache *slab; 2187 2188 slab = prot->slab; 2189 if (slab != NULL) { 2190 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); 2191 if (!sk) 2192 return sk; 2193 if (want_init_on_alloc(priority)) 2194 sk_prot_clear_nulls(sk, prot->obj_size); 2195 } else 2196 sk = kmalloc(prot->obj_size, priority); 2197 2198 if (sk != NULL) { 2199 if (security_sk_alloc(sk, family, priority)) 2200 goto out_free; 2201 2202 if (!try_module_get(prot->owner)) 2203 goto out_free_sec; 2204 } 2205 2206 return sk; 2207 2208 out_free_sec: 2209 security_sk_free(sk); 2210 out_free: 2211 if (slab != NULL) 2212 kmem_cache_free(slab, sk); 2213 else 2214 kfree(sk); 2215 return NULL; 2216 } 2217 2218 static void sk_prot_free(struct proto *prot, struct sock *sk) 2219 { 2220 struct kmem_cache *slab; 2221 struct module *owner; 2222 2223 owner = prot->owner; 2224 slab = prot->slab; 2225 2226 cgroup_sk_free(&sk->sk_cgrp_data); 2227 mem_cgroup_sk_free(sk); 2228 security_sk_free(sk); 2229 if (slab != NULL) 2230 kmem_cache_free(slab, sk); 2231 else 2232 kfree(sk); 2233 module_put(owner); 2234 } 2235 2236 /** 2237 * sk_alloc - All socket objects are allocated here 2238 * @net: the applicable net namespace 2239 * @family: protocol family 2240 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 2241 * @prot: struct proto associated with this new sock instance 2242 * @kern: is this to be a kernel socket? 2243 */ 2244 struct sock *sk_alloc(struct net *net, int family, gfp_t priority, 2245 struct proto *prot, int kern) 2246 { 2247 struct sock *sk; 2248 2249 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); 2250 if (sk) { 2251 sk->sk_family = family; 2252 /* 2253 * See comment in struct sock definition to understand 2254 * why we need sk_prot_creator -acme 2255 */ 2256 sk->sk_prot = sk->sk_prot_creator = prot; 2257 sk->sk_kern_sock = kern; 2258 sock_lock_init(sk); 2259 sk->sk_net_refcnt = kern ? 0 : 1; 2260 if (likely(sk->sk_net_refcnt)) { 2261 get_net_track(net, &sk->ns_tracker, priority); 2262 sock_inuse_add(net, 1); 2263 } else { 2264 net_passive_inc(net); 2265 __netns_tracker_alloc(net, &sk->ns_tracker, 2266 false, priority); 2267 } 2268 2269 sock_net_set(sk, net); 2270 refcount_set(&sk->sk_wmem_alloc, 1); 2271 2272 mem_cgroup_sk_alloc(sk); 2273 cgroup_sk_alloc(&sk->sk_cgrp_data); 2274 sock_update_classid(&sk->sk_cgrp_data); 2275 sock_update_netprioidx(&sk->sk_cgrp_data); 2276 sk_tx_queue_clear(sk); 2277 } 2278 2279 return sk; 2280 } 2281 EXPORT_SYMBOL(sk_alloc); 2282 2283 /* Sockets having SOCK_RCU_FREE will call this function after one RCU 2284 * grace period. This is the case for UDP sockets and TCP listeners. 
2285 */ 2286 static void __sk_destruct(struct rcu_head *head) 2287 { 2288 struct sock *sk = container_of(head, struct sock, sk_rcu); 2289 struct net *net = sock_net(sk); 2290 struct sk_filter *filter; 2291 2292 if (sk->sk_destruct) 2293 sk->sk_destruct(sk); 2294 2295 filter = rcu_dereference_check(sk->sk_filter, 2296 refcount_read(&sk->sk_wmem_alloc) == 0); 2297 if (filter) { 2298 sk_filter_uncharge(sk, filter); 2299 RCU_INIT_POINTER(sk->sk_filter, NULL); 2300 } 2301 2302 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); 2303 2304 #ifdef CONFIG_BPF_SYSCALL 2305 bpf_sk_storage_free(sk); 2306 #endif 2307 2308 if (atomic_read(&sk->sk_omem_alloc)) 2309 pr_debug("%s: optmem leakage (%d bytes) detected\n", 2310 __func__, atomic_read(&sk->sk_omem_alloc)); 2311 2312 if (sk->sk_frag.page) { 2313 put_page(sk->sk_frag.page); 2314 sk->sk_frag.page = NULL; 2315 } 2316 2317 /* We do not need to acquire sk->sk_peer_lock, we are the last user. */ 2318 put_cred(sk->sk_peer_cred); 2319 put_pid(sk->sk_peer_pid); 2320 2321 if (likely(sk->sk_net_refcnt)) { 2322 put_net_track(net, &sk->ns_tracker); 2323 } else { 2324 __netns_tracker_free(net, &sk->ns_tracker, false); 2325 net_passive_dec(net); 2326 } 2327 sk_prot_free(sk->sk_prot_creator, sk); 2328 } 2329 2330 void sk_net_refcnt_upgrade(struct sock *sk) 2331 { 2332 struct net *net = sock_net(sk); 2333 2334 WARN_ON_ONCE(sk->sk_net_refcnt); 2335 __netns_tracker_free(net, &sk->ns_tracker, false); 2336 net_passive_dec(net); 2337 sk->sk_net_refcnt = 1; 2338 get_net_track(net, &sk->ns_tracker, GFP_KERNEL); 2339 sock_inuse_add(net, 1); 2340 } 2341 EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade); 2342 2343 void sk_destruct(struct sock *sk) 2344 { 2345 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); 2346 2347 if (rcu_access_pointer(sk->sk_reuseport_cb)) { 2348 reuseport_detach_sock(sk); 2349 use_call_rcu = true; 2350 } 2351 2352 if (use_call_rcu) 2353 call_rcu(&sk->sk_rcu, __sk_destruct); 2354 else 2355 __sk_destruct(&sk->sk_rcu); 2356 } 2357 2358 static void __sk_free(struct sock *sk) 2359 { 2360 if (likely(sk->sk_net_refcnt)) 2361 sock_inuse_add(sock_net(sk), -1); 2362 2363 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk))) 2364 sock_diag_broadcast_destroy(sk); 2365 else 2366 sk_destruct(sk); 2367 } 2368 2369 void sk_free(struct sock *sk) 2370 { 2371 /* 2372 * We subtract one from sk_wmem_alloc and can know if 2373 * some packets are still in some tx queue. 
2374 * If not null, sock_wfree() will call __sk_free(sk) later 2375 */ 2376 if (refcount_dec_and_test(&sk->sk_wmem_alloc)) 2377 __sk_free(sk); 2378 } 2379 EXPORT_SYMBOL(sk_free); 2380 2381 static void sk_init_common(struct sock *sk) 2382 { 2383 skb_queue_head_init(&sk->sk_receive_queue); 2384 skb_queue_head_init(&sk->sk_write_queue); 2385 skb_queue_head_init(&sk->sk_error_queue); 2386 2387 rwlock_init(&sk->sk_callback_lock); 2388 lockdep_set_class_and_name(&sk->sk_receive_queue.lock, 2389 af_rlock_keys + sk->sk_family, 2390 af_family_rlock_key_strings[sk->sk_family]); 2391 lockdep_set_class_and_name(&sk->sk_write_queue.lock, 2392 af_wlock_keys + sk->sk_family, 2393 af_family_wlock_key_strings[sk->sk_family]); 2394 lockdep_set_class_and_name(&sk->sk_error_queue.lock, 2395 af_elock_keys + sk->sk_family, 2396 af_family_elock_key_strings[sk->sk_family]); 2397 if (sk->sk_kern_sock) 2398 lockdep_set_class_and_name(&sk->sk_callback_lock, 2399 af_kern_callback_keys + sk->sk_family, 2400 af_family_kern_clock_key_strings[sk->sk_family]); 2401 else 2402 lockdep_set_class_and_name(&sk->sk_callback_lock, 2403 af_callback_keys + sk->sk_family, 2404 af_family_clock_key_strings[sk->sk_family]); 2405 } 2406 2407 /** 2408 * sk_clone_lock - clone a socket, and lock its clone 2409 * @sk: the socket to clone 2410 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 2411 * 2412 * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) 2413 */ 2414 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) 2415 { 2416 struct proto *prot = READ_ONCE(sk->sk_prot); 2417 struct sk_filter *filter; 2418 bool is_charged = true; 2419 struct sock *newsk; 2420 2421 newsk = sk_prot_alloc(prot, priority, sk->sk_family); 2422 if (!newsk) 2423 goto out; 2424 2425 sock_copy(newsk, sk); 2426 2427 newsk->sk_prot_creator = prot; 2428 2429 /* SANITY */ 2430 if (likely(newsk->sk_net_refcnt)) { 2431 get_net_track(sock_net(newsk), &newsk->ns_tracker, priority); 2432 sock_inuse_add(sock_net(newsk), 1); 2433 } else { 2434 /* Kernel sockets are not elevating the struct net refcount. 2435 * Instead, use a tracker to more easily detect if a layer 2436 * is not properly dismantling its kernel sockets at netns 2437 * destroy time. 
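 * net_passive_inc() only pins the struct net allocation itself; it does
 * not count as an active user of the namespace.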
2438 */ 2439 net_passive_inc(sock_net(newsk)); 2440 __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker, 2441 false, priority); 2442 } 2443 sk_node_init(&newsk->sk_node); 2444 sock_lock_init(newsk); 2445 bh_lock_sock(newsk); 2446 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 2447 newsk->sk_backlog.len = 0; 2448 2449 atomic_set(&newsk->sk_rmem_alloc, 0); 2450 2451 /* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */ 2452 refcount_set(&newsk->sk_wmem_alloc, 1); 2453 2454 atomic_set(&newsk->sk_omem_alloc, 0); 2455 sk_init_common(newsk); 2456 2457 newsk->sk_dst_cache = NULL; 2458 newsk->sk_dst_pending_confirm = 0; 2459 newsk->sk_wmem_queued = 0; 2460 newsk->sk_forward_alloc = 0; 2461 newsk->sk_reserved_mem = 0; 2462 atomic_set(&newsk->sk_drops, 0); 2463 newsk->sk_send_head = NULL; 2464 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 2465 atomic_set(&newsk->sk_zckey, 0); 2466 2467 sock_reset_flag(newsk, SOCK_DONE); 2468 2469 /* sk->sk_memcg will be populated at accept() time */ 2470 newsk->sk_memcg = NULL; 2471 2472 cgroup_sk_clone(&newsk->sk_cgrp_data); 2473 2474 rcu_read_lock(); 2475 filter = rcu_dereference(sk->sk_filter); 2476 if (filter != NULL) 2477 /* though it's an empty new sock, the charging may fail 2478 * if sysctl_optmem_max was changed between creation of 2479 * original socket and cloning 2480 */ 2481 is_charged = sk_filter_charge(newsk, filter); 2482 RCU_INIT_POINTER(newsk->sk_filter, filter); 2483 rcu_read_unlock(); 2484 2485 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 2486 /* We need to make sure that we don't uncharge the new 2487 * socket if we couldn't charge it in the first place 2488 * as otherwise we uncharge the parent's filter. 2489 */ 2490 if (!is_charged) 2491 RCU_INIT_POINTER(newsk->sk_filter, NULL); 2492 sk_free_unlock_clone(newsk); 2493 newsk = NULL; 2494 goto out; 2495 } 2496 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); 2497 2498 if (bpf_sk_storage_clone(sk, newsk)) { 2499 sk_free_unlock_clone(newsk); 2500 newsk = NULL; 2501 goto out; 2502 } 2503 2504 /* Clear sk_user_data if parent had the pointer tagged 2505 * as not suitable for copying when cloning. 
2506 */ 2507 if (sk_user_data_is_nocopy(newsk)) 2508 newsk->sk_user_data = NULL; 2509 2510 newsk->sk_err = 0; 2511 newsk->sk_err_soft = 0; 2512 newsk->sk_priority = 0; 2513 newsk->sk_incoming_cpu = raw_smp_processor_id(); 2514 2515 /* Before updating sk_refcnt, we must commit prior changes to memory 2516 * (Documentation/RCU/rculist_nulls.rst for details) 2517 */ 2518 smp_wmb(); 2519 refcount_set(&newsk->sk_refcnt, 2); 2520 2521 sk_set_socket(newsk, NULL); 2522 sk_tx_queue_clear(newsk); 2523 RCU_INIT_POINTER(newsk->sk_wq, NULL); 2524 2525 if (newsk->sk_prot->sockets_allocated) 2526 sk_sockets_allocated_inc(newsk); 2527 2528 if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP) 2529 net_enable_timestamp(); 2530 out: 2531 return newsk; 2532 } 2533 EXPORT_SYMBOL_GPL(sk_clone_lock); 2534 2535 void sk_free_unlock_clone(struct sock *sk) 2536 { 2537 /* It is still raw copy of parent, so invalidate 2538 * destructor and make plain sk_free() */ 2539 sk->sk_destruct = NULL; 2540 bh_unlock_sock(sk); 2541 sk_free(sk); 2542 } 2543 EXPORT_SYMBOL_GPL(sk_free_unlock_clone); 2544 2545 static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst) 2546 { 2547 bool is_ipv6 = false; 2548 u32 max_size; 2549 2550 #if IS_ENABLED(CONFIG_IPV6) 2551 is_ipv6 = (sk->sk_family == AF_INET6 && 2552 !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)); 2553 #endif 2554 /* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */ 2555 max_size = is_ipv6 ? READ_ONCE(dst->dev->gso_max_size) : 2556 READ_ONCE(dst->dev->gso_ipv4_max_size); 2557 if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk)) 2558 max_size = GSO_LEGACY_MAX_SIZE; 2559 2560 return max_size - (MAX_TCP_HEADER + 1); 2561 } 2562 2563 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) 2564 { 2565 u32 max_segs = 1; 2566 2567 sk->sk_route_caps = dst->dev->features; 2568 if (sk_is_tcp(sk)) { 2569 struct inet_connection_sock *icsk = inet_csk(sk); 2570 2571 sk->sk_route_caps |= NETIF_F_GSO; 2572 icsk->icsk_ack.dst_quick_ack = dst_metric(dst, RTAX_QUICKACK); 2573 } 2574 if (sk->sk_route_caps & NETIF_F_GSO) 2575 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; 2576 if (unlikely(sk->sk_gso_disabled)) 2577 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 2578 if (sk_can_gso(sk)) { 2579 if (dst->header_len && !xfrm_dst_offload_ok(dst)) { 2580 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 2581 } else { 2582 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 2583 sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst); 2584 /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */ 2585 max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1); 2586 } 2587 } 2588 sk->sk_gso_max_segs = max_segs; 2589 sk_dst_set(sk, dst); 2590 } 2591 EXPORT_SYMBOL_GPL(sk_setup_caps); 2592 2593 /* 2594 * Simple resource managers for sockets. 2595 */ 2596 2597 2598 /* 2599 * Write buffer destructor automatically called from kfree_skb. 
2600 */ 2601 void sock_wfree(struct sk_buff *skb) 2602 { 2603 struct sock *sk = skb->sk; 2604 unsigned int len = skb->truesize; 2605 bool free; 2606 2607 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { 2608 if (sock_flag(sk, SOCK_RCU_FREE) && 2609 sk->sk_write_space == sock_def_write_space) { 2610 rcu_read_lock(); 2611 free = refcount_sub_and_test(len, &sk->sk_wmem_alloc); 2612 sock_def_write_space_wfree(sk); 2613 rcu_read_unlock(); 2614 if (unlikely(free)) 2615 __sk_free(sk); 2616 return; 2617 } 2618 2619 /* 2620 * Keep a reference on sk_wmem_alloc, this will be released 2621 * after sk_write_space() call 2622 */ 2623 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc)); 2624 sk->sk_write_space(sk); 2625 len = 1; 2626 } 2627 /* 2628 * if sk_wmem_alloc reaches 0, we must finish what sk_free() 2629 * could not do because of in-flight packets 2630 */ 2631 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc)) 2632 __sk_free(sk); 2633 } 2634 EXPORT_SYMBOL(sock_wfree); 2635 2636 /* This variant of sock_wfree() is used by TCP, 2637 * since it sets SOCK_USE_WRITE_QUEUE. 2638 */ 2639 void __sock_wfree(struct sk_buff *skb) 2640 { 2641 struct sock *sk = skb->sk; 2642 2643 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)) 2644 __sk_free(sk); 2645 } 2646 2647 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) 2648 { 2649 skb_orphan(skb); 2650 #ifdef CONFIG_INET 2651 if (unlikely(!sk_fullsock(sk))) 2652 return skb_set_owner_edemux(skb, sk); 2653 #endif 2654 skb->sk = sk; 2655 skb->destructor = sock_wfree; 2656 skb_set_hash_from_sk(skb, sk); 2657 /* 2658 * We used to take a refcount on sk, but following operation 2659 * is enough to guarantee sk_free() won't free this sock until 2660 * all in-flight packets are completed 2661 */ 2662 refcount_add(skb->truesize, &sk->sk_wmem_alloc); 2663 } 2664 EXPORT_SYMBOL(skb_set_owner_w); 2665 2666 static bool can_skb_orphan_partial(const struct sk_buff *skb) 2667 { 2668 /* Drivers depend on in-order delivery for crypto offload, 2669 * partial orphan breaks out-of-order-OK logic. 2670 */ 2671 if (skb_is_decrypted(skb)) 2672 return false; 2673 2674 return (skb->destructor == sock_wfree || 2675 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree)); 2676 } 2677 2678 /* This helper is used by netem, as it can hold packets in its 2679 * delay queue. We want to allow the owner socket to send more 2680 * packets, as if they were already TX completed by a typical driver. 2681 * But we also want to keep skb->sk set because some packet schedulers 2682 * rely on it (sch_fq for example). 2683 */ 2684 void skb_orphan_partial(struct sk_buff *skb) 2685 { 2686 if (skb_is_tcp_pure_ack(skb)) 2687 return; 2688 2689 if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk)) 2690 return; 2691 2692 skb_orphan(skb); 2693 } 2694 EXPORT_SYMBOL(skb_orphan_partial); 2695 2696 /* 2697 * Read buffer destructor automatically called from kfree_skb. 2698 */ 2699 void sock_rfree(struct sk_buff *skb) 2700 { 2701 struct sock *sk = skb->sk; 2702 unsigned int len = skb->truesize; 2703 2704 atomic_sub(len, &sk->sk_rmem_alloc); 2705 sk_mem_uncharge(sk, len); 2706 } 2707 EXPORT_SYMBOL(sock_rfree); 2708 2709 /* 2710 * Buffer destructor for skbs that are not used directly in read or write 2711 * path, e.g. for error handler skbs. Automatically called from kfree_skb. 
2712 */ 2713 void sock_efree(struct sk_buff *skb) 2714 { 2715 sock_put(skb->sk); 2716 } 2717 EXPORT_SYMBOL(sock_efree); 2718 2719 /* Buffer destructor for prefetch/receive path where reference count may 2720 * not be held, e.g. for listen sockets. 2721 */ 2722 #ifdef CONFIG_INET 2723 void sock_pfree(struct sk_buff *skb) 2724 { 2725 struct sock *sk = skb->sk; 2726 2727 if (!sk_is_refcounted(sk)) 2728 return; 2729 2730 if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) { 2731 inet_reqsk(sk)->rsk_listener = NULL; 2732 reqsk_free(inet_reqsk(sk)); 2733 return; 2734 } 2735 2736 sock_gen_put(sk); 2737 } 2738 EXPORT_SYMBOL(sock_pfree); 2739 #endif /* CONFIG_INET */ 2740 2741 kuid_t sock_i_uid(struct sock *sk) 2742 { 2743 kuid_t uid; 2744 2745 read_lock_bh(&sk->sk_callback_lock); 2746 uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : GLOBAL_ROOT_UID; 2747 read_unlock_bh(&sk->sk_callback_lock); 2748 return uid; 2749 } 2750 EXPORT_SYMBOL(sock_i_uid); 2751 2752 unsigned long __sock_i_ino(struct sock *sk) 2753 { 2754 unsigned long ino; 2755 2756 read_lock(&sk->sk_callback_lock); 2757 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; 2758 read_unlock(&sk->sk_callback_lock); 2759 return ino; 2760 } 2761 EXPORT_SYMBOL(__sock_i_ino); 2762 2763 unsigned long sock_i_ino(struct sock *sk) 2764 { 2765 unsigned long ino; 2766 2767 local_bh_disable(); 2768 ino = __sock_i_ino(sk); 2769 local_bh_enable(); 2770 return ino; 2771 } 2772 EXPORT_SYMBOL(sock_i_ino); 2773 2774 /* 2775 * Allocate a skb from the socket's send buffer. 2776 */ 2777 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 2778 gfp_t priority) 2779 { 2780 if (force || 2781 refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) { 2782 struct sk_buff *skb = alloc_skb(size, priority); 2783 2784 if (skb) { 2785 skb_set_owner_w(skb, sk); 2786 return skb; 2787 } 2788 } 2789 return NULL; 2790 } 2791 EXPORT_SYMBOL(sock_wmalloc); 2792 2793 static void sock_ofree(struct sk_buff *skb) 2794 { 2795 struct sock *sk = skb->sk; 2796 2797 atomic_sub(skb->truesize, &sk->sk_omem_alloc); 2798 } 2799 2800 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, 2801 gfp_t priority) 2802 { 2803 struct sk_buff *skb; 2804 2805 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */ 2806 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) > 2807 READ_ONCE(sock_net(sk)->core.sysctl_optmem_max)) 2808 return NULL; 2809 2810 skb = alloc_skb(size, priority); 2811 if (!skb) 2812 return NULL; 2813 2814 atomic_add(skb->truesize, &sk->sk_omem_alloc); 2815 skb->sk = sk; 2816 skb->destructor = sock_ofree; 2817 return skb; 2818 } 2819 2820 /* 2821 * Allocate a memory block from the socket's option memory buffer. 2822 */ 2823 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 2824 { 2825 int optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max); 2826 2827 if ((unsigned int)size <= optmem_max && 2828 atomic_read(&sk->sk_omem_alloc) + size < optmem_max) { 2829 void *mem; 2830 /* First do the add, to avoid the race if kmalloc 2831 * might sleep. 2832 */ 2833 atomic_add(size, &sk->sk_omem_alloc); 2834 mem = kmalloc(size, priority); 2835 if (mem) 2836 return mem; 2837 atomic_sub(size, &sk->sk_omem_alloc); 2838 } 2839 return NULL; 2840 } 2841 EXPORT_SYMBOL(sock_kmalloc); 2842 2843 /* 2844 * Duplicate the input "src" memory block using the socket's 2845 * option memory buffer. 
2846 */ 2847 void *sock_kmemdup(struct sock *sk, const void *src, 2848 int size, gfp_t priority) 2849 { 2850 void *mem; 2851 2852 mem = sock_kmalloc(sk, size, priority); 2853 if (mem) 2854 memcpy(mem, src, size); 2855 return mem; 2856 } 2857 EXPORT_SYMBOL(sock_kmemdup); 2858 2859 /* Free an option memory block. Note, we actually want the inline 2860 * here as this allows gcc to detect the nullify and fold away the 2861 * condition entirely. 2862 */ 2863 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, 2864 const bool nullify) 2865 { 2866 if (WARN_ON_ONCE(!mem)) 2867 return; 2868 if (nullify) 2869 kfree_sensitive(mem); 2870 else 2871 kfree(mem); 2872 atomic_sub(size, &sk->sk_omem_alloc); 2873 } 2874 2875 void sock_kfree_s(struct sock *sk, void *mem, int size) 2876 { 2877 __sock_kfree_s(sk, mem, size, false); 2878 } 2879 EXPORT_SYMBOL(sock_kfree_s); 2880 2881 void sock_kzfree_s(struct sock *sk, void *mem, int size) 2882 { 2883 __sock_kfree_s(sk, mem, size, true); 2884 } 2885 EXPORT_SYMBOL(sock_kzfree_s); 2886 2887 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock. 2888 I think, these locks should be removed for datagram sockets. 2889 */ 2890 static long sock_wait_for_wmem(struct sock *sk, long timeo) 2891 { 2892 DEFINE_WAIT(wait); 2893 2894 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2895 for (;;) { 2896 if (!timeo) 2897 break; 2898 if (signal_pending(current)) 2899 break; 2900 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 2901 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2902 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) 2903 break; 2904 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) 2905 break; 2906 if (READ_ONCE(sk->sk_err)) 2907 break; 2908 timeo = schedule_timeout(timeo); 2909 } 2910 finish_wait(sk_sleep(sk), &wait); 2911 return timeo; 2912 } 2913 2914 2915 /* 2916 * Generic send/receive buffer handlers 2917 */ 2918 2919 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, 2920 unsigned long data_len, int noblock, 2921 int *errcode, int max_page_order) 2922 { 2923 struct sk_buff *skb; 2924 long timeo; 2925 int err; 2926 2927 timeo = sock_sndtimeo(sk, noblock); 2928 for (;;) { 2929 err = sock_error(sk); 2930 if (err != 0) 2931 goto failure; 2932 2933 err = -EPIPE; 2934 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) 2935 goto failure; 2936 2937 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf)) 2938 break; 2939 2940 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2941 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 2942 err = -EAGAIN; 2943 if (!timeo) 2944 goto failure; 2945 if (signal_pending(current)) 2946 goto interrupted; 2947 timeo = sock_wait_for_wmem(sk, timeo); 2948 } 2949 skb = alloc_skb_with_frags(header_len, data_len, max_page_order, 2950 errcode, sk->sk_allocation); 2951 if (skb) 2952 skb_set_owner_w(skb, sk); 2953 return skb; 2954 2955 interrupted: 2956 err = sock_intr_errno(timeo); 2957 failure: 2958 *errcode = err; 2959 return NULL; 2960 } 2961 EXPORT_SYMBOL(sock_alloc_send_pskb); 2962 2963 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, 2964 struct sockcm_cookie *sockc) 2965 { 2966 u32 tsflags; 2967 2968 BUILD_BUG_ON(SOF_TIMESTAMPING_LAST == (1 << 31)); 2969 2970 switch (cmsg->cmsg_type) { 2971 case SO_MARK: 2972 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && 2973 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 2974 return -EPERM; 2975 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 2976 return -EINVAL; 2977 sockc->mark = *(u32 *)CMSG_DATA(cmsg); 2978 break; 2979 case 
SO_TIMESTAMPING_OLD: 2980 case SO_TIMESTAMPING_NEW: 2981 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 2982 return -EINVAL; 2983 2984 tsflags = *(u32 *)CMSG_DATA(cmsg); 2985 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK) 2986 return -EINVAL; 2987 2988 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; 2989 sockc->tsflags |= tsflags; 2990 break; 2991 case SCM_TXTIME: 2992 if (!sock_flag(sk, SOCK_TXTIME)) 2993 return -EINVAL; 2994 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64))) 2995 return -EINVAL; 2996 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg)); 2997 break; 2998 case SCM_TS_OPT_ID: 2999 if (sk_is_tcp(sk)) 3000 return -EINVAL; 3001 tsflags = READ_ONCE(sk->sk_tsflags); 3002 if (!(tsflags & SOF_TIMESTAMPING_OPT_ID)) 3003 return -EINVAL; 3004 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3005 return -EINVAL; 3006 sockc->ts_opt_id = *(u32 *)CMSG_DATA(cmsg); 3007 sockc->tsflags |= SOCKCM_FLAG_TS_OPT_ID; 3008 break; 3009 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */ 3010 case SCM_RIGHTS: 3011 case SCM_CREDENTIALS: 3012 break; 3013 case SO_PRIORITY: 3014 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3015 return -EINVAL; 3016 if (!sk_set_prio_allowed(sk, *(u32 *)CMSG_DATA(cmsg))) 3017 return -EPERM; 3018 sockc->priority = *(u32 *)CMSG_DATA(cmsg); 3019 break; 3020 default: 3021 return -EINVAL; 3022 } 3023 return 0; 3024 } 3025 EXPORT_SYMBOL(__sock_cmsg_send); 3026 3027 int sock_cmsg_send(struct sock *sk, struct msghdr *msg, 3028 struct sockcm_cookie *sockc) 3029 { 3030 struct cmsghdr *cmsg; 3031 int ret; 3032 3033 for_each_cmsghdr(cmsg, msg) { 3034 if (!CMSG_OK(msg, cmsg)) 3035 return -EINVAL; 3036 if (cmsg->cmsg_level != SOL_SOCKET) 3037 continue; 3038 ret = __sock_cmsg_send(sk, cmsg, sockc); 3039 if (ret) 3040 return ret; 3041 } 3042 return 0; 3043 } 3044 EXPORT_SYMBOL(sock_cmsg_send); 3045 3046 static void sk_enter_memory_pressure(struct sock *sk) 3047 { 3048 if (!sk->sk_prot->enter_memory_pressure) 3049 return; 3050 3051 sk->sk_prot->enter_memory_pressure(sk); 3052 } 3053 3054 static void sk_leave_memory_pressure(struct sock *sk) 3055 { 3056 if (sk->sk_prot->leave_memory_pressure) { 3057 INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure, 3058 tcp_leave_memory_pressure, sk); 3059 } else { 3060 unsigned long *memory_pressure = sk->sk_prot->memory_pressure; 3061 3062 if (memory_pressure && READ_ONCE(*memory_pressure)) 3063 WRITE_ONCE(*memory_pressure, 0); 3064 } 3065 } 3066 3067 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); 3068 3069 /** 3070 * skb_page_frag_refill - check that a page_frag contains enough room 3071 * @sz: minimum size of the fragment we want to get 3072 * @pfrag: pointer to page_frag 3073 * @gfp: priority for memory allocation 3074 * 3075 * Note: While this allocator tries to use high order pages, there is 3076 * no guarantee that allocations succeed. Therefore, @sz MUST be 3077 * less or equal than PAGE_SIZE. 
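 *
 * Return: true if @pfrag now holds at least @sz bytes of free space at
 * @pfrag->offset, false if even an order-0 page could not be allocated.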
3078 */ 3079 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) 3080 { 3081 if (pfrag->page) { 3082 if (page_ref_count(pfrag->page) == 1) { 3083 pfrag->offset = 0; 3084 return true; 3085 } 3086 if (pfrag->offset + sz <= pfrag->size) 3087 return true; 3088 put_page(pfrag->page); 3089 } 3090 3091 pfrag->offset = 0; 3092 if (SKB_FRAG_PAGE_ORDER && 3093 !static_branch_unlikely(&net_high_order_alloc_disable_key)) { 3094 /* Avoid direct reclaim but allow kswapd to wake */ 3095 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) | 3096 __GFP_COMP | __GFP_NOWARN | 3097 __GFP_NORETRY, 3098 SKB_FRAG_PAGE_ORDER); 3099 if (likely(pfrag->page)) { 3100 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER; 3101 return true; 3102 } 3103 } 3104 pfrag->page = alloc_page(gfp); 3105 if (likely(pfrag->page)) { 3106 pfrag->size = PAGE_SIZE; 3107 return true; 3108 } 3109 return false; 3110 } 3111 EXPORT_SYMBOL(skb_page_frag_refill); 3112 3113 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) 3114 { 3115 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) 3116 return true; 3117 3118 sk_enter_memory_pressure(sk); 3119 sk_stream_moderate_sndbuf(sk); 3120 return false; 3121 } 3122 EXPORT_SYMBOL(sk_page_frag_refill); 3123 3124 void __lock_sock(struct sock *sk) 3125 __releases(&sk->sk_lock.slock) 3126 __acquires(&sk->sk_lock.slock) 3127 { 3128 DEFINE_WAIT(wait); 3129 3130 for (;;) { 3131 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, 3132 TASK_UNINTERRUPTIBLE); 3133 spin_unlock_bh(&sk->sk_lock.slock); 3134 schedule(); 3135 spin_lock_bh(&sk->sk_lock.slock); 3136 if (!sock_owned_by_user(sk)) 3137 break; 3138 } 3139 finish_wait(&sk->sk_lock.wq, &wait); 3140 } 3141 3142 void __release_sock(struct sock *sk) 3143 __releases(&sk->sk_lock.slock) 3144 __acquires(&sk->sk_lock.slock) 3145 { 3146 struct sk_buff *skb, *next; 3147 3148 while ((skb = sk->sk_backlog.head) != NULL) { 3149 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; 3150 3151 spin_unlock_bh(&sk->sk_lock.slock); 3152 3153 do { 3154 next = skb->next; 3155 prefetch(next); 3156 DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb)); 3157 skb_mark_not_on_list(skb); 3158 sk_backlog_rcv(sk, skb); 3159 3160 cond_resched(); 3161 3162 skb = next; 3163 } while (skb != NULL); 3164 3165 spin_lock_bh(&sk->sk_lock.slock); 3166 } 3167 3168 /* 3169 * Doing the zeroing here guarantee we can not loop forever 3170 * while a wild producer attempts to flood us. 3171 */ 3172 sk->sk_backlog.len = 0; 3173 } 3174 3175 void __sk_flush_backlog(struct sock *sk) 3176 { 3177 spin_lock_bh(&sk->sk_lock.slock); 3178 __release_sock(sk); 3179 3180 if (sk->sk_prot->release_cb) 3181 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb, 3182 tcp_release_cb, sk); 3183 3184 spin_unlock_bh(&sk->sk_lock.slock); 3185 } 3186 EXPORT_SYMBOL_GPL(__sk_flush_backlog); 3187 3188 /** 3189 * sk_wait_data - wait for data to arrive at sk_receive_queue 3190 * @sk: sock to wait on 3191 * @timeo: for how long 3192 * @skb: last skb seen on sk_receive_queue 3193 * 3194 * Now socket state including sk->sk_err is changed only under lock, 3195 * hence we may omit checks after joining wait queue. 3196 * We check receive queue before schedule() only as optimization; 3197 * it is very likely that release_sock() added new data. 
3198 */ 3199 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb) 3200 { 3201 DEFINE_WAIT_FUNC(wait, woken_wake_function); 3202 int rc; 3203 3204 add_wait_queue(sk_sleep(sk), &wait); 3205 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); 3206 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait); 3207 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); 3208 remove_wait_queue(sk_sleep(sk), &wait); 3209 return rc; 3210 } 3211 EXPORT_SYMBOL(sk_wait_data); 3212 3213 /** 3214 * __sk_mem_raise_allocated - increase memory_allocated 3215 * @sk: socket 3216 * @size: memory size to allocate 3217 * @amt: pages to allocate 3218 * @kind: allocation type 3219 * 3220 * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc. 3221 * 3222 * Unlike the globally shared limits among the sockets under same protocol, 3223 * consuming the budget of a memcg won't have direct effect on other ones. 3224 * So be optimistic about memcg's tolerance, and leave the callers to decide 3225 * whether or not to raise allocated through sk_under_memory_pressure() or 3226 * its variants. 3227 */ 3228 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) 3229 { 3230 struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL; 3231 struct proto *prot = sk->sk_prot; 3232 bool charged = false; 3233 long allocated; 3234 3235 sk_memory_allocated_add(sk, amt); 3236 allocated = sk_memory_allocated(sk); 3237 3238 if (memcg) { 3239 if (!mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge())) 3240 goto suppress_allocation; 3241 charged = true; 3242 } 3243 3244 /* Under limit. */ 3245 if (allocated <= sk_prot_mem_limits(sk, 0)) { 3246 sk_leave_memory_pressure(sk); 3247 return 1; 3248 } 3249 3250 /* Under pressure. */ 3251 if (allocated > sk_prot_mem_limits(sk, 1)) 3252 sk_enter_memory_pressure(sk); 3253 3254 /* Over hard limit. */ 3255 if (allocated > sk_prot_mem_limits(sk, 2)) 3256 goto suppress_allocation; 3257 3258 /* Guarantee minimum buffer size under pressure (either global 3259 * or memcg) to make sure features described in RFC 7323 (TCP 3260 * Extensions for High Performance) work properly. 3261 * 3262 * This rule does NOT stand when exceeds global or memcg's hard 3263 * limit, or else a DoS attack can be taken place by spawning 3264 * lots of sockets whose usage are under minimum buffer size. 3265 */ 3266 if (kind == SK_MEM_RECV) { 3267 if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot)) 3268 return 1; 3269 3270 } else { /* SK_MEM_SEND */ 3271 int wmem0 = sk_get_wmem0(sk, prot); 3272 3273 if (sk->sk_type == SOCK_STREAM) { 3274 if (sk->sk_wmem_queued < wmem0) 3275 return 1; 3276 } else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) { 3277 return 1; 3278 } 3279 } 3280 3281 if (sk_has_memory_pressure(sk)) { 3282 u64 alloc; 3283 3284 /* The following 'average' heuristic is within the 3285 * scope of global accounting, so it only makes 3286 * sense for global memory pressure. 3287 */ 3288 if (!sk_under_global_memory_pressure(sk)) 3289 return 1; 3290 3291 /* Try to be fair among all the sockets under global 3292 * pressure by allowing the ones that below average 3293 * usage to raise. 
3294 */ 3295 alloc = sk_sockets_allocated_read_positive(sk); 3296 if (sk_prot_mem_limits(sk, 2) > alloc * 3297 sk_mem_pages(sk->sk_wmem_queued + 3298 atomic_read(&sk->sk_rmem_alloc) + 3299 sk->sk_forward_alloc)) 3300 return 1; 3301 } 3302 3303 suppress_allocation: 3304 3305 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { 3306 sk_stream_moderate_sndbuf(sk); 3307 3308 /* Fail only if socket is _under_ its sndbuf. 3309 * In this case we cannot block, so that we have to fail. 3310 */ 3311 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) { 3312 /* Force charge with __GFP_NOFAIL */ 3313 if (memcg && !charged) { 3314 mem_cgroup_charge_skmem(memcg, amt, 3315 gfp_memcg_charge() | __GFP_NOFAIL); 3316 } 3317 return 1; 3318 } 3319 } 3320 3321 if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged)) 3322 trace_sock_exceed_buf_limit(sk, prot, allocated, kind); 3323 3324 sk_memory_allocated_sub(sk, amt); 3325 3326 if (charged) 3327 mem_cgroup_uncharge_skmem(memcg, amt); 3328 3329 return 0; 3330 } 3331 3332 /** 3333 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated 3334 * @sk: socket 3335 * @size: memory size to allocate 3336 * @kind: allocation type 3337 * 3338 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means 3339 * rmem allocation. This function assumes that protocols which have 3340 * memory_pressure use sk_wmem_queued as write buffer accounting. 3341 */ 3342 int __sk_mem_schedule(struct sock *sk, int size, int kind) 3343 { 3344 int ret, amt = sk_mem_pages(size); 3345 3346 sk_forward_alloc_add(sk, amt << PAGE_SHIFT); 3347 ret = __sk_mem_raise_allocated(sk, size, amt, kind); 3348 if (!ret) 3349 sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT)); 3350 return ret; 3351 } 3352 EXPORT_SYMBOL(__sk_mem_schedule); 3353 3354 /** 3355 * __sk_mem_reduce_allocated - reclaim memory_allocated 3356 * @sk: socket 3357 * @amount: number of quanta 3358 * 3359 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc 3360 */ 3361 void __sk_mem_reduce_allocated(struct sock *sk, int amount) 3362 { 3363 sk_memory_allocated_sub(sk, amount); 3364 3365 if (mem_cgroup_sockets_enabled && sk->sk_memcg) 3366 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); 3367 3368 if (sk_under_global_memory_pressure(sk) && 3369 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) 3370 sk_leave_memory_pressure(sk); 3371 } 3372 3373 /** 3374 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated 3375 * @sk: socket 3376 * @amount: number of bytes (rounded down to a PAGE_SIZE multiple) 3377 */ 3378 void __sk_mem_reclaim(struct sock *sk, int amount) 3379 { 3380 amount >>= PAGE_SHIFT; 3381 sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT)); 3382 __sk_mem_reduce_allocated(sk, amount); 3383 } 3384 EXPORT_SYMBOL(__sk_mem_reclaim); 3385 3386 int sk_set_peek_off(struct sock *sk, int val) 3387 { 3388 WRITE_ONCE(sk->sk_peek_off, val); 3389 return 0; 3390 } 3391 EXPORT_SYMBOL_GPL(sk_set_peek_off); 3392 3393 /* 3394 * Set of default routines for initialising struct proto_ops when 3395 * the protocol does not support a particular function. In certain 3396 * cases where it makes no sense for a protocol to have a "do nothing" 3397 * function, some default processing is provided. 
3398 */ 3399 3400 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) 3401 { 3402 return -EOPNOTSUPP; 3403 } 3404 EXPORT_SYMBOL(sock_no_bind); 3405 3406 int sock_no_connect(struct socket *sock, struct sockaddr *saddr, 3407 int len, int flags) 3408 { 3409 return -EOPNOTSUPP; 3410 } 3411 EXPORT_SYMBOL(sock_no_connect); 3412 3413 int sock_no_socketpair(struct socket *sock1, struct socket *sock2) 3414 { 3415 return -EOPNOTSUPP; 3416 } 3417 EXPORT_SYMBOL(sock_no_socketpair); 3418 3419 int sock_no_accept(struct socket *sock, struct socket *newsock, 3420 struct proto_accept_arg *arg) 3421 { 3422 return -EOPNOTSUPP; 3423 } 3424 EXPORT_SYMBOL(sock_no_accept); 3425 3426 int sock_no_getname(struct socket *sock, struct sockaddr *saddr, 3427 int peer) 3428 { 3429 return -EOPNOTSUPP; 3430 } 3431 EXPORT_SYMBOL(sock_no_getname); 3432 3433 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3434 { 3435 return -EOPNOTSUPP; 3436 } 3437 EXPORT_SYMBOL(sock_no_ioctl); 3438 3439 int sock_no_listen(struct socket *sock, int backlog) 3440 { 3441 return -EOPNOTSUPP; 3442 } 3443 EXPORT_SYMBOL(sock_no_listen); 3444 3445 int sock_no_shutdown(struct socket *sock, int how) 3446 { 3447 return -EOPNOTSUPP; 3448 } 3449 EXPORT_SYMBOL(sock_no_shutdown); 3450 3451 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len) 3452 { 3453 return -EOPNOTSUPP; 3454 } 3455 EXPORT_SYMBOL(sock_no_sendmsg); 3456 3457 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len) 3458 { 3459 return -EOPNOTSUPP; 3460 } 3461 EXPORT_SYMBOL(sock_no_sendmsg_locked); 3462 3463 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len, 3464 int flags) 3465 { 3466 return -EOPNOTSUPP; 3467 } 3468 EXPORT_SYMBOL(sock_no_recvmsg); 3469 3470 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) 3471 { 3472 /* Mirror missing mmap method error code */ 3473 return -ENODEV; 3474 } 3475 EXPORT_SYMBOL(sock_no_mmap); 3476 3477 /* 3478 * When a file is received (via SCM_RIGHTS, etc), we must bump the 3479 * various sock-based usage counts. 
3480 */ 3481 void __receive_sock(struct file *file) 3482 { 3483 struct socket *sock; 3484 3485 sock = sock_from_file(file); 3486 if (sock) { 3487 sock_update_netprioidx(&sock->sk->sk_cgrp_data); 3488 sock_update_classid(&sock->sk->sk_cgrp_data); 3489 } 3490 } 3491 3492 /* 3493 * Default Socket Callbacks 3494 */ 3495 3496 static void sock_def_wakeup(struct sock *sk) 3497 { 3498 struct socket_wq *wq; 3499 3500 rcu_read_lock(); 3501 wq = rcu_dereference(sk->sk_wq); 3502 if (skwq_has_sleeper(wq)) 3503 wake_up_interruptible_all(&wq->wait); 3504 rcu_read_unlock(); 3505 } 3506 3507 static void sock_def_error_report(struct sock *sk) 3508 { 3509 struct socket_wq *wq; 3510 3511 rcu_read_lock(); 3512 wq = rcu_dereference(sk->sk_wq); 3513 if (skwq_has_sleeper(wq)) 3514 wake_up_interruptible_poll(&wq->wait, EPOLLERR); 3515 sk_wake_async_rcu(sk, SOCK_WAKE_IO, POLL_ERR); 3516 rcu_read_unlock(); 3517 } 3518 3519 void sock_def_readable(struct sock *sk) 3520 { 3521 struct socket_wq *wq; 3522 3523 trace_sk_data_ready(sk); 3524 3525 rcu_read_lock(); 3526 wq = rcu_dereference(sk->sk_wq); 3527 if (skwq_has_sleeper(wq)) 3528 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI | 3529 EPOLLRDNORM | EPOLLRDBAND); 3530 sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN); 3531 rcu_read_unlock(); 3532 } 3533 3534 static void sock_def_write_space(struct sock *sk) 3535 { 3536 struct socket_wq *wq; 3537 3538 rcu_read_lock(); 3539 3540 /* Do not wake up a writer until he can make "significant" 3541 * progress. --DaveM 3542 */ 3543 if (sock_writeable(sk)) { 3544 wq = rcu_dereference(sk->sk_wq); 3545 if (skwq_has_sleeper(wq)) 3546 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 3547 EPOLLWRNORM | EPOLLWRBAND); 3548 3549 /* Should agree with poll, otherwise some programs break */ 3550 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); 3551 } 3552 3553 rcu_read_unlock(); 3554 } 3555 3556 /* An optimised version of sock_def_write_space(), should only be called 3557 * for SOCK_RCU_FREE sockets under RCU read section and after putting 3558 * ->sk_wmem_alloc. 3559 */ 3560 static void sock_def_write_space_wfree(struct sock *sk) 3561 { 3562 /* Do not wake up a writer until he can make "significant" 3563 * progress. 
--DaveM 3564 */ 3565 if (sock_writeable(sk)) { 3566 struct socket_wq *wq = rcu_dereference(sk->sk_wq); 3567 3568 /* rely on refcount_sub from sock_wfree() */ 3569 smp_mb__after_atomic(); 3570 if (wq && waitqueue_active(&wq->wait)) 3571 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 3572 EPOLLWRNORM | EPOLLWRBAND); 3573 3574 /* Should agree with poll, otherwise some programs break */ 3575 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); 3576 } 3577 } 3578 3579 static void sock_def_destruct(struct sock *sk) 3580 { 3581 } 3582 3583 void sk_send_sigurg(struct sock *sk) 3584 { 3585 if (sk->sk_socket && sk->sk_socket->file) 3586 if (send_sigurg(sk->sk_socket->file)) 3587 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); 3588 } 3589 EXPORT_SYMBOL(sk_send_sigurg); 3590 3591 void sk_reset_timer(struct sock *sk, struct timer_list* timer, 3592 unsigned long expires) 3593 { 3594 if (!mod_timer(timer, expires)) 3595 sock_hold(sk); 3596 } 3597 EXPORT_SYMBOL(sk_reset_timer); 3598 3599 void sk_stop_timer(struct sock *sk, struct timer_list* timer) 3600 { 3601 if (del_timer(timer)) 3602 __sock_put(sk); 3603 } 3604 EXPORT_SYMBOL(sk_stop_timer); 3605 3606 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer) 3607 { 3608 if (del_timer_sync(timer)) 3609 __sock_put(sk); 3610 } 3611 EXPORT_SYMBOL(sk_stop_timer_sync); 3612 3613 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid) 3614 { 3615 sk_init_common(sk); 3616 sk->sk_send_head = NULL; 3617 3618 timer_setup(&sk->sk_timer, NULL, 0); 3619 3620 sk->sk_allocation = GFP_KERNEL; 3621 sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default); 3622 sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); 3623 sk->sk_state = TCP_CLOSE; 3624 sk->sk_use_task_frag = true; 3625 sk_set_socket(sk, sock); 3626 3627 sock_set_flag(sk, SOCK_ZAPPED); 3628 3629 if (sock) { 3630 sk->sk_type = sock->type; 3631 RCU_INIT_POINTER(sk->sk_wq, &sock->wq); 3632 sock->sk = sk; 3633 } else { 3634 RCU_INIT_POINTER(sk->sk_wq, NULL); 3635 } 3636 sk->sk_uid = uid; 3637 3638 sk->sk_state_change = sock_def_wakeup; 3639 sk->sk_data_ready = sock_def_readable; 3640 sk->sk_write_space = sock_def_write_space; 3641 sk->sk_error_report = sock_def_error_report; 3642 sk->sk_destruct = sock_def_destruct; 3643 3644 sk->sk_frag.page = NULL; 3645 sk->sk_frag.offset = 0; 3646 sk->sk_peek_off = -1; 3647 3648 sk->sk_peer_pid = NULL; 3649 sk->sk_peer_cred = NULL; 3650 spin_lock_init(&sk->sk_peer_lock); 3651 3652 sk->sk_write_pending = 0; 3653 sk->sk_rcvlowat = 1; 3654 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 3655 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 3656 3657 sk->sk_stamp = SK_DEFAULT_STAMP; 3658 #if BITS_PER_LONG==32 3659 seqlock_init(&sk->sk_stamp_seq); 3660 #endif 3661 atomic_set(&sk->sk_zckey, 0); 3662 3663 #ifdef CONFIG_NET_RX_BUSY_POLL 3664 sk->sk_napi_id = 0; 3665 sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read); 3666 #endif 3667 3668 sk->sk_max_pacing_rate = ~0UL; 3669 sk->sk_pacing_rate = ~0UL; 3670 WRITE_ONCE(sk->sk_pacing_shift, 10); 3671 sk->sk_incoming_cpu = -1; 3672 3673 sk_rx_queue_clear(sk); 3674 /* 3675 * Before updating sk_refcnt, we must commit prior changes to memory 3676 * (Documentation/RCU/rculist_nulls.rst for details) 3677 */ 3678 smp_wmb(); 3679 refcount_set(&sk->sk_refcnt, 1); 3680 atomic_set(&sk->sk_drops, 0); 3681 } 3682 EXPORT_SYMBOL(sock_init_data_uid); 3683 3684 void sock_init_data(struct socket *sock, struct sock *sk) 3685 { 3686 kuid_t uid = sock ? 
3687 SOCK_INODE(sock)->i_uid : 3688 make_kuid(sock_net(sk)->user_ns, 0); 3689 3690 sock_init_data_uid(sock, sk, uid); 3691 } 3692 EXPORT_SYMBOL(sock_init_data); 3693 3694 void lock_sock_nested(struct sock *sk, int subclass) 3695 { 3696 /* The sk_lock has mutex_lock() semantics here. */ 3697 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); 3698 3699 might_sleep(); 3700 spin_lock_bh(&sk->sk_lock.slock); 3701 if (sock_owned_by_user_nocheck(sk)) 3702 __lock_sock(sk); 3703 sk->sk_lock.owned = 1; 3704 spin_unlock_bh(&sk->sk_lock.slock); 3705 } 3706 EXPORT_SYMBOL(lock_sock_nested); 3707 3708 void release_sock(struct sock *sk) 3709 { 3710 spin_lock_bh(&sk->sk_lock.slock); 3711 if (sk->sk_backlog.tail) 3712 __release_sock(sk); 3713 3714 if (sk->sk_prot->release_cb) 3715 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb, 3716 tcp_release_cb, sk); 3717 3718 sock_release_ownership(sk); 3719 if (waitqueue_active(&sk->sk_lock.wq)) 3720 wake_up(&sk->sk_lock.wq); 3721 spin_unlock_bh(&sk->sk_lock.slock); 3722 } 3723 EXPORT_SYMBOL(release_sock); 3724 3725 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) 3726 { 3727 might_sleep(); 3728 spin_lock_bh(&sk->sk_lock.slock); 3729 3730 if (!sock_owned_by_user_nocheck(sk)) { 3731 /* 3732 * Fast path return with bottom halves disabled and 3733 * sock::sk_lock.slock held. 3734 * 3735 * The 'mutex' is not contended and holding 3736 * sock::sk_lock.slock prevents all other lockers to 3737 * proceed so the corresponding unlock_sock_fast() can 3738 * avoid the slow path of release_sock() completely and 3739 * just release slock. 3740 * 3741 * From a semantical POV this is equivalent to 'acquiring' 3742 * the 'mutex', hence the corresponding lockdep 3743 * mutex_release() has to happen in the fast path of 3744 * unlock_sock_fast(). 
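 * Callers must hand the return value to unlock_sock_fast(), which either
 * drops just the spinlock (fast path, false) or does a full
 * release_sock() style unlock (slow path, true).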
3745 */ 3746 return false; 3747 } 3748 3749 __lock_sock(sk); 3750 sk->sk_lock.owned = 1; 3751 __acquire(&sk->sk_lock.slock); 3752 spin_unlock_bh(&sk->sk_lock.slock); 3753 return true; 3754 } 3755 EXPORT_SYMBOL(__lock_sock_fast); 3756 3757 int sock_gettstamp(struct socket *sock, void __user *userstamp, 3758 bool timeval, bool time32) 3759 { 3760 struct sock *sk = sock->sk; 3761 struct timespec64 ts; 3762 3763 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 3764 ts = ktime_to_timespec64(sock_read_timestamp(sk)); 3765 if (ts.tv_sec == -1) 3766 return -ENOENT; 3767 if (ts.tv_sec == 0) { 3768 ktime_t kt = ktime_get_real(); 3769 sock_write_timestamp(sk, kt); 3770 ts = ktime_to_timespec64(kt); 3771 } 3772 3773 if (timeval) 3774 ts.tv_nsec /= 1000; 3775 3776 #ifdef CONFIG_COMPAT_32BIT_TIME 3777 if (time32) 3778 return put_old_timespec32(&ts, userstamp); 3779 #endif 3780 #ifdef CONFIG_SPARC64 3781 /* beware of padding in sparc64 timeval */ 3782 if (timeval && !in_compat_syscall()) { 3783 struct __kernel_old_timeval __user tv = { 3784 .tv_sec = ts.tv_sec, 3785 .tv_usec = ts.tv_nsec, 3786 }; 3787 if (copy_to_user(userstamp, &tv, sizeof(tv))) 3788 return -EFAULT; 3789 return 0; 3790 } 3791 #endif 3792 return put_timespec64(&ts, userstamp); 3793 } 3794 EXPORT_SYMBOL(sock_gettstamp); 3795 3796 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag) 3797 { 3798 if (!sock_flag(sk, flag)) { 3799 unsigned long previous_flags = sk->sk_flags; 3800 3801 sock_set_flag(sk, flag); 3802 /* 3803 * we just set one of the two flags which require net 3804 * time stamping, but time stamping might have been on 3805 * already because of the other one 3806 */ 3807 if (sock_needs_netstamp(sk) && 3808 !(previous_flags & SK_FLAGS_TIMESTAMP)) 3809 net_enable_timestamp(); 3810 } 3811 } 3812 3813 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, 3814 int level, int type) 3815 { 3816 struct sock_exterr_skb *serr; 3817 struct sk_buff *skb; 3818 int copied, err; 3819 3820 err = -EAGAIN; 3821 skb = sock_dequeue_err_skb(sk); 3822 if (skb == NULL) 3823 goto out; 3824 3825 copied = skb->len; 3826 if (copied > len) { 3827 msg->msg_flags |= MSG_TRUNC; 3828 copied = len; 3829 } 3830 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3831 if (err) 3832 goto out_free_skb; 3833 3834 sock_recv_timestamp(msg, sk, skb); 3835 3836 serr = SKB_EXT_ERR(skb); 3837 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee); 3838 3839 msg->msg_flags |= MSG_ERRQUEUE; 3840 err = copied; 3841 3842 out_free_skb: 3843 kfree_skb(skb); 3844 out: 3845 return err; 3846 } 3847 EXPORT_SYMBOL(sock_recv_errqueue); 3848 3849 /* 3850 * Get a socket option on an socket. 3851 * 3852 * FIX: POSIX 1003.1g is very ambiguous here. It states that 3853 * asynchronous errors should be reported by getsockopt. We assume 3854 * this means if you specify SO_ERROR (otherwise what is the point of it). 3855 */ 3856 int sock_common_getsockopt(struct socket *sock, int level, int optname, 3857 char __user *optval, int __user *optlen) 3858 { 3859 struct sock *sk = sock->sk; 3860 3861 /* IPV6_ADDRFORM can change sk->sk_prot under us. 
*/ 3862 return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen); 3863 } 3864 EXPORT_SYMBOL(sock_common_getsockopt); 3865 3866 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, 3867 int flags) 3868 { 3869 struct sock *sk = sock->sk; 3870 int addr_len = 0; 3871 int err; 3872 3873 err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len); 3874 if (err >= 0) 3875 msg->msg_namelen = addr_len; 3876 return err; 3877 } 3878 EXPORT_SYMBOL(sock_common_recvmsg); 3879 3880 /* 3881 * Set socket options on an inet socket. 3882 */ 3883 int sock_common_setsockopt(struct socket *sock, int level, int optname, 3884 sockptr_t optval, unsigned int optlen) 3885 { 3886 struct sock *sk = sock->sk; 3887 3888 /* IPV6_ADDRFORM can change sk->sk_prot under us. */ 3889 return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen); 3890 } 3891 EXPORT_SYMBOL(sock_common_setsockopt); 3892 3893 void sk_common_release(struct sock *sk) 3894 { 3895 if (sk->sk_prot->destroy) 3896 sk->sk_prot->destroy(sk); 3897 3898 /* 3899 * Observation: when sk_common_release is called, processes have 3900 * no access to socket. But net still has. 3901 * Step one, detach it from networking: 3902 * 3903 * A. Remove from hash tables. 3904 */ 3905 3906 sk->sk_prot->unhash(sk); 3907 3908 /* 3909 * In this point socket cannot receive new packets, but it is possible 3910 * that some packets are in flight because some CPU runs receiver and 3911 * did hash table lookup before we unhashed socket. They will achieve 3912 * receive queue and will be purged by socket destructor. 3913 * 3914 * Also we still have packets pending on receive queue and probably, 3915 * our own packets waiting in device queues. sock_destroy will drain 3916 * receive queue, but transmitted packets will delay socket destruction 3917 * until the last reference will be released. 3918 */ 3919 3920 sock_orphan(sk); 3921 3922 xfrm_sk_free_policy(sk); 3923 3924 sock_put(sk); 3925 } 3926 EXPORT_SYMBOL(sk_common_release); 3927 3928 void sk_get_meminfo(const struct sock *sk, u32 *mem) 3929 { 3930 memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS); 3931 3932 mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk); 3933 mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf); 3934 mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk); 3935 mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf); 3936 mem[SK_MEMINFO_FWD_ALLOC] = READ_ONCE(sk->sk_forward_alloc); 3937 mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued); 3938 mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc); 3939 mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len); 3940 mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops); 3941 } 3942 3943 #ifdef CONFIG_PROC_FS 3944 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR); 3945 3946 int sock_prot_inuse_get(struct net *net, struct proto *prot) 3947 { 3948 int cpu, idx = prot->inuse_idx; 3949 int res = 0; 3950 3951 for_each_possible_cpu(cpu) 3952 res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx]; 3953 3954 return res >= 0 ? 
res : 0; 3955 } 3956 EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 3957 3958 int sock_inuse_get(struct net *net) 3959 { 3960 int cpu, res = 0; 3961 3962 for_each_possible_cpu(cpu) 3963 res += per_cpu_ptr(net->core.prot_inuse, cpu)->all; 3964 3965 return res; 3966 } 3967 3968 EXPORT_SYMBOL_GPL(sock_inuse_get); 3969 3970 static int __net_init sock_inuse_init_net(struct net *net) 3971 { 3972 net->core.prot_inuse = alloc_percpu(struct prot_inuse); 3973 if (net->core.prot_inuse == NULL) 3974 return -ENOMEM; 3975 return 0; 3976 } 3977 3978 static void __net_exit sock_inuse_exit_net(struct net *net) 3979 { 3980 free_percpu(net->core.prot_inuse); 3981 } 3982 3983 static struct pernet_operations net_inuse_ops = { 3984 .init = sock_inuse_init_net, 3985 .exit = sock_inuse_exit_net, 3986 }; 3987 3988 static __init int net_inuse_init(void) 3989 { 3990 if (register_pernet_subsys(&net_inuse_ops)) 3991 panic("Cannot initialize net inuse counters"); 3992 3993 return 0; 3994 } 3995 3996 core_initcall(net_inuse_init); 3997 3998 static int assign_proto_idx(struct proto *prot) 3999 { 4000 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 4001 4002 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) { 4003 pr_err("PROTO_INUSE_NR exhausted\n"); 4004 return -ENOSPC; 4005 } 4006 4007 set_bit(prot->inuse_idx, proto_inuse_idx); 4008 return 0; 4009 } 4010 4011 static void release_proto_idx(struct proto *prot) 4012 { 4013 if (prot->inuse_idx != PROTO_INUSE_NR - 1) 4014 clear_bit(prot->inuse_idx, proto_inuse_idx); 4015 } 4016 #else 4017 static inline int assign_proto_idx(struct proto *prot) 4018 { 4019 return 0; 4020 } 4021 4022 static inline void release_proto_idx(struct proto *prot) 4023 { 4024 } 4025 4026 #endif 4027 4028 static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot) 4029 { 4030 if (!twsk_prot) 4031 return; 4032 kfree(twsk_prot->twsk_slab_name); 4033 twsk_prot->twsk_slab_name = NULL; 4034 kmem_cache_destroy(twsk_prot->twsk_slab); 4035 twsk_prot->twsk_slab = NULL; 4036 } 4037 4038 static int tw_prot_init(const struct proto *prot) 4039 { 4040 struct timewait_sock_ops *twsk_prot = prot->twsk_prot; 4041 4042 if (!twsk_prot) 4043 return 0; 4044 4045 twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", 4046 prot->name); 4047 if (!twsk_prot->twsk_slab_name) 4048 return -ENOMEM; 4049 4050 twsk_prot->twsk_slab = 4051 kmem_cache_create(twsk_prot->twsk_slab_name, 4052 twsk_prot->twsk_obj_size, 0, 4053 SLAB_ACCOUNT | prot->slab_flags, 4054 NULL); 4055 if (!twsk_prot->twsk_slab) { 4056 pr_crit("%s: Can't create timewait sock SLAB cache!\n", 4057 prot->name); 4058 return -ENOMEM; 4059 } 4060 4061 return 0; 4062 } 4063 4064 static void req_prot_cleanup(struct request_sock_ops *rsk_prot) 4065 { 4066 if (!rsk_prot) 4067 return; 4068 kfree(rsk_prot->slab_name); 4069 rsk_prot->slab_name = NULL; 4070 kmem_cache_destroy(rsk_prot->slab); 4071 rsk_prot->slab = NULL; 4072 } 4073 4074 static int req_prot_init(const struct proto *prot) 4075 { 4076 struct request_sock_ops *rsk_prot = prot->rsk_prot; 4077 4078 if (!rsk_prot) 4079 return 0; 4080 4081 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", 4082 prot->name); 4083 if (!rsk_prot->slab_name) 4084 return -ENOMEM; 4085 4086 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, 4087 rsk_prot->obj_size, 0, 4088 SLAB_ACCOUNT | prot->slab_flags, 4089 NULL); 4090 4091 if (!rsk_prot->slab) { 4092 pr_crit("%s: Can't create request sock SLAB cache!\n", 4093 prot->name); 4094 return -ENOMEM; 4095 } 4096 return 0; 4097 } 4098 4099 int 
proto_register(struct proto *prot, int alloc_slab) 4100 { 4101 int ret = -ENOBUFS; 4102 4103 if (prot->memory_allocated && !prot->sysctl_mem) { 4104 pr_err("%s: missing sysctl_mem\n", prot->name); 4105 return -EINVAL; 4106 } 4107 if (prot->memory_allocated && !prot->per_cpu_fw_alloc) { 4108 pr_err("%s: missing per_cpu_fw_alloc\n", prot->name); 4109 return -EINVAL; 4110 } 4111 if (alloc_slab) { 4112 prot->slab = kmem_cache_create_usercopy(prot->name, 4113 prot->obj_size, 0, 4114 SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT | 4115 prot->slab_flags, 4116 prot->useroffset, prot->usersize, 4117 NULL); 4118 4119 if (prot->slab == NULL) { 4120 pr_crit("%s: Can't create sock SLAB cache!\n", 4121 prot->name); 4122 goto out; 4123 } 4124 4125 if (req_prot_init(prot)) 4126 goto out_free_request_sock_slab; 4127 4128 if (tw_prot_init(prot)) 4129 goto out_free_timewait_sock_slab; 4130 } 4131 4132 mutex_lock(&proto_list_mutex); 4133 ret = assign_proto_idx(prot); 4134 if (ret) { 4135 mutex_unlock(&proto_list_mutex); 4136 goto out_free_timewait_sock_slab; 4137 } 4138 list_add(&prot->node, &proto_list); 4139 mutex_unlock(&proto_list_mutex); 4140 return ret; 4141 4142 out_free_timewait_sock_slab: 4143 if (alloc_slab) 4144 tw_prot_cleanup(prot->twsk_prot); 4145 out_free_request_sock_slab: 4146 if (alloc_slab) { 4147 req_prot_cleanup(prot->rsk_prot); 4148 4149 kmem_cache_destroy(prot->slab); 4150 prot->slab = NULL; 4151 } 4152 out: 4153 return ret; 4154 } 4155 EXPORT_SYMBOL(proto_register); 4156 4157 void proto_unregister(struct proto *prot) 4158 { 4159 mutex_lock(&proto_list_mutex); 4160 release_proto_idx(prot); 4161 list_del(&prot->node); 4162 mutex_unlock(&proto_list_mutex); 4163 4164 kmem_cache_destroy(prot->slab); 4165 prot->slab = NULL; 4166 4167 req_prot_cleanup(prot->rsk_prot); 4168 tw_prot_cleanup(prot->twsk_prot); 4169 } 4170 EXPORT_SYMBOL(proto_unregister); 4171 4172 int sock_load_diag_module(int family, int protocol) 4173 { 4174 if (!protocol) { 4175 if (!sock_is_registered(family)) 4176 return -ENOENT; 4177 4178 return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 4179 NETLINK_SOCK_DIAG, family); 4180 } 4181 4182 #ifdef CONFIG_INET 4183 if (family == AF_INET && 4184 protocol != IPPROTO_RAW && 4185 protocol < MAX_INET_PROTOS && 4186 !rcu_access_pointer(inet_protos[protocol])) 4187 return -ENOENT; 4188 #endif 4189 4190 return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK, 4191 NETLINK_SOCK_DIAG, family, protocol); 4192 } 4193 EXPORT_SYMBOL(sock_load_diag_module); 4194 4195 #ifdef CONFIG_PROC_FS 4196 static void *proto_seq_start(struct seq_file *seq, loff_t *pos) 4197 __acquires(proto_list_mutex) 4198 { 4199 mutex_lock(&proto_list_mutex); 4200 return seq_list_start_head(&proto_list, *pos); 4201 } 4202 4203 static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4204 { 4205 return seq_list_next(v, &proto_list, pos); 4206 } 4207 4208 static void proto_seq_stop(struct seq_file *seq, void *v) 4209 __releases(proto_list_mutex) 4210 { 4211 mutex_unlock(&proto_list_mutex); 4212 } 4213 4214 static char proto_method_implemented(const void *method) 4215 { 4216 return method == NULL ? 'n' : 'y'; 4217 } 4218 static long sock_prot_memory_allocated(struct proto *proto) 4219 { 4220 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L; 4221 } 4222 4223 static const char *sock_prot_memory_pressure(struct proto *proto) 4224 { 4225 return proto->memory_pressure != NULL ? 4226 proto_memory_pressure(proto) ? 
"yes" : "no" : "NI"; 4227 } 4228 4229 static void proto_seq_printf(struct seq_file *seq, struct proto *proto) 4230 { 4231 4232 seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " 4233 "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", 4234 proto->name, 4235 proto->obj_size, 4236 sock_prot_inuse_get(seq_file_net(seq), proto), 4237 sock_prot_memory_allocated(proto), 4238 sock_prot_memory_pressure(proto), 4239 proto->max_header, 4240 proto->slab == NULL ? "no" : "yes", 4241 module_name(proto->owner), 4242 proto_method_implemented(proto->close), 4243 proto_method_implemented(proto->connect), 4244 proto_method_implemented(proto->disconnect), 4245 proto_method_implemented(proto->accept), 4246 proto_method_implemented(proto->ioctl), 4247 proto_method_implemented(proto->init), 4248 proto_method_implemented(proto->destroy), 4249 proto_method_implemented(proto->shutdown), 4250 proto_method_implemented(proto->setsockopt), 4251 proto_method_implemented(proto->getsockopt), 4252 proto_method_implemented(proto->sendmsg), 4253 proto_method_implemented(proto->recvmsg), 4254 proto_method_implemented(proto->bind), 4255 proto_method_implemented(proto->backlog_rcv), 4256 proto_method_implemented(proto->hash), 4257 proto_method_implemented(proto->unhash), 4258 proto_method_implemented(proto->get_port), 4259 proto_method_implemented(proto->enter_memory_pressure)); 4260 } 4261 4262 static int proto_seq_show(struct seq_file *seq, void *v) 4263 { 4264 if (v == &proto_list) 4265 seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s", 4266 "protocol", 4267 "size", 4268 "sockets", 4269 "memory", 4270 "press", 4271 "maxhdr", 4272 "slab", 4273 "module", 4274 "cl co di ac io in de sh ss gs se re bi br ha uh gp em\n"); 4275 else 4276 proto_seq_printf(seq, list_entry(v, struct proto, node)); 4277 return 0; 4278 } 4279 4280 static const struct seq_operations proto_seq_ops = { 4281 .start = proto_seq_start, 4282 .next = proto_seq_next, 4283 .stop = proto_seq_stop, 4284 .show = proto_seq_show, 4285 }; 4286 4287 static __net_init int proto_init_net(struct net *net) 4288 { 4289 if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops, 4290 sizeof(struct seq_net_private))) 4291 return -ENOMEM; 4292 4293 return 0; 4294 } 4295 4296 static __net_exit void proto_exit_net(struct net *net) 4297 { 4298 remove_proc_entry("protocols", net->proc_net); 4299 } 4300 4301 4302 static __net_initdata struct pernet_operations proto_net_ops = { 4303 .init = proto_init_net, 4304 .exit = proto_exit_net, 4305 }; 4306 4307 static int __init proto_init(void) 4308 { 4309 return register_pernet_subsys(&proto_net_ops); 4310 } 4311 4312 subsys_initcall(proto_init); 4313 4314 #endif /* PROC_FS */ 4315 4316 #ifdef CONFIG_NET_RX_BUSY_POLL 4317 bool sk_busy_loop_end(void *p, unsigned long start_time) 4318 { 4319 struct sock *sk = p; 4320 4321 if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) 4322 return true; 4323 4324 if (sk_is_udp(sk) && 4325 !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue)) 4326 return true; 4327 4328 return sk_busy_loop_timeout(sk, start_time); 4329 } 4330 EXPORT_SYMBOL(sk_busy_loop_end); 4331 #endif /* CONFIG_NET_RX_BUSY_POLL */ 4332 4333 int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len) 4334 { 4335 if (!sk->sk_prot->bind_add) 4336 return -EOPNOTSUPP; 4337 return sk->sk_prot->bind_add(sk, addr, addr_len); 4338 } 4339 EXPORT_SYMBOL(sock_bind_add); 4340 4341 /* Copy 'size' bytes from userspace and return `size` back to userspace */ 4342 int 

/* Copy 'size' bytes from userspace into 'karg', run the protocol ioctl on
 * the kernel copy, and on success copy 'size' bytes back to userspace.
 */
int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
		     void __user *arg, void *karg, size_t size)
{
	int ret;

	if (copy_from_user(karg, arg, size))
		return -EFAULT;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, karg);
	if (ret)
		return ret;

	if (copy_to_user(arg, karg, size))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL(sock_ioctl_inout);

/* This is the most common ioctl prep function, where the result (4 bytes) is
 * copied back to userspace if the ioctl() returns successfully. No input is
 * copied from userspace.
 */
static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg)
{
	int ret, karg = 0;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg);
	if (ret)
		return ret;

	return put_user(karg, (int __user *)arg);
}

/* A wrapper around sock ioctls, which copies the data from userspace
 * (depending on the protocol/ioctl), and copies back the result to userspace.
 * The main motivation for this function is to pass kernel memory to the
 * protocol ioctl callbacks, instead of userspace memory.
 */
int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	int rc = 1;

	if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET)
		rc = ipmr_sk_ioctl(sk, cmd, arg);
	else if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET6)
		rc = ip6mr_sk_ioctl(sk, cmd, arg);
	else if (sk_is_phonet(sk))
		rc = phonet_sk_ioctl(sk, cmd, arg);

	/* If ioctl was processed, returns its value */
	if (rc <= 0)
		return rc;

	/* Otherwise call the default handler */
	return sock_ioctl_out(sk, cmd, arg);
}
EXPORT_SYMBOL(sk_ioctl);
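
/*
 * Illustrative sketch (hypothetical, not part of this file): with the
 * sk_ioctl()/sock_ioctl_out() wrappers above, a protocol's .ioctl callback
 * only ever touches kernel memory; copy_{from,to}_user() happens here. The
 * callback fills *karg and returns 0, or returns -ENOIOCTLCMD when it does
 * not handle the command. foo_ioctl and foo_first_packet_length are made-up
 * names; compare udp_ioctl()/tcp_ioctl() for real examples.
 *
 *	static int foo_ioctl(struct sock *sk, int cmd, int *karg)
 *	{
 *		switch (cmd) {
 *		case SIOCOUTQ:
 *			*karg = sk_wmem_alloc_get(sk);
 *			return 0;
 *		case SIOCINQ:
 *			*karg = foo_first_packet_length(sk);
 *			return 0;
 *		default:
 *			return -ENOIOCTLCMD;
 *		}
 *	}
 */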

/* Compile-time checks that the struct sock fields used on hot paths stay
 * within the cacheline groups (sock_write_rx, sock_read_tx, ...) declared
 * in struct sock.
 */
static int __init sock_struct_check(void)
{
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
	return 0;
}

core_initcall(sock_struct_check);