1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Generic socket support routines. Memory allocators, socket lock/release 8 * handler for protocols to use and generic option handler. 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Alan Cox, <A.Cox@swansea.ac.uk> 14 * 15 * Fixes: 16 * Alan Cox : Numerous verify_area() problems 17 * Alan Cox : Connecting on a connecting socket 18 * now returns an error for tcp. 19 * Alan Cox : sock->protocol is set correctly. 20 * and is not sometimes left as 0. 21 * Alan Cox : connect handles icmp errors on a 22 * connect properly. Unfortunately there 23 * is a restart syscall nasty there. I 24 * can't match BSD without hacking the C 25 * library. Ideas urgently sought! 26 * Alan Cox : Disallow bind() to addresses that are 27 * not ours - especially broadcast ones!! 28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost) 29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets, 30 * instead they leave that for the DESTROY timer. 31 * Alan Cox : Clean up error flag in accept 32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer 33 * was buggy. Put a remove_sock() in the handler 34 * for memory when we hit 0. Also altered the timer 35 * code. The ACK stuff can wait and needs major 36 * TCP layer surgery. 37 * Alan Cox : Fixed TCP ack bug, removed remove sock 38 * and fixed timer/inet_bh race. 39 * Alan Cox : Added zapped flag for TCP 40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code 41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb 42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources 43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing. 44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so... 45 * Rick Sladkey : Relaxed UDP rules for matching packets. 46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support 47 * Pauline Middelink : identd support 48 * Alan Cox : Fixed connect() taking signals I think. 49 * Alan Cox : SO_LINGER supported 50 * Alan Cox : Error reporting fixes 51 * Anonymous : inet_create tidied up (sk->reuse setting) 52 * Alan Cox : inet sockets don't set sk->type! 53 * Alan Cox : Split socket option code 54 * Alan Cox : Callbacks 55 * Alan Cox : Nagle flag for Charles & Johannes stuff 56 * Alex : Removed restriction on inet fioctl 57 * Alan Cox : Splitting INET from NET core 58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt() 59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code 60 * Alan Cox : Split IP from generic code 61 * Alan Cox : New kfree_skbmem() 62 * Alan Cox : Make SO_DEBUG superuser only. 63 * Alan Cox : Allow anyone to clear SO_DEBUG 64 * (compatibility fix) 65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput. 66 * Alan Cox : Allocator for a socket is settable. 67 * Alan Cox : SO_ERROR includes soft errors. 68 * Alan Cox : Allow NULL arguments on some SO_ opts 69 * Alan Cox : Generic socket allocation to make hooks 70 * easier (suggested by Craig Metz). 71 * Michael Pall : SO_ERROR returns positive errno again 72 * Steve Whitehouse: Added default destructor to free 73 * protocol private data. 
74 * Steve Whitehouse: Added various other default routines 75 * common to several socket families. 76 * Chris Evans : Call suser() check last on F_SETOWN 77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER. 78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s() 79 * Andi Kleen : Fix write_space callback 80 * Chris Evans : Security fixes - signedness again 81 * Arnaldo C. Melo : cleanups, use skb_queue_purge 82 * 83 * To Fix: 84 */ 85 86 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 87 88 #include <linux/unaligned.h> 89 #include <linux/capability.h> 90 #include <linux/errno.h> 91 #include <linux/errqueue.h> 92 #include <linux/types.h> 93 #include <linux/socket.h> 94 #include <linux/in.h> 95 #include <linux/kernel.h> 96 #include <linux/module.h> 97 #include <linux/proc_fs.h> 98 #include <linux/seq_file.h> 99 #include <linux/sched.h> 100 #include <linux/sched/mm.h> 101 #include <linux/timer.h> 102 #include <linux/string.h> 103 #include <linux/sockios.h> 104 #include <linux/net.h> 105 #include <linux/mm.h> 106 #include <linux/slab.h> 107 #include <linux/interrupt.h> 108 #include <linux/poll.h> 109 #include <linux/tcp.h> 110 #include <linux/udp.h> 111 #include <linux/init.h> 112 #include <linux/highmem.h> 113 #include <linux/user_namespace.h> 114 #include <linux/static_key.h> 115 #include <linux/memcontrol.h> 116 #include <linux/prefetch.h> 117 #include <linux/compat.h> 118 #include <linux/mroute.h> 119 #include <linux/mroute6.h> 120 #include <linux/icmpv6.h> 121 122 #include <linux/uaccess.h> 123 124 #include <linux/netdevice.h> 125 #include <net/protocol.h> 126 #include <linux/skbuff.h> 127 #include <linux/skbuff_ref.h> 128 #include <net/net_namespace.h> 129 #include <net/request_sock.h> 130 #include <net/sock.h> 131 #include <net/proto_memory.h> 132 #include <linux/net_tstamp.h> 133 #include <net/xfrm.h> 134 #include <linux/ipsec.h> 135 #include <net/cls_cgroup.h> 136 #include <net/netprio_cgroup.h> 137 #include <linux/sock_diag.h> 138 139 #include <linux/filter.h> 140 #include <net/sock_reuseport.h> 141 #include <net/bpf_sk_storage.h> 142 143 #include <trace/events/sock.h> 144 145 #include <net/tcp.h> 146 #include <net/busy_poll.h> 147 #include <net/phonet/phonet.h> 148 149 #include <linux/ethtool.h> 150 151 #include <uapi/linux/pidfd.h> 152 153 #include "dev.h" 154 155 static DEFINE_MUTEX(proto_list_mutex); 156 static LIST_HEAD(proto_list); 157 158 static void sock_def_write_space_wfree(struct sock *sk); 159 static void sock_def_write_space(struct sock *sk); 160 161 /** 162 * sk_ns_capable - General socket capability test 163 * @sk: Socket to use a capability on or through 164 * @user_ns: The user namespace of the capability to use 165 * @cap: The capability to use 166 * 167 * Test to see if the opener of the socket had when the socket was 168 * created and the current process has the capability @cap in the user 169 * namespace @user_ns. 170 */ 171 bool sk_ns_capable(const struct sock *sk, 172 struct user_namespace *user_ns, int cap) 173 { 174 return file_ns_capable(sk->sk_socket->file, user_ns, cap) && 175 ns_capable(user_ns, cap); 176 } 177 EXPORT_SYMBOL(sk_ns_capable); 178 179 /** 180 * sk_capable - Socket global capability test 181 * @sk: Socket to use a capability on or through 182 * @cap: The global capability to use 183 * 184 * Test to see if the opener of the socket had when the socket was 185 * created and the current process has the capability @cap in all user 186 * namespaces. 
187 */ 188 bool sk_capable(const struct sock *sk, int cap) 189 { 190 return sk_ns_capable(sk, &init_user_ns, cap); 191 } 192 EXPORT_SYMBOL(sk_capable); 193 194 /** 195 * sk_net_capable - Network namespace socket capability test 196 * @sk: Socket to use a capability on or through 197 * @cap: The capability to use 198 * 199 * Test to see if the opener of the socket had when the socket was created 200 * and the current process has the capability @cap over the network namespace 201 * the socket is a member of. 202 */ 203 bool sk_net_capable(const struct sock *sk, int cap) 204 { 205 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); 206 } 207 EXPORT_SYMBOL(sk_net_capable); 208 209 /* 210 * Each address family might have different locking rules, so we have 211 * one slock key per address family and separate keys for internal and 212 * userspace sockets. 213 */ 214 static struct lock_class_key af_family_keys[AF_MAX]; 215 static struct lock_class_key af_family_kern_keys[AF_MAX]; 216 static struct lock_class_key af_family_slock_keys[AF_MAX]; 217 static struct lock_class_key af_family_kern_slock_keys[AF_MAX]; 218 219 /* 220 * Make lock validator output more readable. (we pre-construct these 221 * strings build-time, so that runtime initialization of socket 222 * locks is fast): 223 */ 224 225 #define _sock_locks(x) \ 226 x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \ 227 x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \ 228 x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \ 229 x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \ 230 x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \ 231 x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \ 232 x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \ 233 x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \ 234 x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \ 235 x "27" , x "28" , x "AF_CAN" , \ 236 x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \ 237 x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \ 238 x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \ 239 x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \ 240 x "AF_QIPCRTR", x "AF_SMC" , x "AF_XDP" , \ 241 x "AF_MCTP" , \ 242 x "AF_MAX" 243 244 static const char *const af_family_key_strings[AF_MAX+1] = { 245 _sock_locks("sk_lock-") 246 }; 247 static const char *const af_family_slock_key_strings[AF_MAX+1] = { 248 _sock_locks("slock-") 249 }; 250 static const char *const af_family_clock_key_strings[AF_MAX+1] = { 251 _sock_locks("clock-") 252 }; 253 254 static const char *const af_family_kern_key_strings[AF_MAX+1] = { 255 _sock_locks("k-sk_lock-") 256 }; 257 static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = { 258 _sock_locks("k-slock-") 259 }; 260 static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = { 261 _sock_locks("k-clock-") 262 }; 263 static const char *const af_family_rlock_key_strings[AF_MAX+1] = { 264 _sock_locks("rlock-") 265 }; 266 static const char *const af_family_wlock_key_strings[AF_MAX+1] = { 267 _sock_locks("wlock-") 268 }; 269 static const char *const af_family_elock_key_strings[AF_MAX+1] = { 270 _sock_locks("elock-") 271 }; 272 273 /* 274 * sk_callback_lock and sk queues locking rules are per-address-family, 275 * so split the lock classes by using a per-AF key: 276 */ 277 static struct lock_class_key af_callback_keys[AF_MAX]; 278 static struct lock_class_key af_rlock_keys[AF_MAX]; 279 static struct lock_class_key af_wlock_keys[AF_MAX]; 280 static struct lock_class_key af_elock_keys[AF_MAX]; 281 static struct lock_class_key af_kern_callback_keys[AF_MAX]; 282 283 /* Run time adjustable parameters. 
*/ 284 __u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX; 285 EXPORT_SYMBOL(sysctl_wmem_max); 286 __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX; 287 EXPORT_SYMBOL(sysctl_rmem_max); 288 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; 289 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; 290 291 DEFINE_STATIC_KEY_FALSE(memalloc_socks_key); 292 EXPORT_SYMBOL_GPL(memalloc_socks_key); 293 294 /** 295 * sk_set_memalloc - sets %SOCK_MEMALLOC 296 * @sk: socket to set it on 297 * 298 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves. 299 * It's the responsibility of the admin to adjust min_free_kbytes 300 * to meet the requirements 301 */ 302 void sk_set_memalloc(struct sock *sk) 303 { 304 sock_set_flag(sk, SOCK_MEMALLOC); 305 sk->sk_allocation |= __GFP_MEMALLOC; 306 static_branch_inc(&memalloc_socks_key); 307 } 308 EXPORT_SYMBOL_GPL(sk_set_memalloc); 309 310 void sk_clear_memalloc(struct sock *sk) 311 { 312 sock_reset_flag(sk, SOCK_MEMALLOC); 313 sk->sk_allocation &= ~__GFP_MEMALLOC; 314 static_branch_dec(&memalloc_socks_key); 315 316 /* 317 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward 318 * progress of swapping. SOCK_MEMALLOC may be cleared while 319 * it has rmem allocations due to the last swapfile being deactivated 320 * but there is a risk that the socket is unusable due to exceeding 321 * the rmem limits. Reclaim the reserves and obey rmem limits again. 322 */ 323 sk_mem_reclaim(sk); 324 } 325 EXPORT_SYMBOL_GPL(sk_clear_memalloc); 326 327 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 328 { 329 int ret; 330 unsigned int noreclaim_flag; 331 332 /* these should have been dropped before queueing */ 333 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); 334 335 noreclaim_flag = memalloc_noreclaim_save(); 336 ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv, 337 tcp_v6_do_rcv, 338 tcp_v4_do_rcv, 339 sk, skb); 340 memalloc_noreclaim_restore(noreclaim_flag); 341 342 return ret; 343 } 344 EXPORT_SYMBOL(__sk_backlog_rcv); 345 346 void sk_error_report(struct sock *sk) 347 { 348 sk->sk_error_report(sk); 349 350 switch (sk->sk_family) { 351 case AF_INET: 352 fallthrough; 353 case AF_INET6: 354 trace_inet_sk_error_report(sk); 355 break; 356 default: 357 break; 358 } 359 } 360 EXPORT_SYMBOL(sk_error_report); 361 362 int sock_get_timeout(long timeo, void *optval, bool old_timeval) 363 { 364 struct __kernel_sock_timeval tv; 365 366 if (timeo == MAX_SCHEDULE_TIMEOUT) { 367 tv.tv_sec = 0; 368 tv.tv_usec = 0; 369 } else { 370 tv.tv_sec = timeo / HZ; 371 tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ; 372 } 373 374 if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { 375 struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec }; 376 *(struct old_timeval32 *)optval = tv32; 377 return sizeof(tv32); 378 } 379 380 if (old_timeval) { 381 struct __kernel_old_timeval old_tv; 382 old_tv.tv_sec = tv.tv_sec; 383 old_tv.tv_usec = tv.tv_usec; 384 *(struct __kernel_old_timeval *)optval = old_tv; 385 return sizeof(old_tv); 386 } 387 388 *(struct __kernel_sock_timeval *)optval = tv; 389 return sizeof(tv); 390 } 391 EXPORT_SYMBOL(sock_get_timeout); 392 393 int sock_copy_user_timeval(struct __kernel_sock_timeval *tv, 394 sockptr_t optval, int optlen, bool old_timeval) 395 { 396 if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { 397 struct old_timeval32 tv32; 398 399 if (optlen < sizeof(tv32)) 400 return -EINVAL; 401 402 if (copy_from_sockptr(&tv32, optval, sizeof(tv32))) 403 return -EFAULT; 404 tv->tv_sec = tv32.tv_sec; 405 tv->tv_usec = tv32.tv_usec; 
406 } else if (old_timeval) { 407 struct __kernel_old_timeval old_tv; 408 409 if (optlen < sizeof(old_tv)) 410 return -EINVAL; 411 if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv))) 412 return -EFAULT; 413 tv->tv_sec = old_tv.tv_sec; 414 tv->tv_usec = old_tv.tv_usec; 415 } else { 416 if (optlen < sizeof(*tv)) 417 return -EINVAL; 418 if (copy_from_sockptr(tv, optval, sizeof(*tv))) 419 return -EFAULT; 420 } 421 422 return 0; 423 } 424 EXPORT_SYMBOL(sock_copy_user_timeval); 425 426 static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, 427 bool old_timeval) 428 { 429 struct __kernel_sock_timeval tv; 430 int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval); 431 long val; 432 433 if (err) 434 return err; 435 436 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC) 437 return -EDOM; 438 439 if (tv.tv_sec < 0) { 440 static int warned __read_mostly; 441 442 WRITE_ONCE(*timeo_p, 0); 443 if (warned < 10 && net_ratelimit()) { 444 warned++; 445 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n", 446 __func__, current->comm, task_pid_nr(current)); 447 } 448 return 0; 449 } 450 val = MAX_SCHEDULE_TIMEOUT; 451 if ((tv.tv_sec || tv.tv_usec) && 452 (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))) 453 val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, 454 USEC_PER_SEC / HZ); 455 WRITE_ONCE(*timeo_p, val); 456 return 0; 457 } 458 459 static bool sk_set_prio_allowed(const struct sock *sk, int val) 460 { 461 return ((val >= TC_PRIO_BESTEFFORT && val <= TC_PRIO_INTERACTIVE) || 462 sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) || 463 sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)); 464 } 465 466 static bool sock_needs_netstamp(const struct sock *sk) 467 { 468 switch (sk->sk_family) { 469 case AF_UNSPEC: 470 case AF_UNIX: 471 return false; 472 default: 473 return true; 474 } 475 } 476 477 static void sock_disable_timestamp(struct sock *sk, unsigned long flags) 478 { 479 if (sk->sk_flags & flags) { 480 sk->sk_flags &= ~flags; 481 if (sock_needs_netstamp(sk) && 482 !(sk->sk_flags & SK_FLAGS_TIMESTAMP)) 483 net_disable_timestamp(); 484 } 485 } 486 487 488 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 489 { 490 unsigned long flags; 491 struct sk_buff_head *list = &sk->sk_receive_queue; 492 493 if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) { 494 atomic_inc(&sk->sk_drops); 495 trace_sock_rcvqueue_full(sk, skb); 496 return -ENOMEM; 497 } 498 499 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { 500 atomic_inc(&sk->sk_drops); 501 return -ENOBUFS; 502 } 503 504 skb->dev = NULL; 505 skb_set_owner_r(skb, sk); 506 507 /* we escape from rcu protected region, make sure we dont leak 508 * a norefcounted dst 509 */ 510 skb_dst_force(skb); 511 512 spin_lock_irqsave(&list->lock, flags); 513 sock_skb_set_dropcount(sk, skb); 514 __skb_queue_tail(list, skb); 515 spin_unlock_irqrestore(&list->lock, flags); 516 517 if (!sock_flag(sk, SOCK_DEAD)) 518 sk->sk_data_ready(sk); 519 return 0; 520 } 521 EXPORT_SYMBOL(__sock_queue_rcv_skb); 522 523 int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb, 524 enum skb_drop_reason *reason) 525 { 526 enum skb_drop_reason drop_reason; 527 int err; 528 529 err = sk_filter(sk, skb); 530 if (err) { 531 drop_reason = SKB_DROP_REASON_SOCKET_FILTER; 532 goto out; 533 } 534 err = __sock_queue_rcv_skb(sk, skb); 535 switch (err) { 536 case -ENOMEM: 537 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF; 538 break; 539 case -ENOBUFS: 540 drop_reason = SKB_DROP_REASON_PROTO_MEM; 541 break; 542 
default: 543 drop_reason = SKB_NOT_DROPPED_YET; 544 break; 545 } 546 out: 547 if (reason) 548 *reason = drop_reason; 549 return err; 550 } 551 EXPORT_SYMBOL(sock_queue_rcv_skb_reason); 552 553 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, 554 const int nested, unsigned int trim_cap, bool refcounted) 555 { 556 int rc = NET_RX_SUCCESS; 557 558 if (sk_filter_trim_cap(sk, skb, trim_cap)) 559 goto discard_and_relse; 560 561 skb->dev = NULL; 562 563 if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) { 564 atomic_inc(&sk->sk_drops); 565 goto discard_and_relse; 566 } 567 if (nested) 568 bh_lock_sock_nested(sk); 569 else 570 bh_lock_sock(sk); 571 if (!sock_owned_by_user(sk)) { 572 /* 573 * trylock + unlock semantics: 574 */ 575 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); 576 577 rc = sk_backlog_rcv(sk, skb); 578 579 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); 580 } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) { 581 bh_unlock_sock(sk); 582 atomic_inc(&sk->sk_drops); 583 goto discard_and_relse; 584 } 585 586 bh_unlock_sock(sk); 587 out: 588 if (refcounted) 589 sock_put(sk); 590 return rc; 591 discard_and_relse: 592 kfree_skb(skb); 593 goto out; 594 } 595 EXPORT_SYMBOL(__sk_receive_skb); 596 597 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *, 598 u32)); 599 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, 600 u32)); 601 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 602 { 603 struct dst_entry *dst = __sk_dst_get(sk); 604 605 if (dst && READ_ONCE(dst->obsolete) && 606 INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check, 607 dst, cookie) == NULL) { 608 sk_tx_queue_clear(sk); 609 WRITE_ONCE(sk->sk_dst_pending_confirm, 0); 610 RCU_INIT_POINTER(sk->sk_dst_cache, NULL); 611 dst_release(dst); 612 return NULL; 613 } 614 615 return dst; 616 } 617 EXPORT_SYMBOL(__sk_dst_check); 618 619 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) 620 { 621 struct dst_entry *dst = sk_dst_get(sk); 622 623 if (dst && READ_ONCE(dst->obsolete) && 624 INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check, 625 dst, cookie) == NULL) { 626 sk_dst_reset(sk); 627 dst_release(dst); 628 return NULL; 629 } 630 631 return dst; 632 } 633 EXPORT_SYMBOL(sk_dst_check); 634 635 static int sock_bindtoindex_locked(struct sock *sk, int ifindex) 636 { 637 int ret = -ENOPROTOOPT; 638 #ifdef CONFIG_NETDEVICES 639 struct net *net = sock_net(sk); 640 641 /* Sorry... */ 642 ret = -EPERM; 643 if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW)) 644 goto out; 645 646 ret = -EINVAL; 647 if (ifindex < 0) 648 goto out; 649 650 /* Paired with all READ_ONCE() done locklessly. 
*/ 651 WRITE_ONCE(sk->sk_bound_dev_if, ifindex); 652 653 if (sk->sk_prot->rehash) 654 sk->sk_prot->rehash(sk); 655 sk_dst_reset(sk); 656 657 ret = 0; 658 659 out: 660 #endif 661 662 return ret; 663 } 664 665 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk) 666 { 667 int ret; 668 669 if (lock_sk) 670 lock_sock(sk); 671 ret = sock_bindtoindex_locked(sk, ifindex); 672 if (lock_sk) 673 release_sock(sk); 674 675 return ret; 676 } 677 EXPORT_SYMBOL(sock_bindtoindex); 678 679 static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen) 680 { 681 int ret = -ENOPROTOOPT; 682 #ifdef CONFIG_NETDEVICES 683 struct net *net = sock_net(sk); 684 char devname[IFNAMSIZ]; 685 int index; 686 687 ret = -EINVAL; 688 if (optlen < 0) 689 goto out; 690 691 /* Bind this socket to a particular device like "eth0", 692 * as specified in the passed interface name. If the 693 * name is "" or the option length is zero the socket 694 * is not bound. 695 */ 696 if (optlen > IFNAMSIZ - 1) 697 optlen = IFNAMSIZ - 1; 698 memset(devname, 0, sizeof(devname)); 699 700 ret = -EFAULT; 701 if (copy_from_sockptr(devname, optval, optlen)) 702 goto out; 703 704 index = 0; 705 if (devname[0] != '\0') { 706 struct net_device *dev; 707 708 rcu_read_lock(); 709 dev = dev_get_by_name_rcu(net, devname); 710 if (dev) 711 index = dev->ifindex; 712 rcu_read_unlock(); 713 ret = -ENODEV; 714 if (!dev) 715 goto out; 716 } 717 718 sockopt_lock_sock(sk); 719 ret = sock_bindtoindex_locked(sk, index); 720 sockopt_release_sock(sk); 721 out: 722 #endif 723 724 return ret; 725 } 726 727 static int sock_getbindtodevice(struct sock *sk, sockptr_t optval, 728 sockptr_t optlen, int len) 729 { 730 int ret = -ENOPROTOOPT; 731 #ifdef CONFIG_NETDEVICES 732 int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); 733 struct net *net = sock_net(sk); 734 char devname[IFNAMSIZ]; 735 736 if (bound_dev_if == 0) { 737 len = 0; 738 goto zero; 739 } 740 741 ret = -EINVAL; 742 if (len < IFNAMSIZ) 743 goto out; 744 745 ret = netdev_get_name(net, devname, bound_dev_if); 746 if (ret) 747 goto out; 748 749 len = strlen(devname) + 1; 750 751 ret = -EFAULT; 752 if (copy_to_sockptr(optval, devname, len)) 753 goto out; 754 755 zero: 756 ret = -EFAULT; 757 if (copy_to_sockptr(optlen, &len, sizeof(int))) 758 goto out; 759 760 ret = 0; 761 762 out: 763 #endif 764 765 return ret; 766 } 767 768 bool sk_mc_loop(const struct sock *sk) 769 { 770 if (dev_recursion_level()) 771 return false; 772 if (!sk) 773 return true; 774 /* IPV6_ADDRFORM can change sk->sk_family under us. 
*/ 775 switch (READ_ONCE(sk->sk_family)) { 776 case AF_INET: 777 return inet_test_bit(MC_LOOP, sk); 778 #if IS_ENABLED(CONFIG_IPV6) 779 case AF_INET6: 780 return inet6_test_bit(MC6_LOOP, sk); 781 #endif 782 } 783 WARN_ON_ONCE(1); 784 return true; 785 } 786 EXPORT_SYMBOL(sk_mc_loop); 787 788 void sock_set_reuseaddr(struct sock *sk) 789 { 790 lock_sock(sk); 791 sk->sk_reuse = SK_CAN_REUSE; 792 release_sock(sk); 793 } 794 EXPORT_SYMBOL(sock_set_reuseaddr); 795 796 void sock_set_reuseport(struct sock *sk) 797 { 798 lock_sock(sk); 799 sk->sk_reuseport = true; 800 release_sock(sk); 801 } 802 EXPORT_SYMBOL(sock_set_reuseport); 803 804 void sock_no_linger(struct sock *sk) 805 { 806 lock_sock(sk); 807 WRITE_ONCE(sk->sk_lingertime, 0); 808 sock_set_flag(sk, SOCK_LINGER); 809 release_sock(sk); 810 } 811 EXPORT_SYMBOL(sock_no_linger); 812 813 void sock_set_priority(struct sock *sk, u32 priority) 814 { 815 WRITE_ONCE(sk->sk_priority, priority); 816 } 817 EXPORT_SYMBOL(sock_set_priority); 818 819 void sock_set_sndtimeo(struct sock *sk, s64 secs) 820 { 821 if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1) 822 WRITE_ONCE(sk->sk_sndtimeo, secs * HZ); 823 else 824 WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT); 825 } 826 EXPORT_SYMBOL(sock_set_sndtimeo); 827 828 static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns) 829 { 830 sock_valbool_flag(sk, SOCK_RCVTSTAMP, val); 831 sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, val && ns); 832 if (val) { 833 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new); 834 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 835 } 836 } 837 838 void sock_set_timestamp(struct sock *sk, int optname, bool valbool) 839 { 840 switch (optname) { 841 case SO_TIMESTAMP_OLD: 842 __sock_set_timestamps(sk, valbool, false, false); 843 break; 844 case SO_TIMESTAMP_NEW: 845 __sock_set_timestamps(sk, valbool, true, false); 846 break; 847 case SO_TIMESTAMPNS_OLD: 848 __sock_set_timestamps(sk, valbool, false, true); 849 break; 850 case SO_TIMESTAMPNS_NEW: 851 __sock_set_timestamps(sk, valbool, true, true); 852 break; 853 } 854 } 855 856 static int sock_timestamping_bind_phc(struct sock *sk, int phc_index) 857 { 858 struct net *net = sock_net(sk); 859 struct net_device *dev = NULL; 860 bool match = false; 861 int *vclock_index; 862 int i, num; 863 864 if (sk->sk_bound_dev_if) 865 dev = dev_get_by_index(net, sk->sk_bound_dev_if); 866 867 if (!dev) { 868 pr_err("%s: sock not bind to device\n", __func__); 869 return -EOPNOTSUPP; 870 } 871 872 num = ethtool_get_phc_vclocks(dev, &vclock_index); 873 dev_put(dev); 874 875 for (i = 0; i < num; i++) { 876 if (*(vclock_index + i) == phc_index) { 877 match = true; 878 break; 879 } 880 } 881 882 if (num > 0) 883 kfree(vclock_index); 884 885 if (!match) 886 return -EINVAL; 887 888 WRITE_ONCE(sk->sk_bind_phc, phc_index); 889 890 return 0; 891 } 892 893 int sock_set_timestamping(struct sock *sk, int optname, 894 struct so_timestamping timestamping) 895 { 896 int val = timestamping.flags; 897 int ret; 898 899 if (val & ~SOF_TIMESTAMPING_MASK) 900 return -EINVAL; 901 902 if (val & SOF_TIMESTAMPING_OPT_ID_TCP && 903 !(val & SOF_TIMESTAMPING_OPT_ID)) 904 return -EINVAL; 905 906 if (val & SOF_TIMESTAMPING_OPT_ID && 907 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { 908 if (sk_is_tcp(sk)) { 909 if ((1 << sk->sk_state) & 910 (TCPF_CLOSE | TCPF_LISTEN)) 911 return -EINVAL; 912 if (val & SOF_TIMESTAMPING_OPT_ID_TCP) 913 atomic_set(&sk->sk_tskey, tcp_sk(sk)->write_seq); 914 else 915 atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una); 916 } else { 917 
atomic_set(&sk->sk_tskey, 0); 918 } 919 } 920 921 if (val & SOF_TIMESTAMPING_OPT_STATS && 922 !(val & SOF_TIMESTAMPING_OPT_TSONLY)) 923 return -EINVAL; 924 925 if (val & SOF_TIMESTAMPING_BIND_PHC) { 926 ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc); 927 if (ret) 928 return ret; 929 } 930 931 WRITE_ONCE(sk->sk_tsflags, val); 932 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW); 933 sock_valbool_flag(sk, SOCK_TIMESTAMPING_ANY, !!(val & TSFLAGS_ANY)); 934 935 if (val & SOF_TIMESTAMPING_RX_SOFTWARE) 936 sock_enable_timestamp(sk, 937 SOCK_TIMESTAMPING_RX_SOFTWARE); 938 else 939 sock_disable_timestamp(sk, 940 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)); 941 return 0; 942 } 943 944 #if defined(CONFIG_CGROUP_BPF) 945 void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op) 946 { 947 struct bpf_sock_ops_kern sock_ops; 948 949 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); 950 sock_ops.op = op; 951 sock_ops.is_fullsock = 1; 952 sock_ops.sk = sk; 953 bpf_skops_init_skb(&sock_ops, skb, 0); 954 __cgroup_bpf_run_filter_sock_ops(sk, &sock_ops, CGROUP_SOCK_OPS); 955 } 956 #endif 957 958 void sock_set_keepalive(struct sock *sk) 959 { 960 lock_sock(sk); 961 if (sk->sk_prot->keepalive) 962 sk->sk_prot->keepalive(sk, true); 963 sock_valbool_flag(sk, SOCK_KEEPOPEN, true); 964 release_sock(sk); 965 } 966 EXPORT_SYMBOL(sock_set_keepalive); 967 968 static void __sock_set_rcvbuf(struct sock *sk, int val) 969 { 970 /* Ensure val * 2 fits into an int, to prevent max_t() from treating it 971 * as a negative value. 972 */ 973 val = min_t(int, val, INT_MAX / 2); 974 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 975 976 /* We double it on the way in to account for "struct sk_buff" etc. 977 * overhead. Applications assume that the SO_RCVBUF setting they make 978 * will allow that much actual data to be received on that socket. 979 * 980 * Applications are unaware that "struct sk_buff" and other overheads 981 * allocate from the receive buffer during socket buffer allocation. 982 * 983 * And after considering the possible alternatives, returning the value 984 * we actually used in getsockopt is the most desirable behavior. 
985 */ 986 WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF)); 987 } 988 989 void sock_set_rcvbuf(struct sock *sk, int val) 990 { 991 lock_sock(sk); 992 __sock_set_rcvbuf(sk, val); 993 release_sock(sk); 994 } 995 EXPORT_SYMBOL(sock_set_rcvbuf); 996 997 static void __sock_set_mark(struct sock *sk, u32 val) 998 { 999 if (val != sk->sk_mark) { 1000 WRITE_ONCE(sk->sk_mark, val); 1001 sk_dst_reset(sk); 1002 } 1003 } 1004 1005 void sock_set_mark(struct sock *sk, u32 val) 1006 { 1007 lock_sock(sk); 1008 __sock_set_mark(sk, val); 1009 release_sock(sk); 1010 } 1011 EXPORT_SYMBOL(sock_set_mark); 1012 1013 static void sock_release_reserved_memory(struct sock *sk, int bytes) 1014 { 1015 /* Round down bytes to multiple of pages */ 1016 bytes = round_down(bytes, PAGE_SIZE); 1017 1018 WARN_ON(bytes > sk->sk_reserved_mem); 1019 WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes); 1020 sk_mem_reclaim(sk); 1021 } 1022 1023 static int sock_reserve_memory(struct sock *sk, int bytes) 1024 { 1025 long allocated; 1026 bool charged; 1027 int pages; 1028 1029 if (!mem_cgroup_sockets_enabled || !sk->sk_memcg || !sk_has_account(sk)) 1030 return -EOPNOTSUPP; 1031 1032 if (!bytes) 1033 return 0; 1034 1035 pages = sk_mem_pages(bytes); 1036 1037 /* pre-charge to memcg */ 1038 charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages, 1039 GFP_KERNEL | __GFP_RETRY_MAYFAIL); 1040 if (!charged) 1041 return -ENOMEM; 1042 1043 /* pre-charge to forward_alloc */ 1044 sk_memory_allocated_add(sk, pages); 1045 allocated = sk_memory_allocated(sk); 1046 /* If the system goes into memory pressure with this 1047 * precharge, give up and return error. 1048 */ 1049 if (allocated > sk_prot_mem_limits(sk, 1)) { 1050 sk_memory_allocated_sub(sk, pages); 1051 mem_cgroup_uncharge_skmem(sk->sk_memcg, pages); 1052 return -ENOMEM; 1053 } 1054 sk_forward_alloc_add(sk, pages << PAGE_SHIFT); 1055 1056 WRITE_ONCE(sk->sk_reserved_mem, 1057 sk->sk_reserved_mem + (pages << PAGE_SHIFT)); 1058 1059 return 0; 1060 } 1061 1062 #ifdef CONFIG_PAGE_POOL 1063 1064 /* This is the number of tokens and frags that the user can SO_DEVMEM_DONTNEED 1065 * in 1 syscall. The limit exists to limit the amount of memory the kernel 1066 * allocates to copy these tokens, and to prevent looping over the frags for 1067 * too long. 
1068 */ 1069 #define MAX_DONTNEED_TOKENS 128 1070 #define MAX_DONTNEED_FRAGS 1024 1071 1072 static noinline_for_stack int 1073 sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen) 1074 { 1075 unsigned int num_tokens, i, j, k, netmem_num = 0; 1076 struct dmabuf_token *tokens; 1077 int ret = 0, num_frags = 0; 1078 netmem_ref netmems[16]; 1079 1080 if (!sk_is_tcp(sk)) 1081 return -EBADF; 1082 1083 if (optlen % sizeof(*tokens) || 1084 optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS) 1085 return -EINVAL; 1086 1087 num_tokens = optlen / sizeof(*tokens); 1088 tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL); 1089 if (!tokens) 1090 return -ENOMEM; 1091 1092 if (copy_from_sockptr(tokens, optval, optlen)) { 1093 kvfree(tokens); 1094 return -EFAULT; 1095 } 1096 1097 xa_lock_bh(&sk->sk_user_frags); 1098 for (i = 0; i < num_tokens; i++) { 1099 for (j = 0; j < tokens[i].token_count; j++) { 1100 if (++num_frags > MAX_DONTNEED_FRAGS) 1101 goto frag_limit_reached; 1102 1103 netmem_ref netmem = (__force netmem_ref)__xa_erase( 1104 &sk->sk_user_frags, tokens[i].token_start + j); 1105 1106 if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem))) 1107 continue; 1108 1109 netmems[netmem_num++] = netmem; 1110 if (netmem_num == ARRAY_SIZE(netmems)) { 1111 xa_unlock_bh(&sk->sk_user_frags); 1112 for (k = 0; k < netmem_num; k++) 1113 WARN_ON_ONCE(!napi_pp_put_page(netmems[k])); 1114 netmem_num = 0; 1115 xa_lock_bh(&sk->sk_user_frags); 1116 } 1117 ret++; 1118 } 1119 } 1120 1121 frag_limit_reached: 1122 xa_unlock_bh(&sk->sk_user_frags); 1123 for (k = 0; k < netmem_num; k++) 1124 WARN_ON_ONCE(!napi_pp_put_page(netmems[k])); 1125 1126 kvfree(tokens); 1127 return ret; 1128 } 1129 #endif 1130 1131 void sockopt_lock_sock(struct sock *sk) 1132 { 1133 /* When current->bpf_ctx is set, the setsockopt is called from 1134 * a bpf prog. bpf has ensured the sk lock has been 1135 * acquired before calling setsockopt(). 1136 */ 1137 if (has_current_bpf_ctx()) 1138 return; 1139 1140 lock_sock(sk); 1141 } 1142 EXPORT_SYMBOL(sockopt_lock_sock); 1143 1144 void sockopt_release_sock(struct sock *sk) 1145 { 1146 if (has_current_bpf_ctx()) 1147 return; 1148 1149 release_sock(sk); 1150 } 1151 EXPORT_SYMBOL(sockopt_release_sock); 1152 1153 bool sockopt_ns_capable(struct user_namespace *ns, int cap) 1154 { 1155 return has_current_bpf_ctx() || ns_capable(ns, cap); 1156 } 1157 EXPORT_SYMBOL(sockopt_ns_capable); 1158 1159 bool sockopt_capable(int cap) 1160 { 1161 return has_current_bpf_ctx() || capable(cap); 1162 } 1163 EXPORT_SYMBOL(sockopt_capable); 1164 1165 static int sockopt_validate_clockid(__kernel_clockid_t value) 1166 { 1167 switch (value) { 1168 case CLOCK_REALTIME: 1169 case CLOCK_MONOTONIC: 1170 case CLOCK_TAI: 1171 return 0; 1172 } 1173 return -EINVAL; 1174 } 1175 1176 /* 1177 * This is meant for all protocols to use and covers goings on 1178 * at the socket level. Everything here is generic. 
1179 */ 1180 1181 int sk_setsockopt(struct sock *sk, int level, int optname, 1182 sockptr_t optval, unsigned int optlen) 1183 { 1184 struct so_timestamping timestamping; 1185 struct socket *sock = sk->sk_socket; 1186 struct sock_txtime sk_txtime; 1187 int val; 1188 int valbool; 1189 struct linger ling; 1190 int ret = 0; 1191 1192 /* 1193 * Options without arguments 1194 */ 1195 1196 if (optname == SO_BINDTODEVICE) 1197 return sock_setbindtodevice(sk, optval, optlen); 1198 1199 if (optlen < sizeof(int)) 1200 return -EINVAL; 1201 1202 if (copy_from_sockptr(&val, optval, sizeof(val))) 1203 return -EFAULT; 1204 1205 valbool = val ? 1 : 0; 1206 1207 /* handle options which do not require locking the socket. */ 1208 switch (optname) { 1209 case SO_PRIORITY: 1210 if (sk_set_prio_allowed(sk, val)) { 1211 sock_set_priority(sk, val); 1212 return 0; 1213 } 1214 return -EPERM; 1215 case SO_TYPE: 1216 case SO_PROTOCOL: 1217 case SO_DOMAIN: 1218 case SO_ERROR: 1219 return -ENOPROTOOPT; 1220 #ifdef CONFIG_NET_RX_BUSY_POLL 1221 case SO_BUSY_POLL: 1222 if (val < 0) 1223 return -EINVAL; 1224 WRITE_ONCE(sk->sk_ll_usec, val); 1225 return 0; 1226 case SO_PREFER_BUSY_POLL: 1227 if (valbool && !sockopt_capable(CAP_NET_ADMIN)) 1228 return -EPERM; 1229 WRITE_ONCE(sk->sk_prefer_busy_poll, valbool); 1230 return 0; 1231 case SO_BUSY_POLL_BUDGET: 1232 if (val > READ_ONCE(sk->sk_busy_poll_budget) && 1233 !sockopt_capable(CAP_NET_ADMIN)) 1234 return -EPERM; 1235 if (val < 0 || val > U16_MAX) 1236 return -EINVAL; 1237 WRITE_ONCE(sk->sk_busy_poll_budget, val); 1238 return 0; 1239 #endif 1240 case SO_MAX_PACING_RATE: 1241 { 1242 unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val; 1243 unsigned long pacing_rate; 1244 1245 if (sizeof(ulval) != sizeof(val) && 1246 optlen >= sizeof(ulval) && 1247 copy_from_sockptr(&ulval, optval, sizeof(ulval))) { 1248 return -EFAULT; 1249 } 1250 if (ulval != ~0UL) 1251 cmpxchg(&sk->sk_pacing_status, 1252 SK_PACING_NONE, 1253 SK_PACING_NEEDED); 1254 /* Pairs with READ_ONCE() from sk_getsockopt() */ 1255 WRITE_ONCE(sk->sk_max_pacing_rate, ulval); 1256 pacing_rate = READ_ONCE(sk->sk_pacing_rate); 1257 if (ulval < pacing_rate) 1258 WRITE_ONCE(sk->sk_pacing_rate, ulval); 1259 return 0; 1260 } 1261 case SO_TXREHASH: 1262 if (!sk_is_tcp(sk)) 1263 return -EOPNOTSUPP; 1264 if (val < -1 || val > 1) 1265 return -EINVAL; 1266 if ((u8)val == SOCK_TXREHASH_DEFAULT) 1267 val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash); 1268 /* Paired with READ_ONCE() in tcp_rtx_synack() 1269 * and sk_getsockopt(). 
1270 */ 1271 WRITE_ONCE(sk->sk_txrehash, (u8)val); 1272 return 0; 1273 case SO_PEEK_OFF: 1274 { 1275 int (*set_peek_off)(struct sock *sk, int val); 1276 1277 set_peek_off = READ_ONCE(sock->ops)->set_peek_off; 1278 if (set_peek_off) 1279 ret = set_peek_off(sk, val); 1280 else 1281 ret = -EOPNOTSUPP; 1282 return ret; 1283 } 1284 #ifdef CONFIG_PAGE_POOL 1285 case SO_DEVMEM_DONTNEED: 1286 return sock_devmem_dontneed(sk, optval, optlen); 1287 #endif 1288 case SO_SNDTIMEO_OLD: 1289 case SO_SNDTIMEO_NEW: 1290 return sock_set_timeout(&sk->sk_sndtimeo, optval, 1291 optlen, optname == SO_SNDTIMEO_OLD); 1292 case SO_RCVTIMEO_OLD: 1293 case SO_RCVTIMEO_NEW: 1294 return sock_set_timeout(&sk->sk_rcvtimeo, optval, 1295 optlen, optname == SO_RCVTIMEO_OLD); 1296 } 1297 1298 sockopt_lock_sock(sk); 1299 1300 switch (optname) { 1301 case SO_DEBUG: 1302 if (val && !sockopt_capable(CAP_NET_ADMIN)) 1303 ret = -EACCES; 1304 else 1305 sock_valbool_flag(sk, SOCK_DBG, valbool); 1306 break; 1307 case SO_REUSEADDR: 1308 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); 1309 break; 1310 case SO_REUSEPORT: 1311 if (valbool && !sk_is_inet(sk)) 1312 ret = -EOPNOTSUPP; 1313 else 1314 sk->sk_reuseport = valbool; 1315 break; 1316 case SO_DONTROUTE: 1317 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); 1318 sk_dst_reset(sk); 1319 break; 1320 case SO_BROADCAST: 1321 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); 1322 break; 1323 case SO_SNDBUF: 1324 /* Don't error on this BSD doesn't and if you think 1325 * about it this is right. Otherwise apps have to 1326 * play 'guess the biggest size' games. RCVBUF/SNDBUF 1327 * are treated in BSD as hints 1328 */ 1329 val = min_t(u32, val, READ_ONCE(sysctl_wmem_max)); 1330 set_sndbuf: 1331 /* Ensure val * 2 fits into an int, to prevent max_t() 1332 * from treating it as a negative value. 1333 */ 1334 val = min_t(int, val, INT_MAX / 2); 1335 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 1336 WRITE_ONCE(sk->sk_sndbuf, 1337 max_t(int, val * 2, SOCK_MIN_SNDBUF)); 1338 /* Wake up sending tasks if we upped the value. */ 1339 sk->sk_write_space(sk); 1340 break; 1341 1342 case SO_SNDBUFFORCE: 1343 if (!sockopt_capable(CAP_NET_ADMIN)) { 1344 ret = -EPERM; 1345 break; 1346 } 1347 1348 /* No negative values (to prevent underflow, as val will be 1349 * multiplied by 2). 1350 */ 1351 if (val < 0) 1352 val = 0; 1353 goto set_sndbuf; 1354 1355 case SO_RCVBUF: 1356 /* Don't error on this BSD doesn't and if you think 1357 * about it this is right. Otherwise apps have to 1358 * play 'guess the biggest size' games. RCVBUF/SNDBUF 1359 * are treated in BSD as hints 1360 */ 1361 __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max))); 1362 break; 1363 1364 case SO_RCVBUFFORCE: 1365 if (!sockopt_capable(CAP_NET_ADMIN)) { 1366 ret = -EPERM; 1367 break; 1368 } 1369 1370 /* No negative values (to prevent underflow, as val will be 1371 * multiplied by 2). 
1372 */ 1373 __sock_set_rcvbuf(sk, max(val, 0)); 1374 break; 1375 1376 case SO_KEEPALIVE: 1377 if (sk->sk_prot->keepalive) 1378 sk->sk_prot->keepalive(sk, valbool); 1379 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); 1380 break; 1381 1382 case SO_OOBINLINE: 1383 sock_valbool_flag(sk, SOCK_URGINLINE, valbool); 1384 break; 1385 1386 case SO_NO_CHECK: 1387 sk->sk_no_check_tx = valbool; 1388 break; 1389 1390 case SO_LINGER: 1391 if (optlen < sizeof(ling)) { 1392 ret = -EINVAL; /* 1003.1g */ 1393 break; 1394 } 1395 if (copy_from_sockptr(&ling, optval, sizeof(ling))) { 1396 ret = -EFAULT; 1397 break; 1398 } 1399 if (!ling.l_onoff) { 1400 sock_reset_flag(sk, SOCK_LINGER); 1401 } else { 1402 unsigned long t_sec = ling.l_linger; 1403 1404 if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ) 1405 WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT); 1406 else 1407 WRITE_ONCE(sk->sk_lingertime, t_sec * HZ); 1408 sock_set_flag(sk, SOCK_LINGER); 1409 } 1410 break; 1411 1412 case SO_BSDCOMPAT: 1413 break; 1414 1415 case SO_TIMESTAMP_OLD: 1416 case SO_TIMESTAMP_NEW: 1417 case SO_TIMESTAMPNS_OLD: 1418 case SO_TIMESTAMPNS_NEW: 1419 sock_set_timestamp(sk, optname, valbool); 1420 break; 1421 1422 case SO_TIMESTAMPING_NEW: 1423 case SO_TIMESTAMPING_OLD: 1424 if (optlen == sizeof(timestamping)) { 1425 if (copy_from_sockptr(×tamping, optval, 1426 sizeof(timestamping))) { 1427 ret = -EFAULT; 1428 break; 1429 } 1430 } else { 1431 memset(×tamping, 0, sizeof(timestamping)); 1432 timestamping.flags = val; 1433 } 1434 ret = sock_set_timestamping(sk, optname, timestamping); 1435 break; 1436 1437 case SO_RCVLOWAT: 1438 { 1439 int (*set_rcvlowat)(struct sock *sk, int val) = NULL; 1440 1441 if (val < 0) 1442 val = INT_MAX; 1443 if (sock) 1444 set_rcvlowat = READ_ONCE(sock->ops)->set_rcvlowat; 1445 if (set_rcvlowat) 1446 ret = set_rcvlowat(sk, val); 1447 else 1448 WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); 1449 break; 1450 } 1451 case SO_ATTACH_FILTER: { 1452 struct sock_fprog fprog; 1453 1454 ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); 1455 if (!ret) 1456 ret = sk_attach_filter(&fprog, sk); 1457 break; 1458 } 1459 case SO_ATTACH_BPF: 1460 ret = -EINVAL; 1461 if (optlen == sizeof(u32)) { 1462 u32 ufd; 1463 1464 ret = -EFAULT; 1465 if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) 1466 break; 1467 1468 ret = sk_attach_bpf(ufd, sk); 1469 } 1470 break; 1471 1472 case SO_ATTACH_REUSEPORT_CBPF: { 1473 struct sock_fprog fprog; 1474 1475 ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); 1476 if (!ret) 1477 ret = sk_reuseport_attach_filter(&fprog, sk); 1478 break; 1479 } 1480 case SO_ATTACH_REUSEPORT_EBPF: 1481 ret = -EINVAL; 1482 if (optlen == sizeof(u32)) { 1483 u32 ufd; 1484 1485 ret = -EFAULT; 1486 if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) 1487 break; 1488 1489 ret = sk_reuseport_attach_bpf(ufd, sk); 1490 } 1491 break; 1492 1493 case SO_DETACH_REUSEPORT_BPF: 1494 ret = reuseport_detach_prog(sk); 1495 break; 1496 1497 case SO_DETACH_FILTER: 1498 ret = sk_detach_filter(sk); 1499 break; 1500 1501 case SO_LOCK_FILTER: 1502 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool) 1503 ret = -EPERM; 1504 else 1505 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool); 1506 break; 1507 1508 case SO_MARK: 1509 if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && 1510 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { 1511 ret = -EPERM; 1512 break; 1513 } 1514 1515 __sock_set_mark(sk, val); 1516 break; 1517 case SO_RCVMARK: 1518 sock_valbool_flag(sk, SOCK_RCVMARK, valbool); 1519 break; 1520 1521 case SO_RCVPRIORITY: 1522 sock_valbool_flag(sk, SOCK_RCVPRIORITY, valbool); 1523 break; 1524 1525 case SO_RXQ_OVFL: 1526 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); 1527 break; 1528 1529 case SO_WIFI_STATUS: 1530 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); 1531 break; 1532 1533 case SO_NOFCS: 1534 sock_valbool_flag(sk, SOCK_NOFCS, valbool); 1535 break; 1536 1537 case SO_SELECT_ERR_QUEUE: 1538 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); 1539 break; 1540 1541 case SO_PASSCRED: 1542 if (sk_may_scm_recv(sk)) 1543 sk->sk_scm_credentials = valbool; 1544 else 1545 ret = -EOPNOTSUPP; 1546 break; 1547 1548 case SO_PASSSEC: 1549 if (IS_ENABLED(CONFIG_SECURITY_NETWORK) && sk_may_scm_recv(sk)) 1550 sk->sk_scm_security = valbool; 1551 else 1552 ret = -EOPNOTSUPP; 1553 break; 1554 1555 case SO_PASSPIDFD: 1556 if (sk_is_unix(sk)) 1557 sk->sk_scm_pidfd = valbool; 1558 else 1559 ret = -EOPNOTSUPP; 1560 break; 1561 1562 case SO_PASSRIGHTS: 1563 if (sk_is_unix(sk)) 1564 sk->sk_scm_rights = valbool; 1565 else 1566 ret = -EOPNOTSUPP; 1567 break; 1568 1569 case SO_INCOMING_CPU: 1570 reuseport_update_incoming_cpu(sk, val); 1571 break; 1572 1573 case SO_CNX_ADVICE: 1574 if (val == 1) 1575 dst_negative_advice(sk); 1576 break; 1577 1578 case SO_ZEROCOPY: 1579 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) { 1580 if (!(sk_is_tcp(sk) || 1581 (sk->sk_type == SOCK_DGRAM && 1582 sk->sk_protocol == IPPROTO_UDP))) 1583 ret = -EOPNOTSUPP; 1584 } else if (sk->sk_family != PF_RDS) { 1585 ret = -EOPNOTSUPP; 1586 } 1587 if (!ret) { 1588 if (val < 0 || val > 1) 1589 ret = -EINVAL; 1590 else 1591 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool); 1592 } 1593 break; 1594 1595 case SO_TXTIME: 1596 if (optlen != sizeof(struct sock_txtime)) { 1597 ret = -EINVAL; 1598 break; 1599 } else if (copy_from_sockptr(&sk_txtime, optval, 1600 sizeof(struct sock_txtime))) { 1601 ret = -EFAULT; 1602 
break; 1603 } else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) { 1604 ret = -EINVAL; 1605 break; 1606 } 1607 /* CLOCK_MONOTONIC is only used by sch_fq, and this packet 1608 * scheduler has enough safe guards. 1609 */ 1610 if (sk_txtime.clockid != CLOCK_MONOTONIC && 1611 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { 1612 ret = -EPERM; 1613 break; 1614 } 1615 1616 ret = sockopt_validate_clockid(sk_txtime.clockid); 1617 if (ret) 1618 break; 1619 1620 sock_valbool_flag(sk, SOCK_TXTIME, true); 1621 sk->sk_clockid = sk_txtime.clockid; 1622 sk->sk_txtime_deadline_mode = 1623 !!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE); 1624 sk->sk_txtime_report_errors = 1625 !!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS); 1626 break; 1627 1628 case SO_BINDTOIFINDEX: 1629 ret = sock_bindtoindex_locked(sk, val); 1630 break; 1631 1632 case SO_BUF_LOCK: 1633 if (val & ~SOCK_BUF_LOCK_MASK) { 1634 ret = -EINVAL; 1635 break; 1636 } 1637 sk->sk_userlocks = val | (sk->sk_userlocks & 1638 ~SOCK_BUF_LOCK_MASK); 1639 break; 1640 1641 case SO_RESERVE_MEM: 1642 { 1643 int delta; 1644 1645 if (val < 0) { 1646 ret = -EINVAL; 1647 break; 1648 } 1649 1650 delta = val - sk->sk_reserved_mem; 1651 if (delta < 0) 1652 sock_release_reserved_memory(sk, -delta); 1653 else 1654 ret = sock_reserve_memory(sk, delta); 1655 break; 1656 } 1657 1658 default: 1659 ret = -ENOPROTOOPT; 1660 break; 1661 } 1662 sockopt_release_sock(sk); 1663 return ret; 1664 } 1665 1666 int sock_setsockopt(struct socket *sock, int level, int optname, 1667 sockptr_t optval, unsigned int optlen) 1668 { 1669 return sk_setsockopt(sock->sk, level, optname, 1670 optval, optlen); 1671 } 1672 EXPORT_SYMBOL(sock_setsockopt); 1673 1674 static const struct cred *sk_get_peer_cred(struct sock *sk) 1675 { 1676 const struct cred *cred; 1677 1678 spin_lock(&sk->sk_peer_lock); 1679 cred = get_cred(sk->sk_peer_cred); 1680 spin_unlock(&sk->sk_peer_lock); 1681 1682 return cred; 1683 } 1684 1685 static void cred_to_ucred(struct pid *pid, const struct cred *cred, 1686 struct ucred *ucred) 1687 { 1688 ucred->pid = pid_vnr(pid); 1689 ucred->uid = ucred->gid = -1; 1690 if (cred) { 1691 struct user_namespace *current_ns = current_user_ns(); 1692 1693 ucred->uid = from_kuid_munged(current_ns, cred->euid); 1694 ucred->gid = from_kgid_munged(current_ns, cred->egid); 1695 } 1696 } 1697 1698 static int groups_to_user(sockptr_t dst, const struct group_info *src) 1699 { 1700 struct user_namespace *user_ns = current_user_ns(); 1701 int i; 1702 1703 for (i = 0; i < src->ngroups; i++) { 1704 gid_t gid = from_kgid_munged(user_ns, src->gid[i]); 1705 1706 if (copy_to_sockptr_offset(dst, i * sizeof(gid), &gid, sizeof(gid))) 1707 return -EFAULT; 1708 } 1709 1710 return 0; 1711 } 1712 1713 int sk_getsockopt(struct sock *sk, int level, int optname, 1714 sockptr_t optval, sockptr_t optlen) 1715 { 1716 struct socket *sock = sk->sk_socket; 1717 1718 union { 1719 int val; 1720 u64 val64; 1721 unsigned long ulval; 1722 struct linger ling; 1723 struct old_timeval32 tm32; 1724 struct __kernel_old_timeval tm; 1725 struct __kernel_sock_timeval stm; 1726 struct sock_txtime txtime; 1727 struct so_timestamping timestamping; 1728 } v; 1729 1730 int lv = sizeof(int); 1731 int len; 1732 1733 if (copy_from_sockptr(&len, optlen, sizeof(int))) 1734 return -EFAULT; 1735 if (len < 0) 1736 return -EINVAL; 1737 1738 memset(&v, 0, sizeof(v)); 1739 1740 switch (optname) { 1741 case SO_DEBUG: 1742 v.val = sock_flag(sk, SOCK_DBG); 1743 break; 1744 1745 case SO_DONTROUTE: 1746 v.val = sock_flag(sk, 
SOCK_LOCALROUTE); 1747 break; 1748 1749 case SO_BROADCAST: 1750 v.val = sock_flag(sk, SOCK_BROADCAST); 1751 break; 1752 1753 case SO_SNDBUF: 1754 v.val = READ_ONCE(sk->sk_sndbuf); 1755 break; 1756 1757 case SO_RCVBUF: 1758 v.val = READ_ONCE(sk->sk_rcvbuf); 1759 break; 1760 1761 case SO_REUSEADDR: 1762 v.val = sk->sk_reuse; 1763 break; 1764 1765 case SO_REUSEPORT: 1766 v.val = sk->sk_reuseport; 1767 break; 1768 1769 case SO_KEEPALIVE: 1770 v.val = sock_flag(sk, SOCK_KEEPOPEN); 1771 break; 1772 1773 case SO_TYPE: 1774 v.val = sk->sk_type; 1775 break; 1776 1777 case SO_PROTOCOL: 1778 v.val = sk->sk_protocol; 1779 break; 1780 1781 case SO_DOMAIN: 1782 v.val = sk->sk_family; 1783 break; 1784 1785 case SO_ERROR: 1786 v.val = -sock_error(sk); 1787 if (v.val == 0) 1788 v.val = xchg(&sk->sk_err_soft, 0); 1789 break; 1790 1791 case SO_OOBINLINE: 1792 v.val = sock_flag(sk, SOCK_URGINLINE); 1793 break; 1794 1795 case SO_NO_CHECK: 1796 v.val = sk->sk_no_check_tx; 1797 break; 1798 1799 case SO_PRIORITY: 1800 v.val = READ_ONCE(sk->sk_priority); 1801 break; 1802 1803 case SO_LINGER: 1804 lv = sizeof(v.ling); 1805 v.ling.l_onoff = sock_flag(sk, SOCK_LINGER); 1806 v.ling.l_linger = READ_ONCE(sk->sk_lingertime) / HZ; 1807 break; 1808 1809 case SO_BSDCOMPAT: 1810 break; 1811 1812 case SO_TIMESTAMP_OLD: 1813 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && 1814 !sock_flag(sk, SOCK_TSTAMP_NEW) && 1815 !sock_flag(sk, SOCK_RCVTSTAMPNS); 1816 break; 1817 1818 case SO_TIMESTAMPNS_OLD: 1819 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW); 1820 break; 1821 1822 case SO_TIMESTAMP_NEW: 1823 v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW); 1824 break; 1825 1826 case SO_TIMESTAMPNS_NEW: 1827 v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW); 1828 break; 1829 1830 case SO_TIMESTAMPING_OLD: 1831 case SO_TIMESTAMPING_NEW: 1832 lv = sizeof(v.timestamping); 1833 /* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only 1834 * returning the flags when they were set through the same option. 1835 * Don't change the beviour for the old case SO_TIMESTAMPING_OLD. 
1836 */ 1837 if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) { 1838 v.timestamping.flags = READ_ONCE(sk->sk_tsflags); 1839 v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc); 1840 } 1841 break; 1842 1843 case SO_RCVTIMEO_OLD: 1844 case SO_RCVTIMEO_NEW: 1845 lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v, 1846 SO_RCVTIMEO_OLD == optname); 1847 break; 1848 1849 case SO_SNDTIMEO_OLD: 1850 case SO_SNDTIMEO_NEW: 1851 lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v, 1852 SO_SNDTIMEO_OLD == optname); 1853 break; 1854 1855 case SO_RCVLOWAT: 1856 v.val = READ_ONCE(sk->sk_rcvlowat); 1857 break; 1858 1859 case SO_SNDLOWAT: 1860 v.val = 1; 1861 break; 1862 1863 case SO_PASSCRED: 1864 if (!sk_may_scm_recv(sk)) 1865 return -EOPNOTSUPP; 1866 1867 v.val = sk->sk_scm_credentials; 1868 break; 1869 1870 case SO_PASSPIDFD: 1871 if (!sk_is_unix(sk)) 1872 return -EOPNOTSUPP; 1873 1874 v.val = sk->sk_scm_pidfd; 1875 break; 1876 1877 case SO_PASSRIGHTS: 1878 if (!sk_is_unix(sk)) 1879 return -EOPNOTSUPP; 1880 1881 v.val = sk->sk_scm_rights; 1882 break; 1883 1884 case SO_PEERCRED: 1885 { 1886 struct ucred peercred; 1887 if (len > sizeof(peercred)) 1888 len = sizeof(peercred); 1889 1890 spin_lock(&sk->sk_peer_lock); 1891 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); 1892 spin_unlock(&sk->sk_peer_lock); 1893 1894 if (copy_to_sockptr(optval, &peercred, len)) 1895 return -EFAULT; 1896 goto lenout; 1897 } 1898 1899 case SO_PEERPIDFD: 1900 { 1901 struct pid *peer_pid; 1902 struct file *pidfd_file = NULL; 1903 unsigned int flags = 0; 1904 int pidfd; 1905 1906 if (len > sizeof(pidfd)) 1907 len = sizeof(pidfd); 1908 1909 spin_lock(&sk->sk_peer_lock); 1910 peer_pid = get_pid(sk->sk_peer_pid); 1911 spin_unlock(&sk->sk_peer_lock); 1912 1913 if (!peer_pid) 1914 return -ENODATA; 1915 1916 /* The use of PIDFD_STALE requires stashing of struct pid 1917 * on pidfs with pidfs_register_pid() and only AF_UNIX 1918 * were prepared for this. 1919 */ 1920 if (sk->sk_family == AF_UNIX) 1921 flags = PIDFD_STALE; 1922 1923 pidfd = pidfd_prepare(peer_pid, flags, &pidfd_file); 1924 put_pid(peer_pid); 1925 if (pidfd < 0) 1926 return pidfd; 1927 1928 if (copy_to_sockptr(optval, &pidfd, len) || 1929 copy_to_sockptr(optlen, &len, sizeof(int))) { 1930 put_unused_fd(pidfd); 1931 fput(pidfd_file); 1932 1933 return -EFAULT; 1934 } 1935 1936 fd_install(pidfd, pidfd_file); 1937 return 0; 1938 } 1939 1940 case SO_PEERGROUPS: 1941 { 1942 const struct cred *cred; 1943 int ret, n; 1944 1945 cred = sk_get_peer_cred(sk); 1946 if (!cred) 1947 return -ENODATA; 1948 1949 n = cred->group_info->ngroups; 1950 if (len < n * sizeof(gid_t)) { 1951 len = n * sizeof(gid_t); 1952 put_cred(cred); 1953 return copy_to_sockptr(optlen, &len, sizeof(int)) ? -EFAULT : -ERANGE; 1954 } 1955 len = n * sizeof(gid_t); 1956 1957 ret = groups_to_user(optval, cred->group_info); 1958 put_cred(cred); 1959 if (ret) 1960 return ret; 1961 goto lenout; 1962 } 1963 1964 case SO_PEERNAME: 1965 { 1966 struct sockaddr_storage address; 1967 1968 lv = READ_ONCE(sock->ops)->getname(sock, (struct sockaddr *)&address, 2); 1969 if (lv < 0) 1970 return -ENOTCONN; 1971 if (lv < len) 1972 return -EINVAL; 1973 if (copy_to_sockptr(optval, &address, len)) 1974 return -EFAULT; 1975 goto lenout; 1976 } 1977 1978 /* Dubious BSD thing... Probably nobody even uses it, but 1979 * the UNIX standard wants it for whatever reason... 
-DaveM 1980 */ 1981 case SO_ACCEPTCONN: 1982 v.val = sk->sk_state == TCP_LISTEN; 1983 break; 1984 1985 case SO_PASSSEC: 1986 if (!IS_ENABLED(CONFIG_SECURITY_NETWORK) || !sk_may_scm_recv(sk)) 1987 return -EOPNOTSUPP; 1988 1989 v.val = sk->sk_scm_security; 1990 break; 1991 1992 case SO_PEERSEC: 1993 return security_socket_getpeersec_stream(sock, 1994 optval, optlen, len); 1995 1996 case SO_MARK: 1997 v.val = READ_ONCE(sk->sk_mark); 1998 break; 1999 2000 case SO_RCVMARK: 2001 v.val = sock_flag(sk, SOCK_RCVMARK); 2002 break; 2003 2004 case SO_RCVPRIORITY: 2005 v.val = sock_flag(sk, SOCK_RCVPRIORITY); 2006 break; 2007 2008 case SO_RXQ_OVFL: 2009 v.val = sock_flag(sk, SOCK_RXQ_OVFL); 2010 break; 2011 2012 case SO_WIFI_STATUS: 2013 v.val = sock_flag(sk, SOCK_WIFI_STATUS); 2014 break; 2015 2016 case SO_PEEK_OFF: 2017 if (!READ_ONCE(sock->ops)->set_peek_off) 2018 return -EOPNOTSUPP; 2019 2020 v.val = READ_ONCE(sk->sk_peek_off); 2021 break; 2022 case SO_NOFCS: 2023 v.val = sock_flag(sk, SOCK_NOFCS); 2024 break; 2025 2026 case SO_BINDTODEVICE: 2027 return sock_getbindtodevice(sk, optval, optlen, len); 2028 2029 case SO_GET_FILTER: 2030 len = sk_get_filter(sk, optval, len); 2031 if (len < 0) 2032 return len; 2033 2034 goto lenout; 2035 2036 case SO_LOCK_FILTER: 2037 v.val = sock_flag(sk, SOCK_FILTER_LOCKED); 2038 break; 2039 2040 case SO_BPF_EXTENSIONS: 2041 v.val = bpf_tell_extensions(); 2042 break; 2043 2044 case SO_SELECT_ERR_QUEUE: 2045 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); 2046 break; 2047 2048 #ifdef CONFIG_NET_RX_BUSY_POLL 2049 case SO_BUSY_POLL: 2050 v.val = READ_ONCE(sk->sk_ll_usec); 2051 break; 2052 case SO_PREFER_BUSY_POLL: 2053 v.val = READ_ONCE(sk->sk_prefer_busy_poll); 2054 break; 2055 #endif 2056 2057 case SO_MAX_PACING_RATE: 2058 /* The READ_ONCE() pair with the WRITE_ONCE() in sk_setsockopt() */ 2059 if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) { 2060 lv = sizeof(v.ulval); 2061 v.ulval = READ_ONCE(sk->sk_max_pacing_rate); 2062 } else { 2063 /* 32bit version */ 2064 v.val = min_t(unsigned long, ~0U, 2065 READ_ONCE(sk->sk_max_pacing_rate)); 2066 } 2067 break; 2068 2069 case SO_INCOMING_CPU: 2070 v.val = READ_ONCE(sk->sk_incoming_cpu); 2071 break; 2072 2073 case SO_MEMINFO: 2074 { 2075 u32 meminfo[SK_MEMINFO_VARS]; 2076 2077 sk_get_meminfo(sk, meminfo); 2078 2079 len = min_t(unsigned int, len, sizeof(meminfo)); 2080 if (copy_to_sockptr(optval, &meminfo, len)) 2081 return -EFAULT; 2082 2083 goto lenout; 2084 } 2085 2086 #ifdef CONFIG_NET_RX_BUSY_POLL 2087 case SO_INCOMING_NAPI_ID: 2088 v.val = READ_ONCE(sk->sk_napi_id); 2089 2090 /* aggregate non-NAPI IDs down to 0 */ 2091 if (!napi_id_valid(v.val)) 2092 v.val = 0; 2093 2094 break; 2095 #endif 2096 2097 case SO_COOKIE: 2098 lv = sizeof(u64); 2099 if (len < lv) 2100 return -EINVAL; 2101 v.val64 = sock_gen_cookie(sk); 2102 break; 2103 2104 case SO_ZEROCOPY: 2105 v.val = sock_flag(sk, SOCK_ZEROCOPY); 2106 break; 2107 2108 case SO_TXTIME: 2109 lv = sizeof(v.txtime); 2110 v.txtime.clockid = sk->sk_clockid; 2111 v.txtime.flags |= sk->sk_txtime_deadline_mode ? 2112 SOF_TXTIME_DEADLINE_MODE : 0; 2113 v.txtime.flags |= sk->sk_txtime_report_errors ? 
2114 SOF_TXTIME_REPORT_ERRORS : 0; 2115 break; 2116 2117 case SO_BINDTOIFINDEX: 2118 v.val = READ_ONCE(sk->sk_bound_dev_if); 2119 break; 2120 2121 case SO_NETNS_COOKIE: 2122 lv = sizeof(u64); 2123 if (len != lv) 2124 return -EINVAL; 2125 v.val64 = sock_net(sk)->net_cookie; 2126 break; 2127 2128 case SO_BUF_LOCK: 2129 v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK; 2130 break; 2131 2132 case SO_RESERVE_MEM: 2133 v.val = READ_ONCE(sk->sk_reserved_mem); 2134 break; 2135 2136 case SO_TXREHASH: 2137 if (!sk_is_tcp(sk)) 2138 return -EOPNOTSUPP; 2139 2140 /* Paired with WRITE_ONCE() in sk_setsockopt() */ 2141 v.val = READ_ONCE(sk->sk_txrehash); 2142 break; 2143 2144 default: 2145 /* We implement the SO_SNDLOWAT etc to not be settable 2146 * (1003.1g 7). 2147 */ 2148 return -ENOPROTOOPT; 2149 } 2150 2151 if (len > lv) 2152 len = lv; 2153 if (copy_to_sockptr(optval, &v, len)) 2154 return -EFAULT; 2155 lenout: 2156 if (copy_to_sockptr(optlen, &len, sizeof(int))) 2157 return -EFAULT; 2158 return 0; 2159 } 2160 2161 /* 2162 * Initialize an sk_lock. 2163 * 2164 * (We also register the sk_lock with the lock validator.) 2165 */ 2166 static inline void sock_lock_init(struct sock *sk) 2167 { 2168 sk_owner_clear(sk); 2169 2170 if (sk->sk_kern_sock) 2171 sock_lock_init_class_and_name( 2172 sk, 2173 af_family_kern_slock_key_strings[sk->sk_family], 2174 af_family_kern_slock_keys + sk->sk_family, 2175 af_family_kern_key_strings[sk->sk_family], 2176 af_family_kern_keys + sk->sk_family); 2177 else 2178 sock_lock_init_class_and_name( 2179 sk, 2180 af_family_slock_key_strings[sk->sk_family], 2181 af_family_slock_keys + sk->sk_family, 2182 af_family_key_strings[sk->sk_family], 2183 af_family_keys + sk->sk_family); 2184 } 2185 2186 /* 2187 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet, 2188 * even temporarily, because of RCU lookups. sk_node should also be left as is. 2189 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end 2190 */ 2191 static void sock_copy(struct sock *nsk, const struct sock *osk) 2192 { 2193 const struct proto *prot = READ_ONCE(osk->sk_prot); 2194 #ifdef CONFIG_SECURITY_NETWORK 2195 void *sptr = nsk->sk_security; 2196 #endif 2197 2198 /* If we move sk_tx_queue_mapping out of the private section, 2199 * we must check if sk_tx_queue_clear() is called after 2200 * sock_copy() in sk_clone_lock(). 
2201 */ 2202 BUILD_BUG_ON(offsetof(struct sock, sk_tx_queue_mapping) < 2203 offsetof(struct sock, sk_dontcopy_begin) || 2204 offsetof(struct sock, sk_tx_queue_mapping) >= 2205 offsetof(struct sock, sk_dontcopy_end)); 2206 2207 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin)); 2208 2209 unsafe_memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, 2210 prot->obj_size - offsetof(struct sock, sk_dontcopy_end), 2211 /* alloc is larger than struct, see sk_prot_alloc() */); 2212 2213 #ifdef CONFIG_SECURITY_NETWORK 2214 nsk->sk_security = sptr; 2215 security_sk_clone(osk, nsk); 2216 #endif 2217 } 2218 2219 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, 2220 int family) 2221 { 2222 struct sock *sk; 2223 struct kmem_cache *slab; 2224 2225 slab = prot->slab; 2226 if (slab != NULL) { 2227 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); 2228 if (!sk) 2229 return sk; 2230 if (want_init_on_alloc(priority)) 2231 sk_prot_clear_nulls(sk, prot->obj_size); 2232 } else 2233 sk = kmalloc(prot->obj_size, priority); 2234 2235 if (sk != NULL) { 2236 if (security_sk_alloc(sk, family, priority)) 2237 goto out_free; 2238 2239 if (!try_module_get(prot->owner)) 2240 goto out_free_sec; 2241 } 2242 2243 return sk; 2244 2245 out_free_sec: 2246 security_sk_free(sk); 2247 out_free: 2248 if (slab != NULL) 2249 kmem_cache_free(slab, sk); 2250 else 2251 kfree(sk); 2252 return NULL; 2253 } 2254 2255 static void sk_prot_free(struct proto *prot, struct sock *sk) 2256 { 2257 struct kmem_cache *slab; 2258 struct module *owner; 2259 2260 owner = prot->owner; 2261 slab = prot->slab; 2262 2263 cgroup_sk_free(&sk->sk_cgrp_data); 2264 mem_cgroup_sk_free(sk); 2265 security_sk_free(sk); 2266 2267 sk_owner_put(sk); 2268 2269 if (slab != NULL) 2270 kmem_cache_free(slab, sk); 2271 else 2272 kfree(sk); 2273 module_put(owner); 2274 } 2275 2276 /** 2277 * sk_alloc - All socket objects are allocated here 2278 * @net: the applicable net namespace 2279 * @family: protocol family 2280 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 2281 * @prot: struct proto associated with this new sock instance 2282 * @kern: is this to be a kernel socket? 2283 */ 2284 struct sock *sk_alloc(struct net *net, int family, gfp_t priority, 2285 struct proto *prot, int kern) 2286 { 2287 struct sock *sk; 2288 2289 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); 2290 if (sk) { 2291 sk->sk_family = family; 2292 /* 2293 * See comment in struct sock definition to understand 2294 * why we need sk_prot_creator -acme 2295 */ 2296 sk->sk_prot = sk->sk_prot_creator = prot; 2297 sk->sk_kern_sock = kern; 2298 sock_lock_init(sk); 2299 sk->sk_net_refcnt = kern ? 0 : 1; 2300 if (likely(sk->sk_net_refcnt)) { 2301 get_net_track(net, &sk->ns_tracker, priority); 2302 sock_inuse_add(net, 1); 2303 } else { 2304 net_passive_inc(net); 2305 __netns_tracker_alloc(net, &sk->ns_tracker, 2306 false, priority); 2307 } 2308 2309 sock_net_set(sk, net); 2310 refcount_set(&sk->sk_wmem_alloc, 1); 2311 2312 mem_cgroup_sk_alloc(sk); 2313 cgroup_sk_alloc(&sk->sk_cgrp_data); 2314 sock_update_classid(&sk->sk_cgrp_data); 2315 sock_update_netprioidx(&sk->sk_cgrp_data); 2316 sk_tx_queue_clear(sk); 2317 } 2318 2319 return sk; 2320 } 2321 EXPORT_SYMBOL(sk_alloc); 2322 2323 /* Sockets having SOCK_RCU_FREE will call this function after one RCU 2324 * grace period. This is the case for UDP sockets and TCP listeners. 
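 * Protocols opt in by setting the flag on the socket; illustrative only,
 * the exact call site is protocol specific:
 *
 *	sock_set_flag(sk, SOCK_RCU_FREE);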
2325 */ 2326 static void __sk_destruct(struct rcu_head *head) 2327 { 2328 struct sock *sk = container_of(head, struct sock, sk_rcu); 2329 struct net *net = sock_net(sk); 2330 struct sk_filter *filter; 2331 2332 if (sk->sk_destruct) 2333 sk->sk_destruct(sk); 2334 2335 filter = rcu_dereference_check(sk->sk_filter, 2336 refcount_read(&sk->sk_wmem_alloc) == 0); 2337 if (filter) { 2338 sk_filter_uncharge(sk, filter); 2339 RCU_INIT_POINTER(sk->sk_filter, NULL); 2340 } 2341 2342 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); 2343 2344 #ifdef CONFIG_BPF_SYSCALL 2345 bpf_sk_storage_free(sk); 2346 #endif 2347 2348 if (atomic_read(&sk->sk_omem_alloc)) 2349 pr_debug("%s: optmem leakage (%d bytes) detected\n", 2350 __func__, atomic_read(&sk->sk_omem_alloc)); 2351 2352 if (sk->sk_frag.page) { 2353 put_page(sk->sk_frag.page); 2354 sk->sk_frag.page = NULL; 2355 } 2356 2357 /* We do not need to acquire sk->sk_peer_lock, we are the last user. */ 2358 put_cred(sk->sk_peer_cred); 2359 put_pid(sk->sk_peer_pid); 2360 2361 if (likely(sk->sk_net_refcnt)) { 2362 put_net_track(net, &sk->ns_tracker); 2363 } else { 2364 __netns_tracker_free(net, &sk->ns_tracker, false); 2365 net_passive_dec(net); 2366 } 2367 sk_prot_free(sk->sk_prot_creator, sk); 2368 } 2369 2370 void sk_net_refcnt_upgrade(struct sock *sk) 2371 { 2372 struct net *net = sock_net(sk); 2373 2374 WARN_ON_ONCE(sk->sk_net_refcnt); 2375 __netns_tracker_free(net, &sk->ns_tracker, false); 2376 net_passive_dec(net); 2377 sk->sk_net_refcnt = 1; 2378 get_net_track(net, &sk->ns_tracker, GFP_KERNEL); 2379 sock_inuse_add(net, 1); 2380 } 2381 EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade); 2382 2383 void sk_destruct(struct sock *sk) 2384 { 2385 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); 2386 2387 if (rcu_access_pointer(sk->sk_reuseport_cb)) { 2388 reuseport_detach_sock(sk); 2389 use_call_rcu = true; 2390 } 2391 2392 if (use_call_rcu) 2393 call_rcu(&sk->sk_rcu, __sk_destruct); 2394 else 2395 __sk_destruct(&sk->sk_rcu); 2396 } 2397 2398 static void __sk_free(struct sock *sk) 2399 { 2400 if (likely(sk->sk_net_refcnt)) 2401 sock_inuse_add(sock_net(sk), -1); 2402 2403 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk))) 2404 sock_diag_broadcast_destroy(sk); 2405 else 2406 sk_destruct(sk); 2407 } 2408 2409 void sk_free(struct sock *sk) 2410 { 2411 /* 2412 * We subtract one from sk_wmem_alloc and can know if 2413 * some packets are still in some tx queue. 
2414 * If not null, sock_wfree() will call __sk_free(sk) later 2415 */ 2416 if (refcount_dec_and_test(&sk->sk_wmem_alloc)) 2417 __sk_free(sk); 2418 } 2419 EXPORT_SYMBOL(sk_free); 2420 2421 static void sk_init_common(struct sock *sk) 2422 { 2423 skb_queue_head_init(&sk->sk_receive_queue); 2424 skb_queue_head_init(&sk->sk_write_queue); 2425 skb_queue_head_init(&sk->sk_error_queue); 2426 2427 rwlock_init(&sk->sk_callback_lock); 2428 lockdep_set_class_and_name(&sk->sk_receive_queue.lock, 2429 af_rlock_keys + sk->sk_family, 2430 af_family_rlock_key_strings[sk->sk_family]); 2431 lockdep_set_class_and_name(&sk->sk_write_queue.lock, 2432 af_wlock_keys + sk->sk_family, 2433 af_family_wlock_key_strings[sk->sk_family]); 2434 lockdep_set_class_and_name(&sk->sk_error_queue.lock, 2435 af_elock_keys + sk->sk_family, 2436 af_family_elock_key_strings[sk->sk_family]); 2437 if (sk->sk_kern_sock) 2438 lockdep_set_class_and_name(&sk->sk_callback_lock, 2439 af_kern_callback_keys + sk->sk_family, 2440 af_family_kern_clock_key_strings[sk->sk_family]); 2441 else 2442 lockdep_set_class_and_name(&sk->sk_callback_lock, 2443 af_callback_keys + sk->sk_family, 2444 af_family_clock_key_strings[sk->sk_family]); 2445 } 2446 2447 /** 2448 * sk_clone_lock - clone a socket, and lock its clone 2449 * @sk: the socket to clone 2450 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 2451 * 2452 * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) 2453 */ 2454 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) 2455 { 2456 struct proto *prot = READ_ONCE(sk->sk_prot); 2457 struct sk_filter *filter; 2458 bool is_charged = true; 2459 struct sock *newsk; 2460 2461 newsk = sk_prot_alloc(prot, priority, sk->sk_family); 2462 if (!newsk) 2463 goto out; 2464 2465 sock_copy(newsk, sk); 2466 2467 newsk->sk_prot_creator = prot; 2468 2469 /* SANITY */ 2470 if (likely(newsk->sk_net_refcnt)) { 2471 get_net_track(sock_net(newsk), &newsk->ns_tracker, priority); 2472 sock_inuse_add(sock_net(newsk), 1); 2473 } else { 2474 /* Kernel sockets are not elevating the struct net refcount. 2475 * Instead, use a tracker to more easily detect if a layer 2476 * is not properly dismantling its kernel sockets at netns 2477 * destroy time. 
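		 * (A kernel socket that later needs a real netns reference
		 * can be converted with sk_net_refcnt_upgrade() above.)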
2478 */ 2479 net_passive_inc(sock_net(newsk)); 2480 __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker, 2481 false, priority); 2482 } 2483 sk_node_init(&newsk->sk_node); 2484 sock_lock_init(newsk); 2485 bh_lock_sock(newsk); 2486 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 2487 newsk->sk_backlog.len = 0; 2488 2489 atomic_set(&newsk->sk_rmem_alloc, 0); 2490 2491 /* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */ 2492 refcount_set(&newsk->sk_wmem_alloc, 1); 2493 2494 atomic_set(&newsk->sk_omem_alloc, 0); 2495 sk_init_common(newsk); 2496 2497 newsk->sk_dst_cache = NULL; 2498 newsk->sk_dst_pending_confirm = 0; 2499 newsk->sk_wmem_queued = 0; 2500 newsk->sk_forward_alloc = 0; 2501 newsk->sk_reserved_mem = 0; 2502 atomic_set(&newsk->sk_drops, 0); 2503 newsk->sk_send_head = NULL; 2504 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 2505 atomic_set(&newsk->sk_zckey, 0); 2506 2507 sock_reset_flag(newsk, SOCK_DONE); 2508 2509 /* sk->sk_memcg will be populated at accept() time */ 2510 newsk->sk_memcg = NULL; 2511 2512 cgroup_sk_clone(&newsk->sk_cgrp_data); 2513 2514 rcu_read_lock(); 2515 filter = rcu_dereference(sk->sk_filter); 2516 if (filter != NULL) 2517 /* though it's an empty new sock, the charging may fail 2518 * if sysctl_optmem_max was changed between creation of 2519 * original socket and cloning 2520 */ 2521 is_charged = sk_filter_charge(newsk, filter); 2522 RCU_INIT_POINTER(newsk->sk_filter, filter); 2523 rcu_read_unlock(); 2524 2525 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 2526 /* We need to make sure that we don't uncharge the new 2527 * socket if we couldn't charge it in the first place 2528 * as otherwise we uncharge the parent's filter. 2529 */ 2530 if (!is_charged) 2531 RCU_INIT_POINTER(newsk->sk_filter, NULL); 2532 2533 goto free; 2534 } 2535 2536 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); 2537 2538 if (bpf_sk_storage_clone(sk, newsk)) 2539 goto free; 2540 2541 /* Clear sk_user_data if parent had the pointer tagged 2542 * as not suitable for copying when cloning. 2543 */ 2544 if (sk_user_data_is_nocopy(newsk)) 2545 newsk->sk_user_data = NULL; 2546 2547 newsk->sk_err = 0; 2548 newsk->sk_err_soft = 0; 2549 newsk->sk_priority = 0; 2550 newsk->sk_incoming_cpu = raw_smp_processor_id(); 2551 2552 /* Before updating sk_refcnt, we must commit prior changes to memory 2553 * (Documentation/RCU/rculist_nulls.rst for details) 2554 */ 2555 smp_wmb(); 2556 refcount_set(&newsk->sk_refcnt, 2); 2557 2558 sk_set_socket(newsk, NULL); 2559 sk_tx_queue_clear(newsk); 2560 RCU_INIT_POINTER(newsk->sk_wq, NULL); 2561 2562 if (newsk->sk_prot->sockets_allocated) 2563 sk_sockets_allocated_inc(newsk); 2564 2565 if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP) 2566 net_enable_timestamp(); 2567 out: 2568 return newsk; 2569 free: 2570 /* It is still raw copy of parent, so invalidate 2571 * destructor and make plain sk_free() 2572 */ 2573 newsk->sk_destruct = NULL; 2574 bh_unlock_sock(newsk); 2575 sk_free(newsk); 2576 newsk = NULL; 2577 goto out; 2578 } 2579 EXPORT_SYMBOL_GPL(sk_clone_lock); 2580 2581 static u32 sk_dst_gso_max_size(struct sock *sk, struct dst_entry *dst) 2582 { 2583 bool is_ipv6 = false; 2584 u32 max_size; 2585 2586 #if IS_ENABLED(CONFIG_IPV6) 2587 is_ipv6 = (sk->sk_family == AF_INET6 && 2588 !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)); 2589 #endif 2590 /* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */ 2591 max_size = is_ipv6 ? 
READ_ONCE(dst_dev(dst)->gso_max_size) : 2592 READ_ONCE(dst_dev(dst)->gso_ipv4_max_size); 2593 if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk)) 2594 max_size = GSO_LEGACY_MAX_SIZE; 2595 2596 return max_size - (MAX_TCP_HEADER + 1); 2597 } 2598 2599 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) 2600 { 2601 u32 max_segs = 1; 2602 2603 sk->sk_route_caps = dst_dev(dst)->features; 2604 if (sk_is_tcp(sk)) { 2605 struct inet_connection_sock *icsk = inet_csk(sk); 2606 2607 sk->sk_route_caps |= NETIF_F_GSO; 2608 icsk->icsk_ack.dst_quick_ack = dst_metric(dst, RTAX_QUICKACK); 2609 } 2610 if (sk->sk_route_caps & NETIF_F_GSO) 2611 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; 2612 if (unlikely(sk->sk_gso_disabled)) 2613 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 2614 if (sk_can_gso(sk)) { 2615 if (dst->header_len && !xfrm_dst_offload_ok(dst)) { 2616 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 2617 } else { 2618 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 2619 sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dst); 2620 /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */ 2621 max_segs = max_t(u32, READ_ONCE(dst_dev(dst)->gso_max_segs), 1); 2622 } 2623 } 2624 sk->sk_gso_max_segs = max_segs; 2625 sk_dst_set(sk, dst); 2626 } 2627 EXPORT_SYMBOL_GPL(sk_setup_caps); 2628 2629 /* 2630 * Simple resource managers for sockets. 2631 */ 2632 2633 2634 /* 2635 * Write buffer destructor automatically called from kfree_skb. 2636 */ 2637 void sock_wfree(struct sk_buff *skb) 2638 { 2639 struct sock *sk = skb->sk; 2640 unsigned int len = skb->truesize; 2641 bool free; 2642 2643 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { 2644 if (sock_flag(sk, SOCK_RCU_FREE) && 2645 sk->sk_write_space == sock_def_write_space) { 2646 rcu_read_lock(); 2647 free = refcount_sub_and_test(len, &sk->sk_wmem_alloc); 2648 sock_def_write_space_wfree(sk); 2649 rcu_read_unlock(); 2650 if (unlikely(free)) 2651 __sk_free(sk); 2652 return; 2653 } 2654 2655 /* 2656 * Keep a reference on sk_wmem_alloc, this will be released 2657 * after sk_write_space() call 2658 */ 2659 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc)); 2660 sk->sk_write_space(sk); 2661 len = 1; 2662 } 2663 /* 2664 * if sk_wmem_alloc reaches 0, we must finish what sk_free() 2665 * could not do because of in-flight packets 2666 */ 2667 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc)) 2668 __sk_free(sk); 2669 } 2670 EXPORT_SYMBOL(sock_wfree); 2671 2672 /* This variant of sock_wfree() is used by TCP, 2673 * since it sets SOCK_USE_WRITE_QUEUE. 2674 */ 2675 void __sock_wfree(struct sk_buff *skb) 2676 { 2677 struct sock *sk = skb->sk; 2678 2679 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)) 2680 __sk_free(sk); 2681 } 2682 2683 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) 2684 { 2685 skb_orphan(skb); 2686 #ifdef CONFIG_INET 2687 if (unlikely(!sk_fullsock(sk))) 2688 return skb_set_owner_edemux(skb, sk); 2689 #endif 2690 skb->sk = sk; 2691 skb->destructor = sock_wfree; 2692 skb_set_hash_from_sk(skb, sk); 2693 /* 2694 * We used to take a refcount on sk, but following operation 2695 * is enough to guarantee sk_free() won't free this sock until 2696 * all in-flight packets are completed 2697 */ 2698 refcount_add(skb->truesize, &sk->sk_wmem_alloc); 2699 } 2700 EXPORT_SYMBOL(skb_set_owner_w); 2701 2702 static bool can_skb_orphan_partial(const struct sk_buff *skb) 2703 { 2704 /* Drivers depend on in-order delivery for crypto offload, 2705 * partial orphan breaks out-of-order-OK logic. 
2706 */ 2707 if (skb_is_decrypted(skb)) 2708 return false; 2709 2710 return (skb->destructor == sock_wfree || 2711 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree)); 2712 } 2713 2714 /* This helper is used by netem, as it can hold packets in its 2715 * delay queue. We want to allow the owner socket to send more 2716 * packets, as if they were already TX completed by a typical driver. 2717 * But we also want to keep skb->sk set because some packet schedulers 2718 * rely on it (sch_fq for example). 2719 */ 2720 void skb_orphan_partial(struct sk_buff *skb) 2721 { 2722 if (skb_is_tcp_pure_ack(skb)) 2723 return; 2724 2725 if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk)) 2726 return; 2727 2728 skb_orphan(skb); 2729 } 2730 EXPORT_SYMBOL(skb_orphan_partial); 2731 2732 /* 2733 * Read buffer destructor automatically called from kfree_skb. 2734 */ 2735 void sock_rfree(struct sk_buff *skb) 2736 { 2737 struct sock *sk = skb->sk; 2738 unsigned int len = skb->truesize; 2739 2740 atomic_sub(len, &sk->sk_rmem_alloc); 2741 sk_mem_uncharge(sk, len); 2742 } 2743 EXPORT_SYMBOL(sock_rfree); 2744 2745 /* 2746 * Buffer destructor for skbs that are not used directly in read or write 2747 * path, e.g. for error handler skbs. Automatically called from kfree_skb. 2748 */ 2749 void sock_efree(struct sk_buff *skb) 2750 { 2751 sock_put(skb->sk); 2752 } 2753 EXPORT_SYMBOL(sock_efree); 2754 2755 /* Buffer destructor for prefetch/receive path where reference count may 2756 * not be held, e.g. for listen sockets. 2757 */ 2758 #ifdef CONFIG_INET 2759 void sock_pfree(struct sk_buff *skb) 2760 { 2761 struct sock *sk = skb->sk; 2762 2763 if (!sk_is_refcounted(sk)) 2764 return; 2765 2766 if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) { 2767 inet_reqsk(sk)->rsk_listener = NULL; 2768 reqsk_free(inet_reqsk(sk)); 2769 return; 2770 } 2771 2772 sock_gen_put(sk); 2773 } 2774 EXPORT_SYMBOL(sock_pfree); 2775 #endif /* CONFIG_INET */ 2776 2777 unsigned long __sock_i_ino(struct sock *sk) 2778 { 2779 unsigned long ino; 2780 2781 read_lock(&sk->sk_callback_lock); 2782 ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0; 2783 read_unlock(&sk->sk_callback_lock); 2784 return ino; 2785 } 2786 EXPORT_SYMBOL(__sock_i_ino); 2787 2788 unsigned long sock_i_ino(struct sock *sk) 2789 { 2790 unsigned long ino; 2791 2792 local_bh_disable(); 2793 ino = __sock_i_ino(sk); 2794 local_bh_enable(); 2795 return ino; 2796 } 2797 EXPORT_SYMBOL(sock_i_ino); 2798 2799 /* 2800 * Allocate a skb from the socket's send buffer. 
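 *
 * Minimal usage sketch (hypothetical caller, error handling trimmed);
 * the returned skb is charged to sk->sk_wmem_alloc via skb_set_owner_w():
 *
 *	skb = sock_wmalloc(sk, hlen + len, 0, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOBUFS;
 *	skb_reserve(skb, hlen);
 *	... build the packet and hand it to the transmit path ...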
2801 */ 2802 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 2803 gfp_t priority) 2804 { 2805 if (force || 2806 refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) { 2807 struct sk_buff *skb = alloc_skb(size, priority); 2808 2809 if (skb) { 2810 skb_set_owner_w(skb, sk); 2811 return skb; 2812 } 2813 } 2814 return NULL; 2815 } 2816 EXPORT_SYMBOL(sock_wmalloc); 2817 2818 static void sock_ofree(struct sk_buff *skb) 2819 { 2820 struct sock *sk = skb->sk; 2821 2822 atomic_sub(skb->truesize, &sk->sk_omem_alloc); 2823 } 2824 2825 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, 2826 gfp_t priority) 2827 { 2828 struct sk_buff *skb; 2829 2830 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */ 2831 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) > 2832 READ_ONCE(sock_net(sk)->core.sysctl_optmem_max)) 2833 return NULL; 2834 2835 skb = alloc_skb(size, priority); 2836 if (!skb) 2837 return NULL; 2838 2839 atomic_add(skb->truesize, &sk->sk_omem_alloc); 2840 skb->sk = sk; 2841 skb->destructor = sock_ofree; 2842 return skb; 2843 } 2844 2845 /* 2846 * Allocate a memory block from the socket's option memory buffer. 2847 */ 2848 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 2849 { 2850 int optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max); 2851 2852 if ((unsigned int)size <= optmem_max && 2853 atomic_read(&sk->sk_omem_alloc) + size < optmem_max) { 2854 void *mem; 2855 /* First do the add, to avoid the race if kmalloc 2856 * might sleep. 2857 */ 2858 atomic_add(size, &sk->sk_omem_alloc); 2859 mem = kmalloc(size, priority); 2860 if (mem) 2861 return mem; 2862 atomic_sub(size, &sk->sk_omem_alloc); 2863 } 2864 return NULL; 2865 } 2866 EXPORT_SYMBOL(sock_kmalloc); 2867 2868 /* 2869 * Duplicate the input "src" memory block using the socket's 2870 * option memory buffer. 2871 */ 2872 void *sock_kmemdup(struct sock *sk, const void *src, 2873 int size, gfp_t priority) 2874 { 2875 void *mem; 2876 2877 mem = sock_kmalloc(sk, size, priority); 2878 if (mem) 2879 memcpy(mem, src, size); 2880 return mem; 2881 } 2882 EXPORT_SYMBOL(sock_kmemdup); 2883 2884 /* Free an option memory block. Note, we actually want the inline 2885 * here as this allows gcc to detect the nullify and fold away the 2886 * condition entirely. 2887 */ 2888 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, 2889 const bool nullify) 2890 { 2891 if (WARN_ON_ONCE(!mem)) 2892 return; 2893 if (nullify) 2894 kfree_sensitive(mem); 2895 else 2896 kfree(mem); 2897 atomic_sub(size, &sk->sk_omem_alloc); 2898 } 2899 2900 void sock_kfree_s(struct sock *sk, void *mem, int size) 2901 { 2902 __sock_kfree_s(sk, mem, size, false); 2903 } 2904 EXPORT_SYMBOL(sock_kfree_s); 2905 2906 void sock_kzfree_s(struct sock *sk, void *mem, int size) 2907 { 2908 __sock_kfree_s(sk, mem, size, true); 2909 } 2910 EXPORT_SYMBOL(sock_kzfree_s); 2911 2912 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock. 2913 I think, these locks should be removed for datagram sockets. 
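   It is called from sock_alloc_send_pskb() below when the socket's send
   buffer is full, and returns the remaining timeout.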
2914 */ 2915 static long sock_wait_for_wmem(struct sock *sk, long timeo) 2916 { 2917 DEFINE_WAIT(wait); 2918 2919 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2920 for (;;) { 2921 if (!timeo) 2922 break; 2923 if (signal_pending(current)) 2924 break; 2925 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 2926 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2927 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) 2928 break; 2929 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) 2930 break; 2931 if (READ_ONCE(sk->sk_err)) 2932 break; 2933 timeo = schedule_timeout(timeo); 2934 } 2935 finish_wait(sk_sleep(sk), &wait); 2936 return timeo; 2937 } 2938 2939 2940 /* 2941 * Generic send/receive buffer handlers 2942 */ 2943 2944 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, 2945 unsigned long data_len, int noblock, 2946 int *errcode, int max_page_order) 2947 { 2948 struct sk_buff *skb; 2949 long timeo; 2950 int err; 2951 2952 timeo = sock_sndtimeo(sk, noblock); 2953 for (;;) { 2954 err = sock_error(sk); 2955 if (err != 0) 2956 goto failure; 2957 2958 err = -EPIPE; 2959 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) 2960 goto failure; 2961 2962 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf)) 2963 break; 2964 2965 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2966 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 2967 err = -EAGAIN; 2968 if (!timeo) 2969 goto failure; 2970 if (signal_pending(current)) 2971 goto interrupted; 2972 timeo = sock_wait_for_wmem(sk, timeo); 2973 } 2974 skb = alloc_skb_with_frags(header_len, data_len, max_page_order, 2975 errcode, sk->sk_allocation); 2976 if (skb) 2977 skb_set_owner_w(skb, sk); 2978 return skb; 2979 2980 interrupted: 2981 err = sock_intr_errno(timeo); 2982 failure: 2983 *errcode = err; 2984 return NULL; 2985 } 2986 EXPORT_SYMBOL(sock_alloc_send_pskb); 2987 2988 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, 2989 struct sockcm_cookie *sockc) 2990 { 2991 u32 tsflags; 2992 2993 BUILD_BUG_ON(SOF_TIMESTAMPING_LAST == (1 << 31)); 2994 2995 switch (cmsg->cmsg_type) { 2996 case SO_MARK: 2997 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && 2998 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 2999 return -EPERM; 3000 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3001 return -EINVAL; 3002 sockc->mark = *(u32 *)CMSG_DATA(cmsg); 3003 break; 3004 case SO_TIMESTAMPING_OLD: 3005 case SO_TIMESTAMPING_NEW: 3006 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3007 return -EINVAL; 3008 3009 tsflags = *(u32 *)CMSG_DATA(cmsg); 3010 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK) 3011 return -EINVAL; 3012 3013 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; 3014 sockc->tsflags |= tsflags; 3015 break; 3016 case SCM_TXTIME: 3017 if (!sock_flag(sk, SOCK_TXTIME)) 3018 return -EINVAL; 3019 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64))) 3020 return -EINVAL; 3021 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg)); 3022 break; 3023 case SCM_TS_OPT_ID: 3024 if (sk_is_tcp(sk)) 3025 return -EINVAL; 3026 tsflags = READ_ONCE(sk->sk_tsflags); 3027 if (!(tsflags & SOF_TIMESTAMPING_OPT_ID)) 3028 return -EINVAL; 3029 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3030 return -EINVAL; 3031 sockc->ts_opt_id = *(u32 *)CMSG_DATA(cmsg); 3032 sockc->tsflags |= SOCKCM_FLAG_TS_OPT_ID; 3033 break; 3034 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. 
*/ 3035 case SCM_RIGHTS: 3036 case SCM_CREDENTIALS: 3037 break; 3038 case SO_PRIORITY: 3039 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3040 return -EINVAL; 3041 if (!sk_set_prio_allowed(sk, *(u32 *)CMSG_DATA(cmsg))) 3042 return -EPERM; 3043 sockc->priority = *(u32 *)CMSG_DATA(cmsg); 3044 break; 3045 case SCM_DEVMEM_DMABUF: 3046 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3047 return -EINVAL; 3048 sockc->dmabuf_id = *(u32 *)CMSG_DATA(cmsg); 3049 break; 3050 default: 3051 return -EINVAL; 3052 } 3053 return 0; 3054 } 3055 EXPORT_SYMBOL(__sock_cmsg_send); 3056 3057 int sock_cmsg_send(struct sock *sk, struct msghdr *msg, 3058 struct sockcm_cookie *sockc) 3059 { 3060 struct cmsghdr *cmsg; 3061 int ret; 3062 3063 for_each_cmsghdr(cmsg, msg) { 3064 if (!CMSG_OK(msg, cmsg)) 3065 return -EINVAL; 3066 if (cmsg->cmsg_level != SOL_SOCKET) 3067 continue; 3068 ret = __sock_cmsg_send(sk, cmsg, sockc); 3069 if (ret) 3070 return ret; 3071 } 3072 return 0; 3073 } 3074 EXPORT_SYMBOL(sock_cmsg_send); 3075 3076 static void sk_enter_memory_pressure(struct sock *sk) 3077 { 3078 if (!sk->sk_prot->enter_memory_pressure) 3079 return; 3080 3081 sk->sk_prot->enter_memory_pressure(sk); 3082 } 3083 3084 static void sk_leave_memory_pressure(struct sock *sk) 3085 { 3086 if (sk->sk_prot->leave_memory_pressure) { 3087 INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure, 3088 tcp_leave_memory_pressure, sk); 3089 } else { 3090 unsigned long *memory_pressure = sk->sk_prot->memory_pressure; 3091 3092 if (memory_pressure && READ_ONCE(*memory_pressure)) 3093 WRITE_ONCE(*memory_pressure, 0); 3094 } 3095 } 3096 3097 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); 3098 3099 /** 3100 * skb_page_frag_refill - check that a page_frag contains enough room 3101 * @sz: minimum size of the fragment we want to get 3102 * @pfrag: pointer to page_frag 3103 * @gfp: priority for memory allocation 3104 * 3105 * Note: While this allocator tries to use high order pages, there is 3106 * no guarantee that allocations succeed. Therefore, @sz MUST be 3107 * less or equal than PAGE_SIZE. 
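 *
 * Usage sketch (illustrative only; callers normally react to failure by
 * entering memory pressure, see sk_page_frag_refill() below):
 *
 *	if (!skb_page_frag_refill(copy, pfrag, gfp))
 *		return -ENOMEM;
 *	memcpy(page_address(pfrag->page) + pfrag->offset, data, copy);
 *	pfrag->offset += copy;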
3108 */ 3109 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) 3110 { 3111 if (pfrag->page) { 3112 if (page_ref_count(pfrag->page) == 1) { 3113 pfrag->offset = 0; 3114 return true; 3115 } 3116 if (pfrag->offset + sz <= pfrag->size) 3117 return true; 3118 put_page(pfrag->page); 3119 } 3120 3121 pfrag->offset = 0; 3122 if (SKB_FRAG_PAGE_ORDER && 3123 !static_branch_unlikely(&net_high_order_alloc_disable_key)) { 3124 /* Avoid direct reclaim but allow kswapd to wake */ 3125 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) | 3126 __GFP_COMP | __GFP_NOWARN | 3127 __GFP_NORETRY, 3128 SKB_FRAG_PAGE_ORDER); 3129 if (likely(pfrag->page)) { 3130 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER; 3131 return true; 3132 } 3133 } 3134 pfrag->page = alloc_page(gfp); 3135 if (likely(pfrag->page)) { 3136 pfrag->size = PAGE_SIZE; 3137 return true; 3138 } 3139 return false; 3140 } 3141 EXPORT_SYMBOL(skb_page_frag_refill); 3142 3143 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) 3144 { 3145 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) 3146 return true; 3147 3148 sk_enter_memory_pressure(sk); 3149 sk_stream_moderate_sndbuf(sk); 3150 return false; 3151 } 3152 EXPORT_SYMBOL(sk_page_frag_refill); 3153 3154 void __lock_sock(struct sock *sk) 3155 __releases(&sk->sk_lock.slock) 3156 __acquires(&sk->sk_lock.slock) 3157 { 3158 DEFINE_WAIT(wait); 3159 3160 for (;;) { 3161 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, 3162 TASK_UNINTERRUPTIBLE); 3163 spin_unlock_bh(&sk->sk_lock.slock); 3164 schedule(); 3165 spin_lock_bh(&sk->sk_lock.slock); 3166 if (!sock_owned_by_user(sk)) 3167 break; 3168 } 3169 finish_wait(&sk->sk_lock.wq, &wait); 3170 } 3171 3172 void __release_sock(struct sock *sk) 3173 __releases(&sk->sk_lock.slock) 3174 __acquires(&sk->sk_lock.slock) 3175 { 3176 struct sk_buff *skb, *next; 3177 3178 while ((skb = sk->sk_backlog.head) != NULL) { 3179 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; 3180 3181 spin_unlock_bh(&sk->sk_lock.slock); 3182 3183 do { 3184 next = skb->next; 3185 prefetch(next); 3186 DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb)); 3187 skb_mark_not_on_list(skb); 3188 sk_backlog_rcv(sk, skb); 3189 3190 cond_resched(); 3191 3192 skb = next; 3193 } while (skb != NULL); 3194 3195 spin_lock_bh(&sk->sk_lock.slock); 3196 } 3197 3198 /* 3199 * Doing the zeroing here guarantee we can not loop forever 3200 * while a wild producer attempts to flood us. 3201 */ 3202 sk->sk_backlog.len = 0; 3203 } 3204 3205 void __sk_flush_backlog(struct sock *sk) 3206 { 3207 spin_lock_bh(&sk->sk_lock.slock); 3208 __release_sock(sk); 3209 3210 if (sk->sk_prot->release_cb) 3211 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb, 3212 tcp_release_cb, sk); 3213 3214 spin_unlock_bh(&sk->sk_lock.slock); 3215 } 3216 EXPORT_SYMBOL_GPL(__sk_flush_backlog); 3217 3218 /** 3219 * sk_wait_data - wait for data to arrive at sk_receive_queue 3220 * @sk: sock to wait on 3221 * @timeo: for how long 3222 * @skb: last skb seen on sk_receive_queue 3223 * 3224 * Now socket state including sk->sk_err is changed only under lock, 3225 * hence we may omit checks after joining wait queue. 3226 * We check receive queue before schedule() only as optimization; 3227 * it is very likely that release_sock() added new data. 
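 *
 * Callers hold the socket lock; a typical receive loop looks roughly like
 * this (illustrative sketch, not any specific protocol's code):
 *
 *	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *	while (!(skb = skb_peek_tail(&sk->sk_receive_queue))) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo, NULL);
 *	}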
3228  */
3229 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
3230 {
3231 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
3232 	int rc;
3233 
3234 	add_wait_queue(sk_sleep(sk), &wait);
3235 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3236 	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
3237 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3238 	remove_wait_queue(sk_sleep(sk), &wait);
3239 	return rc;
3240 }
3241 EXPORT_SYMBOL(sk_wait_data);
3242 
3243 /**
3244  * __sk_mem_raise_allocated - increase memory_allocated
3245  * @sk: socket
3246  * @size: memory size to allocate
3247  * @amt: pages to allocate
3248  * @kind: allocation type
3249  *
3250  * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc.
3251  *
3252  * Unlike the globally shared limits among the sockets under same protocol,
3253  * consuming the budget of a memcg won't have direct effect on other ones.
3254  * So be optimistic about memcg's tolerance, and leave the callers to decide
3255  * whether or not to raise allocated through sk_under_memory_pressure() or
3256  * its variants.
3257  */
3258 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
3259 {
3260 	struct mem_cgroup *memcg = mem_cgroup_sockets_enabled ? sk->sk_memcg : NULL;
3261 	struct proto *prot = sk->sk_prot;
3262 	bool charged = true;
3263 	long allocated;
3264 
3265 	sk_memory_allocated_add(sk, amt);
3266 	allocated = sk_memory_allocated(sk);
3267 
3268 	if (memcg) {
3269 		charged = mem_cgroup_charge_skmem(memcg, amt, gfp_memcg_charge());
3270 		if (!charged)
3271 			goto suppress_allocation;
3272 	}
3273 
3274 	/* Under limit. */
3275 	if (allocated <= sk_prot_mem_limits(sk, 0)) {
3276 		sk_leave_memory_pressure(sk);
3277 		return 1;
3278 	}
3279 
3280 	/* Under pressure. */
3281 	if (allocated > sk_prot_mem_limits(sk, 1))
3282 		sk_enter_memory_pressure(sk);
3283 
3284 	/* Over hard limit. */
3285 	if (allocated > sk_prot_mem_limits(sk, 2))
3286 		goto suppress_allocation;
3287 
3288 	/* Guarantee minimum buffer size under pressure (either global
3289 	 * or memcg) to make sure features described in RFC 7323 (TCP
3290 	 * Extensions for High Performance) work properly.
3291 	 *
3292 	 * This rule does NOT apply once usage exceeds the global or memcg
3293 	 * hard limit, or else a DoS attack could be mounted by spawning
3294 	 * lots of sockets whose usage stays under the minimum buffer size.
3295 	 */
3296 	if (kind == SK_MEM_RECV) {
3297 		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
3298 			return 1;
3299 
3300 	} else { /* SK_MEM_SEND */
3301 		int wmem0 = sk_get_wmem0(sk, prot);
3302 
3303 		if (sk->sk_type == SOCK_STREAM) {
3304 			if (sk->sk_wmem_queued < wmem0)
3305 				return 1;
3306 		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
3307 			return 1;
3308 		}
3309 	}
3310 
3311 	if (sk_has_memory_pressure(sk)) {
3312 		u64 alloc;
3313 
3314 		/* The following 'average' heuristic is within the
3315 		 * scope of global accounting, so it only makes
3316 		 * sense for global memory pressure.
3317 		 */
3318 		if (!sk_under_global_memory_pressure(sk))
3319 			return 1;
3320 
3321 		/* Try to be fair among all the sockets under global
3322 		 * pressure by allowing the ones whose usage is below
3323 		 * average to keep allocating.
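		 * In other words, the request is granted as long as this
		 * socket's footprint stays below hard_limit / nr_sockets:
		 *
		 *	alloc * pages(wmem_queued + rmem_alloc + forward_alloc)
		 *		< sk_prot_mem_limits(sk, 2)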
3324 */ 3325 alloc = sk_sockets_allocated_read_positive(sk); 3326 if (sk_prot_mem_limits(sk, 2) > alloc * 3327 sk_mem_pages(sk->sk_wmem_queued + 3328 atomic_read(&sk->sk_rmem_alloc) + 3329 sk->sk_forward_alloc)) 3330 return 1; 3331 } 3332 3333 suppress_allocation: 3334 3335 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { 3336 sk_stream_moderate_sndbuf(sk); 3337 3338 /* Fail only if socket is _under_ its sndbuf. 3339 * In this case we cannot block, so that we have to fail. 3340 */ 3341 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) { 3342 /* Force charge with __GFP_NOFAIL */ 3343 if (memcg && !charged) { 3344 mem_cgroup_charge_skmem(memcg, amt, 3345 gfp_memcg_charge() | __GFP_NOFAIL); 3346 } 3347 return 1; 3348 } 3349 } 3350 3351 if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged)) 3352 trace_sock_exceed_buf_limit(sk, prot, allocated, kind); 3353 3354 sk_memory_allocated_sub(sk, amt); 3355 3356 if (memcg && charged) 3357 mem_cgroup_uncharge_skmem(memcg, amt); 3358 3359 return 0; 3360 } 3361 3362 /** 3363 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated 3364 * @sk: socket 3365 * @size: memory size to allocate 3366 * @kind: allocation type 3367 * 3368 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means 3369 * rmem allocation. This function assumes that protocols which have 3370 * memory_pressure use sk_wmem_queued as write buffer accounting. 3371 */ 3372 int __sk_mem_schedule(struct sock *sk, int size, int kind) 3373 { 3374 int ret, amt = sk_mem_pages(size); 3375 3376 sk_forward_alloc_add(sk, amt << PAGE_SHIFT); 3377 ret = __sk_mem_raise_allocated(sk, size, amt, kind); 3378 if (!ret) 3379 sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT)); 3380 return ret; 3381 } 3382 EXPORT_SYMBOL(__sk_mem_schedule); 3383 3384 /** 3385 * __sk_mem_reduce_allocated - reclaim memory_allocated 3386 * @sk: socket 3387 * @amount: number of quanta 3388 * 3389 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc 3390 */ 3391 void __sk_mem_reduce_allocated(struct sock *sk, int amount) 3392 { 3393 sk_memory_allocated_sub(sk, amount); 3394 3395 if (mem_cgroup_sockets_enabled && sk->sk_memcg) 3396 mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); 3397 3398 if (sk_under_global_memory_pressure(sk) && 3399 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) 3400 sk_leave_memory_pressure(sk); 3401 } 3402 3403 /** 3404 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated 3405 * @sk: socket 3406 * @amount: number of bytes (rounded down to a PAGE_SIZE multiple) 3407 */ 3408 void __sk_mem_reclaim(struct sock *sk, int amount) 3409 { 3410 amount >>= PAGE_SHIFT; 3411 sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT)); 3412 __sk_mem_reduce_allocated(sk, amount); 3413 } 3414 EXPORT_SYMBOL(__sk_mem_reclaim); 3415 3416 int sk_set_peek_off(struct sock *sk, int val) 3417 { 3418 WRITE_ONCE(sk->sk_peek_off, val); 3419 return 0; 3420 } 3421 EXPORT_SYMBOL_GPL(sk_set_peek_off); 3422 3423 /* 3424 * Set of default routines for initialising struct proto_ops when 3425 * the protocol does not support a particular function. In certain 3426 * cases where it makes no sense for a protocol to have a "do nothing" 3427 * function, some default processing is provided. 
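 *
 * A protocol wires whichever stubs it needs into its proto_ops table,
 * e.g. (hypothetical "foo" family, most fields trimmed):
 *
 *	static const struct proto_ops foo_proto_ops = {
 *		.family		= PF_FOO,
 *		.owner		= THIS_MODULE,
 *		.socketpair	= sock_no_socketpair,
 *		.mmap		= sock_no_mmap,
 *		.listen		= sock_no_listen,
 *		...
 *	};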
3428 */ 3429 3430 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) 3431 { 3432 return -EOPNOTSUPP; 3433 } 3434 EXPORT_SYMBOL(sock_no_bind); 3435 3436 int sock_no_connect(struct socket *sock, struct sockaddr *saddr, 3437 int len, int flags) 3438 { 3439 return -EOPNOTSUPP; 3440 } 3441 EXPORT_SYMBOL(sock_no_connect); 3442 3443 int sock_no_socketpair(struct socket *sock1, struct socket *sock2) 3444 { 3445 return -EOPNOTSUPP; 3446 } 3447 EXPORT_SYMBOL(sock_no_socketpair); 3448 3449 int sock_no_accept(struct socket *sock, struct socket *newsock, 3450 struct proto_accept_arg *arg) 3451 { 3452 return -EOPNOTSUPP; 3453 } 3454 EXPORT_SYMBOL(sock_no_accept); 3455 3456 int sock_no_getname(struct socket *sock, struct sockaddr *saddr, 3457 int peer) 3458 { 3459 return -EOPNOTSUPP; 3460 } 3461 EXPORT_SYMBOL(sock_no_getname); 3462 3463 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3464 { 3465 return -EOPNOTSUPP; 3466 } 3467 EXPORT_SYMBOL(sock_no_ioctl); 3468 3469 int sock_no_listen(struct socket *sock, int backlog) 3470 { 3471 return -EOPNOTSUPP; 3472 } 3473 EXPORT_SYMBOL(sock_no_listen); 3474 3475 int sock_no_shutdown(struct socket *sock, int how) 3476 { 3477 return -EOPNOTSUPP; 3478 } 3479 EXPORT_SYMBOL(sock_no_shutdown); 3480 3481 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len) 3482 { 3483 return -EOPNOTSUPP; 3484 } 3485 EXPORT_SYMBOL(sock_no_sendmsg); 3486 3487 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len) 3488 { 3489 return -EOPNOTSUPP; 3490 } 3491 EXPORT_SYMBOL(sock_no_sendmsg_locked); 3492 3493 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len, 3494 int flags) 3495 { 3496 return -EOPNOTSUPP; 3497 } 3498 EXPORT_SYMBOL(sock_no_recvmsg); 3499 3500 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) 3501 { 3502 /* Mirror missing mmap method error code */ 3503 return -ENODEV; 3504 } 3505 EXPORT_SYMBOL(sock_no_mmap); 3506 3507 /* 3508 * When a file is received (via SCM_RIGHTS, etc), we must bump the 3509 * various sock-based usage counts. 
3510 */ 3511 void __receive_sock(struct file *file) 3512 { 3513 struct socket *sock; 3514 3515 sock = sock_from_file(file); 3516 if (sock) { 3517 sock_update_netprioidx(&sock->sk->sk_cgrp_data); 3518 sock_update_classid(&sock->sk->sk_cgrp_data); 3519 } 3520 } 3521 3522 /* 3523 * Default Socket Callbacks 3524 */ 3525 3526 static void sock_def_wakeup(struct sock *sk) 3527 { 3528 struct socket_wq *wq; 3529 3530 rcu_read_lock(); 3531 wq = rcu_dereference(sk->sk_wq); 3532 if (skwq_has_sleeper(wq)) 3533 wake_up_interruptible_all(&wq->wait); 3534 rcu_read_unlock(); 3535 } 3536 3537 static void sock_def_error_report(struct sock *sk) 3538 { 3539 struct socket_wq *wq; 3540 3541 rcu_read_lock(); 3542 wq = rcu_dereference(sk->sk_wq); 3543 if (skwq_has_sleeper(wq)) 3544 wake_up_interruptible_poll(&wq->wait, EPOLLERR); 3545 sk_wake_async_rcu(sk, SOCK_WAKE_IO, POLL_ERR); 3546 rcu_read_unlock(); 3547 } 3548 3549 void sock_def_readable(struct sock *sk) 3550 { 3551 struct socket_wq *wq; 3552 3553 trace_sk_data_ready(sk); 3554 3555 rcu_read_lock(); 3556 wq = rcu_dereference(sk->sk_wq); 3557 if (skwq_has_sleeper(wq)) 3558 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI | 3559 EPOLLRDNORM | EPOLLRDBAND); 3560 sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN); 3561 rcu_read_unlock(); 3562 } 3563 3564 static void sock_def_write_space(struct sock *sk) 3565 { 3566 struct socket_wq *wq; 3567 3568 rcu_read_lock(); 3569 3570 /* Do not wake up a writer until he can make "significant" 3571 * progress. --DaveM 3572 */ 3573 if (sock_writeable(sk)) { 3574 wq = rcu_dereference(sk->sk_wq); 3575 if (skwq_has_sleeper(wq)) 3576 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 3577 EPOLLWRNORM | EPOLLWRBAND); 3578 3579 /* Should agree with poll, otherwise some programs break */ 3580 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); 3581 } 3582 3583 rcu_read_unlock(); 3584 } 3585 3586 /* An optimised version of sock_def_write_space(), should only be called 3587 * for SOCK_RCU_FREE sockets under RCU read section and after putting 3588 * ->sk_wmem_alloc. 3589 */ 3590 static void sock_def_write_space_wfree(struct sock *sk) 3591 { 3592 /* Do not wake up a writer until he can make "significant" 3593 * progress. 
--DaveM 3594 */ 3595 if (sock_writeable(sk)) { 3596 struct socket_wq *wq = rcu_dereference(sk->sk_wq); 3597 3598 /* rely on refcount_sub from sock_wfree() */ 3599 smp_mb__after_atomic(); 3600 if (wq && waitqueue_active(&wq->wait)) 3601 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 3602 EPOLLWRNORM | EPOLLWRBAND); 3603 3604 /* Should agree with poll, otherwise some programs break */ 3605 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); 3606 } 3607 } 3608 3609 static void sock_def_destruct(struct sock *sk) 3610 { 3611 } 3612 3613 void sk_send_sigurg(struct sock *sk) 3614 { 3615 if (sk->sk_socket && sk->sk_socket->file) 3616 if (send_sigurg(sk->sk_socket->file)) 3617 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); 3618 } 3619 EXPORT_SYMBOL(sk_send_sigurg); 3620 3621 void sk_reset_timer(struct sock *sk, struct timer_list* timer, 3622 unsigned long expires) 3623 { 3624 if (!mod_timer(timer, expires)) 3625 sock_hold(sk); 3626 } 3627 EXPORT_SYMBOL(sk_reset_timer); 3628 3629 void sk_stop_timer(struct sock *sk, struct timer_list* timer) 3630 { 3631 if (timer_delete(timer)) 3632 __sock_put(sk); 3633 } 3634 EXPORT_SYMBOL(sk_stop_timer); 3635 3636 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer) 3637 { 3638 if (timer_delete_sync(timer)) 3639 __sock_put(sk); 3640 } 3641 EXPORT_SYMBOL(sk_stop_timer_sync); 3642 3643 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid) 3644 { 3645 sk_init_common(sk); 3646 sk->sk_send_head = NULL; 3647 3648 timer_setup(&sk->sk_timer, NULL, 0); 3649 3650 sk->sk_allocation = GFP_KERNEL; 3651 sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default); 3652 sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); 3653 sk->sk_state = TCP_CLOSE; 3654 sk->sk_use_task_frag = true; 3655 sk_set_socket(sk, sock); 3656 3657 sock_set_flag(sk, SOCK_ZAPPED); 3658 3659 if (sock) { 3660 sk->sk_type = sock->type; 3661 RCU_INIT_POINTER(sk->sk_wq, &sock->wq); 3662 sock->sk = sk; 3663 } else { 3664 RCU_INIT_POINTER(sk->sk_wq, NULL); 3665 } 3666 sk->sk_uid = uid; 3667 3668 sk->sk_state_change = sock_def_wakeup; 3669 sk->sk_data_ready = sock_def_readable; 3670 sk->sk_write_space = sock_def_write_space; 3671 sk->sk_error_report = sock_def_error_report; 3672 sk->sk_destruct = sock_def_destruct; 3673 3674 sk->sk_frag.page = NULL; 3675 sk->sk_frag.offset = 0; 3676 sk->sk_peek_off = -1; 3677 3678 sk->sk_peer_pid = NULL; 3679 sk->sk_peer_cred = NULL; 3680 spin_lock_init(&sk->sk_peer_lock); 3681 3682 sk->sk_write_pending = 0; 3683 sk->sk_rcvlowat = 1; 3684 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 3685 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 3686 3687 sk->sk_stamp = SK_DEFAULT_STAMP; 3688 #if BITS_PER_LONG==32 3689 seqlock_init(&sk->sk_stamp_seq); 3690 #endif 3691 atomic_set(&sk->sk_zckey, 0); 3692 3693 #ifdef CONFIG_NET_RX_BUSY_POLL 3694 sk->sk_napi_id = 0; 3695 sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read); 3696 #endif 3697 3698 sk->sk_max_pacing_rate = ~0UL; 3699 sk->sk_pacing_rate = ~0UL; 3700 WRITE_ONCE(sk->sk_pacing_shift, 10); 3701 sk->sk_incoming_cpu = -1; 3702 3703 sk_rx_queue_clear(sk); 3704 /* 3705 * Before updating sk_refcnt, we must commit prior changes to memory 3706 * (Documentation/RCU/rculist_nulls.rst for details) 3707 */ 3708 smp_wmb(); 3709 refcount_set(&sk->sk_refcnt, 1); 3710 atomic_set(&sk->sk_drops, 0); 3711 } 3712 EXPORT_SYMBOL(sock_init_data_uid); 3713 3714 void sock_init_data(struct socket *sock, struct sock *sk) 3715 { 3716 kuid_t uid = sock ? 
3717 SOCK_INODE(sock)->i_uid : 3718 make_kuid(sock_net(sk)->user_ns, 0); 3719 3720 sock_init_data_uid(sock, sk, uid); 3721 } 3722 EXPORT_SYMBOL(sock_init_data); 3723 3724 void lock_sock_nested(struct sock *sk, int subclass) 3725 { 3726 /* The sk_lock has mutex_lock() semantics here. */ 3727 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); 3728 3729 might_sleep(); 3730 spin_lock_bh(&sk->sk_lock.slock); 3731 if (sock_owned_by_user_nocheck(sk)) 3732 __lock_sock(sk); 3733 sk->sk_lock.owned = 1; 3734 spin_unlock_bh(&sk->sk_lock.slock); 3735 } 3736 EXPORT_SYMBOL(lock_sock_nested); 3737 3738 void release_sock(struct sock *sk) 3739 { 3740 spin_lock_bh(&sk->sk_lock.slock); 3741 if (sk->sk_backlog.tail) 3742 __release_sock(sk); 3743 3744 if (sk->sk_prot->release_cb) 3745 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb, 3746 tcp_release_cb, sk); 3747 3748 sock_release_ownership(sk); 3749 if (waitqueue_active(&sk->sk_lock.wq)) 3750 wake_up(&sk->sk_lock.wq); 3751 spin_unlock_bh(&sk->sk_lock.slock); 3752 } 3753 EXPORT_SYMBOL(release_sock); 3754 3755 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) 3756 { 3757 might_sleep(); 3758 spin_lock_bh(&sk->sk_lock.slock); 3759 3760 if (!sock_owned_by_user_nocheck(sk)) { 3761 /* 3762 * Fast path return with bottom halves disabled and 3763 * sock::sk_lock.slock held. 3764 * 3765 * The 'mutex' is not contended and holding 3766 * sock::sk_lock.slock prevents all other lockers to 3767 * proceed so the corresponding unlock_sock_fast() can 3768 * avoid the slow path of release_sock() completely and 3769 * just release slock. 3770 * 3771 * From a semantical POV this is equivalent to 'acquiring' 3772 * the 'mutex', hence the corresponding lockdep 3773 * mutex_release() has to happen in the fast path of 3774 * unlock_sock_fast(). 
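		 *
		 * Callers pair this through lock_sock_fast()/unlock_sock_fast(),
		 * e.g. (illustrative sketch):
		 *
		 *	slow = lock_sock_fast(sk);
		 *	... short, non-sleeping critical section ...
		 *	unlock_sock_fast(sk, slow);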
3775 */ 3776 return false; 3777 } 3778 3779 __lock_sock(sk); 3780 sk->sk_lock.owned = 1; 3781 __acquire(&sk->sk_lock.slock); 3782 spin_unlock_bh(&sk->sk_lock.slock); 3783 return true; 3784 } 3785 EXPORT_SYMBOL(__lock_sock_fast); 3786 3787 int sock_gettstamp(struct socket *sock, void __user *userstamp, 3788 bool timeval, bool time32) 3789 { 3790 struct sock *sk = sock->sk; 3791 struct timespec64 ts; 3792 3793 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 3794 ts = ktime_to_timespec64(sock_read_timestamp(sk)); 3795 if (ts.tv_sec == -1) 3796 return -ENOENT; 3797 if (ts.tv_sec == 0) { 3798 ktime_t kt = ktime_get_real(); 3799 sock_write_timestamp(sk, kt); 3800 ts = ktime_to_timespec64(kt); 3801 } 3802 3803 if (timeval) 3804 ts.tv_nsec /= 1000; 3805 3806 #ifdef CONFIG_COMPAT_32BIT_TIME 3807 if (time32) 3808 return put_old_timespec32(&ts, userstamp); 3809 #endif 3810 #ifdef CONFIG_SPARC64 3811 /* beware of padding in sparc64 timeval */ 3812 if (timeval && !in_compat_syscall()) { 3813 struct __kernel_old_timeval __user tv = { 3814 .tv_sec = ts.tv_sec, 3815 .tv_usec = ts.tv_nsec, 3816 }; 3817 if (copy_to_user(userstamp, &tv, sizeof(tv))) 3818 return -EFAULT; 3819 return 0; 3820 } 3821 #endif 3822 return put_timespec64(&ts, userstamp); 3823 } 3824 EXPORT_SYMBOL(sock_gettstamp); 3825 3826 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag) 3827 { 3828 if (!sock_flag(sk, flag)) { 3829 unsigned long previous_flags = sk->sk_flags; 3830 3831 sock_set_flag(sk, flag); 3832 /* 3833 * we just set one of the two flags which require net 3834 * time stamping, but time stamping might have been on 3835 * already because of the other one 3836 */ 3837 if (sock_needs_netstamp(sk) && 3838 !(previous_flags & SK_FLAGS_TIMESTAMP)) 3839 net_enable_timestamp(); 3840 } 3841 } 3842 3843 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, 3844 int level, int type) 3845 { 3846 struct sock_exterr_skb *serr; 3847 struct sk_buff *skb; 3848 int copied, err; 3849 3850 err = -EAGAIN; 3851 skb = sock_dequeue_err_skb(sk); 3852 if (skb == NULL) 3853 goto out; 3854 3855 copied = skb->len; 3856 if (copied > len) { 3857 msg->msg_flags |= MSG_TRUNC; 3858 copied = len; 3859 } 3860 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3861 if (err) 3862 goto out_free_skb; 3863 3864 sock_recv_timestamp(msg, sk, skb); 3865 3866 serr = SKB_EXT_ERR(skb); 3867 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee); 3868 3869 msg->msg_flags |= MSG_ERRQUEUE; 3870 err = copied; 3871 3872 out_free_skb: 3873 kfree_skb(skb); 3874 out: 3875 return err; 3876 } 3877 EXPORT_SYMBOL(sock_recv_errqueue); 3878 3879 /* 3880 * Get a socket option on an socket. 3881 * 3882 * FIX: POSIX 1003.1g is very ambiguous here. It states that 3883 * asynchronous errors should be reported by getsockopt. We assume 3884 * this means if you specify SO_ERROR (otherwise what is the point of it). 3885 */ 3886 int sock_common_getsockopt(struct socket *sock, int level, int optname, 3887 char __user *optval, int __user *optlen) 3888 { 3889 struct sock *sk = sock->sk; 3890 3891 /* IPV6_ADDRFORM can change sk->sk_prot under us. 
 */
3892 	return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen);
3893 }
3894 EXPORT_SYMBOL(sock_common_getsockopt);
3895 
3896 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3897 			int flags)
3898 {
3899 	struct sock *sk = sock->sk;
3900 	int addr_len = 0;
3901 	int err;
3902 
3903 	err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len);
3904 	if (err >= 0)
3905 		msg->msg_namelen = addr_len;
3906 	return err;
3907 }
3908 EXPORT_SYMBOL(sock_common_recvmsg);
3909 
3910 /*
3911  * Set socket options on an inet socket.
3912  */
3913 int sock_common_setsockopt(struct socket *sock, int level, int optname,
3914 			   sockptr_t optval, unsigned int optlen)
3915 {
3916 	struct sock *sk = sock->sk;
3917 
3918 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
3919 	return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen);
3920 }
3921 EXPORT_SYMBOL(sock_common_setsockopt);
3922 
3923 void sk_common_release(struct sock *sk)
3924 {
3925 	if (sk->sk_prot->destroy)
3926 		sk->sk_prot->destroy(sk);
3927 
3928 	/*
3929 	 * Observation: when sk_common_release() is called, processes have
3930 	 * no access to the socket anymore, but the network stack still does.
3931 	 * Step one, detach it from networking:
3932 	 *
3933 	 * A. Remove from hash tables.
3934 	 */
3935 
3936 	sk->sk_prot->unhash(sk);
3937 
3938 	/*
3939 	 * At this point the socket cannot receive new packets, but it is possible
3940 	 * that some packets are still in flight because some CPU runs the receiver
3941 	 * and did the hash table lookup before we unhashed the socket. They will
3942 	 * reach the receive queue and be purged by the socket destructor.
3943 	 *
3944 	 * Also, we still have packets pending on the receive queue and probably
3945 	 * our own packets waiting in device queues. sock_destroy will drain the
3946 	 * receive queue, but transmitted packets will delay socket destruction
3947 	 * until the last reference is released.
3948 	 */
3949 
3950 	sock_orphan(sk);
3951 
3952 	xfrm_sk_free_policy(sk);
3953 
3954 	sock_put(sk);
3955 }
3956 EXPORT_SYMBOL(sk_common_release);
3957 
3958 void sk_get_meminfo(const struct sock *sk, u32 *mem)
3959 {
3960 	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3961 
3962 	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3963 	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
3964 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3965 	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
3966 	mem[SK_MEMINFO_FWD_ALLOC] = READ_ONCE(sk->sk_forward_alloc);
3967 	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
3968 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3969 	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
3970 	mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
3971 }
3972 
3973 #ifdef CONFIG_PROC_FS
3974 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
3975 
3976 int sock_prot_inuse_get(struct net *net, struct proto *prot)
3977 {
3978 	int cpu, idx = prot->inuse_idx;
3979 	int res = 0;
3980 
3981 	for_each_possible_cpu(cpu)
3982 		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
3983 
3984 	return res >= 0 ?
res : 0; 3985 } 3986 EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 3987 3988 int sock_inuse_get(struct net *net) 3989 { 3990 int cpu, res = 0; 3991 3992 for_each_possible_cpu(cpu) 3993 res += per_cpu_ptr(net->core.prot_inuse, cpu)->all; 3994 3995 return res; 3996 } 3997 3998 EXPORT_SYMBOL_GPL(sock_inuse_get); 3999 4000 static int __net_init sock_inuse_init_net(struct net *net) 4001 { 4002 net->core.prot_inuse = alloc_percpu(struct prot_inuse); 4003 if (net->core.prot_inuse == NULL) 4004 return -ENOMEM; 4005 return 0; 4006 } 4007 4008 static void __net_exit sock_inuse_exit_net(struct net *net) 4009 { 4010 free_percpu(net->core.prot_inuse); 4011 } 4012 4013 static struct pernet_operations net_inuse_ops = { 4014 .init = sock_inuse_init_net, 4015 .exit = sock_inuse_exit_net, 4016 }; 4017 4018 static __init int net_inuse_init(void) 4019 { 4020 if (register_pernet_subsys(&net_inuse_ops)) 4021 panic("Cannot initialize net inuse counters"); 4022 4023 return 0; 4024 } 4025 4026 core_initcall(net_inuse_init); 4027 4028 static int assign_proto_idx(struct proto *prot) 4029 { 4030 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 4031 4032 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR)) { 4033 pr_err("PROTO_INUSE_NR exhausted\n"); 4034 return -ENOSPC; 4035 } 4036 4037 set_bit(prot->inuse_idx, proto_inuse_idx); 4038 return 0; 4039 } 4040 4041 static void release_proto_idx(struct proto *prot) 4042 { 4043 if (prot->inuse_idx != PROTO_INUSE_NR) 4044 clear_bit(prot->inuse_idx, proto_inuse_idx); 4045 } 4046 #else 4047 static inline int assign_proto_idx(struct proto *prot) 4048 { 4049 return 0; 4050 } 4051 4052 static inline void release_proto_idx(struct proto *prot) 4053 { 4054 } 4055 4056 #endif 4057 4058 static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot) 4059 { 4060 if (!twsk_prot) 4061 return; 4062 kfree(twsk_prot->twsk_slab_name); 4063 twsk_prot->twsk_slab_name = NULL; 4064 kmem_cache_destroy(twsk_prot->twsk_slab); 4065 twsk_prot->twsk_slab = NULL; 4066 } 4067 4068 static int tw_prot_init(const struct proto *prot) 4069 { 4070 struct timewait_sock_ops *twsk_prot = prot->twsk_prot; 4071 4072 if (!twsk_prot) 4073 return 0; 4074 4075 twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", 4076 prot->name); 4077 if (!twsk_prot->twsk_slab_name) 4078 return -ENOMEM; 4079 4080 twsk_prot->twsk_slab = 4081 kmem_cache_create(twsk_prot->twsk_slab_name, 4082 twsk_prot->twsk_obj_size, 0, 4083 SLAB_ACCOUNT | prot->slab_flags, 4084 NULL); 4085 if (!twsk_prot->twsk_slab) { 4086 pr_crit("%s: Can't create timewait sock SLAB cache!\n", 4087 prot->name); 4088 return -ENOMEM; 4089 } 4090 4091 return 0; 4092 } 4093 4094 static void req_prot_cleanup(struct request_sock_ops *rsk_prot) 4095 { 4096 if (!rsk_prot) 4097 return; 4098 kfree(rsk_prot->slab_name); 4099 rsk_prot->slab_name = NULL; 4100 kmem_cache_destroy(rsk_prot->slab); 4101 rsk_prot->slab = NULL; 4102 } 4103 4104 static int req_prot_init(const struct proto *prot) 4105 { 4106 struct request_sock_ops *rsk_prot = prot->rsk_prot; 4107 4108 if (!rsk_prot) 4109 return 0; 4110 4111 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", 4112 prot->name); 4113 if (!rsk_prot->slab_name) 4114 return -ENOMEM; 4115 4116 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, 4117 rsk_prot->obj_size, 0, 4118 SLAB_ACCOUNT | prot->slab_flags, 4119 NULL); 4120 4121 if (!rsk_prot->slab) { 4122 pr_crit("%s: Can't create request sock SLAB cache!\n", 4123 prot->name); 4124 return -ENOMEM; 4125 } 4126 return 0; 4127 } 4128 4129 int 
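/*
 * Typically called from a protocol module's init path and paired with
 * sock_register() for the address family; illustrative sketch only,
 * "foo_prot" and "foo_family_ops" are hypothetical:
 *
 *	err = proto_register(&foo_prot, 1);
 *	if (err)
 *		return err;
 *	err = sock_register(&foo_family_ops);
 *	if (err)
 *		proto_unregister(&foo_prot);
 *	return err;
 */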

static int __net_init sock_inuse_init_net(struct net *net)
{
	net->core.prot_inuse = alloc_percpu(struct prot_inuse);
	if (net->core.prot_inuse == NULL)
		return -ENOMEM;
	return 0;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
	free_percpu(net->core.prot_inuse);
}

static struct pernet_operations net_inuse_ops = {
	.init = sock_inuse_init_net,
	.exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
	if (register_pernet_subsys(&net_inuse_ops))
		panic("Cannot initialize net inuse counters");

	return 0;
}

core_initcall(net_inuse_init);

static int assign_proto_idx(struct proto *prot)
{
	prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

	if (unlikely(prot->inuse_idx == PROTO_INUSE_NR)) {
		pr_err("PROTO_INUSE_NR exhausted\n");
		return -ENOSPC;
	}

	set_bit(prot->inuse_idx, proto_inuse_idx);
	return 0;
}

static void release_proto_idx(struct proto *prot)
{
	if (prot->inuse_idx != PROTO_INUSE_NR)
		clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline int assign_proto_idx(struct proto *prot)
{
	return 0;
}

static inline void release_proto_idx(struct proto *prot)
{
}

#endif

static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
{
	if (!twsk_prot)
		return;
	kfree(twsk_prot->twsk_slab_name);
	twsk_prot->twsk_slab_name = NULL;
	kmem_cache_destroy(twsk_prot->twsk_slab);
	twsk_prot->twsk_slab = NULL;
}

static int tw_prot_init(const struct proto *prot)
{
	struct timewait_sock_ops *twsk_prot = prot->twsk_prot;

	if (!twsk_prot)
		return 0;

	twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s",
					      prot->name);
	if (!twsk_prot->twsk_slab_name)
		return -ENOMEM;

	twsk_prot->twsk_slab =
		kmem_cache_create(twsk_prot->twsk_slab_name,
				  twsk_prot->twsk_obj_size, 0,
				  SLAB_ACCOUNT | prot->slab_flags,
				  NULL);
	if (!twsk_prot->twsk_slab) {
		pr_crit("%s: Can't create timewait sock SLAB cache!\n",
			prot->name);
		return -ENOMEM;
	}

	return 0;
}

static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
{
	if (!rsk_prot)
		return;
	kfree(rsk_prot->slab_name);
	rsk_prot->slab_name = NULL;
	kmem_cache_destroy(rsk_prot->slab);
	rsk_prot->slab = NULL;
}

static int req_prot_init(const struct proto *prot)
{
	struct request_sock_ops *rsk_prot = prot->rsk_prot;

	if (!rsk_prot)
		return 0;

	rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s",
					prot->name);
	if (!rsk_prot->slab_name)
		return -ENOMEM;

	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
					   rsk_prot->obj_size, 0,
					   SLAB_ACCOUNT | prot->slab_flags,
					   NULL);

	if (!rsk_prot->slab) {
		pr_crit("%s: Can't create request sock SLAB cache!\n",
			prot->name);
		return -ENOMEM;
	}
	return 0;
}

int proto_register(struct proto *prot, int alloc_slab)
{
	int ret = -ENOBUFS;

	if (prot->memory_allocated && !prot->sysctl_mem) {
		pr_err("%s: missing sysctl_mem\n", prot->name);
		return -EINVAL;
	}
	if (prot->memory_allocated && !prot->per_cpu_fw_alloc) {
		pr_err("%s: missing per_cpu_fw_alloc\n", prot->name);
		return -EINVAL;
	}
	if (alloc_slab) {
		prot->slab = kmem_cache_create_usercopy(prot->name,
					prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
					prot->slab_flags,
					prot->useroffset, prot->usersize,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (req_prot_init(prot))
			goto out_free_request_sock_slab;

		if (tw_prot_init(prot))
			goto out_free_timewait_sock_slab;
	}

	mutex_lock(&proto_list_mutex);
	ret = assign_proto_idx(prot);
	if (ret) {
		mutex_unlock(&proto_list_mutex);
		goto out_free_timewait_sock_slab;
	}
	list_add(&prot->node, &proto_list);
	mutex_unlock(&proto_list_mutex);
	return ret;

out_free_timewait_sock_slab:
	if (alloc_slab)
		tw_prot_cleanup(prot->twsk_prot);
out_free_request_sock_slab:
	if (alloc_slab) {
		req_prot_cleanup(prot->rsk_prot);

		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}
out:
	return ret;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;

	req_prot_cleanup(prot->rsk_prot);
	tw_prot_cleanup(prot->twsk_prot);
}
EXPORT_SYMBOL(proto_unregister);

int sock_load_diag_module(int family, int protocol)
{
	if (!protocol) {
		if (!sock_is_registered(family))
			return -ENOENT;

		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				      NETLINK_SOCK_DIAG, family);
	}

#ifdef CONFIG_INET
	if (family == AF_INET &&
	    protocol != IPPROTO_RAW &&
	    protocol < MAX_INET_PROTOS &&
	    !rcu_access_pointer(inet_protos[protocol]))
		return -ENOENT;
#endif

	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			      NETLINK_SOCK_DIAG, family, protocol);
}
EXPORT_SYMBOL(sock_load_diag_module);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static const char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start  = proto_seq_start,
	.next   = proto_seq_next,
	.stop   = proto_seq_stop,
	.show   = proto_seq_show,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */

#ifdef CONFIG_NET_RX_BUSY_POLL
bool sk_busy_loop_end(void *p, unsigned long start_time)
{
	struct sock *sk = p;

	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		return true;

	if (sk_is_udp(sk) &&
	    !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
		return true;

	return sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */

int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	if (!sk->sk_prot->bind_add)
		return -EOPNOTSUPP;
	return sk->sk_prot->bind_add(sk, addr, addr_len);
}
EXPORT_SYMBOL(sock_bind_add);

/* Copy 'size' bytes in from userspace and, if the ioctl succeeds, copy
 * 'size' bytes back out to userspace.
 */
int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
		     void __user *arg, void *karg, size_t size)
{
	int ret;

	if (copy_from_user(karg, arg, size))
		return -EFAULT;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, karg);
	if (ret)
		return ret;

	if (copy_to_user(arg, karg, size))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL(sock_ioctl_inout);

/* This is the most common ioctl prep function: the result (4 bytes) is copied
 * back to userspace if the ioctl() returns successfully, and no input is
 * copied in from userspace.
 */
static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg)
{
	int ret, karg = 0;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg);
	if (ret)
		return ret;

	return put_user(karg, (int __user *)arg);
}

/* A wrapper around sock ioctls that copies the data from userspace
 * (depending on the protocol/ioctl) and copies the result back to userspace.
 * The main motivation for this function is to pass kernel memory to the
 * protocol ioctl callbacks instead of userspace memory.
 */
int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	int rc = 1;

	if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET)
		rc = ipmr_sk_ioctl(sk, cmd, arg);
	else if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET6)
		rc = ip6mr_sk_ioctl(sk, cmd, arg);
	else if (sk_is_phonet(sk))
		rc = phonet_sk_ioctl(sk, cmd, arg);

	/* If the ioctl was handled above, return its result */
	if (rc <= 0)
		return rc;

	/* Otherwise fall back to the default handler */
	return sock_ioctl_out(sk, cmd, arg);
}
EXPORT_SYMBOL(sk_ioctl);
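
/*
 * Illustrative sketch (not part of the original file): with sk_ioctl() in
 * place, a protocol's ->ioctl() callback only ever sees kernel memory, so a
 * handler for something like SIOCOUTQ reduces to (foo_ioctl is hypothetical):
 *
 *	static int foo_ioctl(struct sock *sk, int cmd, int *karg)
 *	{
 *		if (cmd != SIOCOUTQ)
 *			return -ENOIOCTLCMD;
 *		*karg = sk_wmem_alloc_get(sk);
 *		return 0;
 *	}
 *
 * sock_ioctl_out() then copies *karg back to the user-supplied pointer.
 */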

static int __init sock_struct_check(void)
{
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_sndbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
	return 0;
}

core_initcall(sock_struct_check);
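
/*
 * Illustrative sketch (not part of the original file): the groups asserted
 * above are delimited inside struct sock with the cache-group markers from
 * <linux/cache.h>, roughly:
 *
 *	__cacheline_group_begin(sock_write_tx);
 *	atomic_t		sk_omem_alloc;
 *	...
 *	__cacheline_group_end(sock_write_tx);
 *
 * so sock_struct_check() turns into a build failure any later edit that moves
 * a hot field out of its intended read-mostly or write-mostly group.
 */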