1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Generic socket support routines. Memory allocators, socket lock/release 8 * handler for protocols to use and generic option handler. 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * Florian La Roche, <flla@stud.uni-sb.de> 13 * Alan Cox, <A.Cox@swansea.ac.uk> 14 * 15 * Fixes: 16 * Alan Cox : Numerous verify_area() problems 17 * Alan Cox : Connecting on a connecting socket 18 * now returns an error for tcp. 19 * Alan Cox : sock->protocol is set correctly. 20 * and is not sometimes left as 0. 21 * Alan Cox : connect handles icmp errors on a 22 * connect properly. Unfortunately there 23 * is a restart syscall nasty there. I 24 * can't match BSD without hacking the C 25 * library. Ideas urgently sought! 26 * Alan Cox : Disallow bind() to addresses that are 27 * not ours - especially broadcast ones!! 28 * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost) 29 * Alan Cox : sock_wfree/sock_rfree don't destroy sockets, 30 * instead they leave that for the DESTROY timer. 31 * Alan Cox : Clean up error flag in accept 32 * Alan Cox : TCP ack handling is buggy, the DESTROY timer 33 * was buggy. Put a remove_sock() in the handler 34 * for memory when we hit 0. Also altered the timer 35 * code. The ACK stuff can wait and needs major 36 * TCP layer surgery. 37 * Alan Cox : Fixed TCP ack bug, removed remove sock 38 * and fixed timer/inet_bh race. 39 * Alan Cox : Added zapped flag for TCP 40 * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code 41 * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb 42 * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources 43 * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing. 44 * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so... 45 * Rick Sladkey : Relaxed UDP rules for matching packets. 46 * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support 47 * Pauline Middelink : identd support 48 * Alan Cox : Fixed connect() taking signals I think. 49 * Alan Cox : SO_LINGER supported 50 * Alan Cox : Error reporting fixes 51 * Anonymous : inet_create tidied up (sk->reuse setting) 52 * Alan Cox : inet sockets don't set sk->type! 53 * Alan Cox : Split socket option code 54 * Alan Cox : Callbacks 55 * Alan Cox : Nagle flag for Charles & Johannes stuff 56 * Alex : Removed restriction on inet fioctl 57 * Alan Cox : Splitting INET from NET core 58 * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt() 59 * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code 60 * Alan Cox : Split IP from generic code 61 * Alan Cox : New kfree_skbmem() 62 * Alan Cox : Make SO_DEBUG superuser only. 63 * Alan Cox : Allow anyone to clear SO_DEBUG 64 * (compatibility fix) 65 * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput. 66 * Alan Cox : Allocator for a socket is settable. 67 * Alan Cox : SO_ERROR includes soft errors. 68 * Alan Cox : Allow NULL arguments on some SO_ opts 69 * Alan Cox : Generic socket allocation to make hooks 70 * easier (suggested by Craig Metz). 71 * Michael Pall : SO_ERROR returns positive errno again 72 * Steve Whitehouse: Added default destructor to free 73 * protocol private data. 
74 * Steve Whitehouse: Added various other default routines 75 * common to several socket families. 76 * Chris Evans : Call suser() check last on F_SETOWN 77 * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER. 78 * Andi Kleen : Add sock_kmalloc()/sock_kfree_s() 79 * Andi Kleen : Fix write_space callback 80 * Chris Evans : Security fixes - signedness again 81 * Arnaldo C. Melo : cleanups, use skb_queue_purge 82 * 83 * To Fix: 84 */ 85 86 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 87 88 #include <linux/unaligned.h> 89 #include <linux/capability.h> 90 #include <linux/errno.h> 91 #include <linux/errqueue.h> 92 #include <linux/types.h> 93 #include <linux/socket.h> 94 #include <linux/in.h> 95 #include <linux/kernel.h> 96 #include <linux/module.h> 97 #include <linux/proc_fs.h> 98 #include <linux/seq_file.h> 99 #include <linux/sched.h> 100 #include <linux/sched/mm.h> 101 #include <linux/timer.h> 102 #include <linux/string.h> 103 #include <linux/sockios.h> 104 #include <linux/net.h> 105 #include <linux/mm.h> 106 #include <linux/slab.h> 107 #include <linux/interrupt.h> 108 #include <linux/poll.h> 109 #include <linux/tcp.h> 110 #include <linux/udp.h> 111 #include <linux/init.h> 112 #include <linux/highmem.h> 113 #include <linux/user_namespace.h> 114 #include <linux/static_key.h> 115 #include <linux/memcontrol.h> 116 #include <linux/prefetch.h> 117 #include <linux/compat.h> 118 #include <linux/mroute.h> 119 #include <linux/mroute6.h> 120 #include <linux/icmpv6.h> 121 122 #include <linux/uaccess.h> 123 124 #include <linux/netdevice.h> 125 #include <net/protocol.h> 126 #include <linux/skbuff.h> 127 #include <linux/skbuff_ref.h> 128 #include <net/net_namespace.h> 129 #include <net/request_sock.h> 130 #include <net/sock.h> 131 #include <net/proto_memory.h> 132 #include <linux/net_tstamp.h> 133 #include <net/xfrm.h> 134 #include <linux/ipsec.h> 135 #include <net/cls_cgroup.h> 136 #include <net/netprio_cgroup.h> 137 #include <linux/sock_diag.h> 138 139 #include <linux/filter.h> 140 #include <net/sock_reuseport.h> 141 #include <net/bpf_sk_storage.h> 142 143 #include <trace/events/sock.h> 144 145 #include <net/tcp.h> 146 #include <net/busy_poll.h> 147 #include <net/phonet/phonet.h> 148 149 #include <linux/ethtool.h> 150 151 #include <uapi/linux/pidfd.h> 152 153 #include "dev.h" 154 155 static DEFINE_MUTEX(proto_list_mutex); 156 static LIST_HEAD(proto_list); 157 158 static void sock_def_write_space_wfree(struct sock *sk); 159 static void sock_def_write_space(struct sock *sk); 160 161 /** 162 * sk_ns_capable - General socket capability test 163 * @sk: Socket to use a capability on or through 164 * @user_ns: The user namespace of the capability to use 165 * @cap: The capability to use 166 * 167 * Test to see if the opener of the socket had when the socket was 168 * created and the current process has the capability @cap in the user 169 * namespace @user_ns. 170 */ 171 bool sk_ns_capable(const struct sock *sk, 172 struct user_namespace *user_ns, int cap) 173 { 174 return file_ns_capable(sk->sk_socket->file, user_ns, cap) && 175 ns_capable(user_ns, cap); 176 } 177 EXPORT_SYMBOL(sk_ns_capable); 178 179 /** 180 * sk_capable - Socket global capability test 181 * @sk: Socket to use a capability on or through 182 * @cap: The global capability to use 183 * 184 * Test to see if the opener of the socket had when the socket was 185 * created and the current process has the capability @cap in all user 186 * namespaces. 
187 */ 188 bool sk_capable(const struct sock *sk, int cap) 189 { 190 return sk_ns_capable(sk, &init_user_ns, cap); 191 } 192 EXPORT_SYMBOL(sk_capable); 193 194 /** 195 * sk_net_capable - Network namespace socket capability test 196 * @sk: Socket to use a capability on or through 197 * @cap: The capability to use 198 * 199 * Test to see if the opener of the socket had when the socket was created 200 * and the current process has the capability @cap over the network namespace 201 * the socket is a member of. 202 */ 203 bool sk_net_capable(const struct sock *sk, int cap) 204 { 205 return sk_ns_capable(sk, sock_net(sk)->user_ns, cap); 206 } 207 EXPORT_SYMBOL(sk_net_capable); 208 209 /* 210 * Each address family might have different locking rules, so we have 211 * one slock key per address family and separate keys for internal and 212 * userspace sockets. 213 */ 214 static struct lock_class_key af_family_keys[AF_MAX]; 215 static struct lock_class_key af_family_kern_keys[AF_MAX]; 216 static struct lock_class_key af_family_slock_keys[AF_MAX]; 217 static struct lock_class_key af_family_kern_slock_keys[AF_MAX]; 218 219 /* 220 * Make lock validator output more readable. (we pre-construct these 221 * strings build-time, so that runtime initialization of socket 222 * locks is fast): 223 */ 224 225 #define _sock_locks(x) \ 226 x "AF_UNSPEC", x "AF_UNIX" , x "AF_INET" , \ 227 x "AF_AX25" , x "AF_IPX" , x "AF_APPLETALK", \ 228 x "AF_NETROM", x "AF_BRIDGE" , x "AF_ATMPVC" , \ 229 x "AF_X25" , x "AF_INET6" , x "AF_ROSE" , \ 230 x "AF_DECnet", x "AF_NETBEUI" , x "AF_SECURITY" , \ 231 x "AF_KEY" , x "AF_NETLINK" , x "AF_PACKET" , \ 232 x "AF_ASH" , x "AF_ECONET" , x "AF_ATMSVC" , \ 233 x "AF_RDS" , x "AF_SNA" , x "AF_IRDA" , \ 234 x "AF_PPPOX" , x "AF_WANPIPE" , x "AF_LLC" , \ 235 x "27" , x "28" , x "AF_CAN" , \ 236 x "AF_TIPC" , x "AF_BLUETOOTH", x "IUCV" , \ 237 x "AF_RXRPC" , x "AF_ISDN" , x "AF_PHONET" , \ 238 x "AF_IEEE802154", x "AF_CAIF" , x "AF_ALG" , \ 239 x "AF_NFC" , x "AF_VSOCK" , x "AF_KCM" , \ 240 x "AF_QIPCRTR", x "AF_SMC" , x "AF_XDP" , \ 241 x "AF_MCTP" , \ 242 x "AF_MAX" 243 244 static const char *const af_family_key_strings[AF_MAX+1] = { 245 _sock_locks("sk_lock-") 246 }; 247 static const char *const af_family_slock_key_strings[AF_MAX+1] = { 248 _sock_locks("slock-") 249 }; 250 static const char *const af_family_clock_key_strings[AF_MAX+1] = { 251 _sock_locks("clock-") 252 }; 253 254 static const char *const af_family_kern_key_strings[AF_MAX+1] = { 255 _sock_locks("k-sk_lock-") 256 }; 257 static const char *const af_family_kern_slock_key_strings[AF_MAX+1] = { 258 _sock_locks("k-slock-") 259 }; 260 static const char *const af_family_kern_clock_key_strings[AF_MAX+1] = { 261 _sock_locks("k-clock-") 262 }; 263 static const char *const af_family_rlock_key_strings[AF_MAX+1] = { 264 _sock_locks("rlock-") 265 }; 266 static const char *const af_family_wlock_key_strings[AF_MAX+1] = { 267 _sock_locks("wlock-") 268 }; 269 static const char *const af_family_elock_key_strings[AF_MAX+1] = { 270 _sock_locks("elock-") 271 }; 272 273 /* 274 * sk_callback_lock and sk queues locking rules are per-address-family, 275 * so split the lock classes by using a per-AF key: 276 */ 277 static struct lock_class_key af_callback_keys[AF_MAX]; 278 static struct lock_class_key af_rlock_keys[AF_MAX]; 279 static struct lock_class_key af_wlock_keys[AF_MAX]; 280 static struct lock_class_key af_elock_keys[AF_MAX]; 281 static struct lock_class_key af_kern_callback_keys[AF_MAX]; 282 283 /* Run time adjustable parameters. 
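 * These variables back the net.core.wmem_max, net.core.rmem_max,
 * net.core.wmem_default and net.core.rmem_default sysctls; the *_max values
 * cap what ordinary SO_SNDBUF / SO_RCVBUF requests may ask for in
 * sk_setsockopt() below (illustrative note, not from the original comment).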
*/ 284 __u32 sysctl_wmem_max __read_mostly = 4 << 20; 285 EXPORT_SYMBOL(sysctl_wmem_max); 286 __u32 sysctl_rmem_max __read_mostly = 4 << 20; 287 EXPORT_SYMBOL(sysctl_rmem_max); 288 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_DEFAULT; 289 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_DEFAULT; 290 291 DEFINE_STATIC_KEY_FALSE(memalloc_socks_key); 292 EXPORT_SYMBOL_GPL(memalloc_socks_key); 293 294 /** 295 * sk_set_memalloc - sets %SOCK_MEMALLOC 296 * @sk: socket to set it on 297 * 298 * Set %SOCK_MEMALLOC on a socket for access to emergency reserves. 299 * It's the responsibility of the admin to adjust min_free_kbytes 300 * to meet the requirements 301 */ 302 void sk_set_memalloc(struct sock *sk) 303 { 304 sock_set_flag(sk, SOCK_MEMALLOC); 305 sk->sk_allocation |= __GFP_MEMALLOC; 306 static_branch_inc(&memalloc_socks_key); 307 } 308 EXPORT_SYMBOL_GPL(sk_set_memalloc); 309 310 void sk_clear_memalloc(struct sock *sk) 311 { 312 sock_reset_flag(sk, SOCK_MEMALLOC); 313 sk->sk_allocation &= ~__GFP_MEMALLOC; 314 static_branch_dec(&memalloc_socks_key); 315 316 /* 317 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward 318 * progress of swapping. SOCK_MEMALLOC may be cleared while 319 * it has rmem allocations due to the last swapfile being deactivated 320 * but there is a risk that the socket is unusable due to exceeding 321 * the rmem limits. Reclaim the reserves and obey rmem limits again. 322 */ 323 sk_mem_reclaim(sk); 324 } 325 EXPORT_SYMBOL_GPL(sk_clear_memalloc); 326 327 int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb) 328 { 329 int ret; 330 unsigned int noreclaim_flag; 331 332 /* these should have been dropped before queueing */ 333 BUG_ON(!sock_flag(sk, SOCK_MEMALLOC)); 334 335 noreclaim_flag = memalloc_noreclaim_save(); 336 ret = INDIRECT_CALL_INET(sk->sk_backlog_rcv, 337 tcp_v6_do_rcv, 338 tcp_v4_do_rcv, 339 sk, skb); 340 memalloc_noreclaim_restore(noreclaim_flag); 341 342 return ret; 343 } 344 EXPORT_SYMBOL(__sk_backlog_rcv); 345 346 void sk_error_report(struct sock *sk) 347 { 348 sk->sk_error_report(sk); 349 350 switch (sk->sk_family) { 351 case AF_INET: 352 fallthrough; 353 case AF_INET6: 354 trace_inet_sk_error_report(sk); 355 break; 356 default: 357 break; 358 } 359 } 360 EXPORT_SYMBOL(sk_error_report); 361 362 int sock_get_timeout(long timeo, void *optval, bool old_timeval) 363 { 364 struct __kernel_sock_timeval tv; 365 366 if (timeo == MAX_SCHEDULE_TIMEOUT) { 367 tv.tv_sec = 0; 368 tv.tv_usec = 0; 369 } else { 370 tv.tv_sec = timeo / HZ; 371 tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ; 372 } 373 374 if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { 375 struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec }; 376 *(struct old_timeval32 *)optval = tv32; 377 return sizeof(tv32); 378 } 379 380 if (old_timeval) { 381 struct __kernel_old_timeval old_tv; 382 old_tv.tv_sec = tv.tv_sec; 383 old_tv.tv_usec = tv.tv_usec; 384 *(struct __kernel_old_timeval *)optval = old_tv; 385 return sizeof(old_tv); 386 } 387 388 *(struct __kernel_sock_timeval *)optval = tv; 389 return sizeof(tv); 390 } 391 EXPORT_SYMBOL(sock_get_timeout); 392 393 int sock_copy_user_timeval(struct __kernel_sock_timeval *tv, 394 sockptr_t optval, int optlen, bool old_timeval) 395 { 396 if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) { 397 struct old_timeval32 tv32; 398 399 if (optlen < sizeof(tv32)) 400 return -EINVAL; 401 402 if (copy_from_sockptr(&tv32, optval, sizeof(tv32))) 403 return -EFAULT; 404 tv->tv_sec = tv32.tv_sec; 405 tv->tv_usec = tv32.tv_usec; 
406 } else if (old_timeval) { 407 struct __kernel_old_timeval old_tv; 408 409 if (optlen < sizeof(old_tv)) 410 return -EINVAL; 411 if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv))) 412 return -EFAULT; 413 tv->tv_sec = old_tv.tv_sec; 414 tv->tv_usec = old_tv.tv_usec; 415 } else { 416 if (optlen < sizeof(*tv)) 417 return -EINVAL; 418 if (copy_from_sockptr(tv, optval, sizeof(*tv))) 419 return -EFAULT; 420 } 421 422 return 0; 423 } 424 EXPORT_SYMBOL(sock_copy_user_timeval); 425 426 static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen, 427 bool old_timeval) 428 { 429 struct __kernel_sock_timeval tv; 430 int err = sock_copy_user_timeval(&tv, optval, optlen, old_timeval); 431 long val; 432 433 if (err) 434 return err; 435 436 if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC) 437 return -EDOM; 438 439 if (tv.tv_sec < 0) { 440 static int warned __read_mostly; 441 442 WRITE_ONCE(*timeo_p, 0); 443 if (warned < 10 && net_ratelimit()) { 444 warned++; 445 pr_info("%s: `%s' (pid %d) tries to set negative timeout\n", 446 __func__, current->comm, task_pid_nr(current)); 447 } 448 return 0; 449 } 450 val = MAX_SCHEDULE_TIMEOUT; 451 if ((tv.tv_sec || tv.tv_usec) && 452 (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1))) 453 val = tv.tv_sec * HZ + DIV_ROUND_UP((unsigned long)tv.tv_usec, 454 USEC_PER_SEC / HZ); 455 WRITE_ONCE(*timeo_p, val); 456 return 0; 457 } 458 459 static bool sk_set_prio_allowed(const struct sock *sk, int val) 460 { 461 return ((val >= TC_PRIO_BESTEFFORT && val <= TC_PRIO_INTERACTIVE) || 462 sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) || 463 sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)); 464 } 465 466 static bool sock_needs_netstamp(const struct sock *sk) 467 { 468 switch (sk->sk_family) { 469 case AF_UNSPEC: 470 case AF_UNIX: 471 return false; 472 default: 473 return true; 474 } 475 } 476 477 static void sock_disable_timestamp(struct sock *sk, unsigned long flags) 478 { 479 if (sk->sk_flags & flags) { 480 sk->sk_flags &= ~flags; 481 if (sock_needs_netstamp(sk) && 482 !(sk->sk_flags & SK_FLAGS_TIMESTAMP)) 483 net_disable_timestamp(); 484 } 485 } 486 487 488 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 489 { 490 unsigned long flags; 491 struct sk_buff_head *list = &sk->sk_receive_queue; 492 493 if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) { 494 sk_drops_inc(sk); 495 trace_sock_rcvqueue_full(sk, skb); 496 return -ENOMEM; 497 } 498 499 if (!sk_rmem_schedule(sk, skb, skb->truesize)) { 500 sk_drops_inc(sk); 501 return -ENOBUFS; 502 } 503 504 skb->dev = NULL; 505 skb_set_owner_r(skb, sk); 506 507 /* we escape from rcu protected region, make sure we dont leak 508 * a norefcounted dst 509 */ 510 skb_dst_force(skb); 511 512 spin_lock_irqsave(&list->lock, flags); 513 sock_skb_set_dropcount(sk, skb); 514 __skb_queue_tail(list, skb); 515 spin_unlock_irqrestore(&list->lock, flags); 516 517 if (!sock_flag(sk, SOCK_DEAD)) 518 sk->sk_data_ready(sk); 519 return 0; 520 } 521 EXPORT_SYMBOL(__sock_queue_rcv_skb); 522 523 int sock_queue_rcv_skb_reason(struct sock *sk, struct sk_buff *skb, 524 enum skb_drop_reason *reason) 525 { 526 enum skb_drop_reason drop_reason; 527 int err; 528 529 err = sk_filter_reason(sk, skb, &drop_reason); 530 if (err) 531 goto out; 532 533 err = __sock_queue_rcv_skb(sk, skb); 534 switch (err) { 535 case -ENOMEM: 536 drop_reason = SKB_DROP_REASON_SOCKET_RCVBUFF; 537 break; 538 case -ENOBUFS: 539 drop_reason = SKB_DROP_REASON_PROTO_MEM; 540 break; 541 default: 542 drop_reason = SKB_NOT_DROPPED_YET; 543 
break; 544 } 545 out: 546 if (reason) 547 *reason = drop_reason; 548 return err; 549 } 550 EXPORT_SYMBOL(sock_queue_rcv_skb_reason); 551 552 int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, 553 const int nested, unsigned int trim_cap, bool refcounted) 554 { 555 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; 556 int rc = NET_RX_SUCCESS; 557 int err; 558 559 if (sk_filter_trim_cap(sk, skb, trim_cap, &reason)) 560 goto discard_and_relse; 561 562 skb->dev = NULL; 563 564 if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) { 565 sk_drops_inc(sk); 566 reason = SKB_DROP_REASON_SOCKET_RCVBUFF; 567 goto discard_and_relse; 568 } 569 if (nested) 570 bh_lock_sock_nested(sk); 571 else 572 bh_lock_sock(sk); 573 if (!sock_owned_by_user(sk)) { 574 /* 575 * trylock + unlock semantics: 576 */ 577 mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_); 578 579 rc = sk_backlog_rcv(sk, skb); 580 581 mutex_release(&sk->sk_lock.dep_map, _RET_IP_); 582 } else if ((err = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))) { 583 bh_unlock_sock(sk); 584 if (err == -ENOMEM) 585 reason = SKB_DROP_REASON_PFMEMALLOC; 586 if (err == -ENOBUFS) 587 reason = SKB_DROP_REASON_SOCKET_BACKLOG; 588 sk_drops_inc(sk); 589 goto discard_and_relse; 590 } 591 592 bh_unlock_sock(sk); 593 out: 594 if (refcounted) 595 sock_put(sk); 596 return rc; 597 discard_and_relse: 598 sk_skb_reason_drop(sk, skb, reason); 599 goto out; 600 } 601 EXPORT_SYMBOL(__sk_receive_skb); 602 603 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ip6_dst_check(struct dst_entry *, 604 u32)); 605 INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *, 606 u32)); 607 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 608 { 609 struct dst_entry *dst = __sk_dst_get(sk); 610 611 if (dst && READ_ONCE(dst->obsolete) && 612 INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check, 613 dst, cookie) == NULL) { 614 sk_tx_queue_clear(sk); 615 WRITE_ONCE(sk->sk_dst_pending_confirm, 0); 616 RCU_INIT_POINTER(sk->sk_dst_cache, NULL); 617 dst_release(dst); 618 return NULL; 619 } 620 621 return dst; 622 } 623 EXPORT_SYMBOL(__sk_dst_check); 624 625 struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie) 626 { 627 struct dst_entry *dst = sk_dst_get(sk); 628 629 if (dst && READ_ONCE(dst->obsolete) && 630 INDIRECT_CALL_INET(dst->ops->check, ip6_dst_check, ipv4_dst_check, 631 dst, cookie) == NULL) { 632 sk_dst_reset(sk); 633 dst_release(dst); 634 return NULL; 635 } 636 637 return dst; 638 } 639 EXPORT_SYMBOL(sk_dst_check); 640 641 static int sock_bindtoindex_locked(struct sock *sk, int ifindex) 642 { 643 int ret = -ENOPROTOOPT; 644 #ifdef CONFIG_NETDEVICES 645 struct net *net = sock_net(sk); 646 647 /* Sorry... */ 648 ret = -EPERM; 649 if (sk->sk_bound_dev_if && !ns_capable(net->user_ns, CAP_NET_RAW)) 650 goto out; 651 652 ret = -EINVAL; 653 if (ifindex < 0) 654 goto out; 655 656 /* Paired with all READ_ONCE() done locklessly. 
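	 * (For example, sock_getbindtodevice() and the SO_BINDTOIFINDEX case of
	 * sk_getsockopt() below read sk_bound_dev_if with READ_ONCE() and no
	 * socket lock held.)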
*/ 657 WRITE_ONCE(sk->sk_bound_dev_if, ifindex); 658 659 if (sk->sk_prot->rehash) 660 sk->sk_prot->rehash(sk); 661 sk_dst_reset(sk); 662 663 ret = 0; 664 665 out: 666 #endif 667 668 return ret; 669 } 670 671 int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk) 672 { 673 int ret; 674 675 if (lock_sk) 676 lock_sock(sk); 677 ret = sock_bindtoindex_locked(sk, ifindex); 678 if (lock_sk) 679 release_sock(sk); 680 681 return ret; 682 } 683 EXPORT_SYMBOL(sock_bindtoindex); 684 685 static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen) 686 { 687 int ret = -ENOPROTOOPT; 688 #ifdef CONFIG_NETDEVICES 689 struct net *net = sock_net(sk); 690 char devname[IFNAMSIZ]; 691 int index; 692 693 ret = -EINVAL; 694 if (optlen < 0) 695 goto out; 696 697 /* Bind this socket to a particular device like "eth0", 698 * as specified in the passed interface name. If the 699 * name is "" or the option length is zero the socket 700 * is not bound. 701 */ 702 if (optlen > IFNAMSIZ - 1) 703 optlen = IFNAMSIZ - 1; 704 memset(devname, 0, sizeof(devname)); 705 706 ret = -EFAULT; 707 if (copy_from_sockptr(devname, optval, optlen)) 708 goto out; 709 710 index = 0; 711 if (devname[0] != '\0') { 712 struct net_device *dev; 713 714 rcu_read_lock(); 715 dev = dev_get_by_name_rcu(net, devname); 716 if (dev) 717 index = dev->ifindex; 718 rcu_read_unlock(); 719 ret = -ENODEV; 720 if (!dev) 721 goto out; 722 } 723 724 sockopt_lock_sock(sk); 725 ret = sock_bindtoindex_locked(sk, index); 726 sockopt_release_sock(sk); 727 out: 728 #endif 729 730 return ret; 731 } 732 733 static int sock_getbindtodevice(struct sock *sk, sockptr_t optval, 734 sockptr_t optlen, int len) 735 { 736 int ret = -ENOPROTOOPT; 737 #ifdef CONFIG_NETDEVICES 738 int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); 739 struct net *net = sock_net(sk); 740 char devname[IFNAMSIZ]; 741 742 if (bound_dev_if == 0) { 743 len = 0; 744 goto zero; 745 } 746 747 ret = -EINVAL; 748 if (len < IFNAMSIZ) 749 goto out; 750 751 ret = netdev_get_name(net, devname, bound_dev_if); 752 if (ret) 753 goto out; 754 755 len = strlen(devname) + 1; 756 757 ret = -EFAULT; 758 if (copy_to_sockptr(optval, devname, len)) 759 goto out; 760 761 zero: 762 ret = -EFAULT; 763 if (copy_to_sockptr(optlen, &len, sizeof(int))) 764 goto out; 765 766 ret = 0; 767 768 out: 769 #endif 770 771 return ret; 772 } 773 774 bool sk_mc_loop(const struct sock *sk) 775 { 776 if (dev_recursion_level()) 777 return false; 778 if (!sk) 779 return true; 780 /* IPV6_ADDRFORM can change sk->sk_family under us. 
*/ 781 switch (READ_ONCE(sk->sk_family)) { 782 case AF_INET: 783 return inet_test_bit(MC_LOOP, sk); 784 #if IS_ENABLED(CONFIG_IPV6) 785 case AF_INET6: 786 return inet6_test_bit(MC6_LOOP, sk); 787 #endif 788 } 789 WARN_ON_ONCE(1); 790 return true; 791 } 792 EXPORT_SYMBOL(sk_mc_loop); 793 794 void sock_set_reuseaddr(struct sock *sk) 795 { 796 lock_sock(sk); 797 sk->sk_reuse = SK_CAN_REUSE; 798 release_sock(sk); 799 } 800 EXPORT_SYMBOL(sock_set_reuseaddr); 801 802 void sock_set_reuseport(struct sock *sk) 803 { 804 lock_sock(sk); 805 sk->sk_reuseport = true; 806 release_sock(sk); 807 } 808 EXPORT_SYMBOL(sock_set_reuseport); 809 810 void sock_no_linger(struct sock *sk) 811 { 812 lock_sock(sk); 813 WRITE_ONCE(sk->sk_lingertime, 0); 814 sock_set_flag(sk, SOCK_LINGER); 815 release_sock(sk); 816 } 817 EXPORT_SYMBOL(sock_no_linger); 818 819 void sock_set_priority(struct sock *sk, u32 priority) 820 { 821 WRITE_ONCE(sk->sk_priority, priority); 822 } 823 EXPORT_SYMBOL(sock_set_priority); 824 825 void sock_set_sndtimeo(struct sock *sk, s64 secs) 826 { 827 if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1) 828 WRITE_ONCE(sk->sk_sndtimeo, secs * HZ); 829 else 830 WRITE_ONCE(sk->sk_sndtimeo, MAX_SCHEDULE_TIMEOUT); 831 } 832 EXPORT_SYMBOL(sock_set_sndtimeo); 833 834 static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns) 835 { 836 sock_valbool_flag(sk, SOCK_RCVTSTAMP, val); 837 sock_valbool_flag(sk, SOCK_RCVTSTAMPNS, val && ns); 838 if (val) { 839 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, new); 840 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 841 } 842 } 843 844 void sock_set_timestamp(struct sock *sk, int optname, bool valbool) 845 { 846 switch (optname) { 847 case SO_TIMESTAMP_OLD: 848 __sock_set_timestamps(sk, valbool, false, false); 849 break; 850 case SO_TIMESTAMP_NEW: 851 __sock_set_timestamps(sk, valbool, true, false); 852 break; 853 case SO_TIMESTAMPNS_OLD: 854 __sock_set_timestamps(sk, valbool, false, true); 855 break; 856 case SO_TIMESTAMPNS_NEW: 857 __sock_set_timestamps(sk, valbool, true, true); 858 break; 859 } 860 } 861 862 static int sock_timestamping_bind_phc(struct sock *sk, int phc_index) 863 { 864 struct net *net = sock_net(sk); 865 struct net_device *dev = NULL; 866 bool match = false; 867 int *vclock_index; 868 int i, num; 869 870 if (sk->sk_bound_dev_if) 871 dev = dev_get_by_index(net, sk->sk_bound_dev_if); 872 873 if (!dev) { 874 pr_err("%s: sock not bind to device\n", __func__); 875 return -EOPNOTSUPP; 876 } 877 878 num = ethtool_get_phc_vclocks(dev, &vclock_index); 879 dev_put(dev); 880 881 for (i = 0; i < num; i++) { 882 if (*(vclock_index + i) == phc_index) { 883 match = true; 884 break; 885 } 886 } 887 888 if (num > 0) 889 kfree(vclock_index); 890 891 if (!match) 892 return -EINVAL; 893 894 WRITE_ONCE(sk->sk_bind_phc, phc_index); 895 896 return 0; 897 } 898 899 int sock_set_timestamping(struct sock *sk, int optname, 900 struct so_timestamping timestamping) 901 { 902 int val = timestamping.flags; 903 int ret; 904 905 if (val & ~SOF_TIMESTAMPING_MASK) 906 return -EINVAL; 907 908 if (val & SOF_TIMESTAMPING_OPT_ID_TCP && 909 !(val & SOF_TIMESTAMPING_OPT_ID)) 910 return -EINVAL; 911 912 if (val & SOF_TIMESTAMPING_OPT_ID && 913 !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { 914 if (sk_is_tcp(sk)) { 915 if ((1 << sk->sk_state) & 916 (TCPF_CLOSE | TCPF_LISTEN)) 917 return -EINVAL; 918 if (val & SOF_TIMESTAMPING_OPT_ID_TCP) 919 atomic_set(&sk->sk_tskey, tcp_sk(sk)->write_seq); 920 else 921 atomic_set(&sk->sk_tskey, tcp_sk(sk)->snd_una); 922 } else { 923 
atomic_set(&sk->sk_tskey, 0); 924 } 925 } 926 927 if (val & SOF_TIMESTAMPING_OPT_STATS && 928 !(val & SOF_TIMESTAMPING_OPT_TSONLY)) 929 return -EINVAL; 930 931 if (val & SOF_TIMESTAMPING_BIND_PHC) { 932 ret = sock_timestamping_bind_phc(sk, timestamping.bind_phc); 933 if (ret) 934 return ret; 935 } 936 937 WRITE_ONCE(sk->sk_tsflags, val); 938 sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW); 939 sock_valbool_flag(sk, SOCK_TIMESTAMPING_ANY, !!(val & TSFLAGS_ANY)); 940 941 if (val & SOF_TIMESTAMPING_RX_SOFTWARE) 942 sock_enable_timestamp(sk, 943 SOCK_TIMESTAMPING_RX_SOFTWARE); 944 else 945 sock_disable_timestamp(sk, 946 (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)); 947 return 0; 948 } 949 950 #if defined(CONFIG_CGROUP_BPF) 951 void bpf_skops_tx_timestamping(struct sock *sk, struct sk_buff *skb, int op) 952 { 953 struct bpf_sock_ops_kern sock_ops; 954 955 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); 956 sock_ops.op = op; 957 sock_ops.is_fullsock = 1; 958 sock_ops.sk = sk; 959 bpf_skops_init_skb(&sock_ops, skb, 0); 960 __cgroup_bpf_run_filter_sock_ops(sk, &sock_ops, CGROUP_SOCK_OPS); 961 } 962 #endif 963 964 void sock_set_keepalive(struct sock *sk) 965 { 966 lock_sock(sk); 967 if (sk->sk_prot->keepalive) 968 sk->sk_prot->keepalive(sk, true); 969 sock_valbool_flag(sk, SOCK_KEEPOPEN, true); 970 release_sock(sk); 971 } 972 EXPORT_SYMBOL(sock_set_keepalive); 973 974 static void __sock_set_rcvbuf(struct sock *sk, int val) 975 { 976 /* Ensure val * 2 fits into an int, to prevent max_t() from treating it 977 * as a negative value. 978 */ 979 val = min_t(int, val, INT_MAX / 2); 980 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 981 982 /* We double it on the way in to account for "struct sk_buff" etc. 983 * overhead. Applications assume that the SO_RCVBUF setting they make 984 * will allow that much actual data to be received on that socket. 985 * 986 * Applications are unaware that "struct sk_buff" and other overheads 987 * allocate from the receive buffer during socket buffer allocation. 988 * 989 * And after considering the possible alternatives, returning the value 990 * we actually used in getsockopt is the most desirable behavior. 
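	 * Illustrative numbers: a setsockopt(SO_RCVBUF) request of 64 KiB is
	 * stored as 128 KiB in sk_rcvbuf, and getsockopt(SO_RCVBUF) then
	 * reports the doubled value back to the application.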
991 */ 992 WRITE_ONCE(sk->sk_rcvbuf, max_t(int, val * 2, SOCK_MIN_RCVBUF)); 993 } 994 995 void sock_set_rcvbuf(struct sock *sk, int val) 996 { 997 lock_sock(sk); 998 __sock_set_rcvbuf(sk, val); 999 release_sock(sk); 1000 } 1001 EXPORT_SYMBOL(sock_set_rcvbuf); 1002 1003 static void __sock_set_mark(struct sock *sk, u32 val) 1004 { 1005 if (val != sk->sk_mark) { 1006 WRITE_ONCE(sk->sk_mark, val); 1007 sk_dst_reset(sk); 1008 } 1009 } 1010 1011 void sock_set_mark(struct sock *sk, u32 val) 1012 { 1013 lock_sock(sk); 1014 __sock_set_mark(sk, val); 1015 release_sock(sk); 1016 } 1017 EXPORT_SYMBOL(sock_set_mark); 1018 1019 static void sock_release_reserved_memory(struct sock *sk, int bytes) 1020 { 1021 /* Round down bytes to multiple of pages */ 1022 bytes = round_down(bytes, PAGE_SIZE); 1023 1024 WARN_ON(bytes > sk->sk_reserved_mem); 1025 WRITE_ONCE(sk->sk_reserved_mem, sk->sk_reserved_mem - bytes); 1026 sk_mem_reclaim(sk); 1027 } 1028 1029 static int sock_reserve_memory(struct sock *sk, int bytes) 1030 { 1031 long allocated; 1032 bool charged; 1033 int pages; 1034 1035 if (!mem_cgroup_sk_enabled(sk) || !sk_has_account(sk)) 1036 return -EOPNOTSUPP; 1037 1038 if (!bytes) 1039 return 0; 1040 1041 pages = sk_mem_pages(bytes); 1042 1043 /* pre-charge to memcg */ 1044 charged = mem_cgroup_sk_charge(sk, pages, 1045 GFP_KERNEL | __GFP_RETRY_MAYFAIL); 1046 if (!charged) 1047 return -ENOMEM; 1048 1049 /* pre-charge to forward_alloc */ 1050 sk_memory_allocated_add(sk, pages); 1051 allocated = sk_memory_allocated(sk); 1052 /* If the system goes into memory pressure with this 1053 * precharge, give up and return error. 1054 */ 1055 if (allocated > sk_prot_mem_limits(sk, 1)) { 1056 sk_memory_allocated_sub(sk, pages); 1057 mem_cgroup_sk_uncharge(sk, pages); 1058 return -ENOMEM; 1059 } 1060 sk_forward_alloc_add(sk, pages << PAGE_SHIFT); 1061 1062 WRITE_ONCE(sk->sk_reserved_mem, 1063 sk->sk_reserved_mem + (pages << PAGE_SHIFT)); 1064 1065 return 0; 1066 } 1067 1068 #ifdef CONFIG_PAGE_POOL 1069 1070 /* This is the number of tokens and frags that the user can SO_DEVMEM_DONTNEED 1071 * in 1 syscall. The limit exists to limit the amount of memory the kernel 1072 * allocates to copy these tokens, and to prevent looping over the frags for 1073 * too long. 
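 * (Illustrative worst case under these limits: 128 tokens whose token_count
 * values add up to 1024 frags, released in batches of ARRAY_SIZE(netmems)
 * == 16 below.)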
1074 */ 1075 #define MAX_DONTNEED_TOKENS 128 1076 #define MAX_DONTNEED_FRAGS 1024 1077 1078 static noinline_for_stack int 1079 sock_devmem_dontneed(struct sock *sk, sockptr_t optval, unsigned int optlen) 1080 { 1081 unsigned int num_tokens, i, j, k, netmem_num = 0; 1082 struct dmabuf_token *tokens; 1083 int ret = 0, num_frags = 0; 1084 netmem_ref netmems[16]; 1085 1086 if (!sk_is_tcp(sk)) 1087 return -EBADF; 1088 1089 if (optlen % sizeof(*tokens) || 1090 optlen > sizeof(*tokens) * MAX_DONTNEED_TOKENS) 1091 return -EINVAL; 1092 1093 num_tokens = optlen / sizeof(*tokens); 1094 tokens = kvmalloc_array(num_tokens, sizeof(*tokens), GFP_KERNEL); 1095 if (!tokens) 1096 return -ENOMEM; 1097 1098 if (copy_from_sockptr(tokens, optval, optlen)) { 1099 kvfree(tokens); 1100 return -EFAULT; 1101 } 1102 1103 xa_lock_bh(&sk->sk_user_frags); 1104 for (i = 0; i < num_tokens; i++) { 1105 for (j = 0; j < tokens[i].token_count; j++) { 1106 if (++num_frags > MAX_DONTNEED_FRAGS) 1107 goto frag_limit_reached; 1108 1109 netmem_ref netmem = (__force netmem_ref)__xa_erase( 1110 &sk->sk_user_frags, tokens[i].token_start + j); 1111 1112 if (!netmem || WARN_ON_ONCE(!netmem_is_net_iov(netmem))) 1113 continue; 1114 1115 netmems[netmem_num++] = netmem; 1116 if (netmem_num == ARRAY_SIZE(netmems)) { 1117 xa_unlock_bh(&sk->sk_user_frags); 1118 for (k = 0; k < netmem_num; k++) 1119 WARN_ON_ONCE(!napi_pp_put_page(netmems[k])); 1120 netmem_num = 0; 1121 xa_lock_bh(&sk->sk_user_frags); 1122 } 1123 ret++; 1124 } 1125 } 1126 1127 frag_limit_reached: 1128 xa_unlock_bh(&sk->sk_user_frags); 1129 for (k = 0; k < netmem_num; k++) 1130 WARN_ON_ONCE(!napi_pp_put_page(netmems[k])); 1131 1132 kvfree(tokens); 1133 return ret; 1134 } 1135 #endif 1136 1137 void sockopt_lock_sock(struct sock *sk) 1138 { 1139 /* When current->bpf_ctx is set, the setsockopt is called from 1140 * a bpf prog. bpf has ensured the sk lock has been 1141 * acquired before calling setsockopt(). 1142 */ 1143 if (has_current_bpf_ctx()) 1144 return; 1145 1146 lock_sock(sk); 1147 } 1148 EXPORT_SYMBOL(sockopt_lock_sock); 1149 1150 void sockopt_release_sock(struct sock *sk) 1151 { 1152 if (has_current_bpf_ctx()) 1153 return; 1154 1155 release_sock(sk); 1156 } 1157 EXPORT_SYMBOL(sockopt_release_sock); 1158 1159 bool sockopt_ns_capable(struct user_namespace *ns, int cap) 1160 { 1161 return has_current_bpf_ctx() || ns_capable(ns, cap); 1162 } 1163 EXPORT_SYMBOL(sockopt_ns_capable); 1164 1165 bool sockopt_capable(int cap) 1166 { 1167 return has_current_bpf_ctx() || capable(cap); 1168 } 1169 EXPORT_SYMBOL(sockopt_capable); 1170 1171 static int sockopt_validate_clockid(__kernel_clockid_t value) 1172 { 1173 switch (value) { 1174 case CLOCK_REALTIME: 1175 case CLOCK_MONOTONIC: 1176 case CLOCK_TAI: 1177 return 0; 1178 } 1179 return -EINVAL; 1180 } 1181 1182 /* 1183 * This is meant for all protocols to use and covers goings on 1184 * at the socket level. Everything here is generic. 
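 * Typical entry points (descriptive note): the SOL_SOCKET level of
 * setsockopt(2) reaches this via sock_setsockopt() below, while BPF
 * setsockopt hooks may call sk_setsockopt() with the socket lock already
 * held, which is why sockopt_lock_sock()/sockopt_release_sock() above skip
 * locking when current->bpf_ctx is set.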
1185 */ 1186 1187 int sk_setsockopt(struct sock *sk, int level, int optname, 1188 sockptr_t optval, unsigned int optlen) 1189 { 1190 struct so_timestamping timestamping; 1191 struct socket *sock = sk->sk_socket; 1192 struct sock_txtime sk_txtime; 1193 int val; 1194 int valbool; 1195 struct linger ling; 1196 int ret = 0; 1197 1198 /* 1199 * Options without arguments 1200 */ 1201 1202 if (optname == SO_BINDTODEVICE) 1203 return sock_setbindtodevice(sk, optval, optlen); 1204 1205 if (optlen < sizeof(int)) 1206 return -EINVAL; 1207 1208 if (copy_from_sockptr(&val, optval, sizeof(val))) 1209 return -EFAULT; 1210 1211 valbool = val ? 1 : 0; 1212 1213 /* handle options which do not require locking the socket. */ 1214 switch (optname) { 1215 case SO_PRIORITY: 1216 if (sk_set_prio_allowed(sk, val)) { 1217 sock_set_priority(sk, val); 1218 return 0; 1219 } 1220 return -EPERM; 1221 case SO_TYPE: 1222 case SO_PROTOCOL: 1223 case SO_DOMAIN: 1224 case SO_ERROR: 1225 return -ENOPROTOOPT; 1226 #ifdef CONFIG_NET_RX_BUSY_POLL 1227 case SO_BUSY_POLL: 1228 if (val < 0) 1229 return -EINVAL; 1230 WRITE_ONCE(sk->sk_ll_usec, val); 1231 return 0; 1232 case SO_PREFER_BUSY_POLL: 1233 if (valbool && !sockopt_capable(CAP_NET_ADMIN)) 1234 return -EPERM; 1235 WRITE_ONCE(sk->sk_prefer_busy_poll, valbool); 1236 return 0; 1237 case SO_BUSY_POLL_BUDGET: 1238 if (val > READ_ONCE(sk->sk_busy_poll_budget) && 1239 !sockopt_capable(CAP_NET_ADMIN)) 1240 return -EPERM; 1241 if (val < 0 || val > U16_MAX) 1242 return -EINVAL; 1243 WRITE_ONCE(sk->sk_busy_poll_budget, val); 1244 return 0; 1245 #endif 1246 case SO_MAX_PACING_RATE: 1247 { 1248 unsigned long ulval = (val == ~0U) ? ~0UL : (unsigned int)val; 1249 unsigned long pacing_rate; 1250 1251 if (sizeof(ulval) != sizeof(val) && 1252 optlen >= sizeof(ulval) && 1253 copy_from_sockptr(&ulval, optval, sizeof(ulval))) { 1254 return -EFAULT; 1255 } 1256 if (ulval != ~0UL) 1257 cmpxchg(&sk->sk_pacing_status, 1258 SK_PACING_NONE, 1259 SK_PACING_NEEDED); 1260 /* Pairs with READ_ONCE() from sk_getsockopt() */ 1261 WRITE_ONCE(sk->sk_max_pacing_rate, ulval); 1262 pacing_rate = READ_ONCE(sk->sk_pacing_rate); 1263 if (ulval < pacing_rate) 1264 WRITE_ONCE(sk->sk_pacing_rate, ulval); 1265 return 0; 1266 } 1267 case SO_TXREHASH: 1268 if (!sk_is_tcp(sk)) 1269 return -EOPNOTSUPP; 1270 if (val < -1 || val > 1) 1271 return -EINVAL; 1272 if ((u8)val == SOCK_TXREHASH_DEFAULT) 1273 val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash); 1274 /* Paired with READ_ONCE() in tcp_rtx_synack() 1275 * and sk_getsockopt(). 
1276 */ 1277 WRITE_ONCE(sk->sk_txrehash, (u8)val); 1278 return 0; 1279 case SO_PEEK_OFF: 1280 { 1281 int (*set_peek_off)(struct sock *sk, int val); 1282 1283 set_peek_off = READ_ONCE(sock->ops)->set_peek_off; 1284 if (set_peek_off) 1285 ret = set_peek_off(sk, val); 1286 else 1287 ret = -EOPNOTSUPP; 1288 return ret; 1289 } 1290 #ifdef CONFIG_PAGE_POOL 1291 case SO_DEVMEM_DONTNEED: 1292 return sock_devmem_dontneed(sk, optval, optlen); 1293 #endif 1294 case SO_SNDTIMEO_OLD: 1295 case SO_SNDTIMEO_NEW: 1296 return sock_set_timeout(&sk->sk_sndtimeo, optval, 1297 optlen, optname == SO_SNDTIMEO_OLD); 1298 case SO_RCVTIMEO_OLD: 1299 case SO_RCVTIMEO_NEW: 1300 return sock_set_timeout(&sk->sk_rcvtimeo, optval, 1301 optlen, optname == SO_RCVTIMEO_OLD); 1302 } 1303 1304 sockopt_lock_sock(sk); 1305 1306 switch (optname) { 1307 case SO_DEBUG: 1308 if (val && !sockopt_capable(CAP_NET_ADMIN)) 1309 ret = -EACCES; 1310 else 1311 sock_valbool_flag(sk, SOCK_DBG, valbool); 1312 break; 1313 case SO_REUSEADDR: 1314 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); 1315 break; 1316 case SO_REUSEPORT: 1317 if (valbool && !sk_is_inet(sk)) 1318 ret = -EOPNOTSUPP; 1319 else 1320 sk->sk_reuseport = valbool; 1321 break; 1322 case SO_DONTROUTE: 1323 sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool); 1324 sk_dst_reset(sk); 1325 break; 1326 case SO_BROADCAST: 1327 sock_valbool_flag(sk, SOCK_BROADCAST, valbool); 1328 break; 1329 case SO_SNDBUF: 1330 /* Don't error on this BSD doesn't and if you think 1331 * about it this is right. Otherwise apps have to 1332 * play 'guess the biggest size' games. RCVBUF/SNDBUF 1333 * are treated in BSD as hints 1334 */ 1335 val = min_t(u32, val, READ_ONCE(sysctl_wmem_max)); 1336 set_sndbuf: 1337 /* Ensure val * 2 fits into an int, to prevent max_t() 1338 * from treating it as a negative value. 1339 */ 1340 val = min_t(int, val, INT_MAX / 2); 1341 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 1342 WRITE_ONCE(sk->sk_sndbuf, 1343 max_t(int, val * 2, SOCK_MIN_SNDBUF)); 1344 /* Wake up sending tasks if we upped the value. */ 1345 sk->sk_write_space(sk); 1346 break; 1347 1348 case SO_SNDBUFFORCE: 1349 if (!sockopt_capable(CAP_NET_ADMIN)) { 1350 ret = -EPERM; 1351 break; 1352 } 1353 1354 /* No negative values (to prevent underflow, as val will be 1355 * multiplied by 2). 1356 */ 1357 if (val < 0) 1358 val = 0; 1359 goto set_sndbuf; 1360 1361 case SO_RCVBUF: 1362 /* Don't error on this BSD doesn't and if you think 1363 * about it this is right. Otherwise apps have to 1364 * play 'guess the biggest size' games. RCVBUF/SNDBUF 1365 * are treated in BSD as hints 1366 */ 1367 __sock_set_rcvbuf(sk, min_t(u32, val, READ_ONCE(sysctl_rmem_max))); 1368 break; 1369 1370 case SO_RCVBUFFORCE: 1371 if (!sockopt_capable(CAP_NET_ADMIN)) { 1372 ret = -EPERM; 1373 break; 1374 } 1375 1376 /* No negative values (to prevent underflow, as val will be 1377 * multiplied by 2). 
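		 * Hence the max(val, 0) clamp below, mirroring the explicit
		 * val < 0 check in the SO_SNDBUFFORCE case above.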
		 */
		__sock_set_rcvbuf(sk, max(val, 0));
		break;

	case SO_KEEPALIVE:
		if (sk->sk_prot->keepalive)
			sk->sk_prot->keepalive(sk, valbool);
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check_tx = valbool;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff) {
			sock_reset_flag(sk, SOCK_LINGER);
		} else {
			unsigned long t_sec = ling.l_linger;

			if (t_sec >= MAX_SCHEDULE_TIMEOUT / HZ)
				WRITE_ONCE(sk->sk_lingertime, MAX_SCHEDULE_TIMEOUT);
			else
				WRITE_ONCE(sk->sk_lingertime, t_sec * HZ);
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		break;

	case SO_TIMESTAMP_OLD:
	case SO_TIMESTAMP_NEW:
	case SO_TIMESTAMPNS_OLD:
	case SO_TIMESTAMPNS_NEW:
		sock_set_timestamp(sk, optname, valbool);
		break;

	case SO_TIMESTAMPING_NEW:
	case SO_TIMESTAMPING_OLD:
		if (optlen == sizeof(timestamping)) {
			if (copy_from_sockptr(&timestamping, optval,
					      sizeof(timestamping))) {
				ret = -EFAULT;
				break;
			}
		} else {
			memset(&timestamping, 0, sizeof(timestamping));
			timestamping.flags = val;
		}
		ret = sock_set_timestamping(sk, optname, timestamping);
		break;

	case SO_RCVLOWAT:
	{
		int (*set_rcvlowat)(struct sock *sk, int val) = NULL;

		if (val < 0)
			val = INT_MAX;
		if (sock)
			set_rcvlowat = READ_ONCE(sock->ops)->set_rcvlowat;
		if (set_rcvlowat)
			ret = set_rcvlowat(sk, val);
		else
			WRITE_ONCE(sk->sk_rcvlowat, val ?
: 1); 1455 break; 1456 } 1457 case SO_ATTACH_FILTER: { 1458 struct sock_fprog fprog; 1459 1460 ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); 1461 if (!ret) 1462 ret = sk_attach_filter(&fprog, sk); 1463 break; 1464 } 1465 case SO_ATTACH_BPF: 1466 ret = -EINVAL; 1467 if (optlen == sizeof(u32)) { 1468 u32 ufd; 1469 1470 ret = -EFAULT; 1471 if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) 1472 break; 1473 1474 ret = sk_attach_bpf(ufd, sk); 1475 } 1476 break; 1477 1478 case SO_ATTACH_REUSEPORT_CBPF: { 1479 struct sock_fprog fprog; 1480 1481 ret = copy_bpf_fprog_from_user(&fprog, optval, optlen); 1482 if (!ret) 1483 ret = sk_reuseport_attach_filter(&fprog, sk); 1484 break; 1485 } 1486 case SO_ATTACH_REUSEPORT_EBPF: 1487 ret = -EINVAL; 1488 if (optlen == sizeof(u32)) { 1489 u32 ufd; 1490 1491 ret = -EFAULT; 1492 if (copy_from_sockptr(&ufd, optval, sizeof(ufd))) 1493 break; 1494 1495 ret = sk_reuseport_attach_bpf(ufd, sk); 1496 } 1497 break; 1498 1499 case SO_DETACH_REUSEPORT_BPF: 1500 ret = reuseport_detach_prog(sk); 1501 break; 1502 1503 case SO_DETACH_FILTER: 1504 ret = sk_detach_filter(sk); 1505 break; 1506 1507 case SO_LOCK_FILTER: 1508 if (sock_flag(sk, SOCK_FILTER_LOCKED) && !valbool) 1509 ret = -EPERM; 1510 else 1511 sock_valbool_flag(sk, SOCK_FILTER_LOCKED, valbool); 1512 break; 1513 1514 case SO_MARK: 1515 if (!sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && 1516 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { 1517 ret = -EPERM; 1518 break; 1519 } 1520 1521 __sock_set_mark(sk, val); 1522 break; 1523 case SO_RCVMARK: 1524 sock_valbool_flag(sk, SOCK_RCVMARK, valbool); 1525 break; 1526 1527 case SO_RCVPRIORITY: 1528 sock_valbool_flag(sk, SOCK_RCVPRIORITY, valbool); 1529 break; 1530 1531 case SO_RXQ_OVFL: 1532 sock_valbool_flag(sk, SOCK_RXQ_OVFL, valbool); 1533 break; 1534 1535 case SO_WIFI_STATUS: 1536 sock_valbool_flag(sk, SOCK_WIFI_STATUS, valbool); 1537 break; 1538 1539 case SO_NOFCS: 1540 sock_valbool_flag(sk, SOCK_NOFCS, valbool); 1541 break; 1542 1543 case SO_SELECT_ERR_QUEUE: 1544 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); 1545 break; 1546 1547 case SO_PASSCRED: 1548 if (sk_may_scm_recv(sk)) 1549 sk->sk_scm_credentials = valbool; 1550 else 1551 ret = -EOPNOTSUPP; 1552 break; 1553 1554 case SO_PASSSEC: 1555 if (IS_ENABLED(CONFIG_SECURITY_NETWORK) && sk_may_scm_recv(sk)) 1556 sk->sk_scm_security = valbool; 1557 else 1558 ret = -EOPNOTSUPP; 1559 break; 1560 1561 case SO_PASSPIDFD: 1562 if (sk_is_unix(sk)) 1563 sk->sk_scm_pidfd = valbool; 1564 else 1565 ret = -EOPNOTSUPP; 1566 break; 1567 1568 case SO_PASSRIGHTS: 1569 if (sk_is_unix(sk)) 1570 sk->sk_scm_rights = valbool; 1571 else 1572 ret = -EOPNOTSUPP; 1573 break; 1574 1575 case SO_INCOMING_CPU: 1576 reuseport_update_incoming_cpu(sk, val); 1577 break; 1578 1579 case SO_CNX_ADVICE: 1580 if (val == 1) 1581 dst_negative_advice(sk); 1582 break; 1583 1584 case SO_ZEROCOPY: 1585 if (sk->sk_family == PF_INET || sk->sk_family == PF_INET6) { 1586 if (!(sk_is_tcp(sk) || 1587 (sk->sk_type == SOCK_DGRAM && 1588 sk->sk_protocol == IPPROTO_UDP))) 1589 ret = -EOPNOTSUPP; 1590 } else if (sk->sk_family != PF_RDS) { 1591 ret = -EOPNOTSUPP; 1592 } 1593 if (!ret) { 1594 if (val < 0 || val > 1) 1595 ret = -EINVAL; 1596 else 1597 sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool); 1598 } 1599 break; 1600 1601 case SO_TXTIME: 1602 if (optlen != sizeof(struct sock_txtime)) { 1603 ret = -EINVAL; 1604 break; 1605 } else if (copy_from_sockptr(&sk_txtime, optval, 1606 sizeof(struct sock_txtime))) { 1607 ret = -EFAULT; 1608 
break; 1609 } else if (sk_txtime.flags & ~SOF_TXTIME_FLAGS_MASK) { 1610 ret = -EINVAL; 1611 break; 1612 } 1613 /* CLOCK_MONOTONIC is only used by sch_fq, and this packet 1614 * scheduler has enough safe guards. 1615 */ 1616 if (sk_txtime.clockid != CLOCK_MONOTONIC && 1617 !sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) { 1618 ret = -EPERM; 1619 break; 1620 } 1621 1622 ret = sockopt_validate_clockid(sk_txtime.clockid); 1623 if (ret) 1624 break; 1625 1626 sock_valbool_flag(sk, SOCK_TXTIME, true); 1627 sk->sk_clockid = sk_txtime.clockid; 1628 sk->sk_txtime_deadline_mode = 1629 !!(sk_txtime.flags & SOF_TXTIME_DEADLINE_MODE); 1630 sk->sk_txtime_report_errors = 1631 !!(sk_txtime.flags & SOF_TXTIME_REPORT_ERRORS); 1632 break; 1633 1634 case SO_BINDTOIFINDEX: 1635 ret = sock_bindtoindex_locked(sk, val); 1636 break; 1637 1638 case SO_BUF_LOCK: 1639 if (val & ~SOCK_BUF_LOCK_MASK) { 1640 ret = -EINVAL; 1641 break; 1642 } 1643 sk->sk_userlocks = val | (sk->sk_userlocks & 1644 ~SOCK_BUF_LOCK_MASK); 1645 break; 1646 1647 case SO_RESERVE_MEM: 1648 { 1649 int delta; 1650 1651 if (val < 0) { 1652 ret = -EINVAL; 1653 break; 1654 } 1655 1656 delta = val - sk->sk_reserved_mem; 1657 if (delta < 0) 1658 sock_release_reserved_memory(sk, -delta); 1659 else 1660 ret = sock_reserve_memory(sk, delta); 1661 break; 1662 } 1663 1664 default: 1665 ret = -ENOPROTOOPT; 1666 break; 1667 } 1668 sockopt_release_sock(sk); 1669 return ret; 1670 } 1671 1672 int sock_setsockopt(struct socket *sock, int level, int optname, 1673 sockptr_t optval, unsigned int optlen) 1674 { 1675 return sk_setsockopt(sock->sk, level, optname, 1676 optval, optlen); 1677 } 1678 EXPORT_SYMBOL(sock_setsockopt); 1679 1680 static const struct cred *sk_get_peer_cred(struct sock *sk) 1681 { 1682 const struct cred *cred; 1683 1684 spin_lock(&sk->sk_peer_lock); 1685 cred = get_cred(sk->sk_peer_cred); 1686 spin_unlock(&sk->sk_peer_lock); 1687 1688 return cred; 1689 } 1690 1691 static void cred_to_ucred(struct pid *pid, const struct cred *cred, 1692 struct ucred *ucred) 1693 { 1694 ucred->pid = pid_vnr(pid); 1695 ucred->uid = ucred->gid = -1; 1696 if (cred) { 1697 struct user_namespace *current_ns = current_user_ns(); 1698 1699 ucred->uid = from_kuid_munged(current_ns, cred->euid); 1700 ucred->gid = from_kgid_munged(current_ns, cred->egid); 1701 } 1702 } 1703 1704 static int groups_to_user(sockptr_t dst, const struct group_info *src) 1705 { 1706 struct user_namespace *user_ns = current_user_ns(); 1707 int i; 1708 1709 for (i = 0; i < src->ngroups; i++) { 1710 gid_t gid = from_kgid_munged(user_ns, src->gid[i]); 1711 1712 if (copy_to_sockptr_offset(dst, i * sizeof(gid), &gid, sizeof(gid))) 1713 return -EFAULT; 1714 } 1715 1716 return 0; 1717 } 1718 1719 int sk_getsockopt(struct sock *sk, int level, int optname, 1720 sockptr_t optval, sockptr_t optlen) 1721 { 1722 struct socket *sock = sk->sk_socket; 1723 1724 union { 1725 int val; 1726 u64 val64; 1727 unsigned long ulval; 1728 struct linger ling; 1729 struct old_timeval32 tm32; 1730 struct __kernel_old_timeval tm; 1731 struct __kernel_sock_timeval stm; 1732 struct sock_txtime txtime; 1733 struct so_timestamping timestamping; 1734 } v; 1735 1736 int lv = sizeof(int); 1737 int len; 1738 1739 if (copy_from_sockptr(&len, optlen, sizeof(int))) 1740 return -EFAULT; 1741 if (len < 0) 1742 return -EINVAL; 1743 1744 memset(&v, 0, sizeof(v)); 1745 1746 switch (optname) { 1747 case SO_DEBUG: 1748 v.val = sock_flag(sk, SOCK_DBG); 1749 break; 1750 1751 case SO_DONTROUTE: 1752 v.val = sock_flag(sk, 
				      SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = READ_ONCE(sk->sk_sndbuf);
		break;

	case SO_RCVBUF:
		v.val = READ_ONCE(sk->sk_rcvbuf);
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_REUSEPORT:
		v.val = sk->sk_reuseport;
		break;

	case SO_KEEPALIVE:
		v.val = sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check_tx;
		break;

	case SO_PRIORITY:
		v.val = READ_ONCE(sk->sk_priority);
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = READ_ONCE(sk->sk_lingertime) / HZ;
		break;

	case SO_BSDCOMPAT:
		break;

	case SO_TIMESTAMP_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
			!sock_flag(sk, SOCK_TSTAMP_NEW) &&
			!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS_OLD:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && !sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMP_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPNS_NEW:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS) && sock_flag(sk, SOCK_TSTAMP_NEW);
		break;

	case SO_TIMESTAMPING_OLD:
	case SO_TIMESTAMPING_NEW:
		lv = sizeof(v.timestamping);
		/* For the later-added case SO_TIMESTAMPING_NEW: Be strict about only
		 * returning the flags when they were set through the same option.
		 * Don't change the behaviour for the old case SO_TIMESTAMPING_OLD.
1842 */ 1843 if (optname == SO_TIMESTAMPING_OLD || sock_flag(sk, SOCK_TSTAMP_NEW)) { 1844 v.timestamping.flags = READ_ONCE(sk->sk_tsflags); 1845 v.timestamping.bind_phc = READ_ONCE(sk->sk_bind_phc); 1846 } 1847 break; 1848 1849 case SO_RCVTIMEO_OLD: 1850 case SO_RCVTIMEO_NEW: 1851 lv = sock_get_timeout(READ_ONCE(sk->sk_rcvtimeo), &v, 1852 SO_RCVTIMEO_OLD == optname); 1853 break; 1854 1855 case SO_SNDTIMEO_OLD: 1856 case SO_SNDTIMEO_NEW: 1857 lv = sock_get_timeout(READ_ONCE(sk->sk_sndtimeo), &v, 1858 SO_SNDTIMEO_OLD == optname); 1859 break; 1860 1861 case SO_RCVLOWAT: 1862 v.val = READ_ONCE(sk->sk_rcvlowat); 1863 break; 1864 1865 case SO_SNDLOWAT: 1866 v.val = 1; 1867 break; 1868 1869 case SO_PASSCRED: 1870 if (!sk_may_scm_recv(sk)) 1871 return -EOPNOTSUPP; 1872 1873 v.val = sk->sk_scm_credentials; 1874 break; 1875 1876 case SO_PASSPIDFD: 1877 if (!sk_is_unix(sk)) 1878 return -EOPNOTSUPP; 1879 1880 v.val = sk->sk_scm_pidfd; 1881 break; 1882 1883 case SO_PASSRIGHTS: 1884 if (!sk_is_unix(sk)) 1885 return -EOPNOTSUPP; 1886 1887 v.val = sk->sk_scm_rights; 1888 break; 1889 1890 case SO_PEERCRED: 1891 { 1892 struct ucred peercred; 1893 if (len > sizeof(peercred)) 1894 len = sizeof(peercred); 1895 1896 spin_lock(&sk->sk_peer_lock); 1897 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); 1898 spin_unlock(&sk->sk_peer_lock); 1899 1900 if (copy_to_sockptr(optval, &peercred, len)) 1901 return -EFAULT; 1902 goto lenout; 1903 } 1904 1905 case SO_PEERPIDFD: 1906 { 1907 struct pid *peer_pid; 1908 struct file *pidfd_file = NULL; 1909 unsigned int flags = 0; 1910 int pidfd; 1911 1912 if (len > sizeof(pidfd)) 1913 len = sizeof(pidfd); 1914 1915 spin_lock(&sk->sk_peer_lock); 1916 peer_pid = get_pid(sk->sk_peer_pid); 1917 spin_unlock(&sk->sk_peer_lock); 1918 1919 if (!peer_pid) 1920 return -ENODATA; 1921 1922 /* The use of PIDFD_STALE requires stashing of struct pid 1923 * on pidfs with pidfs_register_pid() and only AF_UNIX 1924 * were prepared for this. 1925 */ 1926 if (sk->sk_family == AF_UNIX) 1927 flags = PIDFD_STALE; 1928 1929 pidfd = pidfd_prepare(peer_pid, flags, &pidfd_file); 1930 put_pid(peer_pid); 1931 if (pidfd < 0) 1932 return pidfd; 1933 1934 if (copy_to_sockptr(optval, &pidfd, len) || 1935 copy_to_sockptr(optlen, &len, sizeof(int))) { 1936 put_unused_fd(pidfd); 1937 fput(pidfd_file); 1938 1939 return -EFAULT; 1940 } 1941 1942 fd_install(pidfd, pidfd_file); 1943 return 0; 1944 } 1945 1946 case SO_PEERGROUPS: 1947 { 1948 const struct cred *cred; 1949 int ret, n; 1950 1951 cred = sk_get_peer_cred(sk); 1952 if (!cred) 1953 return -ENODATA; 1954 1955 n = cred->group_info->ngroups; 1956 if (len < n * sizeof(gid_t)) { 1957 len = n * sizeof(gid_t); 1958 put_cred(cred); 1959 return copy_to_sockptr(optlen, &len, sizeof(int)) ? -EFAULT : -ERANGE; 1960 } 1961 len = n * sizeof(gid_t); 1962 1963 ret = groups_to_user(optval, cred->group_info); 1964 put_cred(cred); 1965 if (ret) 1966 return ret; 1967 goto lenout; 1968 } 1969 1970 case SO_PEERNAME: 1971 { 1972 struct sockaddr_storage address; 1973 1974 lv = READ_ONCE(sock->ops)->getname(sock, (struct sockaddr *)&address, 2); 1975 if (lv < 0) 1976 return -ENOTCONN; 1977 if (lv < len) 1978 return -EINVAL; 1979 if (copy_to_sockptr(optval, &address, len)) 1980 return -EFAULT; 1981 goto lenout; 1982 } 1983 1984 /* Dubious BSD thing... Probably nobody even uses it, but 1985 * the UNIX standard wants it for whatever reason... 
-DaveM 1986 */ 1987 case SO_ACCEPTCONN: 1988 v.val = sk->sk_state == TCP_LISTEN; 1989 break; 1990 1991 case SO_PASSSEC: 1992 if (!IS_ENABLED(CONFIG_SECURITY_NETWORK) || !sk_may_scm_recv(sk)) 1993 return -EOPNOTSUPP; 1994 1995 v.val = sk->sk_scm_security; 1996 break; 1997 1998 case SO_PEERSEC: 1999 return security_socket_getpeersec_stream(sock, 2000 optval, optlen, len); 2001 2002 case SO_MARK: 2003 v.val = READ_ONCE(sk->sk_mark); 2004 break; 2005 2006 case SO_RCVMARK: 2007 v.val = sock_flag(sk, SOCK_RCVMARK); 2008 break; 2009 2010 case SO_RCVPRIORITY: 2011 v.val = sock_flag(sk, SOCK_RCVPRIORITY); 2012 break; 2013 2014 case SO_RXQ_OVFL: 2015 v.val = sock_flag(sk, SOCK_RXQ_OVFL); 2016 break; 2017 2018 case SO_WIFI_STATUS: 2019 v.val = sock_flag(sk, SOCK_WIFI_STATUS); 2020 break; 2021 2022 case SO_PEEK_OFF: 2023 if (!READ_ONCE(sock->ops)->set_peek_off) 2024 return -EOPNOTSUPP; 2025 2026 v.val = READ_ONCE(sk->sk_peek_off); 2027 break; 2028 case SO_NOFCS: 2029 v.val = sock_flag(sk, SOCK_NOFCS); 2030 break; 2031 2032 case SO_BINDTODEVICE: 2033 return sock_getbindtodevice(sk, optval, optlen, len); 2034 2035 case SO_GET_FILTER: 2036 len = sk_get_filter(sk, optval, len); 2037 if (len < 0) 2038 return len; 2039 2040 goto lenout; 2041 2042 case SO_LOCK_FILTER: 2043 v.val = sock_flag(sk, SOCK_FILTER_LOCKED); 2044 break; 2045 2046 case SO_BPF_EXTENSIONS: 2047 v.val = bpf_tell_extensions(); 2048 break; 2049 2050 case SO_SELECT_ERR_QUEUE: 2051 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); 2052 break; 2053 2054 #ifdef CONFIG_NET_RX_BUSY_POLL 2055 case SO_BUSY_POLL: 2056 v.val = READ_ONCE(sk->sk_ll_usec); 2057 break; 2058 case SO_PREFER_BUSY_POLL: 2059 v.val = READ_ONCE(sk->sk_prefer_busy_poll); 2060 break; 2061 #endif 2062 2063 case SO_MAX_PACING_RATE: 2064 /* The READ_ONCE() pair with the WRITE_ONCE() in sk_setsockopt() */ 2065 if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) { 2066 lv = sizeof(v.ulval); 2067 v.ulval = READ_ONCE(sk->sk_max_pacing_rate); 2068 } else { 2069 /* 32bit version */ 2070 v.val = min_t(unsigned long, ~0U, 2071 READ_ONCE(sk->sk_max_pacing_rate)); 2072 } 2073 break; 2074 2075 case SO_INCOMING_CPU: 2076 v.val = READ_ONCE(sk->sk_incoming_cpu); 2077 break; 2078 2079 case SO_MEMINFO: 2080 { 2081 u32 meminfo[SK_MEMINFO_VARS]; 2082 2083 sk_get_meminfo(sk, meminfo); 2084 2085 len = min_t(unsigned int, len, sizeof(meminfo)); 2086 if (copy_to_sockptr(optval, &meminfo, len)) 2087 return -EFAULT; 2088 2089 goto lenout; 2090 } 2091 2092 #ifdef CONFIG_NET_RX_BUSY_POLL 2093 case SO_INCOMING_NAPI_ID: 2094 v.val = READ_ONCE(sk->sk_napi_id); 2095 2096 /* aggregate non-NAPI IDs down to 0 */ 2097 if (!napi_id_valid(v.val)) 2098 v.val = 0; 2099 2100 break; 2101 #endif 2102 2103 case SO_COOKIE: 2104 lv = sizeof(u64); 2105 if (len < lv) 2106 return -EINVAL; 2107 v.val64 = sock_gen_cookie(sk); 2108 break; 2109 2110 case SO_ZEROCOPY: 2111 v.val = sock_flag(sk, SOCK_ZEROCOPY); 2112 break; 2113 2114 case SO_TXTIME: 2115 lv = sizeof(v.txtime); 2116 v.txtime.clockid = sk->sk_clockid; 2117 v.txtime.flags |= sk->sk_txtime_deadline_mode ? 2118 SOF_TXTIME_DEADLINE_MODE : 0; 2119 v.txtime.flags |= sk->sk_txtime_report_errors ? 
2120 SOF_TXTIME_REPORT_ERRORS : 0; 2121 break; 2122 2123 case SO_BINDTOIFINDEX: 2124 v.val = READ_ONCE(sk->sk_bound_dev_if); 2125 break; 2126 2127 case SO_NETNS_COOKIE: 2128 lv = sizeof(u64); 2129 if (len != lv) 2130 return -EINVAL; 2131 v.val64 = sock_net(sk)->net_cookie; 2132 break; 2133 2134 case SO_BUF_LOCK: 2135 v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK; 2136 break; 2137 2138 case SO_RESERVE_MEM: 2139 v.val = READ_ONCE(sk->sk_reserved_mem); 2140 break; 2141 2142 case SO_TXREHASH: 2143 if (!sk_is_tcp(sk)) 2144 return -EOPNOTSUPP; 2145 2146 /* Paired with WRITE_ONCE() in sk_setsockopt() */ 2147 v.val = READ_ONCE(sk->sk_txrehash); 2148 break; 2149 2150 default: 2151 /* We implement the SO_SNDLOWAT etc to not be settable 2152 * (1003.1g 7). 2153 */ 2154 return -ENOPROTOOPT; 2155 } 2156 2157 if (len > lv) 2158 len = lv; 2159 if (copy_to_sockptr(optval, &v, len)) 2160 return -EFAULT; 2161 lenout: 2162 if (copy_to_sockptr(optlen, &len, sizeof(int))) 2163 return -EFAULT; 2164 return 0; 2165 } 2166 2167 /* 2168 * Initialize an sk_lock. 2169 * 2170 * (We also register the sk_lock with the lock validator.) 2171 */ 2172 static inline void sock_lock_init(struct sock *sk) 2173 { 2174 sk_owner_clear(sk); 2175 2176 if (sk->sk_kern_sock) 2177 sock_lock_init_class_and_name( 2178 sk, 2179 af_family_kern_slock_key_strings[sk->sk_family], 2180 af_family_kern_slock_keys + sk->sk_family, 2181 af_family_kern_key_strings[sk->sk_family], 2182 af_family_kern_keys + sk->sk_family); 2183 else 2184 sock_lock_init_class_and_name( 2185 sk, 2186 af_family_slock_key_strings[sk->sk_family], 2187 af_family_slock_keys + sk->sk_family, 2188 af_family_key_strings[sk->sk_family], 2189 af_family_keys + sk->sk_family); 2190 } 2191 2192 /* 2193 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet, 2194 * even temporarily, because of RCU lookups. sk_node should also be left as is. 2195 * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end 2196 */ 2197 static void sock_copy(struct sock *nsk, const struct sock *osk) 2198 { 2199 const struct proto *prot = READ_ONCE(osk->sk_prot); 2200 #ifdef CONFIG_SECURITY_NETWORK 2201 void *sptr = nsk->sk_security; 2202 #endif 2203 2204 /* If we move sk_tx_queue_mapping out of the private section, 2205 * we must check if sk_tx_queue_clear() is called after 2206 * sock_copy() in sk_clone_lock(). 
2207 */ 2208 BUILD_BUG_ON(offsetof(struct sock, sk_tx_queue_mapping) < 2209 offsetof(struct sock, sk_dontcopy_begin) || 2210 offsetof(struct sock, sk_tx_queue_mapping) >= 2211 offsetof(struct sock, sk_dontcopy_end)); 2212 2213 memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin)); 2214 2215 unsafe_memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end, 2216 prot->obj_size - offsetof(struct sock, sk_dontcopy_end), 2217 /* alloc is larger than struct, see sk_prot_alloc() */); 2218 2219 #ifdef CONFIG_SECURITY_NETWORK 2220 nsk->sk_security = sptr; 2221 security_sk_clone(osk, nsk); 2222 #endif 2223 } 2224 2225 static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, 2226 int family) 2227 { 2228 struct sock *sk; 2229 struct kmem_cache *slab; 2230 2231 slab = prot->slab; 2232 if (slab != NULL) { 2233 sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); 2234 if (!sk) 2235 return sk; 2236 if (want_init_on_alloc(priority)) 2237 sk_prot_clear_nulls(sk, prot->obj_size); 2238 } else 2239 sk = kmalloc(prot->obj_size, priority); 2240 2241 if (sk != NULL) { 2242 if (security_sk_alloc(sk, family, priority)) 2243 goto out_free; 2244 2245 if (!try_module_get(prot->owner)) 2246 goto out_free_sec; 2247 } 2248 2249 return sk; 2250 2251 out_free_sec: 2252 security_sk_free(sk); 2253 out_free: 2254 if (slab != NULL) 2255 kmem_cache_free(slab, sk); 2256 else 2257 kfree(sk); 2258 return NULL; 2259 } 2260 2261 static void sk_prot_free(struct proto *prot, struct sock *sk) 2262 { 2263 struct kmem_cache *slab; 2264 struct module *owner; 2265 2266 owner = prot->owner; 2267 slab = prot->slab; 2268 2269 cgroup_sk_free(&sk->sk_cgrp_data); 2270 mem_cgroup_sk_free(sk); 2271 security_sk_free(sk); 2272 2273 sk_owner_put(sk); 2274 2275 if (slab != NULL) 2276 kmem_cache_free(slab, sk); 2277 else 2278 kfree(sk); 2279 module_put(owner); 2280 } 2281 2282 /** 2283 * sk_alloc - All socket objects are allocated here 2284 * @net: the applicable net namespace 2285 * @family: protocol family 2286 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 2287 * @prot: struct proto associated with this new sock instance 2288 * @kern: is this to be a kernel socket? 2289 */ 2290 struct sock *sk_alloc(struct net *net, int family, gfp_t priority, 2291 struct proto *prot, int kern) 2292 { 2293 struct sock *sk; 2294 2295 sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family); 2296 if (sk) { 2297 sk->sk_family = family; 2298 /* 2299 * See comment in struct sock definition to understand 2300 * why we need sk_prot_creator -acme 2301 */ 2302 sk->sk_prot = sk->sk_prot_creator = prot; 2303 sk->sk_kern_sock = kern; 2304 sock_lock_init(sk); 2305 sk->sk_net_refcnt = kern ? 0 : 1; 2306 if (likely(sk->sk_net_refcnt)) { 2307 get_net_track(net, &sk->ns_tracker, priority); 2308 sock_inuse_add(net, 1); 2309 } else { 2310 net_passive_inc(net); 2311 __netns_tracker_alloc(net, &sk->ns_tracker, 2312 false, priority); 2313 } 2314 2315 sock_net_set(sk, net); 2316 refcount_set(&sk->sk_wmem_alloc, SK_WMEM_ALLOC_BIAS); 2317 2318 mem_cgroup_sk_alloc(sk); 2319 cgroup_sk_alloc(&sk->sk_cgrp_data); 2320 sock_update_classid(&sk->sk_cgrp_data); 2321 sock_update_netprioidx(&sk->sk_cgrp_data); 2322 sk_tx_queue_clear(sk); 2323 } 2324 2325 return sk; 2326 } 2327 EXPORT_SYMBOL(sk_alloc); 2328 2329 /* Sockets having SOCK_RCU_FREE will call this function after one RCU 2330 * grace period. This is the case for UDP sockets and TCP listeners. 
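 *
 * A protocol opts in by setting the flag on its sockets; an illustrative
 * sketch (not a line taken from this file) would be:
 *
 *	sock_set_flag(sk, SOCK_RCU_FREE);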
2331 */ 2332 static void __sk_destruct(struct rcu_head *head) 2333 { 2334 struct sock *sk = container_of(head, struct sock, sk_rcu); 2335 struct net *net = sock_net(sk); 2336 struct sk_filter *filter; 2337 2338 if (sk->sk_destruct) 2339 sk->sk_destruct(sk); 2340 2341 filter = rcu_dereference_check(sk->sk_filter, 2342 refcount_read(&sk->sk_wmem_alloc) == 0); 2343 if (filter) { 2344 sk_filter_uncharge(sk, filter); 2345 RCU_INIT_POINTER(sk->sk_filter, NULL); 2346 } 2347 2348 sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP); 2349 2350 #ifdef CONFIG_BPF_SYSCALL 2351 bpf_sk_storage_free(sk); 2352 #endif 2353 2354 if (atomic_read(&sk->sk_omem_alloc)) 2355 pr_debug("%s: optmem leakage (%d bytes) detected\n", 2356 __func__, atomic_read(&sk->sk_omem_alloc)); 2357 2358 if (sk->sk_frag.page) { 2359 put_page(sk->sk_frag.page); 2360 sk->sk_frag.page = NULL; 2361 } 2362 2363 /* We do not need to acquire sk->sk_peer_lock, we are the last user. */ 2364 put_cred(sk->sk_peer_cred); 2365 put_pid(sk->sk_peer_pid); 2366 2367 if (likely(sk->sk_net_refcnt)) { 2368 put_net_track(net, &sk->ns_tracker); 2369 } else { 2370 __netns_tracker_free(net, &sk->ns_tracker, false); 2371 net_passive_dec(net); 2372 } 2373 sk_prot_free(sk->sk_prot_creator, sk); 2374 } 2375 2376 void sk_net_refcnt_upgrade(struct sock *sk) 2377 { 2378 struct net *net = sock_net(sk); 2379 2380 WARN_ON_ONCE(sk->sk_net_refcnt); 2381 __netns_tracker_free(net, &sk->ns_tracker, false); 2382 net_passive_dec(net); 2383 sk->sk_net_refcnt = 1; 2384 get_net_track(net, &sk->ns_tracker, GFP_KERNEL); 2385 sock_inuse_add(net, 1); 2386 } 2387 EXPORT_SYMBOL_GPL(sk_net_refcnt_upgrade); 2388 2389 void sk_destruct(struct sock *sk) 2390 { 2391 bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE); 2392 2393 if (rcu_access_pointer(sk->sk_reuseport_cb)) { 2394 reuseport_detach_sock(sk); 2395 use_call_rcu = true; 2396 } 2397 2398 if (use_call_rcu) 2399 call_rcu(&sk->sk_rcu, __sk_destruct); 2400 else 2401 __sk_destruct(&sk->sk_rcu); 2402 } 2403 2404 static void __sk_free(struct sock *sk) 2405 { 2406 if (likely(sk->sk_net_refcnt)) 2407 sock_inuse_add(sock_net(sk), -1); 2408 2409 if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk))) 2410 sock_diag_broadcast_destroy(sk); 2411 else 2412 sk_destruct(sk); 2413 } 2414 2415 void sk_free(struct sock *sk) 2416 { 2417 /* 2418 * We subtract one from sk_wmem_alloc and can know if 2419 * some packets are still in some tx queue. 
2420 * If not null, sock_wfree() will call __sk_free(sk) later 2421 */ 2422 if (refcount_dec_and_test(&sk->sk_wmem_alloc)) 2423 __sk_free(sk); 2424 } 2425 EXPORT_SYMBOL(sk_free); 2426 2427 static void sk_init_common(struct sock *sk) 2428 { 2429 skb_queue_head_init(&sk->sk_receive_queue); 2430 skb_queue_head_init(&sk->sk_write_queue); 2431 skb_queue_head_init(&sk->sk_error_queue); 2432 2433 rwlock_init(&sk->sk_callback_lock); 2434 lockdep_set_class_and_name(&sk->sk_receive_queue.lock, 2435 af_rlock_keys + sk->sk_family, 2436 af_family_rlock_key_strings[sk->sk_family]); 2437 lockdep_set_class_and_name(&sk->sk_write_queue.lock, 2438 af_wlock_keys + sk->sk_family, 2439 af_family_wlock_key_strings[sk->sk_family]); 2440 lockdep_set_class_and_name(&sk->sk_error_queue.lock, 2441 af_elock_keys + sk->sk_family, 2442 af_family_elock_key_strings[sk->sk_family]); 2443 if (sk->sk_kern_sock) 2444 lockdep_set_class_and_name(&sk->sk_callback_lock, 2445 af_kern_callback_keys + sk->sk_family, 2446 af_family_kern_clock_key_strings[sk->sk_family]); 2447 else 2448 lockdep_set_class_and_name(&sk->sk_callback_lock, 2449 af_callback_keys + sk->sk_family, 2450 af_family_clock_key_strings[sk->sk_family]); 2451 } 2452 2453 /** 2454 * sk_clone_lock - clone a socket, and lock its clone 2455 * @sk: the socket to clone 2456 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc) 2457 * 2458 * Caller must unlock socket even in error path (bh_unlock_sock(newsk)) 2459 */ 2460 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) 2461 { 2462 struct proto *prot = READ_ONCE(sk->sk_prot); 2463 struct sk_filter *filter; 2464 bool is_charged = true; 2465 struct sock *newsk; 2466 2467 newsk = sk_prot_alloc(prot, priority, sk->sk_family); 2468 if (!newsk) 2469 goto out; 2470 2471 sock_copy(newsk, sk); 2472 2473 newsk->sk_prot_creator = prot; 2474 2475 /* SANITY */ 2476 if (likely(newsk->sk_net_refcnt)) { 2477 get_net_track(sock_net(newsk), &newsk->ns_tracker, priority); 2478 sock_inuse_add(sock_net(newsk), 1); 2479 } else { 2480 /* Kernel sockets are not elevating the struct net refcount. 2481 * Instead, use a tracker to more easily detect if a layer 2482 * is not properly dismantling its kernel sockets at netns 2483 * destroy time. 
2484 */ 2485 net_passive_inc(sock_net(newsk)); 2486 __netns_tracker_alloc(sock_net(newsk), &newsk->ns_tracker, 2487 false, priority); 2488 } 2489 sk_node_init(&newsk->sk_node); 2490 sock_lock_init(newsk); 2491 bh_lock_sock(newsk); 2492 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL; 2493 newsk->sk_backlog.len = 0; 2494 2495 atomic_set(&newsk->sk_rmem_alloc, 0); 2496 2497 refcount_set(&newsk->sk_wmem_alloc, SK_WMEM_ALLOC_BIAS); 2498 2499 atomic_set(&newsk->sk_omem_alloc, 0); 2500 sk_init_common(newsk); 2501 2502 newsk->sk_dst_cache = NULL; 2503 newsk->sk_dst_pending_confirm = 0; 2504 newsk->sk_wmem_queued = 0; 2505 newsk->sk_forward_alloc = 0; 2506 newsk->sk_reserved_mem = 0; 2507 DEBUG_NET_WARN_ON_ONCE(newsk->sk_drop_counters); 2508 sk_drops_reset(newsk); 2509 newsk->sk_send_head = NULL; 2510 newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; 2511 atomic_set(&newsk->sk_zckey, 0); 2512 2513 sock_reset_flag(newsk, SOCK_DONE); 2514 2515 #ifdef CONFIG_MEMCG 2516 /* sk->sk_memcg will be populated at accept() time */ 2517 newsk->sk_memcg = NULL; 2518 #endif 2519 2520 cgroup_sk_clone(&newsk->sk_cgrp_data); 2521 2522 rcu_read_lock(); 2523 filter = rcu_dereference(sk->sk_filter); 2524 if (filter != NULL) 2525 /* though it's an empty new sock, the charging may fail 2526 * if sysctl_optmem_max was changed between creation of 2527 * original socket and cloning 2528 */ 2529 is_charged = sk_filter_charge(newsk, filter); 2530 RCU_INIT_POINTER(newsk->sk_filter, filter); 2531 rcu_read_unlock(); 2532 2533 if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { 2534 /* We need to make sure that we don't uncharge the new 2535 * socket if we couldn't charge it in the first place 2536 * as otherwise we uncharge the parent's filter. 2537 */ 2538 if (!is_charged) 2539 RCU_INIT_POINTER(newsk->sk_filter, NULL); 2540 2541 goto free; 2542 } 2543 2544 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); 2545 2546 if (bpf_sk_storage_clone(sk, newsk)) 2547 goto free; 2548 2549 /* Clear sk_user_data if parent had the pointer tagged 2550 * as not suitable for copying when cloning. 2551 */ 2552 if (sk_user_data_is_nocopy(newsk)) 2553 newsk->sk_user_data = NULL; 2554 2555 newsk->sk_err = 0; 2556 newsk->sk_err_soft = 0; 2557 newsk->sk_priority = 0; 2558 newsk->sk_incoming_cpu = raw_smp_processor_id(); 2559 2560 /* Before updating sk_refcnt, we must commit prior changes to memory 2561 * (Documentation/RCU/rculist_nulls.rst for details) 2562 */ 2563 smp_wmb(); 2564 refcount_set(&newsk->sk_refcnt, 2); 2565 2566 sk_set_socket(newsk, NULL); 2567 sk_tx_queue_clear(newsk); 2568 RCU_INIT_POINTER(newsk->sk_wq, NULL); 2569 2570 if (newsk->sk_prot->sockets_allocated) 2571 sk_sockets_allocated_inc(newsk); 2572 2573 if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP) 2574 net_enable_timestamp(); 2575 out: 2576 return newsk; 2577 free: 2578 /* It is still raw copy of parent, so invalidate 2579 * destructor and make plain sk_free() 2580 */ 2581 newsk->sk_destruct = NULL; 2582 bh_unlock_sock(newsk); 2583 sk_free(newsk); 2584 newsk = NULL; 2585 goto out; 2586 } 2587 EXPORT_SYMBOL_GPL(sk_clone_lock); 2588 2589 static u32 sk_dst_gso_max_size(struct sock *sk, const struct net_device *dev) 2590 { 2591 bool is_ipv6 = false; 2592 u32 max_size; 2593 2594 #if IS_ENABLED(CONFIG_IPV6) 2595 is_ipv6 = (sk->sk_family == AF_INET6 && 2596 !ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr)); 2597 #endif 2598 /* pairs with the WRITE_ONCE() in netif_set_gso(_ipv4)_max_size() */ 2599 max_size = is_ipv6 ? 
READ_ONCE(dev->gso_max_size) : 2600 READ_ONCE(dev->gso_ipv4_max_size); 2601 if (max_size > GSO_LEGACY_MAX_SIZE && !sk_is_tcp(sk)) 2602 max_size = GSO_LEGACY_MAX_SIZE; 2603 2604 return max_size - (MAX_TCP_HEADER + 1); 2605 } 2606 2607 void sk_setup_caps(struct sock *sk, struct dst_entry *dst) 2608 { 2609 const struct net_device *dev; 2610 u32 max_segs = 1; 2611 2612 rcu_read_lock(); 2613 dev = dst_dev_rcu(dst); 2614 sk->sk_route_caps = dev->features; 2615 if (sk_is_tcp(sk)) { 2616 struct inet_connection_sock *icsk = inet_csk(sk); 2617 2618 sk->sk_route_caps |= NETIF_F_GSO; 2619 icsk->icsk_ack.dst_quick_ack = dst_metric(dst, RTAX_QUICKACK); 2620 } 2621 if (sk->sk_route_caps & NETIF_F_GSO) 2622 sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; 2623 if (unlikely(sk->sk_gso_disabled)) 2624 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 2625 if (sk_can_gso(sk)) { 2626 if (dst->header_len && !xfrm_dst_offload_ok(dst)) { 2627 sk->sk_route_caps &= ~NETIF_F_GSO_MASK; 2628 } else { 2629 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; 2630 sk->sk_gso_max_size = sk_dst_gso_max_size(sk, dev); 2631 /* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */ 2632 max_segs = max_t(u32, READ_ONCE(dev->gso_max_segs), 1); 2633 } 2634 } 2635 sk->sk_gso_max_segs = max_segs; 2636 sk_dst_set(sk, dst); 2637 rcu_read_unlock(); 2638 } 2639 EXPORT_SYMBOL_GPL(sk_setup_caps); 2640 2641 /* 2642 * Simple resource managers for sockets. 2643 */ 2644 2645 2646 /* 2647 * Write buffer destructor automatically called from kfree_skb. 2648 */ 2649 void sock_wfree(struct sk_buff *skb) 2650 { 2651 struct sock *sk = skb->sk; 2652 unsigned int len = skb->truesize; 2653 bool free; 2654 2655 if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) { 2656 if (sock_flag(sk, SOCK_RCU_FREE) && 2657 sk->sk_write_space == sock_def_write_space) { 2658 rcu_read_lock(); 2659 free = refcount_sub_and_test(len, &sk->sk_wmem_alloc); 2660 sock_def_write_space_wfree(sk); 2661 rcu_read_unlock(); 2662 if (unlikely(free)) 2663 __sk_free(sk); 2664 return; 2665 } 2666 2667 /* 2668 * Keep a reference on sk_wmem_alloc, this will be released 2669 * after sk_write_space() call 2670 */ 2671 WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc)); 2672 sk->sk_write_space(sk); 2673 len = 1; 2674 } 2675 /* 2676 * if sk_wmem_alloc reaches 0, we must finish what sk_free() 2677 * could not do because of in-flight packets 2678 */ 2679 if (refcount_sub_and_test(len, &sk->sk_wmem_alloc)) 2680 __sk_free(sk); 2681 } 2682 EXPORT_SYMBOL(sock_wfree); 2683 2684 /* This variant of sock_wfree() is used by TCP, 2685 * since it sets SOCK_USE_WRITE_QUEUE. 2686 */ 2687 void __sock_wfree(struct sk_buff *skb) 2688 { 2689 struct sock *sk = skb->sk; 2690 2691 if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc)) 2692 __sk_free(sk); 2693 } 2694 2695 void skb_set_owner_w(struct sk_buff *skb, struct sock *sk) 2696 { 2697 int old_wmem; 2698 2699 skb_orphan(skb); 2700 #ifdef CONFIG_INET 2701 if (unlikely(!sk_fullsock(sk))) 2702 return skb_set_owner_edemux(skb, sk); 2703 #endif 2704 skb->sk = sk; 2705 skb->destructor = sock_wfree; 2706 skb_set_hash_from_sk(skb, sk); 2707 /* 2708 * We used to take a refcount on sk, but following operation 2709 * is enough to guarantee sk_free() won't free this sock until 2710 * all in-flight packets are completed 2711 */ 2712 __refcount_add(skb->truesize, &sk->sk_wmem_alloc, &old_wmem); 2713 2714 /* (old_wmem == SK_WMEM_ALLOC_BIAS) if no other TX packet for this socket 2715 * is in a host queue (qdisc, NIC queue). 
2716 * Set skb->ooo_okay so that netdev_pick_tx() can choose a TX queue 2717 * based on XPS for better performance. 2718 * Otherwise clear ooo_okay to not risk Out Of Order delivery. 2719 */ 2720 skb->ooo_okay = (old_wmem == SK_WMEM_ALLOC_BIAS); 2721 } 2722 EXPORT_SYMBOL(skb_set_owner_w); 2723 2724 static bool can_skb_orphan_partial(const struct sk_buff *skb) 2725 { 2726 /* Drivers depend on in-order delivery for crypto offload, 2727 * partial orphan breaks out-of-order-OK logic. 2728 */ 2729 if (skb_is_decrypted(skb)) 2730 return false; 2731 2732 return (skb->destructor == sock_wfree || 2733 (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree)); 2734 } 2735 2736 /* This helper is used by netem, as it can hold packets in its 2737 * delay queue. We want to allow the owner socket to send more 2738 * packets, as if they were already TX completed by a typical driver. 2739 * But we also want to keep skb->sk set because some packet schedulers 2740 * rely on it (sch_fq for example). 2741 */ 2742 void skb_orphan_partial(struct sk_buff *skb) 2743 { 2744 if (skb_is_tcp_pure_ack(skb)) 2745 return; 2746 2747 if (can_skb_orphan_partial(skb) && skb_set_owner_sk_safe(skb, skb->sk)) 2748 return; 2749 2750 skb_orphan(skb); 2751 } 2752 EXPORT_SYMBOL(skb_orphan_partial); 2753 2754 /* 2755 * Read buffer destructor automatically called from kfree_skb. 2756 */ 2757 void sock_rfree(struct sk_buff *skb) 2758 { 2759 struct sock *sk = skb->sk; 2760 unsigned int len = skb->truesize; 2761 2762 atomic_sub(len, &sk->sk_rmem_alloc); 2763 sk_mem_uncharge(sk, len); 2764 } 2765 EXPORT_SYMBOL(sock_rfree); 2766 2767 /* 2768 * Buffer destructor for skbs that are not used directly in read or write 2769 * path, e.g. for error handler skbs. Automatically called from kfree_skb. 2770 */ 2771 void sock_efree(struct sk_buff *skb) 2772 { 2773 sock_put(skb->sk); 2774 } 2775 EXPORT_SYMBOL(sock_efree); 2776 2777 /* Buffer destructor for prefetch/receive path where reference count may 2778 * not be held, e.g. for listen sockets. 2779 */ 2780 #ifdef CONFIG_INET 2781 void sock_pfree(struct sk_buff *skb) 2782 { 2783 struct sock *sk = skb->sk; 2784 2785 if (!sk_is_refcounted(sk)) 2786 return; 2787 2788 if (sk->sk_state == TCP_NEW_SYN_RECV && inet_reqsk(sk)->syncookie) { 2789 inet_reqsk(sk)->rsk_listener = NULL; 2790 reqsk_free(inet_reqsk(sk)); 2791 return; 2792 } 2793 2794 sock_gen_put(sk); 2795 } 2796 EXPORT_SYMBOL(sock_pfree); 2797 #endif /* CONFIG_INET */ 2798 2799 /* 2800 * Allocate a skb from the socket's send buffer. 
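 *
 * A minimal usage sketch (hypothetical caller; @len and the GFP flags are
 * examples only). sock_wmalloc() returns NULL when the socket is already
 * over its send buffer and @force is not set:
 *
 *	skb = sock_wmalloc(sk, len, 0, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOBUFS;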
2801 */ 2802 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, 2803 gfp_t priority) 2804 { 2805 if (force || 2806 refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) { 2807 struct sk_buff *skb = alloc_skb(size, priority); 2808 2809 if (skb) { 2810 skb_set_owner_w(skb, sk); 2811 return skb; 2812 } 2813 } 2814 return NULL; 2815 } 2816 EXPORT_SYMBOL(sock_wmalloc); 2817 2818 static void sock_ofree(struct sk_buff *skb) 2819 { 2820 struct sock *sk = skb->sk; 2821 2822 atomic_sub(skb->truesize, &sk->sk_omem_alloc); 2823 } 2824 2825 struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, 2826 gfp_t priority) 2827 { 2828 struct sk_buff *skb; 2829 2830 /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */ 2831 if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) > 2832 READ_ONCE(sock_net(sk)->core.sysctl_optmem_max)) 2833 return NULL; 2834 2835 skb = alloc_skb(size, priority); 2836 if (!skb) 2837 return NULL; 2838 2839 atomic_add(skb->truesize, &sk->sk_omem_alloc); 2840 skb->sk = sk; 2841 skb->destructor = sock_ofree; 2842 return skb; 2843 } 2844 2845 /* 2846 * Allocate a memory block from the socket's option memory buffer. 2847 */ 2848 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 2849 { 2850 int optmem_max = READ_ONCE(sock_net(sk)->core.sysctl_optmem_max); 2851 2852 if ((unsigned int)size <= optmem_max && 2853 atomic_read(&sk->sk_omem_alloc) + size < optmem_max) { 2854 void *mem; 2855 /* First do the add, to avoid the race if kmalloc 2856 * might sleep. 2857 */ 2858 atomic_add(size, &sk->sk_omem_alloc); 2859 mem = kmalloc(size, priority); 2860 if (mem) 2861 return mem; 2862 atomic_sub(size, &sk->sk_omem_alloc); 2863 } 2864 return NULL; 2865 } 2866 EXPORT_SYMBOL(sock_kmalloc); 2867 2868 /* 2869 * Duplicate the input "src" memory block using the socket's 2870 * option memory buffer. 2871 */ 2872 void *sock_kmemdup(struct sock *sk, const void *src, 2873 int size, gfp_t priority) 2874 { 2875 void *mem; 2876 2877 mem = sock_kmalloc(sk, size, priority); 2878 if (mem) 2879 memcpy(mem, src, size); 2880 return mem; 2881 } 2882 EXPORT_SYMBOL(sock_kmemdup); 2883 2884 /* Free an option memory block. Note, we actually want the inline 2885 * here as this allows gcc to detect the nullify and fold away the 2886 * condition entirely. 2887 */ 2888 static inline void __sock_kfree_s(struct sock *sk, void *mem, int size, 2889 const bool nullify) 2890 { 2891 if (WARN_ON_ONCE(!mem)) 2892 return; 2893 if (nullify) 2894 kfree_sensitive(mem); 2895 else 2896 kfree(mem); 2897 atomic_sub(size, &sk->sk_omem_alloc); 2898 } 2899 2900 void sock_kfree_s(struct sock *sk, void *mem, int size) 2901 { 2902 __sock_kfree_s(sk, mem, size, false); 2903 } 2904 EXPORT_SYMBOL(sock_kfree_s); 2905 2906 void sock_kzfree_s(struct sock *sk, void *mem, int size) 2907 { 2908 __sock_kfree_s(sk, mem, size, true); 2909 } 2910 EXPORT_SYMBOL(sock_kzfree_s); 2911 2912 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock. 2913 I think, these locks should be removed for datagram sockets. 
2914 */ 2915 static long sock_wait_for_wmem(struct sock *sk, long timeo) 2916 { 2917 DEFINE_WAIT(wait); 2918 2919 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2920 for (;;) { 2921 if (!timeo) 2922 break; 2923 if (signal_pending(current)) 2924 break; 2925 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 2926 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 2927 if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) 2928 break; 2929 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) 2930 break; 2931 if (READ_ONCE(sk->sk_err)) 2932 break; 2933 timeo = schedule_timeout(timeo); 2934 } 2935 finish_wait(sk_sleep(sk), &wait); 2936 return timeo; 2937 } 2938 2939 2940 /* 2941 * Generic send/receive buffer handlers 2942 */ 2943 2944 struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, 2945 unsigned long data_len, int noblock, 2946 int *errcode, int max_page_order) 2947 { 2948 struct sk_buff *skb; 2949 long timeo; 2950 int err; 2951 2952 timeo = sock_sndtimeo(sk, noblock); 2953 for (;;) { 2954 err = sock_error(sk); 2955 if (err != 0) 2956 goto failure; 2957 2958 err = -EPIPE; 2959 if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) 2960 goto failure; 2961 2962 if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf)) 2963 break; 2964 2965 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 2966 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 2967 err = -EAGAIN; 2968 if (!timeo) 2969 goto failure; 2970 if (signal_pending(current)) 2971 goto interrupted; 2972 timeo = sock_wait_for_wmem(sk, timeo); 2973 } 2974 skb = alloc_skb_with_frags(header_len, data_len, max_page_order, 2975 errcode, sk->sk_allocation); 2976 if (skb) 2977 skb_set_owner_w(skb, sk); 2978 return skb; 2979 2980 interrupted: 2981 err = sock_intr_errno(timeo); 2982 failure: 2983 *errcode = err; 2984 return NULL; 2985 } 2986 EXPORT_SYMBOL(sock_alloc_send_pskb); 2987 2988 int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg, 2989 struct sockcm_cookie *sockc) 2990 { 2991 u32 tsflags; 2992 2993 BUILD_BUG_ON(SOF_TIMESTAMPING_LAST == (1 << 31)); 2994 2995 switch (cmsg->cmsg_type) { 2996 case SO_MARK: 2997 if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) && 2998 !ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) 2999 return -EPERM; 3000 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3001 return -EINVAL; 3002 sockc->mark = *(u32 *)CMSG_DATA(cmsg); 3003 break; 3004 case SO_TIMESTAMPING_OLD: 3005 case SO_TIMESTAMPING_NEW: 3006 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3007 return -EINVAL; 3008 3009 tsflags = *(u32 *)CMSG_DATA(cmsg); 3010 if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK) 3011 return -EINVAL; 3012 3013 sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; 3014 sockc->tsflags |= tsflags; 3015 break; 3016 case SCM_TXTIME: 3017 if (!sock_flag(sk, SOCK_TXTIME)) 3018 return -EINVAL; 3019 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u64))) 3020 return -EINVAL; 3021 sockc->transmit_time = get_unaligned((u64 *)CMSG_DATA(cmsg)); 3022 break; 3023 case SCM_TS_OPT_ID: 3024 if (sk_is_tcp(sk)) 3025 return -EINVAL; 3026 tsflags = READ_ONCE(sk->sk_tsflags); 3027 if (!(tsflags & SOF_TIMESTAMPING_OPT_ID)) 3028 return -EINVAL; 3029 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3030 return -EINVAL; 3031 sockc->ts_opt_id = *(u32 *)CMSG_DATA(cmsg); 3032 sockc->tsflags |= SOCKCM_FLAG_TS_OPT_ID; 3033 break; 3034 /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. 
*/ 3035 case SCM_RIGHTS: 3036 case SCM_CREDENTIALS: 3037 break; 3038 case SO_PRIORITY: 3039 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3040 return -EINVAL; 3041 if (!sk_set_prio_allowed(sk, *(u32 *)CMSG_DATA(cmsg))) 3042 return -EPERM; 3043 sockc->priority = *(u32 *)CMSG_DATA(cmsg); 3044 break; 3045 case SCM_DEVMEM_DMABUF: 3046 if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) 3047 return -EINVAL; 3048 sockc->dmabuf_id = *(u32 *)CMSG_DATA(cmsg); 3049 break; 3050 default: 3051 return -EINVAL; 3052 } 3053 return 0; 3054 } 3055 EXPORT_SYMBOL(__sock_cmsg_send); 3056 3057 int sock_cmsg_send(struct sock *sk, struct msghdr *msg, 3058 struct sockcm_cookie *sockc) 3059 { 3060 struct cmsghdr *cmsg; 3061 int ret; 3062 3063 for_each_cmsghdr(cmsg, msg) { 3064 if (!CMSG_OK(msg, cmsg)) 3065 return -EINVAL; 3066 if (cmsg->cmsg_level != SOL_SOCKET) 3067 continue; 3068 ret = __sock_cmsg_send(sk, cmsg, sockc); 3069 if (ret) 3070 return ret; 3071 } 3072 return 0; 3073 } 3074 EXPORT_SYMBOL(sock_cmsg_send); 3075 3076 static void sk_enter_memory_pressure(struct sock *sk) 3077 { 3078 if (!sk->sk_prot->enter_memory_pressure) 3079 return; 3080 3081 sk->sk_prot->enter_memory_pressure(sk); 3082 } 3083 3084 static void sk_leave_memory_pressure(struct sock *sk) 3085 { 3086 if (sk->sk_prot->leave_memory_pressure) { 3087 INDIRECT_CALL_INET_1(sk->sk_prot->leave_memory_pressure, 3088 tcp_leave_memory_pressure, sk); 3089 } else { 3090 unsigned long *memory_pressure = sk->sk_prot->memory_pressure; 3091 3092 if (memory_pressure && READ_ONCE(*memory_pressure)) 3093 WRITE_ONCE(*memory_pressure, 0); 3094 } 3095 } 3096 3097 DEFINE_STATIC_KEY_FALSE(net_high_order_alloc_disable_key); 3098 3099 /** 3100 * skb_page_frag_refill - check that a page_frag contains enough room 3101 * @sz: minimum size of the fragment we want to get 3102 * @pfrag: pointer to page_frag 3103 * @gfp: priority for memory allocation 3104 * 3105 * Note: While this allocator tries to use high order pages, there is 3106 * no guarantee that allocations succeed. Therefore, @sz MUST be 3107 * less or equal than PAGE_SIZE. 
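 *
 * A typical caller pattern, sketched (the @copy amount and the error path
 * belong to the caller):
 *
 *	if (!skb_page_frag_refill(copy, pfrag, sk->sk_allocation))
 *		goto wait_for_memory;
 *	(copy up to @copy bytes into pfrag->page at pfrag->offset,
 *	 then advance pfrag->offset by the amount actually used)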
3108 */ 3109 bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) 3110 { 3111 if (pfrag->page) { 3112 if (page_ref_count(pfrag->page) == 1) { 3113 pfrag->offset = 0; 3114 return true; 3115 } 3116 if (pfrag->offset + sz <= pfrag->size) 3117 return true; 3118 put_page(pfrag->page); 3119 } 3120 3121 pfrag->offset = 0; 3122 if (SKB_FRAG_PAGE_ORDER && 3123 !static_branch_unlikely(&net_high_order_alloc_disable_key)) { 3124 /* Avoid direct reclaim but allow kswapd to wake */ 3125 pfrag->page = alloc_pages((gfp & ~__GFP_DIRECT_RECLAIM) | 3126 __GFP_COMP | __GFP_NOWARN | 3127 __GFP_NORETRY, 3128 SKB_FRAG_PAGE_ORDER); 3129 if (likely(pfrag->page)) { 3130 pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER; 3131 return true; 3132 } 3133 } 3134 pfrag->page = alloc_page(gfp); 3135 if (likely(pfrag->page)) { 3136 pfrag->size = PAGE_SIZE; 3137 return true; 3138 } 3139 return false; 3140 } 3141 EXPORT_SYMBOL(skb_page_frag_refill); 3142 3143 bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag) 3144 { 3145 if (likely(skb_page_frag_refill(32U, pfrag, sk->sk_allocation))) 3146 return true; 3147 3148 sk_enter_memory_pressure(sk); 3149 sk_stream_moderate_sndbuf(sk); 3150 return false; 3151 } 3152 EXPORT_SYMBOL(sk_page_frag_refill); 3153 3154 void __lock_sock(struct sock *sk) 3155 __releases(&sk->sk_lock.slock) 3156 __acquires(&sk->sk_lock.slock) 3157 { 3158 DEFINE_WAIT(wait); 3159 3160 for (;;) { 3161 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, 3162 TASK_UNINTERRUPTIBLE); 3163 spin_unlock_bh(&sk->sk_lock.slock); 3164 schedule(); 3165 spin_lock_bh(&sk->sk_lock.slock); 3166 if (!sock_owned_by_user(sk)) 3167 break; 3168 } 3169 finish_wait(&sk->sk_lock.wq, &wait); 3170 } 3171 3172 void __release_sock(struct sock *sk) 3173 __releases(&sk->sk_lock.slock) 3174 __acquires(&sk->sk_lock.slock) 3175 { 3176 struct sk_buff *skb, *next; 3177 int nb = 0; 3178 3179 while ((skb = sk->sk_backlog.head) != NULL) { 3180 sk->sk_backlog.head = sk->sk_backlog.tail = NULL; 3181 3182 spin_unlock_bh(&sk->sk_lock.slock); 3183 3184 while (1) { 3185 next = skb->next; 3186 prefetch(next); 3187 DEBUG_NET_WARN_ON_ONCE(skb_dst_is_noref(skb)); 3188 skb_mark_not_on_list(skb); 3189 sk_backlog_rcv(sk, skb); 3190 3191 skb = next; 3192 if (!skb) 3193 break; 3194 3195 if (!(++nb & 15)) 3196 cond_resched(); 3197 } 3198 3199 spin_lock_bh(&sk->sk_lock.slock); 3200 } 3201 3202 /* 3203 * Doing the zeroing here guarantee we can not loop forever 3204 * while a wild producer attempts to flood us. 3205 */ 3206 sk->sk_backlog.len = 0; 3207 } 3208 3209 void __sk_flush_backlog(struct sock *sk) 3210 { 3211 spin_lock_bh(&sk->sk_lock.slock); 3212 __release_sock(sk); 3213 3214 if (sk->sk_prot->release_cb) 3215 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb, 3216 tcp_release_cb, sk); 3217 3218 spin_unlock_bh(&sk->sk_lock.slock); 3219 } 3220 EXPORT_SYMBOL_GPL(__sk_flush_backlog); 3221 3222 /** 3223 * sk_wait_data - wait for data to arrive at sk_receive_queue 3224 * @sk: sock to wait on 3225 * @timeo: for how long 3226 * @skb: last skb seen on sk_receive_queue 3227 * 3228 * Now socket state including sk->sk_err is changed only under lock, 3229 * hence we may omit checks after joining wait queue. 3230 * We check receive queue before schedule() only as optimization; 3231 * it is very likely that release_sock() added new data. 
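 *
 * A sketch of the usual receive-side loop (hypothetical caller, socket
 * already locked; passing a NULL @skb waits until the queue is non-empty):
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (!(skb = skb_peek_tail(&sk->sk_receive_queue))) {
 *		if (!timeo || signal_pending(current))
 *			break;
 *		sk_wait_data(sk, &timeo, NULL);
 *	}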
3232  */
3233 int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
3234 {
3235 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
3236 	int rc;
3237 
3238 	add_wait_queue(sk_sleep(sk), &wait);
3239 	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3240 	rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb, &wait);
3241 	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
3242 	remove_wait_queue(sk_sleep(sk), &wait);
3243 	return rc;
3244 }
3245 EXPORT_SYMBOL(sk_wait_data);
3246 
3247 /**
3248  * __sk_mem_raise_allocated - increase memory_allocated
3249  * @sk: socket
3250  * @size: memory size to allocate
3251  * @amt: pages to allocate
3252  * @kind: allocation type
3253  *
3254  * Similar to __sk_mem_schedule(), but does not update sk_forward_alloc.
3255  *
3256  * Unlike the globally shared limits among the sockets under the same
3257  * protocol, consuming the budget of a memcg won't have a direct effect
3258  * on the others. So be optimistic about memcg's tolerance, and leave it
3259  * to the callers to decide whether or not to raise allocated, through
3260  * sk_under_memory_pressure() or its variants.
3261  */
3262 int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
3263 {
3264 	bool memcg_enabled = false, charged = false;
3265 	struct proto *prot = sk->sk_prot;
3266 	long allocated;
3267 
3268 	sk_memory_allocated_add(sk, amt);
3269 	allocated = sk_memory_allocated(sk);
3270 
3271 	if (mem_cgroup_sk_enabled(sk)) {
3272 		memcg_enabled = true;
3273 		charged = mem_cgroup_sk_charge(sk, amt, gfp_memcg_charge());
3274 		if (!charged)
3275 			goto suppress_allocation;
3276 	}
3277 
3278 	/* Under limit. */
3279 	if (allocated <= sk_prot_mem_limits(sk, 0)) {
3280 		sk_leave_memory_pressure(sk);
3281 		return 1;
3282 	}
3283 
3284 	/* Under pressure. */
3285 	if (allocated > sk_prot_mem_limits(sk, 1))
3286 		sk_enter_memory_pressure(sk);
3287 
3288 	/* Over hard limit. */
3289 	if (allocated > sk_prot_mem_limits(sk, 2))
3290 		goto suppress_allocation;
3291 
3292 	/* Guarantee minimum buffer size under pressure (either global
3293 	 * or memcg) to make sure features described in RFC 7323 (TCP
3294 	 * Extensions for High Performance) work properly.
3295 	 *
3296 	 * This rule does NOT stand when the allocation exceeds the global
3297 	 * or memcg hard limit, or else a DoS attack could take place by
3298 	 * spawning lots of sockets whose usage stays under the minimum buffer size.
3299 	 */
3300 	if (kind == SK_MEM_RECV) {
3301 		if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
3302 			return 1;
3303 
3304 	} else { /* SK_MEM_SEND */
3305 		int wmem0 = sk_get_wmem0(sk, prot);
3306 
3307 		if (sk->sk_type == SOCK_STREAM) {
3308 			if (sk->sk_wmem_queued < wmem0)
3309 				return 1;
3310 		} else if (refcount_read(&sk->sk_wmem_alloc) < wmem0) {
3311 			return 1;
3312 		}
3313 	}
3314 
3315 	if (sk_has_memory_pressure(sk)) {
3316 		u64 alloc;
3317 
3318 		/* The following 'average' heuristic is within the
3319 		 * scope of global accounting, so it only makes
3320 		 * sense for global memory pressure.
3321 		 */
3322 		if (!sk_under_global_memory_pressure(sk))
3323 			return 1;
3324 
3325 		/* Try to be fair among all the sockets under global
3326 		 * pressure by allowing the ones whose usage is below
3327 		 * average to grow.
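 *
 * Expressed as the inequality checked below (a paraphrase with made-up
 * names, not extra logic): raising is allowed while
 *
 *	this_socket_pages * nr_sockets < hard_limit_pages
 *
 * i.e. while this socket's footprint stays below the average share of
 * the protocol's hard limit.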
3328 */ 3329 alloc = sk_sockets_allocated_read_positive(sk); 3330 if (sk_prot_mem_limits(sk, 2) > alloc * 3331 sk_mem_pages(sk->sk_wmem_queued + 3332 atomic_read(&sk->sk_rmem_alloc) + 3333 sk->sk_forward_alloc)) 3334 return 1; 3335 } 3336 3337 suppress_allocation: 3338 3339 if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) { 3340 sk_stream_moderate_sndbuf(sk); 3341 3342 /* Fail only if socket is _under_ its sndbuf. 3343 * In this case we cannot block, so that we have to fail. 3344 */ 3345 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) { 3346 /* Force charge with __GFP_NOFAIL */ 3347 if (memcg_enabled && !charged) 3348 mem_cgroup_sk_charge(sk, amt, 3349 gfp_memcg_charge() | __GFP_NOFAIL); 3350 return 1; 3351 } 3352 } 3353 3354 trace_sock_exceed_buf_limit(sk, prot, allocated, kind); 3355 3356 sk_memory_allocated_sub(sk, amt); 3357 3358 if (charged) 3359 mem_cgroup_sk_uncharge(sk, amt); 3360 3361 return 0; 3362 } 3363 3364 /** 3365 * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated 3366 * @sk: socket 3367 * @size: memory size to allocate 3368 * @kind: allocation type 3369 * 3370 * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means 3371 * rmem allocation. This function assumes that protocols which have 3372 * memory_pressure use sk_wmem_queued as write buffer accounting. 3373 */ 3374 int __sk_mem_schedule(struct sock *sk, int size, int kind) 3375 { 3376 int ret, amt = sk_mem_pages(size); 3377 3378 sk_forward_alloc_add(sk, amt << PAGE_SHIFT); 3379 ret = __sk_mem_raise_allocated(sk, size, amt, kind); 3380 if (!ret) 3381 sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT)); 3382 return ret; 3383 } 3384 EXPORT_SYMBOL(__sk_mem_schedule); 3385 3386 /** 3387 * __sk_mem_reduce_allocated - reclaim memory_allocated 3388 * @sk: socket 3389 * @amount: number of quanta 3390 * 3391 * Similar to __sk_mem_reclaim(), but does not update sk_forward_alloc 3392 */ 3393 void __sk_mem_reduce_allocated(struct sock *sk, int amount) 3394 { 3395 sk_memory_allocated_sub(sk, amount); 3396 3397 if (mem_cgroup_sk_enabled(sk)) 3398 mem_cgroup_sk_uncharge(sk, amount); 3399 3400 if (sk_under_global_memory_pressure(sk) && 3401 (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) 3402 sk_leave_memory_pressure(sk); 3403 } 3404 3405 /** 3406 * __sk_mem_reclaim - reclaim sk_forward_alloc and memory_allocated 3407 * @sk: socket 3408 * @amount: number of bytes (rounded down to a PAGE_SIZE multiple) 3409 */ 3410 void __sk_mem_reclaim(struct sock *sk, int amount) 3411 { 3412 amount >>= PAGE_SHIFT; 3413 sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT)); 3414 __sk_mem_reduce_allocated(sk, amount); 3415 } 3416 EXPORT_SYMBOL(__sk_mem_reclaim); 3417 3418 int sk_set_peek_off(struct sock *sk, int val) 3419 { 3420 WRITE_ONCE(sk->sk_peek_off, val); 3421 return 0; 3422 } 3423 EXPORT_SYMBOL_GPL(sk_set_peek_off); 3424 3425 /* 3426 * Set of default routines for initialising struct proto_ops when 3427 * the protocol does not support a particular function. In certain 3428 * cases where it makes no sense for a protocol to have a "do nothing" 3429 * function, some default processing is provided. 
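 *
 * For illustration only, a hypothetical connectionless protocol (the
 * foo_ prefix and PF_FOO are made up) might wire the entries it does not
 * support to these stubs:
 *
 *	static const struct proto_ops foo_dgram_ops = {
 *		.family		= PF_FOO,
 *		.owner		= THIS_MODULE,
 *		.listen		= sock_no_listen,
 *		.accept		= sock_no_accept,
 *		.shutdown	= sock_no_shutdown,
 *		.mmap		= sock_no_mmap,
 *		...
 *	};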
3430 */ 3431 3432 int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len) 3433 { 3434 return -EOPNOTSUPP; 3435 } 3436 EXPORT_SYMBOL(sock_no_bind); 3437 3438 int sock_no_connect(struct socket *sock, struct sockaddr *saddr, 3439 int len, int flags) 3440 { 3441 return -EOPNOTSUPP; 3442 } 3443 EXPORT_SYMBOL(sock_no_connect); 3444 3445 int sock_no_socketpair(struct socket *sock1, struct socket *sock2) 3446 { 3447 return -EOPNOTSUPP; 3448 } 3449 EXPORT_SYMBOL(sock_no_socketpair); 3450 3451 int sock_no_accept(struct socket *sock, struct socket *newsock, 3452 struct proto_accept_arg *arg) 3453 { 3454 return -EOPNOTSUPP; 3455 } 3456 EXPORT_SYMBOL(sock_no_accept); 3457 3458 int sock_no_getname(struct socket *sock, struct sockaddr *saddr, 3459 int peer) 3460 { 3461 return -EOPNOTSUPP; 3462 } 3463 EXPORT_SYMBOL(sock_no_getname); 3464 3465 int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 3466 { 3467 return -EOPNOTSUPP; 3468 } 3469 EXPORT_SYMBOL(sock_no_ioctl); 3470 3471 int sock_no_listen(struct socket *sock, int backlog) 3472 { 3473 return -EOPNOTSUPP; 3474 } 3475 EXPORT_SYMBOL(sock_no_listen); 3476 3477 int sock_no_shutdown(struct socket *sock, int how) 3478 { 3479 return -EOPNOTSUPP; 3480 } 3481 EXPORT_SYMBOL(sock_no_shutdown); 3482 3483 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len) 3484 { 3485 return -EOPNOTSUPP; 3486 } 3487 EXPORT_SYMBOL(sock_no_sendmsg); 3488 3489 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len) 3490 { 3491 return -EOPNOTSUPP; 3492 } 3493 EXPORT_SYMBOL(sock_no_sendmsg_locked); 3494 3495 int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len, 3496 int flags) 3497 { 3498 return -EOPNOTSUPP; 3499 } 3500 EXPORT_SYMBOL(sock_no_recvmsg); 3501 3502 int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma) 3503 { 3504 /* Mirror missing mmap method error code */ 3505 return -ENODEV; 3506 } 3507 EXPORT_SYMBOL(sock_no_mmap); 3508 3509 /* 3510 * When a file is received (via SCM_RIGHTS, etc), we must bump the 3511 * various sock-based usage counts. 
3512 */ 3513 void __receive_sock(struct file *file) 3514 { 3515 struct socket *sock; 3516 3517 sock = sock_from_file(file); 3518 if (sock) { 3519 sock_update_netprioidx(&sock->sk->sk_cgrp_data); 3520 sock_update_classid(&sock->sk->sk_cgrp_data); 3521 } 3522 } 3523 3524 /* 3525 * Default Socket Callbacks 3526 */ 3527 3528 static void sock_def_wakeup(struct sock *sk) 3529 { 3530 struct socket_wq *wq; 3531 3532 rcu_read_lock(); 3533 wq = rcu_dereference(sk->sk_wq); 3534 if (skwq_has_sleeper(wq)) 3535 wake_up_interruptible_all(&wq->wait); 3536 rcu_read_unlock(); 3537 } 3538 3539 static void sock_def_error_report(struct sock *sk) 3540 { 3541 struct socket_wq *wq; 3542 3543 rcu_read_lock(); 3544 wq = rcu_dereference(sk->sk_wq); 3545 if (skwq_has_sleeper(wq)) 3546 wake_up_interruptible_poll(&wq->wait, EPOLLERR); 3547 sk_wake_async_rcu(sk, SOCK_WAKE_IO, POLL_ERR); 3548 rcu_read_unlock(); 3549 } 3550 3551 void sock_def_readable(struct sock *sk) 3552 { 3553 struct socket_wq *wq; 3554 3555 trace_sk_data_ready(sk); 3556 3557 rcu_read_lock(); 3558 wq = rcu_dereference(sk->sk_wq); 3559 if (skwq_has_sleeper(wq)) 3560 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI | 3561 EPOLLRDNORM | EPOLLRDBAND); 3562 sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN); 3563 rcu_read_unlock(); 3564 } 3565 3566 static void sock_def_write_space(struct sock *sk) 3567 { 3568 struct socket_wq *wq; 3569 3570 rcu_read_lock(); 3571 3572 /* Do not wake up a writer until he can make "significant" 3573 * progress. --DaveM 3574 */ 3575 if (sock_writeable(sk)) { 3576 wq = rcu_dereference(sk->sk_wq); 3577 if (skwq_has_sleeper(wq)) 3578 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 3579 EPOLLWRNORM | EPOLLWRBAND); 3580 3581 /* Should agree with poll, otherwise some programs break */ 3582 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); 3583 } 3584 3585 rcu_read_unlock(); 3586 } 3587 3588 /* An optimised version of sock_def_write_space(), should only be called 3589 * for SOCK_RCU_FREE sockets under RCU read section and after putting 3590 * ->sk_wmem_alloc. 3591 */ 3592 static void sock_def_write_space_wfree(struct sock *sk) 3593 { 3594 /* Do not wake up a writer until he can make "significant" 3595 * progress. 
--DaveM 3596 */ 3597 if (sock_writeable(sk)) { 3598 struct socket_wq *wq = rcu_dereference(sk->sk_wq); 3599 3600 /* rely on refcount_sub from sock_wfree() */ 3601 smp_mb__after_atomic(); 3602 if (wq && waitqueue_active(&wq->wait)) 3603 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT | 3604 EPOLLWRNORM | EPOLLWRBAND); 3605 3606 /* Should agree with poll, otherwise some programs break */ 3607 sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT); 3608 } 3609 } 3610 3611 static void sock_def_destruct(struct sock *sk) 3612 { 3613 } 3614 3615 void sk_send_sigurg(struct sock *sk) 3616 { 3617 if (sk->sk_socket && sk->sk_socket->file) 3618 if (send_sigurg(sk->sk_socket->file)) 3619 sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI); 3620 } 3621 EXPORT_SYMBOL(sk_send_sigurg); 3622 3623 void sk_reset_timer(struct sock *sk, struct timer_list* timer, 3624 unsigned long expires) 3625 { 3626 if (!mod_timer(timer, expires)) 3627 sock_hold(sk); 3628 } 3629 EXPORT_SYMBOL(sk_reset_timer); 3630 3631 void sk_stop_timer(struct sock *sk, struct timer_list* timer) 3632 { 3633 if (timer_delete(timer)) 3634 __sock_put(sk); 3635 } 3636 EXPORT_SYMBOL(sk_stop_timer); 3637 3638 void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer) 3639 { 3640 if (timer_delete_sync(timer)) 3641 __sock_put(sk); 3642 } 3643 EXPORT_SYMBOL(sk_stop_timer_sync); 3644 3645 void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid) 3646 { 3647 sk_init_common(sk); 3648 sk->sk_send_head = NULL; 3649 3650 timer_setup(&sk->sk_timer, NULL, 0); 3651 3652 sk->sk_allocation = GFP_KERNEL; 3653 sk->sk_rcvbuf = READ_ONCE(sysctl_rmem_default); 3654 sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default); 3655 sk->sk_state = TCP_CLOSE; 3656 sk->sk_use_task_frag = true; 3657 sk_set_socket(sk, sock); 3658 3659 sock_set_flag(sk, SOCK_ZAPPED); 3660 3661 if (sock) { 3662 sk->sk_type = sock->type; 3663 RCU_INIT_POINTER(sk->sk_wq, &sock->wq); 3664 sock->sk = sk; 3665 } else { 3666 RCU_INIT_POINTER(sk->sk_wq, NULL); 3667 } 3668 sk->sk_uid = uid; 3669 3670 sk->sk_state_change = sock_def_wakeup; 3671 sk->sk_data_ready = sock_def_readable; 3672 sk->sk_write_space = sock_def_write_space; 3673 sk->sk_error_report = sock_def_error_report; 3674 sk->sk_destruct = sock_def_destruct; 3675 3676 sk->sk_frag.page = NULL; 3677 sk->sk_frag.offset = 0; 3678 sk->sk_peek_off = -1; 3679 3680 sk->sk_peer_pid = NULL; 3681 sk->sk_peer_cred = NULL; 3682 spin_lock_init(&sk->sk_peer_lock); 3683 3684 sk->sk_write_pending = 0; 3685 sk->sk_rcvlowat = 1; 3686 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 3687 sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; 3688 3689 sk->sk_stamp = SK_DEFAULT_STAMP; 3690 #if BITS_PER_LONG==32 3691 seqlock_init(&sk->sk_stamp_seq); 3692 #endif 3693 atomic_set(&sk->sk_zckey, 0); 3694 3695 #ifdef CONFIG_NET_RX_BUSY_POLL 3696 sk->sk_napi_id = 0; 3697 sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read); 3698 #endif 3699 3700 sk->sk_max_pacing_rate = ~0UL; 3701 sk->sk_pacing_rate = ~0UL; 3702 WRITE_ONCE(sk->sk_pacing_shift, 10); 3703 sk->sk_incoming_cpu = -1; 3704 3705 sk_rx_queue_clear(sk); 3706 /* 3707 * Before updating sk_refcnt, we must commit prior changes to memory 3708 * (Documentation/RCU/rculist_nulls.rst for details) 3709 */ 3710 smp_wmb(); 3711 refcount_set(&sk->sk_refcnt, 1); 3712 sk_drops_reset(sk); 3713 } 3714 EXPORT_SYMBOL(sock_init_data_uid); 3715 3716 void sock_init_data(struct socket *sock, struct sock *sk) 3717 { 3718 kuid_t uid = sock ? 
3719 SOCK_INODE(sock)->i_uid : 3720 make_kuid(sock_net(sk)->user_ns, 0); 3721 3722 sock_init_data_uid(sock, sk, uid); 3723 } 3724 EXPORT_SYMBOL(sock_init_data); 3725 3726 void lock_sock_nested(struct sock *sk, int subclass) 3727 { 3728 /* The sk_lock has mutex_lock() semantics here. */ 3729 mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); 3730 3731 might_sleep(); 3732 spin_lock_bh(&sk->sk_lock.slock); 3733 if (sock_owned_by_user_nocheck(sk)) 3734 __lock_sock(sk); 3735 sk->sk_lock.owned = 1; 3736 spin_unlock_bh(&sk->sk_lock.slock); 3737 } 3738 EXPORT_SYMBOL(lock_sock_nested); 3739 3740 void release_sock(struct sock *sk) 3741 { 3742 spin_lock_bh(&sk->sk_lock.slock); 3743 if (sk->sk_backlog.tail) 3744 __release_sock(sk); 3745 3746 if (sk->sk_prot->release_cb) 3747 INDIRECT_CALL_INET_1(sk->sk_prot->release_cb, 3748 tcp_release_cb, sk); 3749 3750 sock_release_ownership(sk); 3751 if (waitqueue_active(&sk->sk_lock.wq)) 3752 wake_up(&sk->sk_lock.wq); 3753 spin_unlock_bh(&sk->sk_lock.slock); 3754 } 3755 EXPORT_SYMBOL(release_sock); 3756 3757 bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock) 3758 { 3759 might_sleep(); 3760 spin_lock_bh(&sk->sk_lock.slock); 3761 3762 if (!sock_owned_by_user_nocheck(sk)) { 3763 /* 3764 * Fast path return with bottom halves disabled and 3765 * sock::sk_lock.slock held. 3766 * 3767 * The 'mutex' is not contended and holding 3768 * sock::sk_lock.slock prevents all other lockers to 3769 * proceed so the corresponding unlock_sock_fast() can 3770 * avoid the slow path of release_sock() completely and 3771 * just release slock. 3772 * 3773 * From a semantical POV this is equivalent to 'acquiring' 3774 * the 'mutex', hence the corresponding lockdep 3775 * mutex_release() has to happen in the fast path of 3776 * unlock_sock_fast(). 
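 *
 * Callers normally go through the lock_sock_fast()/unlock_sock_fast()
 * wrappers in include/net/sock.h; the usual pattern is (sketch):
 *
 *	bool slow = lock_sock_fast(sk);
 *
 *	... touch protected socket state ...
 *	unlock_sock_fast(sk, slow);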
3777 */ 3778 return false; 3779 } 3780 3781 __lock_sock(sk); 3782 sk->sk_lock.owned = 1; 3783 __acquire(&sk->sk_lock.slock); 3784 spin_unlock_bh(&sk->sk_lock.slock); 3785 return true; 3786 } 3787 EXPORT_SYMBOL(__lock_sock_fast); 3788 3789 int sock_gettstamp(struct socket *sock, void __user *userstamp, 3790 bool timeval, bool time32) 3791 { 3792 struct sock *sk = sock->sk; 3793 struct timespec64 ts; 3794 3795 sock_enable_timestamp(sk, SOCK_TIMESTAMP); 3796 ts = ktime_to_timespec64(sock_read_timestamp(sk)); 3797 if (ts.tv_sec == -1) 3798 return -ENOENT; 3799 if (ts.tv_sec == 0) { 3800 ktime_t kt = ktime_get_real(); 3801 sock_write_timestamp(sk, kt); 3802 ts = ktime_to_timespec64(kt); 3803 } 3804 3805 if (timeval) 3806 ts.tv_nsec /= 1000; 3807 3808 #ifdef CONFIG_COMPAT_32BIT_TIME 3809 if (time32) 3810 return put_old_timespec32(&ts, userstamp); 3811 #endif 3812 #ifdef CONFIG_SPARC64 3813 /* beware of padding in sparc64 timeval */ 3814 if (timeval && !in_compat_syscall()) { 3815 struct __kernel_old_timeval __user tv = { 3816 .tv_sec = ts.tv_sec, 3817 .tv_usec = ts.tv_nsec, 3818 }; 3819 if (copy_to_user(userstamp, &tv, sizeof(tv))) 3820 return -EFAULT; 3821 return 0; 3822 } 3823 #endif 3824 return put_timespec64(&ts, userstamp); 3825 } 3826 EXPORT_SYMBOL(sock_gettstamp); 3827 3828 void sock_enable_timestamp(struct sock *sk, enum sock_flags flag) 3829 { 3830 if (!sock_flag(sk, flag)) { 3831 unsigned long previous_flags = sk->sk_flags; 3832 3833 sock_set_flag(sk, flag); 3834 /* 3835 * we just set one of the two flags which require net 3836 * time stamping, but time stamping might have been on 3837 * already because of the other one 3838 */ 3839 if (sock_needs_netstamp(sk) && 3840 !(previous_flags & SK_FLAGS_TIMESTAMP)) 3841 net_enable_timestamp(); 3842 } 3843 } 3844 3845 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, 3846 int level, int type) 3847 { 3848 struct sock_exterr_skb *serr; 3849 struct sk_buff *skb; 3850 int copied, err; 3851 3852 err = -EAGAIN; 3853 skb = sock_dequeue_err_skb(sk); 3854 if (skb == NULL) 3855 goto out; 3856 3857 copied = skb->len; 3858 if (copied > len) { 3859 msg->msg_flags |= MSG_TRUNC; 3860 copied = len; 3861 } 3862 err = skb_copy_datagram_msg(skb, 0, msg, copied); 3863 if (err) 3864 goto out_free_skb; 3865 3866 sock_recv_timestamp(msg, sk, skb); 3867 3868 serr = SKB_EXT_ERR(skb); 3869 put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee); 3870 3871 msg->msg_flags |= MSG_ERRQUEUE; 3872 err = copied; 3873 3874 out_free_skb: 3875 kfree_skb(skb); 3876 out: 3877 return err; 3878 } 3879 EXPORT_SYMBOL(sock_recv_errqueue); 3880 3881 /* 3882 * Get a socket option on an socket. 3883 * 3884 * FIX: POSIX 1003.1g is very ambiguous here. It states that 3885 * asynchronous errors should be reported by getsockopt. We assume 3886 * this means if you specify SO_ERROR (otherwise what is the point of it). 3887 */ 3888 int sock_common_getsockopt(struct socket *sock, int level, int optname, 3889 char __user *optval, int __user *optlen) 3890 { 3891 struct sock *sk = sock->sk; 3892 3893 /* IPV6_ADDRFORM can change sk->sk_prot under us. 
 */
3894 	return READ_ONCE(sk->sk_prot)->getsockopt(sk, level, optname, optval, optlen);
3895 }
3896 EXPORT_SYMBOL(sock_common_getsockopt);
3897 
3898 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3899 			int flags)
3900 {
3901 	struct sock *sk = sock->sk;
3902 	int addr_len = 0;
3903 	int err;
3904 
3905 	err = sk->sk_prot->recvmsg(sk, msg, size, flags, &addr_len);
3906 	if (err >= 0)
3907 		msg->msg_namelen = addr_len;
3908 	return err;
3909 }
3910 EXPORT_SYMBOL(sock_common_recvmsg);
3911 
3912 /*
3913  *	Set socket options on an inet socket.
3914  */
3915 int sock_common_setsockopt(struct socket *sock, int level, int optname,
3916 			   sockptr_t optval, unsigned int optlen)
3917 {
3918 	struct sock *sk = sock->sk;
3919 
3920 	/* IPV6_ADDRFORM can change sk->sk_prot under us. */
3921 	return READ_ONCE(sk->sk_prot)->setsockopt(sk, level, optname, optval, optlen);
3922 }
3923 EXPORT_SYMBOL(sock_common_setsockopt);
3924 
3925 void sk_common_release(struct sock *sk)
3926 {
3927 	if (sk->sk_prot->destroy)
3928 		sk->sk_prot->destroy(sk);
3929 
3930 	/*
3931 	 * Observation: when sk_common_release is called, processes have
3932 	 * no access to the socket, but the network stack still does.
3933 	 * Step one, detach it from networking:
3934 	 *
3935 	 * A. Remove from hash tables.
3936 	 */
3937 
3938 	sk->sk_prot->unhash(sk);
3939 
3940 	/*
3941 	 * At this point the socket cannot receive new packets, but it is possible
3942 	 * that some packets are in flight because some CPU is running the receiver
3943 	 * and did the hash table lookup before we unhashed the socket. They will
3944 	 * reach the receive queue and will be purged by the socket destructor.
3945 	 *
3946 	 * Also, we still have packets pending in the receive queue and probably
3947 	 * our own packets waiting in device queues. sock_destroy will drain the
3948 	 * receive queue, but transmitted packets will delay socket destruction
3949 	 * until the last reference is released.
3950 	 */
3951 
3952 	sock_orphan(sk);
3953 
3954 	xfrm_sk_free_policy(sk);
3955 
3956 	sock_put(sk);
3957 }
3958 EXPORT_SYMBOL(sk_common_release);
3959 
3960 void sk_get_meminfo(const struct sock *sk, u32 *mem)
3961 {
3962 	memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
3963 
3964 	mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
3965 	mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
3966 	mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
3967 	mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
3968 	mem[SK_MEMINFO_FWD_ALLOC] = READ_ONCE(sk->sk_forward_alloc);
3969 	mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
3970 	mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
3971 	mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
3972 	mem[SK_MEMINFO_DROPS] = sk_drops_read(sk);
3973 }
3974 
3975 #ifdef CONFIG_PROC_FS
3976 static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
3977 
3978 int sock_prot_inuse_get(struct net *net, struct proto *prot)
3979 {
3980 	int cpu, idx = prot->inuse_idx;
3981 	int res = 0;
3982 
3983 	for_each_possible_cpu(cpu)
3984 		res += per_cpu_ptr(net->core.prot_inuse, cpu)->val[idx];
3985 
3986 	return res >= 0 ?
res : 0; 3987 } 3988 EXPORT_SYMBOL_GPL(sock_prot_inuse_get); 3989 3990 int sock_inuse_get(struct net *net) 3991 { 3992 int cpu, res = 0; 3993 3994 for_each_possible_cpu(cpu) 3995 res += per_cpu_ptr(net->core.prot_inuse, cpu)->all; 3996 3997 return res; 3998 } 3999 4000 EXPORT_SYMBOL_GPL(sock_inuse_get); 4001 4002 static int __net_init sock_inuse_init_net(struct net *net) 4003 { 4004 net->core.prot_inuse = alloc_percpu(struct prot_inuse); 4005 if (net->core.prot_inuse == NULL) 4006 return -ENOMEM; 4007 return 0; 4008 } 4009 4010 static void __net_exit sock_inuse_exit_net(struct net *net) 4011 { 4012 free_percpu(net->core.prot_inuse); 4013 } 4014 4015 static struct pernet_operations net_inuse_ops = { 4016 .init = sock_inuse_init_net, 4017 .exit = sock_inuse_exit_net, 4018 }; 4019 4020 static __init int net_inuse_init(void) 4021 { 4022 if (register_pernet_subsys(&net_inuse_ops)) 4023 panic("Cannot initialize net inuse counters"); 4024 4025 return 0; 4026 } 4027 4028 core_initcall(net_inuse_init); 4029 4030 static int assign_proto_idx(struct proto *prot) 4031 { 4032 prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR); 4033 4034 if (unlikely(prot->inuse_idx == PROTO_INUSE_NR)) { 4035 pr_err("PROTO_INUSE_NR exhausted\n"); 4036 return -ENOSPC; 4037 } 4038 4039 set_bit(prot->inuse_idx, proto_inuse_idx); 4040 return 0; 4041 } 4042 4043 static void release_proto_idx(struct proto *prot) 4044 { 4045 if (prot->inuse_idx != PROTO_INUSE_NR) 4046 clear_bit(prot->inuse_idx, proto_inuse_idx); 4047 } 4048 #else 4049 static inline int assign_proto_idx(struct proto *prot) 4050 { 4051 return 0; 4052 } 4053 4054 static inline void release_proto_idx(struct proto *prot) 4055 { 4056 } 4057 4058 #endif 4059 4060 static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot) 4061 { 4062 if (!twsk_prot) 4063 return; 4064 kfree(twsk_prot->twsk_slab_name); 4065 twsk_prot->twsk_slab_name = NULL; 4066 kmem_cache_destroy(twsk_prot->twsk_slab); 4067 twsk_prot->twsk_slab = NULL; 4068 } 4069 4070 static int tw_prot_init(const struct proto *prot) 4071 { 4072 struct timewait_sock_ops *twsk_prot = prot->twsk_prot; 4073 4074 if (!twsk_prot) 4075 return 0; 4076 4077 twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", 4078 prot->name); 4079 if (!twsk_prot->twsk_slab_name) 4080 return -ENOMEM; 4081 4082 twsk_prot->twsk_slab = 4083 kmem_cache_create(twsk_prot->twsk_slab_name, 4084 twsk_prot->twsk_obj_size, 0, 4085 SLAB_ACCOUNT | prot->slab_flags, 4086 NULL); 4087 if (!twsk_prot->twsk_slab) { 4088 pr_crit("%s: Can't create timewait sock SLAB cache!\n", 4089 prot->name); 4090 return -ENOMEM; 4091 } 4092 4093 return 0; 4094 } 4095 4096 static void req_prot_cleanup(struct request_sock_ops *rsk_prot) 4097 { 4098 if (!rsk_prot) 4099 return; 4100 kfree(rsk_prot->slab_name); 4101 rsk_prot->slab_name = NULL; 4102 kmem_cache_destroy(rsk_prot->slab); 4103 rsk_prot->slab = NULL; 4104 } 4105 4106 static int req_prot_init(const struct proto *prot) 4107 { 4108 struct request_sock_ops *rsk_prot = prot->rsk_prot; 4109 4110 if (!rsk_prot) 4111 return 0; 4112 4113 rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", 4114 prot->name); 4115 if (!rsk_prot->slab_name) 4116 return -ENOMEM; 4117 4118 rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name, 4119 rsk_prot->obj_size, 0, 4120 SLAB_ACCOUNT | prot->slab_flags, 4121 NULL); 4122 4123 if (!rsk_prot->slab) { 4124 pr_crit("%s: Can't create request sock SLAB cache!\n", 4125 prot->name); 4126 return -ENOMEM; 4127 } 4128 return 0; 4129 } 4130 4131 int 
int proto_register(struct proto *prot, int alloc_slab)
{
	int ret = -ENOBUFS;

	if (prot->memory_allocated && !prot->sysctl_mem) {
		pr_err("%s: missing sysctl_mem\n", prot->name);
		return -EINVAL;
	}
	if (prot->memory_allocated && !prot->per_cpu_fw_alloc) {
		pr_err("%s: missing per_cpu_fw_alloc\n", prot->name);
		return -EINVAL;
	}
	if (alloc_slab) {
		prot->slab = kmem_cache_create_usercopy(prot->name,
					prot->obj_size, 0,
					SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
					prot->slab_flags,
					prot->useroffset, prot->usersize,
					NULL);

		if (prot->slab == NULL) {
			pr_crit("%s: Can't create sock SLAB cache!\n",
				prot->name);
			goto out;
		}

		if (req_prot_init(prot))
			goto out_free_request_sock_slab;

		if (tw_prot_init(prot))
			goto out_free_timewait_sock_slab;
	}

	mutex_lock(&proto_list_mutex);
	ret = assign_proto_idx(prot);
	if (ret) {
		mutex_unlock(&proto_list_mutex);
		goto out_free_timewait_sock_slab;
	}
	list_add(&prot->node, &proto_list);
	mutex_unlock(&proto_list_mutex);
	return ret;

out_free_timewait_sock_slab:
	if (alloc_slab)
		tw_prot_cleanup(prot->twsk_prot);
out_free_request_sock_slab:
	if (alloc_slab) {
		req_prot_cleanup(prot->rsk_prot);

		kmem_cache_destroy(prot->slab);
		prot->slab = NULL;
	}
out:
	return ret;
}
EXPORT_SYMBOL(proto_register);

void proto_unregister(struct proto *prot)
{
	mutex_lock(&proto_list_mutex);
	release_proto_idx(prot);
	list_del(&prot->node);
	mutex_unlock(&proto_list_mutex);

	kmem_cache_destroy(prot->slab);
	prot->slab = NULL;

	req_prot_cleanup(prot->rsk_prot);
	tw_prot_cleanup(prot->twsk_prot);
}
EXPORT_SYMBOL(proto_unregister);

int sock_load_diag_module(int family, int protocol)
{
	if (!protocol) {
		if (!sock_is_registered(family))
			return -ENOENT;

		return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
				      NETLINK_SOCK_DIAG, family);
	}

#ifdef CONFIG_INET
	if (family == AF_INET &&
	    protocol != IPPROTO_RAW &&
	    protocol < MAX_INET_PROTOS &&
	    !rcu_access_pointer(inet_protos[protocol]))
		return -ENOENT;
#endif

	return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
			      NETLINK_SOCK_DIAG, family, protocol);
}
EXPORT_SYMBOL(sock_load_diag_module);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(proto_list_mutex)
{
	mutex_lock(&proto_list_mutex);
	return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
	__releases(proto_list_mutex)
{
	mutex_unlock(&proto_list_mutex);
}

static char proto_method_implemented(const void *method)
{
	return method == NULL ? 'n' : 'y';
}

static long sock_prot_memory_allocated(struct proto *proto)
{
	return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}

static const char *sock_prot_memory_pressure(struct proto *proto)
{
	return proto->memory_pressure != NULL ?
	       proto_memory_pressure(proto) ? "yes" : "no" : "NI";
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
	seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s "
			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
		   proto->name,
		   proto->obj_size,
		   sock_prot_inuse_get(seq_file_net(seq), proto),
		   sock_prot_memory_allocated(proto),
		   sock_prot_memory_pressure(proto),
		   proto->max_header,
		   proto->slab == NULL ? "no" : "yes",
		   module_name(proto->owner),
		   proto_method_implemented(proto->close),
		   proto_method_implemented(proto->connect),
		   proto_method_implemented(proto->disconnect),
		   proto_method_implemented(proto->accept),
		   proto_method_implemented(proto->ioctl),
		   proto_method_implemented(proto->init),
		   proto_method_implemented(proto->destroy),
		   proto_method_implemented(proto->shutdown),
		   proto_method_implemented(proto->setsockopt),
		   proto_method_implemented(proto->getsockopt),
		   proto_method_implemented(proto->sendmsg),
		   proto_method_implemented(proto->recvmsg),
		   proto_method_implemented(proto->bind),
		   proto_method_implemented(proto->backlog_rcv),
		   proto_method_implemented(proto->hash),
		   proto_method_implemented(proto->unhash),
		   proto_method_implemented(proto->get_port),
		   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
	if (v == &proto_list)
		seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
			   "protocol",
			   "size",
			   "sockets",
			   "memory",
			   "press",
			   "maxhdr",
			   "slab",
			   "module",
			   "cl co di ac io in de sh ss gs se re bi br ha uh gp em\n");
	else
		proto_seq_printf(seq, list_entry(v, struct proto, node));
	return 0;
}

static const struct seq_operations proto_seq_ops = {
	.start = proto_seq_start,
	.next  = proto_seq_next,
	.stop  = proto_seq_stop,
	.show  = proto_seq_show,
};

static __net_init int proto_init_net(struct net *net)
{
	if (!proc_create_net("protocols", 0444, net->proc_net, &proto_seq_ops,
			     sizeof(struct seq_net_private)))
		return -ENOMEM;

	return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
	remove_proc_entry("protocols", net->proc_net);
}

static __net_initdata struct pernet_operations proto_net_ops = {
	.init = proto_init_net,
	.exit = proto_exit_net,
};

static int __init proto_init(void)
{
	return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */

#ifdef CONFIG_NET_RX_BUSY_POLL
bool sk_busy_loop_end(void *p, unsigned long start_time)
{
	struct sock *sk = p;

	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		return true;

	if (sk_is_udp(sk) &&
	    !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
		return true;

	return sk_busy_loop_timeout(sk, start_time);
}
EXPORT_SYMBOL(sk_busy_loop_end);
#endif /* CONFIG_NET_RX_BUSY_POLL */

int sock_bind_add(struct sock *sk, struct sockaddr *addr, int addr_len)
{
	if (!sk->sk_prot->bind_add)
		return -EOPNOTSUPP;
	return sk->sk_prot->bind_add(sk, addr, addr_len);
}
EXPORT_SYMBOL(sock_bind_add);
/* Copy 'size' bytes in from userspace, run the ioctl, and copy 'size' bytes
 * back out to userspace.
 */
int sock_ioctl_inout(struct sock *sk, unsigned int cmd,
		     void __user *arg, void *karg, size_t size)
{
	int ret;

	if (copy_from_user(karg, arg, size))
		return -EFAULT;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, karg);
	if (ret)
		return ret;

	if (copy_to_user(arg, karg, size))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL(sock_ioctl_inout);

/* This is the most common ioctl prep function, where the result (4 bytes) is
 * copied back to userspace if the ioctl() returns successfully. No input is
 * copied from userspace as input argument.
 */
static int sock_ioctl_out(struct sock *sk, unsigned int cmd, void __user *arg)
{
	int ret, karg = 0;

	ret = READ_ONCE(sk->sk_prot)->ioctl(sk, cmd, &karg);
	if (ret)
		return ret;

	return put_user(karg, (int __user *)arg);
}

/* A wrapper around sock ioctls, which copies the data from userspace
 * (depending on the protocol/ioctl), and copies back the result to userspace.
 * The main motivation for this function is to pass kernel memory to the
 * protocol ioctl callbacks, instead of userspace memory.
 */
int sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	int rc = 1;

	if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET)
		rc = ipmr_sk_ioctl(sk, cmd, arg);
	else if (sk->sk_type == SOCK_RAW && sk->sk_family == AF_INET6)
		rc = ip6mr_sk_ioctl(sk, cmd, arg);
	else if (sk_is_phonet(sk))
		rc = phonet_sk_ioctl(sk, cmd, arg);

	/* If the ioctl was processed, return its result */
	if (rc <= 0)
		return rc;

	/* Otherwise call the default handler */
	return sock_ioctl_out(sk, cmd, arg);
}
EXPORT_SYMBOL(sk_ioctl);
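
/*
 * Layout checks: CACHELINE_ASSERT_GROUP_MEMBER() verifies that each field
 * sits inside the __cacheline_group of struct sock it is expected to share
 * with the other hot RX/TX path fields, so accidental structure reordering
 * is caught at build time rather than as a performance regression.
 */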
static int __init sock_struct_check(void)
{
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_drops);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_peek_off);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_error_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_receive_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rx, sk_backlog);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_ifindex);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rx_dst_cookie);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_filter);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_wq);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_data_ready);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvtimeo);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rx, sk_rcvlowat);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_err);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_socket);
#ifdef CONFIG_MEMCG
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_rxtx, sk_memcg);
#endif

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_lock);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_reserved_mem);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_forward_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_rxtx, sk_tsflags);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_omem_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_err_soft);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_queued);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_wmem_alloc);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tsq_flags);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_send_head);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_queue);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_write_pending);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_dst_pending_confirm);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_status);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_frag);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_timer);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_pacing_rate);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_zckey);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_write_tx, sk_tskey);

	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_max_pacing_rate);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndtimeo);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_priority);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_mark);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_uid);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_protocol);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_dst_cache);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_route_caps);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_type);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_size);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_allocation);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_txhash);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_sndbuf);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_gso_max_segs);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_pacing_shift);
	CACHELINE_ASSERT_GROUP_MEMBER(struct sock, sock_read_tx, sk_use_task_frag);
	return 0;
}

core_initcall(sock_struct_check);