1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * INET An implementation of the TCP/IP protocol suite for the LINUX 4 * operating system. INET is implemented using the BSD Socket 5 * interface as the means of communication with the user level. 6 * 7 * Implementation of the Transmission Control Protocol(TCP). 8 * 9 * Authors: Ross Biro 10 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Mark Evans, <evansmp@uhura.aston.ac.uk> 12 * Corey Minyard <wf-rch!minyard@relay.EU.net> 13 * Florian La Roche, <flla@stud.uni-sb.de> 14 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu> 15 * Linus Torvalds, <torvalds@cs.helsinki.fi> 16 * Alan Cox, <gw4pts@gw4pts.ampr.org> 17 * Matthew Dillon, <dillon@apollo.west.oic.com> 18 * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 19 * Jorge Cwik, <jorge@laser.satlink.net> 20 * 21 * Fixes: 22 * Alan Cox : Numerous verify_area() calls 23 * Alan Cox : Set the ACK bit on a reset 24 * Alan Cox : Stopped it crashing if it closed while 25 * sk->inuse=1 and was trying to connect 26 * (tcp_err()). 27 * Alan Cox : All icmp error handling was broken 28 * pointers passed where wrong and the 29 * socket was looked up backwards. Nobody 30 * tested any icmp error code obviously. 31 * Alan Cox : tcp_err() now handled properly. It 32 * wakes people on errors. poll 33 * behaves and the icmp error race 34 * has gone by moving it into sock.c 35 * Alan Cox : tcp_send_reset() fixed to work for 36 * everything not just packets for 37 * unknown sockets. 38 * Alan Cox : tcp option processing. 39 * Alan Cox : Reset tweaked (still not 100%) [Had 40 * syn rule wrong] 41 * Herp Rosmanith : More reset fixes 42 * Alan Cox : No longer acks invalid rst frames. 43 * Acking any kind of RST is right out. 44 * Alan Cox : Sets an ignore me flag on an rst 45 * receive otherwise odd bits of prattle 46 * escape still 47 * Alan Cox : Fixed another acking RST frame bug. 48 * Should stop LAN workplace lockups. 49 * Alan Cox : Some tidyups using the new skb list 50 * facilities 51 * Alan Cox : sk->keepopen now seems to work 52 * Alan Cox : Pulls options out correctly on accepts 53 * Alan Cox : Fixed assorted sk->rqueue->next errors 54 * Alan Cox : PSH doesn't end a TCP read. Switched a 55 * bit to skb ops. 56 * Alan Cox : Tidied tcp_data to avoid a potential 57 * nasty. 58 * Alan Cox : Added some better commenting, as the 59 * tcp is hard to follow 60 * Alan Cox : Removed incorrect check for 20 * psh 61 * Michael O'Reilly : ack < copied bug fix. 62 * Johannes Stille : Misc tcp fixes (not all in yet). 63 * Alan Cox : FIN with no memory -> CRASH 64 * Alan Cox : Added socket option proto entries. 65 * Also added awareness of them to accept. 66 * Alan Cox : Added TCP options (SOL_TCP) 67 * Alan Cox : Switched wakeup calls to callbacks, 68 * so the kernel can layer network 69 * sockets. 70 * Alan Cox : Use ip_tos/ip_ttl settings. 71 * Alan Cox : Handle FIN (more) properly (we hope). 72 * Alan Cox : RST frames sent on unsynchronised 73 * state ack error. 74 * Alan Cox : Put in missing check for SYN bit. 75 * Alan Cox : Added tcp_select_window() aka NET2E 76 * window non shrink trick. 
77 * Alan Cox : Added a couple of small NET2E timer 78 * fixes 79 * Charles Hedrick : TCP fixes 80 * Toomas Tamm : TCP window fixes 81 * Alan Cox : Small URG fix to rlogin ^C ack fight 82 * Charles Hedrick : Rewrote most of it to actually work 83 * Linus : Rewrote tcp_read() and URG handling 84 * completely 85 * Gerhard Koerting: Fixed some missing timer handling 86 * Matthew Dillon : Reworked TCP machine states as per RFC 87 * Gerhard Koerting: PC/TCP workarounds 88 * Adam Caldwell : Assorted timer/timing errors 89 * Matthew Dillon : Fixed another RST bug 90 * Alan Cox : Move to kernel side addressing changes. 91 * Alan Cox : Beginning work on TCP fastpathing 92 * (not yet usable) 93 * Arnt Gulbrandsen: Turbocharged tcp_check() routine. 94 * Alan Cox : TCP fast path debugging 95 * Alan Cox : Window clamping 96 * Michael Riepe : Bug in tcp_check() 97 * Matt Dillon : More TCP improvements and RST bug fixes 98 * Matt Dillon : Yet more small nasties remove from the 99 * TCP code (Be very nice to this man if 100 * tcp finally works 100%) 8) 101 * Alan Cox : BSD accept semantics. 102 * Alan Cox : Reset on closedown bug. 103 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto(). 104 * Michael Pall : Handle poll() after URG properly in 105 * all cases. 106 * Michael Pall : Undo the last fix in tcp_read_urg() 107 * (multi URG PUSH broke rlogin). 108 * Michael Pall : Fix the multi URG PUSH problem in 109 * tcp_readable(), poll() after URG 110 * works now. 111 * Michael Pall : recv(...,MSG_OOB) never blocks in the 112 * BSD api. 113 * Alan Cox : Changed the semantics of sk->socket to 114 * fix a race and a signal problem with 115 * accept() and async I/O. 116 * Alan Cox : Relaxed the rules on tcp_sendto(). 117 * Yury Shevchuk : Really fixed accept() blocking problem. 118 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for 119 * clients/servers which listen in on 120 * fixed ports. 121 * Alan Cox : Cleaned the above up and shrank it to 122 * a sensible code size. 123 * Alan Cox : Self connect lockup fix. 124 * Alan Cox : No connect to multicast. 125 * Ross Biro : Close unaccepted children on master 126 * socket close. 127 * Alan Cox : Reset tracing code. 128 * Alan Cox : Spurious resets on shutdown. 129 * Alan Cox : Giant 15 minute/60 second timer error 130 * Alan Cox : Small whoops in polling before an 131 * accept. 132 * Alan Cox : Kept the state trace facility since 133 * it's handy for debugging. 134 * Alan Cox : More reset handler fixes. 135 * Alan Cox : Started rewriting the code based on 136 * the RFC's for other useful protocol 137 * references see: Comer, KA9Q NOS, and 138 * for a reference on the difference 139 * between specifications and how BSD 140 * works see the 4.4lite source. 141 * A.N.Kuznetsov : Don't time wait on completion of tidy 142 * close. 143 * Linus Torvalds : Fin/Shutdown & copied_seq changes. 144 * Linus Torvalds : Fixed BSD port reuse to work first syn 145 * Alan Cox : Reimplemented timers as per the RFC 146 * and using multiple timers for sanity. 147 * Alan Cox : Small bug fixes, and a lot of new 148 * comments. 149 * Alan Cox : Fixed dual reader crash by locking 150 * the buffers (much like datagram.c) 151 * Alan Cox : Fixed stuck sockets in probe. A probe 152 * now gets fed up of retrying without 153 * (even a no space) answer. 154 * Alan Cox : Extracted closing code better 155 * Alan Cox : Fixed the closing state machine to 156 * resemble the RFC. 157 * Alan Cox : More 'per spec' fixes. 158 * Jorge Cwik : Even faster checksumming. 
159 * Alan Cox : tcp_data() doesn't ack illegal PSH 160 * only frames. At least one pc tcp stack 161 * generates them. 162 * Alan Cox : Cache last socket. 163 * Alan Cox : Per route irtt. 164 * Matt Day : poll()->select() match BSD precisely on error 165 * Alan Cox : New buffers 166 * Marc Tamsky : Various sk->prot->retransmits and 167 * sk->retransmits misupdating fixed. 168 * Fixed tcp_write_timeout: stuck close, 169 * and TCP syn retries gets used now. 170 * Mark Yarvis : In tcp_read_wakeup(), don't send an 171 * ack if state is TCP_CLOSED. 172 * Alan Cox : Look up device on a retransmit - routes may 173 * change. Doesn't yet cope with MSS shrink right 174 * but it's a start! 175 * Marc Tamsky : Closing in closing fixes. 176 * Mike Shaver : RFC1122 verifications. 177 * Alan Cox : rcv_saddr errors. 178 * Alan Cox : Block double connect(). 179 * Alan Cox : Small hooks for enSKIP. 180 * Alexey Kuznetsov: Path MTU discovery. 181 * Alan Cox : Support soft errors. 182 * Alan Cox : Fix MTU discovery pathological case 183 * when the remote claims no mtu! 184 * Marc Tamsky : TCP_CLOSE fix. 185 * Colin (G3TNE) : Send a reset on syn ack replies in 186 * window but wrong (fixes NT lpd problems) 187 * Pedro Roque : Better TCP window handling, delayed ack. 188 * Joerg Reuter : No modification of locked buffers in 189 * tcp_do_retransmit() 190 * Eric Schenk : Changed receiver side silly window 191 * avoidance algorithm to BSD style 192 * algorithm. This doubles throughput 193 * against machines running Solaris, 194 * and seems to result in general 195 * improvement. 196 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD 197 * Willy Konynenberg : Transparent proxying support. 198 * Mike McLagan : Routing by source 199 * Keith Owens : Do proper merging with partial SKB's in 200 * tcp_do_sendmsg to avoid burstiness. 201 * Eric Schenk : Fix fast close down bug with 202 * shutdown() followed by close(). 203 * Andi Kleen : Make poll agree with SIGIO 204 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and 205 * lingertime == 0 (RFC 793 ABORT Call) 206 * Hirokazu Takahashi : Use copy_from_user() instead of 207 * csum_and_copy_from_user() if possible. 208 * 209 * Description of States: 210 * 211 * TCP_SYN_SENT sent a connection request, waiting for ack 212 * 213 * TCP_SYN_RECV received a connection request, sent ack, 214 * waiting for final ack in three-way handshake. 215 * 216 * TCP_ESTABLISHED connection established 217 * 218 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete 219 * transmission of remaining buffered data 220 * 221 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote 222 * to shutdown 223 * 224 * TCP_CLOSING both sides have shutdown but we still have 225 * data we have to finish sending 226 * 227 * TCP_TIME_WAIT timeout to catch resent junk before entering 228 * closed, can only be entered from FIN_WAIT2 229 * or CLOSING. Required because the other end 230 * may not have gotten our last ACK causing it 231 * to retransmit the data packet (which we ignore) 232 * 233 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for 234 * us to finish writing our data and to shutdown 235 * (we have to close() to move on to LAST_ACK) 236 * 237 * TCP_LAST_ACK out side has shutdown after remote has 238 * shutdown. 
There may still be data in our 239 * buffer that we have to finish sending 240 * 241 * TCP_CLOSE socket is finished 242 */ 243 244 #define pr_fmt(fmt) "TCP: " fmt 245 246 #include <crypto/hash.h> 247 #include <linux/kernel.h> 248 #include <linux/module.h> 249 #include <linux/types.h> 250 #include <linux/fcntl.h> 251 #include <linux/poll.h> 252 #include <linux/inet_diag.h> 253 #include <linux/init.h> 254 #include <linux/fs.h> 255 #include <linux/skbuff.h> 256 #include <linux/scatterlist.h> 257 #include <linux/splice.h> 258 #include <linux/net.h> 259 #include <linux/socket.h> 260 #include <linux/random.h> 261 #include <linux/memblock.h> 262 #include <linux/highmem.h> 263 #include <linux/cache.h> 264 #include <linux/err.h> 265 #include <linux/time.h> 266 #include <linux/slab.h> 267 #include <linux/errqueue.h> 268 #include <linux/static_key.h> 269 #include <linux/btf.h> 270 271 #include <net/icmp.h> 272 #include <net/inet_common.h> 273 #include <net/tcp.h> 274 #include <net/mptcp.h> 275 #include <net/proto_memory.h> 276 #include <net/xfrm.h> 277 #include <net/ip.h> 278 #include <net/sock.h> 279 #include <net/rstreason.h> 280 281 #include <linux/uaccess.h> 282 #include <asm/ioctls.h> 283 #include <net/busy_poll.h> 284 #include <net/hotdata.h> 285 #include <trace/events/tcp.h> 286 #include <net/rps.h> 287 288 #include "../core/devmem.h" 289 290 /* Track pending CMSGs. */ 291 enum { 292 TCP_CMSG_INQ = 1, 293 TCP_CMSG_TS = 2 294 }; 295 296 DEFINE_PER_CPU(unsigned int, tcp_orphan_count); 297 EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count); 298 299 DEFINE_PER_CPU(u32, tcp_tw_isn); 300 EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn); 301 302 long sysctl_tcp_mem[3] __read_mostly; 303 EXPORT_IPV6_MOD(sysctl_tcp_mem); 304 305 DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc); 306 EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc); 307 308 #if IS_ENABLED(CONFIG_SMC) 309 DEFINE_STATIC_KEY_FALSE(tcp_have_smc); 310 EXPORT_SYMBOL(tcp_have_smc); 311 #endif 312 313 /* 314 * Current number of TCP sockets. 315 */ 316 struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp; 317 EXPORT_IPV6_MOD(tcp_sockets_allocated); 318 319 /* 320 * TCP splice context 321 */ 322 struct tcp_splice_state { 323 struct pipe_inode_info *pipe; 324 size_t len; 325 unsigned int flags; 326 }; 327 328 /* 329 * Pressure flag: try to collapse. 330 * Technical note: it is used by multiple contexts non atomically. 331 * All the __sk_mem_schedule() is of this nature: accounting 332 * is strict, actions are advisory and have some latency. 
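 *
 * The flag below also doubles as a timestamp: tcp_enter_memory_pressure()
 * moves it from 0 to the current jiffies with a single cmpxchg(), so only
 * the first CPU to notice pressure bumps LINUX_MIB_TCPMEMORYPRESSURES, and
 * tcp_leave_memory_pressure() xchg()s it back to 0 and accounts the elapsed
 * time to LINUX_MIB_TCPMEMORYPRESSURESCHRONO.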
333 */ 334 unsigned long tcp_memory_pressure __read_mostly; 335 EXPORT_SYMBOL_GPL(tcp_memory_pressure); 336 337 void tcp_enter_memory_pressure(struct sock *sk) 338 { 339 unsigned long val; 340 341 if (READ_ONCE(tcp_memory_pressure)) 342 return; 343 val = jiffies; 344 345 if (!val) 346 val--; 347 if (!cmpxchg(&tcp_memory_pressure, 0, val)) 348 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES); 349 } 350 EXPORT_IPV6_MOD_GPL(tcp_enter_memory_pressure); 351 352 void tcp_leave_memory_pressure(struct sock *sk) 353 { 354 unsigned long val; 355 356 if (!READ_ONCE(tcp_memory_pressure)) 357 return; 358 val = xchg(&tcp_memory_pressure, 0); 359 if (val) 360 NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO, 361 jiffies_to_msecs(jiffies - val)); 362 } 363 EXPORT_IPV6_MOD_GPL(tcp_leave_memory_pressure); 364 365 /* Convert seconds to retransmits based on initial and max timeout */ 366 static u8 secs_to_retrans(int seconds, int timeout, int rto_max) 367 { 368 u8 res = 0; 369 370 if (seconds > 0) { 371 int period = timeout; 372 373 res = 1; 374 while (seconds > period && res < 255) { 375 res++; 376 timeout <<= 1; 377 if (timeout > rto_max) 378 timeout = rto_max; 379 period += timeout; 380 } 381 } 382 return res; 383 } 384 385 /* Convert retransmits to seconds based on initial and max timeout */ 386 static int retrans_to_secs(u8 retrans, int timeout, int rto_max) 387 { 388 int period = 0; 389 390 if (retrans > 0) { 391 period = timeout; 392 while (--retrans) { 393 timeout <<= 1; 394 if (timeout > rto_max) 395 timeout = rto_max; 396 period += timeout; 397 } 398 } 399 return period; 400 } 401 402 static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp) 403 { 404 u32 rate = READ_ONCE(tp->rate_delivered); 405 u32 intv = READ_ONCE(tp->rate_interval_us); 406 u64 rate64 = 0; 407 408 if (rate && intv) { 409 rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC; 410 do_div(rate64, intv); 411 } 412 return rate64; 413 } 414 415 /* Address-family independent initialization for a tcp_sock. 416 * 417 * NOTE: A lot of things set to zero explicitly by call to 418 * sk_alloc() so need not be done here. 419 */ 420 void tcp_init_sock(struct sock *sk) 421 { 422 struct inet_connection_sock *icsk = inet_csk(sk); 423 struct tcp_sock *tp = tcp_sk(sk); 424 int rto_min_us, rto_max_ms; 425 426 tp->out_of_order_queue = RB_ROOT; 427 sk->tcp_rtx_queue = RB_ROOT; 428 tcp_init_xmit_timers(sk); 429 INIT_LIST_HEAD(&tp->tsq_node); 430 INIT_LIST_HEAD(&tp->tsorted_sent_queue); 431 432 icsk->icsk_rto = TCP_TIMEOUT_INIT; 433 434 rto_max_ms = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_max_ms); 435 icsk->icsk_rto_max = msecs_to_jiffies(rto_max_ms); 436 437 rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us); 438 icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us); 439 icsk->icsk_delack_max = TCP_DELACK_MAX; 440 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 441 minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U); 442 443 /* So many TCP implementations out there (incorrectly) count the 444 * initial SYN frame in their delayed-ACK and congestion control 445 * algorithms that we must have the following bandaid to talk 446 * efficiently to them. -DaveM 447 */ 448 tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 449 450 /* There's a bubble in the pipe until at least the first ACK. */ 451 tp->app_limited = ~0U; 452 tp->rate_app_limited = 1; 453 454 /* See draft-stevens-tcpca-spec-01 for discussion of the 455 * initialization of these values. 
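 * In short: ssthresh starts at TCP_INFINITE_SSTHRESH so the connection
 * stays in slow start until congestion is first detected, the cwnd clamp
 * starts effectively unbounded, and mss_cache starts at the conservative
 * TCP_MSS_DEFAULT (536 bytes) until the real MSS is learned from the peer
 * and the route.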
456 */ 457 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 458 tp->snd_cwnd_clamp = ~0; 459 tp->mss_cache = TCP_MSS_DEFAULT; 460 461 tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering); 462 tcp_assign_congestion_control(sk); 463 464 tp->tsoffset = 0; 465 tp->rack.reo_wnd_steps = 1; 466 467 sk->sk_write_space = sk_stream_write_space; 468 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 469 470 icsk->icsk_sync_mss = tcp_sync_mss; 471 472 WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1])); 473 WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1])); 474 tcp_scaling_ratio_init(sk); 475 476 set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags); 477 sk_sockets_allocated_inc(sk); 478 xa_init_flags(&sk->sk_user_frags, XA_FLAGS_ALLOC1); 479 } 480 EXPORT_IPV6_MOD(tcp_init_sock); 481 482 static void tcp_tx_timestamp(struct sock *sk, struct sockcm_cookie *sockc) 483 { 484 struct sk_buff *skb = tcp_write_queue_tail(sk); 485 u32 tsflags = sockc->tsflags; 486 487 if (tsflags && skb) { 488 struct skb_shared_info *shinfo = skb_shinfo(skb); 489 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 490 491 sock_tx_timestamp(sk, sockc, &shinfo->tx_flags); 492 if (tsflags & SOF_TIMESTAMPING_TX_ACK) 493 tcb->txstamp_ack |= TSTAMP_ACK_SK; 494 if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) 495 shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; 496 } 497 498 if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && 499 SK_BPF_CB_FLAG_TEST(sk, SK_BPF_CB_TX_TIMESTAMPING) && skb) 500 bpf_skops_tx_timestamping(sk, skb, BPF_SOCK_OPS_TSTAMP_SENDMSG_CB); 501 } 502 503 static bool tcp_stream_is_readable(struct sock *sk, int target) 504 { 505 if (tcp_epollin_ready(sk, target)) 506 return true; 507 return sk_is_readable(sk); 508 } 509 510 /* 511 * Wait for a TCP event. 512 * 513 * Note that we don't need to lock the socket, as the upper poll layers 514 * take care of normal races (between the test and the event) and we don't 515 * go look at any of the socket buffers directly. 516 */ 517 __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) 518 { 519 __poll_t mask; 520 struct sock *sk = sock->sk; 521 const struct tcp_sock *tp = tcp_sk(sk); 522 u8 shutdown; 523 int state; 524 525 sock_poll_wait(file, sock, wait); 526 527 state = inet_sk_state_load(sk); 528 if (state == TCP_LISTEN) 529 return inet_csk_listen_poll(sk); 530 531 /* Socket is not locked. We are protected from async events 532 * by poll logic and correct handling of state changes 533 * made by other threads is impossible in any case. 534 */ 535 536 mask = 0; 537 538 /* 539 * EPOLLHUP is certainly not done right. But poll() doesn't 540 * have a notion of HUP in just one direction, and for a 541 * socket the read side is more interesting. 542 * 543 * Some poll() documentation says that EPOLLHUP is incompatible 544 * with the EPOLLOUT/POLLWR flags, so somebody should check this 545 * all. But careful, it tends to be safer to return too many 546 * bits than too few, and you can easily break real applications 547 * if you don't tell them that something has hung up! 548 * 549 * Check-me. 550 * 551 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and 552 * our fs/select.c). It means that after we received EOF, 553 * poll always returns immediately, making impossible poll() on write() 554 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP 555 * if and only if shutdown has been made in both directions. 556 * Actually, it is interesting to look how Solaris and DUX 557 * solve this dilemma. 
I would prefer, if EPOLLHUP were maskable, 558 * then we could set it on SND_SHUTDOWN. BTW examples given 559 * in Stevens' books assume exactly this behaviour, it explains 560 * why EPOLLHUP is incompatible with EPOLLOUT. --ANK 561 * 562 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent 563 * blocking on fresh not-connected or disconnected socket. --ANK 564 */ 565 shutdown = READ_ONCE(sk->sk_shutdown); 566 if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) 567 mask |= EPOLLHUP; 568 if (shutdown & RCV_SHUTDOWN) 569 mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; 570 571 /* Connected or passive Fast Open socket? */ 572 if (state != TCP_SYN_SENT && 573 (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) { 574 int target = sock_rcvlowat(sk, 0, INT_MAX); 575 u16 urg_data = READ_ONCE(tp->urg_data); 576 577 if (unlikely(urg_data) && 578 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) && 579 !sock_flag(sk, SOCK_URGINLINE)) 580 target++; 581 582 if (tcp_stream_is_readable(sk, target)) 583 mask |= EPOLLIN | EPOLLRDNORM; 584 585 if (!(shutdown & SEND_SHUTDOWN)) { 586 if (__sk_stream_is_writeable(sk, 1)) { 587 mask |= EPOLLOUT | EPOLLWRNORM; 588 } else { /* send SIGIO later */ 589 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); 590 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 591 592 /* Race breaker. If space is freed after 593 * wspace test but before the flags are set, 594 * IO signal will be lost. Memory barrier 595 * pairs with the input side. 596 */ 597 smp_mb__after_atomic(); 598 if (__sk_stream_is_writeable(sk, 1)) 599 mask |= EPOLLOUT | EPOLLWRNORM; 600 } 601 } else 602 mask |= EPOLLOUT | EPOLLWRNORM; 603 604 if (urg_data & TCP_URG_VALID) 605 mask |= EPOLLPRI; 606 } else if (state == TCP_SYN_SENT && 607 inet_test_bit(DEFER_CONNECT, sk)) { 608 /* Active TCP fastopen socket with defer_connect 609 * Return EPOLLOUT so application can call write() 610 * in order for kernel to generate SYN+data 611 */ 612 mask |= EPOLLOUT | EPOLLWRNORM; 613 } 614 /* This barrier is coupled with smp_wmb() in tcp_done_with_error() */ 615 smp_rmb(); 616 if (READ_ONCE(sk->sk_err) || 617 !skb_queue_empty_lockless(&sk->sk_error_queue)) 618 mask |= EPOLLERR; 619 620 return mask; 621 } 622 EXPORT_SYMBOL(tcp_poll); 623 624 int tcp_ioctl(struct sock *sk, int cmd, int *karg) 625 { 626 struct tcp_sock *tp = tcp_sk(sk); 627 int answ; 628 bool slow; 629 630 switch (cmd) { 631 case SIOCINQ: 632 if (sk->sk_state == TCP_LISTEN) 633 return -EINVAL; 634 635 slow = lock_sock_fast(sk); 636 answ = tcp_inq(sk); 637 unlock_sock_fast(sk, slow); 638 break; 639 case SIOCATMARK: 640 answ = READ_ONCE(tp->urg_data) && 641 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq); 642 break; 643 case SIOCOUTQ: 644 if (sk->sk_state == TCP_LISTEN) 645 return -EINVAL; 646 647 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 648 answ = 0; 649 else 650 answ = READ_ONCE(tp->write_seq) - tp->snd_una; 651 break; 652 case SIOCOUTQNSD: 653 if (sk->sk_state == TCP_LISTEN) 654 return -EINVAL; 655 656 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) 657 answ = 0; 658 else 659 answ = READ_ONCE(tp->write_seq) - 660 READ_ONCE(tp->snd_nxt); 661 break; 662 default: 663 return -ENOIOCTLCMD; 664 } 665 666 *karg = answ; 667 return 0; 668 } 669 EXPORT_IPV6_MOD(tcp_ioctl); 670 671 void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 672 { 673 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; 674 tp->pushed_seq = tp->write_seq; 675 } 676 677 static inline bool forced_push(const struct tcp_sock *tp) 678 { 679 return after(tp->write_seq, 
tp->pushed_seq + (tp->max_window >> 1)); 680 } 681 682 void tcp_skb_entail(struct sock *sk, struct sk_buff *skb) 683 { 684 struct tcp_sock *tp = tcp_sk(sk); 685 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); 686 687 tcb->seq = tcb->end_seq = tp->write_seq; 688 tcb->tcp_flags = TCPHDR_ACK; 689 __skb_header_release(skb); 690 tcp_add_write_queue_tail(sk, skb); 691 sk_wmem_queued_add(sk, skb->truesize); 692 sk_mem_charge(sk, skb->truesize); 693 if (tp->nonagle & TCP_NAGLE_PUSH) 694 tp->nonagle &= ~TCP_NAGLE_PUSH; 695 696 tcp_slow_start_after_idle_check(sk); 697 } 698 699 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags) 700 { 701 if (flags & MSG_OOB) 702 tp->snd_up = tp->write_seq; 703 } 704 705 /* If a not yet filled skb is pushed, do not send it if 706 * we have data packets in Qdisc or NIC queues : 707 * Because TX completion will happen shortly, it gives a chance 708 * to coalesce future sendmsg() payload into this skb, without 709 * need for a timer, and with no latency trade off. 710 * As packets containing data payload have a bigger truesize 711 * than pure acks (dataless) packets, the last checks prevent 712 * autocorking if we only have an ACK in Qdisc/NIC queues, 713 * or if TX completion was delayed after we processed ACK packet. 714 */ 715 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb, 716 int size_goal) 717 { 718 return skb->len < size_goal && 719 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) && 720 !tcp_rtx_queue_empty(sk) && 721 refcount_read(&sk->sk_wmem_alloc) > skb->truesize && 722 tcp_skb_can_collapse_to(skb); 723 } 724 725 void tcp_push(struct sock *sk, int flags, int mss_now, 726 int nonagle, int size_goal) 727 { 728 struct tcp_sock *tp = tcp_sk(sk); 729 struct sk_buff *skb; 730 731 skb = tcp_write_queue_tail(sk); 732 if (!skb) 733 return; 734 if (!(flags & MSG_MORE) || forced_push(tp)) 735 tcp_mark_push(tp, skb); 736 737 tcp_mark_urg(tp, flags); 738 739 if (tcp_should_autocork(sk, skb, size_goal)) { 740 741 /* avoid atomic op if TSQ_THROTTLED bit is already set */ 742 if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) { 743 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING); 744 set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags); 745 smp_mb__after_atomic(); 746 } 747 /* It is possible TX completion already happened 748 * before we set TSQ_THROTTLED. 749 */ 750 if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize) 751 return; 752 } 753 754 if (flags & MSG_MORE) 755 nonagle = TCP_NAGLE_CORK; 756 757 __tcp_push_pending_frames(sk, mss_now, nonagle); 758 } 759 760 static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, 761 unsigned int offset, size_t len) 762 { 763 struct tcp_splice_state *tss = rd_desc->arg.data; 764 int ret; 765 766 ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe, 767 min(rd_desc->count, len), tss->flags); 768 if (ret > 0) 769 rd_desc->count -= ret; 770 return ret; 771 } 772 773 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss) 774 { 775 /* Store TCP splice context information in read_descriptor_t. 
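 * rd_desc.count carries the remaining byte budget for this call:
 * tcp_splice_data_recv() splices at most that many bytes per skb and
 * decrements it, and tcp_read_sock() stops once the budget hits zero.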
*/ 776 read_descriptor_t rd_desc = { 777 .arg.data = tss, 778 .count = tss->len, 779 }; 780 781 return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv); 782 } 783 784 /** 785 * tcp_splice_read - splice data from TCP socket to a pipe 786 * @sock: socket to splice from 787 * @ppos: position (not valid) 788 * @pipe: pipe to splice to 789 * @len: number of bytes to splice 790 * @flags: splice modifier flags 791 * 792 * Description: 793 * Will read pages from given socket and fill them into a pipe. 794 * 795 **/ 796 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos, 797 struct pipe_inode_info *pipe, size_t len, 798 unsigned int flags) 799 { 800 struct sock *sk = sock->sk; 801 struct tcp_splice_state tss = { 802 .pipe = pipe, 803 .len = len, 804 .flags = flags, 805 }; 806 long timeo; 807 ssize_t spliced; 808 int ret; 809 810 sock_rps_record_flow(sk); 811 /* 812 * We can't seek on a socket input 813 */ 814 if (unlikely(*ppos)) 815 return -ESPIPE; 816 817 ret = spliced = 0; 818 819 lock_sock(sk); 820 821 timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); 822 while (tss.len) { 823 ret = __tcp_splice_read(sk, &tss); 824 if (ret < 0) 825 break; 826 else if (!ret) { 827 if (spliced) 828 break; 829 if (sock_flag(sk, SOCK_DONE)) 830 break; 831 if (sk->sk_err) { 832 ret = sock_error(sk); 833 break; 834 } 835 if (sk->sk_shutdown & RCV_SHUTDOWN) 836 break; 837 if (sk->sk_state == TCP_CLOSE) { 838 /* 839 * This occurs when user tries to read 840 * from never connected socket. 841 */ 842 ret = -ENOTCONN; 843 break; 844 } 845 if (!timeo) { 846 ret = -EAGAIN; 847 break; 848 } 849 /* if __tcp_splice_read() got nothing while we have 850 * an skb in receive queue, we do not want to loop. 851 * This might happen with URG data. 852 */ 853 if (!skb_queue_empty(&sk->sk_receive_queue)) 854 break; 855 ret = sk_wait_data(sk, &timeo, NULL); 856 if (ret < 0) 857 break; 858 if (signal_pending(current)) { 859 ret = sock_intr_errno(timeo); 860 break; 861 } 862 continue; 863 } 864 tss.len -= ret; 865 spliced += ret; 866 867 if (!tss.len || !timeo) 868 break; 869 release_sock(sk); 870 lock_sock(sk); 871 872 if (sk->sk_err || sk->sk_state == TCP_CLOSE || 873 (sk->sk_shutdown & RCV_SHUTDOWN) || 874 signal_pending(current)) 875 break; 876 } 877 878 release_sock(sk); 879 880 if (spliced) 881 return spliced; 882 883 return ret; 884 } 885 EXPORT_IPV6_MOD(tcp_splice_read); 886 887 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp, 888 bool force_schedule) 889 { 890 struct sk_buff *skb; 891 892 skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp); 893 if (likely(skb)) { 894 bool mem_scheduled; 895 896 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 897 if (force_schedule) { 898 mem_scheduled = true; 899 sk_forced_mem_schedule(sk, skb->truesize); 900 } else { 901 mem_scheduled = sk_wmem_schedule(sk, skb->truesize); 902 } 903 if (likely(mem_scheduled)) { 904 skb_reserve(skb, MAX_TCP_HEADER); 905 skb->ip_summed = CHECKSUM_PARTIAL; 906 INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); 907 return skb; 908 } 909 __kfree_skb(skb); 910 } else { 911 sk->sk_prot->enter_memory_pressure(sk); 912 sk_stream_moderate_sndbuf(sk); 913 } 914 return NULL; 915 } 916 917 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, 918 int large_allowed) 919 { 920 struct tcp_sock *tp = tcp_sk(sk); 921 u32 new_size_goal, size_goal; 922 923 if (!large_allowed) 924 return mss_now; 925 926 /* Note : tcp_tso_autosize() will eventually split this later */ 927 new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size); 928 929 /* We 
try hard to avoid divides here */ 930 size_goal = tp->gso_segs * mss_now; 931 if (unlikely(new_size_goal < size_goal || 932 new_size_goal >= size_goal + mss_now)) { 933 tp->gso_segs = min_t(u16, new_size_goal / mss_now, 934 sk->sk_gso_max_segs); 935 size_goal = tp->gso_segs * mss_now; 936 } 937 938 return max(size_goal, mss_now); 939 } 940 941 int tcp_send_mss(struct sock *sk, int *size_goal, int flags) 942 { 943 int mss_now; 944 945 mss_now = tcp_current_mss(sk); 946 *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB)); 947 948 return mss_now; 949 } 950 951 /* In some cases, sendmsg() could have added an skb to the write queue, 952 * but failed adding payload on it. We need to remove it to consume less 953 * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger 954 * epoll() users. Another reason is that tcp_write_xmit() does not like 955 * finding an empty skb in the write queue. 956 */ 957 void tcp_remove_empty_skb(struct sock *sk) 958 { 959 struct sk_buff *skb = tcp_write_queue_tail(sk); 960 961 if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { 962 tcp_unlink_write_queue(skb, sk); 963 if (tcp_write_queue_empty(sk)) 964 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 965 tcp_wmem_free_skb(sk, skb); 966 } 967 } 968 969 /* skb changing from pure zc to mixed, must charge zc */ 970 static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb) 971 { 972 if (unlikely(skb_zcopy_pure(skb))) { 973 u32 extra = skb->truesize - 974 SKB_TRUESIZE(skb_end_offset(skb)); 975 976 if (!sk_wmem_schedule(sk, extra)) 977 return -ENOMEM; 978 979 sk_mem_charge(sk, extra); 980 skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY; 981 } 982 return 0; 983 } 984 985 986 int tcp_wmem_schedule(struct sock *sk, int copy) 987 { 988 int left; 989 990 if (likely(sk_wmem_schedule(sk, copy))) 991 return copy; 992 993 /* We could be in trouble if we have nothing queued. 994 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0] 995 * to guarantee some progress. 996 */ 997 left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued; 998 if (left > 0) 999 sk_forced_mem_schedule(sk, min(left, copy)); 1000 return min(copy, sk->sk_forward_alloc); 1001 } 1002 1003 void tcp_free_fastopen_req(struct tcp_sock *tp) 1004 { 1005 if (tp->fastopen_req) { 1006 kfree(tp->fastopen_req); 1007 tp->fastopen_req = NULL; 1008 } 1009 } 1010 1011 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied, 1012 size_t size, struct ubuf_info *uarg) 1013 { 1014 struct tcp_sock *tp = tcp_sk(sk); 1015 struct inet_sock *inet = inet_sk(sk); 1016 struct sockaddr *uaddr = msg->msg_name; 1017 int err, flags; 1018 1019 if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & 1020 TFO_CLIENT_ENABLE) || 1021 (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && 1022 uaddr->sa_family == AF_UNSPEC)) 1023 return -EOPNOTSUPP; 1024 if (tp->fastopen_req) 1025 return -EALREADY; /* Another Fast Open is in progress */ 1026 1027 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), 1028 sk->sk_allocation); 1029 if (unlikely(!tp->fastopen_req)) 1030 return -ENOBUFS; 1031 tp->fastopen_req->data = msg; 1032 tp->fastopen_req->size = size; 1033 tp->fastopen_req->uarg = uarg; 1034 1035 if (inet_test_bit(DEFER_CONNECT, sk)) { 1036 err = tcp_connect(sk); 1037 /* Same failure procedure as in tcp_v4/6_connect */ 1038 if (err) { 1039 tcp_set_state(sk, TCP_CLOSE); 1040 inet->inet_dport = 0; 1041 sk->sk_route_caps = 0; 1042 } 1043 } 1044 flags = (msg->msg_flags & MSG_DONTWAIT) ? 
O_NONBLOCK : 0; 1045 err = __inet_stream_connect(sk->sk_socket, uaddr, 1046 msg->msg_namelen, flags, 1); 1047 /* fastopen_req could already be freed in __inet_stream_connect 1048 * if the connection times out or gets rst 1049 */ 1050 if (tp->fastopen_req) { 1051 *copied = tp->fastopen_req->copied; 1052 tcp_free_fastopen_req(tp); 1053 inet_clear_bit(DEFER_CONNECT, sk); 1054 } 1055 return err; 1056 } 1057 1058 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) 1059 { 1060 struct net_devmem_dmabuf_binding *binding = NULL; 1061 struct tcp_sock *tp = tcp_sk(sk); 1062 struct ubuf_info *uarg = NULL; 1063 struct sk_buff *skb; 1064 struct sockcm_cookie sockc; 1065 int flags, err, copied = 0; 1066 int mss_now = 0, size_goal, copied_syn = 0; 1067 int process_backlog = 0; 1068 int sockc_err = 0; 1069 int zc = 0; 1070 long timeo; 1071 1072 flags = msg->msg_flags; 1073 1074 sockc = (struct sockcm_cookie){ .tsflags = READ_ONCE(sk->sk_tsflags) }; 1075 if (msg->msg_controllen) { 1076 sockc_err = sock_cmsg_send(sk, msg, &sockc); 1077 /* Don't return error until MSG_FASTOPEN has been processed; 1078 * that may succeed even if the cmsg is invalid. 1079 */ 1080 } 1081 1082 if ((flags & MSG_ZEROCOPY) && size) { 1083 if (msg->msg_ubuf) { 1084 uarg = msg->msg_ubuf; 1085 if (sk->sk_route_caps & NETIF_F_SG) 1086 zc = MSG_ZEROCOPY; 1087 } else if (sock_flag(sk, SOCK_ZEROCOPY)) { 1088 skb = tcp_write_queue_tail(sk); 1089 uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb), 1090 !sockc_err && sockc.dmabuf_id); 1091 if (!uarg) { 1092 err = -ENOBUFS; 1093 goto out_err; 1094 } 1095 if (sk->sk_route_caps & NETIF_F_SG) 1096 zc = MSG_ZEROCOPY; 1097 else 1098 uarg_to_msgzc(uarg)->zerocopy = 0; 1099 1100 if (!sockc_err && sockc.dmabuf_id) { 1101 binding = net_devmem_get_binding(sk, sockc.dmabuf_id); 1102 if (IS_ERR(binding)) { 1103 err = PTR_ERR(binding); 1104 binding = NULL; 1105 goto out_err; 1106 } 1107 } 1108 } 1109 } else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) { 1110 if (sk->sk_route_caps & NETIF_F_SG) 1111 zc = MSG_SPLICE_PAGES; 1112 } 1113 1114 if (!sockc_err && sockc.dmabuf_id && 1115 (!(flags & MSG_ZEROCOPY) || !sock_flag(sk, SOCK_ZEROCOPY))) { 1116 err = -EINVAL; 1117 goto out_err; 1118 } 1119 1120 if (unlikely(flags & MSG_FASTOPEN || 1121 inet_test_bit(DEFER_CONNECT, sk)) && 1122 !tp->repair) { 1123 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg); 1124 if (err == -EINPROGRESS && copied_syn > 0) 1125 goto out; 1126 else if (err) 1127 goto out_err; 1128 } 1129 1130 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 1131 1132 tcp_rate_check_app_limited(sk); /* is sending application-limited? */ 1133 1134 /* Wait for a connection to finish. One exception is TCP Fast Open 1135 * (passive side) where data is allowed to be sent before a connection 1136 * is fully established. 1137 */ 1138 if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 1139 !tcp_passive_fastopen(sk)) { 1140 err = sk_stream_wait_connect(sk, &timeo); 1141 if (err != 0) 1142 goto do_error; 1143 } 1144 1145 if (unlikely(tp->repair)) { 1146 if (tp->repair_queue == TCP_RECV_QUEUE) { 1147 copied = tcp_send_rcvq(sk, msg, size); 1148 goto out_nopush; 1149 } 1150 1151 err = -EINVAL; 1152 if (tp->repair_queue == TCP_NO_QUEUE) 1153 goto out_err; 1154 1155 /* 'common' sending to sendq */ 1156 } 1157 1158 if (sockc_err) { 1159 err = sockc_err; 1160 goto out_err; 1161 } 1162 1163 /* This should be in poll */ 1164 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1165 1166 /* Ok commence sending. 
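 *
 * For reference, the userspace side of the MSG_ZEROCOPY path set up above
 * looks roughly like this (illustrative sketch only, assuming fd is a
 * connected TCP socket):
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
 *	send(fd, buf, len, MSG_ZEROCOPY);
 *
 * The completion notification is read later from the socket error queue
 * with recvmsg(fd, &msg, MSG_ERRQUEUE).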
*/ 1167 copied = 0; 1168 1169 restart: 1170 mss_now = tcp_send_mss(sk, &size_goal, flags); 1171 1172 err = -EPIPE; 1173 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 1174 goto do_error; 1175 1176 while (msg_data_left(msg)) { 1177 int copy = 0; 1178 1179 skb = tcp_write_queue_tail(sk); 1180 if (skb) 1181 copy = size_goal - skb->len; 1182 1183 trace_tcp_sendmsg_locked(sk, msg, skb, size_goal); 1184 1185 if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { 1186 bool first_skb; 1187 1188 new_segment: 1189 if (!sk_stream_memory_free(sk)) 1190 goto wait_for_space; 1191 1192 if (unlikely(process_backlog >= 16)) { 1193 process_backlog = 0; 1194 if (sk_flush_backlog(sk)) 1195 goto restart; 1196 } 1197 first_skb = tcp_rtx_and_write_queues_empty(sk); 1198 skb = tcp_stream_alloc_skb(sk, sk->sk_allocation, 1199 first_skb); 1200 if (!skb) 1201 goto wait_for_space; 1202 1203 process_backlog++; 1204 1205 #ifdef CONFIG_SKB_DECRYPTED 1206 skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); 1207 #endif 1208 tcp_skb_entail(sk, skb); 1209 copy = size_goal; 1210 1211 /* All packets are restored as if they have 1212 * already been sent. skb_mstamp_ns isn't set to 1213 * avoid wrong rtt estimation. 1214 */ 1215 if (tp->repair) 1216 TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; 1217 } 1218 1219 /* Try to append data to the end of skb. */ 1220 if (copy > msg_data_left(msg)) 1221 copy = msg_data_left(msg); 1222 1223 if (zc == 0) { 1224 bool merge = true; 1225 int i = skb_shinfo(skb)->nr_frags; 1226 struct page_frag *pfrag = sk_page_frag(sk); 1227 1228 if (!sk_page_frag_refill(sk, pfrag)) 1229 goto wait_for_space; 1230 1231 if (!skb_can_coalesce(skb, i, pfrag->page, 1232 pfrag->offset)) { 1233 if (i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) { 1234 tcp_mark_push(tp, skb); 1235 goto new_segment; 1236 } 1237 merge = false; 1238 } 1239 1240 copy = min_t(int, copy, pfrag->size - pfrag->offset); 1241 1242 if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) { 1243 if (tcp_downgrade_zcopy_pure(sk, skb)) 1244 goto wait_for_space; 1245 skb_zcopy_downgrade_managed(skb); 1246 } 1247 1248 copy = tcp_wmem_schedule(sk, copy); 1249 if (!copy) 1250 goto wait_for_space; 1251 1252 err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, 1253 pfrag->page, 1254 pfrag->offset, 1255 copy); 1256 if (err) 1257 goto do_error; 1258 1259 /* Update the skb. */ 1260 if (merge) { 1261 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1262 } else { 1263 skb_fill_page_desc(skb, i, pfrag->page, 1264 pfrag->offset, copy); 1265 page_ref_inc(pfrag->page); 1266 } 1267 pfrag->offset += copy; 1268 } else if (zc == MSG_ZEROCOPY) { 1269 /* First append to a fragless skb builds initial 1270 * pure zerocopy skb 1271 */ 1272 if (!skb->len) 1273 skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY; 1274 1275 if (!skb_zcopy_pure(skb)) { 1276 copy = tcp_wmem_schedule(sk, copy); 1277 if (!copy) 1278 goto wait_for_space; 1279 } 1280 1281 err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg, 1282 binding); 1283 if (err == -EMSGSIZE || err == -EEXIST) { 1284 tcp_mark_push(tp, skb); 1285 goto new_segment; 1286 } 1287 if (err < 0) 1288 goto do_error; 1289 copy = err; 1290 } else if (zc == MSG_SPLICE_PAGES) { 1291 /* Splice in data if we can; copy if we can't. 
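 * MSG_SPLICE_PAGES is an in-kernel flag used by the splice() and
 * sendfile() paths: skb_splice_from_iter() attaches the iterator's pages
 * to the skb as frags where it can and falls back to copying the data
 * otherwise.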
*/ 1292 if (tcp_downgrade_zcopy_pure(sk, skb)) 1293 goto wait_for_space; 1294 copy = tcp_wmem_schedule(sk, copy); 1295 if (!copy) 1296 goto wait_for_space; 1297 1298 err = skb_splice_from_iter(skb, &msg->msg_iter, copy); 1299 if (err < 0) { 1300 if (err == -EMSGSIZE) { 1301 tcp_mark_push(tp, skb); 1302 goto new_segment; 1303 } 1304 goto do_error; 1305 } 1306 copy = err; 1307 1308 if (!(flags & MSG_NO_SHARED_FRAGS)) 1309 skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; 1310 1311 sk_wmem_queued_add(sk, copy); 1312 sk_mem_charge(sk, copy); 1313 } 1314 1315 if (!copied) 1316 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 1317 1318 WRITE_ONCE(tp->write_seq, tp->write_seq + copy); 1319 TCP_SKB_CB(skb)->end_seq += copy; 1320 tcp_skb_pcount_set(skb, 0); 1321 1322 copied += copy; 1323 if (!msg_data_left(msg)) { 1324 if (unlikely(flags & MSG_EOR)) 1325 TCP_SKB_CB(skb)->eor = 1; 1326 goto out; 1327 } 1328 1329 if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) 1330 continue; 1331 1332 if (forced_push(tp)) { 1333 tcp_mark_push(tp, skb); 1334 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1335 } else if (skb == tcp_send_head(sk)) 1336 tcp_push_one(sk, mss_now); 1337 continue; 1338 1339 wait_for_space: 1340 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1341 tcp_remove_empty_skb(sk); 1342 if (copied) 1343 tcp_push(sk, flags & ~MSG_MORE, mss_now, 1344 TCP_NAGLE_PUSH, size_goal); 1345 1346 err = sk_stream_wait_memory(sk, &timeo); 1347 if (err != 0) 1348 goto do_error; 1349 1350 mss_now = tcp_send_mss(sk, &size_goal, flags); 1351 } 1352 1353 out: 1354 if (copied) { 1355 tcp_tx_timestamp(sk, &sockc); 1356 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1357 } 1358 out_nopush: 1359 /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1360 if (uarg && !msg->msg_ubuf) 1361 net_zcopy_put(uarg); 1362 if (binding) 1363 net_devmem_dmabuf_binding_put(binding); 1364 return copied + copied_syn; 1365 1366 do_error: 1367 tcp_remove_empty_skb(sk); 1368 1369 if (copied + copied_syn) 1370 goto out; 1371 out_err: 1372 /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1373 if (uarg && !msg->msg_ubuf) 1374 net_zcopy_put_abort(uarg, true); 1375 err = sk_stream_error(sk, flags, err); 1376 /* make sure we wake any epoll edge trigger waiter */ 1377 if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { 1378 sk->sk_write_space(sk); 1379 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1380 } 1381 if (binding) 1382 net_devmem_dmabuf_binding_put(binding); 1383 1384 return err; 1385 } 1386 EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); 1387 1388 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1389 { 1390 int ret; 1391 1392 lock_sock(sk); 1393 ret = tcp_sendmsg_locked(sk, msg, size); 1394 release_sock(sk); 1395 1396 return ret; 1397 } 1398 EXPORT_SYMBOL(tcp_sendmsg); 1399 1400 void tcp_splice_eof(struct socket *sock) 1401 { 1402 struct sock *sk = sock->sk; 1403 struct tcp_sock *tp = tcp_sk(sk); 1404 int mss_now, size_goal; 1405 1406 if (!tcp_write_queue_tail(sk)) 1407 return; 1408 1409 lock_sock(sk); 1410 mss_now = tcp_send_mss(sk, &size_goal, 0); 1411 tcp_push(sk, 0, mss_now, tp->nonagle, size_goal); 1412 release_sock(sk); 1413 } 1414 EXPORT_IPV6_MOD_GPL(tcp_splice_eof); 1415 1416 /* 1417 * Handle reading urgent data. 
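 * A minimal (illustrative) userspace view of this path, assuming the peer
 * sent one urgent byte with send(fd, "!", 1, MSG_OOB) and SO_OOBINLINE is
 * not set on the receiver:
 *
 *	char c;
 *
 *	if (recv(fd, &c, 1, MSG_OOB) < 0)
 *		perror("recv MSG_OOB");
 *
 * The call may fail with EAGAIN or EINVAL, but it never blocks.
 *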
BSD has very simple semantics for 1418 * this, no blocking and very strange errors 8) 1419 */ 1420 1421 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 1422 { 1423 struct tcp_sock *tp = tcp_sk(sk); 1424 1425 /* No URG data to read. */ 1426 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 1427 tp->urg_data == TCP_URG_READ) 1428 return -EINVAL; /* Yes this is right ! */ 1429 1430 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 1431 return -ENOTCONN; 1432 1433 if (tp->urg_data & TCP_URG_VALID) { 1434 int err = 0; 1435 char c = tp->urg_data; 1436 1437 if (!(flags & MSG_PEEK)) 1438 WRITE_ONCE(tp->urg_data, TCP_URG_READ); 1439 1440 /* Read urgent data. */ 1441 msg->msg_flags |= MSG_OOB; 1442 1443 if (len > 0) { 1444 if (!(flags & MSG_TRUNC)) 1445 err = memcpy_to_msg(msg, &c, 1); 1446 len = 1; 1447 } else 1448 msg->msg_flags |= MSG_TRUNC; 1449 1450 return err ? -EFAULT : len; 1451 } 1452 1453 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 1454 return 0; 1455 1456 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and 1457 * the available implementations agree in this case: 1458 * this call should never block, independent of the 1459 * blocking state of the socket. 1460 * Mike <pall@rz.uni-karlsruhe.de> 1461 */ 1462 return -EAGAIN; 1463 } 1464 1465 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1466 { 1467 struct sk_buff *skb; 1468 int copied = 0, err = 0; 1469 1470 skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 1471 err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1472 if (err) 1473 return err; 1474 copied += skb->len; 1475 } 1476 1477 skb_queue_walk(&sk->sk_write_queue, skb) { 1478 err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1479 if (err) 1480 break; 1481 1482 copied += skb->len; 1483 } 1484 1485 return err ?: copied; 1486 } 1487 1488 /* Clean up the receive buffer for full frames taken by the user, 1489 * then send an ACK if necessary. COPIED is the number of bytes 1490 * tcp_recvmsg has given to the user so far, it speeds up the 1491 * calculation of whether or not we must ACK for the sake of 1492 * a window update. 1493 */ 1494 void __tcp_cleanup_rbuf(struct sock *sk, int copied) 1495 { 1496 struct tcp_sock *tp = tcp_sk(sk); 1497 bool time_to_ack = false; 1498 1499 if (inet_csk_ack_scheduled(sk)) { 1500 const struct inet_connection_sock *icsk = inet_csk(sk); 1501 1502 if (/* Once-per-two-segments ACK was not sent by tcp_input.c */ 1503 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 1504 /* 1505 * If this read emptied read buffer, we send ACK, if 1506 * connection is not bidirectional, user drained 1507 * receive buffer and there was a small segment 1508 * in queue. 1509 */ 1510 (copied > 0 && 1511 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 1512 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 1513 !inet_csk_in_pingpong_mode(sk))) && 1514 !atomic_read(&sk->sk_rmem_alloc))) 1515 time_to_ack = true; 1516 } 1517 1518 /* We send an ACK if we can now advertise a non-zero window 1519 * which has been raised "significantly". 1520 * 1521 * Even if window raised up to infinity, do not send window open ACK 1522 * in states, where we will not receive more. It is useless. 1523 */ 1524 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 1525 __u32 rcv_window_now = tcp_receive_window(tp); 1526 1527 /* Optimize, __tcp_select_window() is not cheap. 
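 * Only bother when the window we currently advertise has shrunk to half
 * of the clamp or less; even then, the ACK is sent only if the freshly
 * computed window is at least twice the one we are advertising now.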
*/ 1528 if (2*rcv_window_now <= tp->window_clamp) { 1529 __u32 new_window = __tcp_select_window(sk); 1530 1531 /* Send ACK now, if this read freed lots of space 1532 * in our buffer. Certainly, new_window is new window. 1533 * We can advertise it now, if it is not less than current one. 1534 * "Lots" means "at least twice" here. 1535 */ 1536 if (new_window && new_window >= 2 * rcv_window_now) 1537 time_to_ack = true; 1538 } 1539 } 1540 if (time_to_ack) 1541 tcp_send_ack(sk); 1542 } 1543 1544 void tcp_cleanup_rbuf(struct sock *sk, int copied) 1545 { 1546 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1547 struct tcp_sock *tp = tcp_sk(sk); 1548 1549 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1550 "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1551 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1552 __tcp_cleanup_rbuf(sk, copied); 1553 } 1554 1555 static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) 1556 { 1557 __skb_unlink(skb, &sk->sk_receive_queue); 1558 if (likely(skb->destructor == sock_rfree)) { 1559 sock_rfree(skb); 1560 skb->destructor = NULL; 1561 skb->sk = NULL; 1562 return skb_attempt_defer_free(skb); 1563 } 1564 __kfree_skb(skb); 1565 } 1566 1567 struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 1568 { 1569 struct sk_buff *skb; 1570 u32 offset; 1571 1572 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 1573 offset = seq - TCP_SKB_CB(skb)->seq; 1574 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 1575 pr_err_once("%s: found a SYN, please report !\n", __func__); 1576 offset--; 1577 } 1578 if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { 1579 *off = offset; 1580 return skb; 1581 } 1582 /* This looks weird, but this can happen if TCP collapsing 1583 * splitted a fat GRO packet, while we released socket lock 1584 * in skb_splice_bits() 1585 */ 1586 tcp_eat_recv_skb(sk, skb); 1587 } 1588 return NULL; 1589 } 1590 EXPORT_SYMBOL(tcp_recv_skb); 1591 1592 /* 1593 * This routine provides an alternative to tcp_recvmsg() for routines 1594 * that would like to handle copying from skbuffs directly in 'sendfile' 1595 * fashion. 1596 * Note: 1597 * - It is assumed that the socket was locked by the caller. 1598 * - The routine does not block. 1599 * - At present, there is no support for reading OOB data 1600 * or for 'peeking' the socket using this routine 1601 * (although both would be easy to implement). 1602 */ 1603 static int __tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 1604 sk_read_actor_t recv_actor, bool noack, 1605 u32 *copied_seq) 1606 { 1607 struct sk_buff *skb; 1608 struct tcp_sock *tp = tcp_sk(sk); 1609 u32 seq = *copied_seq; 1610 u32 offset; 1611 int copied = 0; 1612 1613 if (sk->sk_state == TCP_LISTEN) 1614 return -ENOTCONN; 1615 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1616 if (offset < skb->len) { 1617 int used; 1618 size_t len; 1619 1620 len = skb->len - offset; 1621 /* Stop reading if we hit a patch of urgent data */ 1622 if (unlikely(tp->urg_data)) { 1623 u32 urg_offset = tp->urg_seq - seq; 1624 if (urg_offset < len) 1625 len = urg_offset; 1626 if (!len) 1627 break; 1628 } 1629 used = recv_actor(desc, skb, offset, len); 1630 if (used <= 0) { 1631 if (!copied) 1632 copied = used; 1633 break; 1634 } 1635 if (WARN_ON_ONCE(used > len)) 1636 used = len; 1637 seq += used; 1638 copied += used; 1639 offset += used; 1640 1641 /* If recv_actor drops the lock (e.g. 
TCP splice 1642 * receive) the skb pointer might be invalid when 1643 * getting here: tcp_collapse might have deleted it 1644 * while aggregating skbs from the socket queue. 1645 */ 1646 skb = tcp_recv_skb(sk, seq - 1, &offset); 1647 if (!skb) 1648 break; 1649 /* TCP coalescing might have appended data to the skb. 1650 * Try to splice more frags 1651 */ 1652 if (offset + 1 != skb->len) 1653 continue; 1654 } 1655 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 1656 tcp_eat_recv_skb(sk, skb); 1657 ++seq; 1658 break; 1659 } 1660 tcp_eat_recv_skb(sk, skb); 1661 if (!desc->count) 1662 break; 1663 WRITE_ONCE(*copied_seq, seq); 1664 } 1665 WRITE_ONCE(*copied_seq, seq); 1666 1667 if (noack) 1668 goto out; 1669 1670 tcp_rcv_space_adjust(sk); 1671 1672 /* Clean up data we have read: This will do ACK frames. */ 1673 if (copied > 0) { 1674 tcp_recv_skb(sk, seq, &offset); 1675 tcp_cleanup_rbuf(sk, copied); 1676 } 1677 out: 1678 return copied; 1679 } 1680 1681 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 1682 sk_read_actor_t recv_actor) 1683 { 1684 return __tcp_read_sock(sk, desc, recv_actor, false, 1685 &tcp_sk(sk)->copied_seq); 1686 } 1687 EXPORT_SYMBOL(tcp_read_sock); 1688 1689 int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc, 1690 sk_read_actor_t recv_actor, bool noack, 1691 u32 *copied_seq) 1692 { 1693 return __tcp_read_sock(sk, desc, recv_actor, noack, copied_seq); 1694 } 1695 1696 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 1697 { 1698 struct sk_buff *skb; 1699 int copied = 0; 1700 1701 if (sk->sk_state == TCP_LISTEN) 1702 return -ENOTCONN; 1703 1704 while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 1705 u8 tcp_flags; 1706 int used; 1707 1708 __skb_unlink(skb, &sk->sk_receive_queue); 1709 WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); 1710 tcp_flags = TCP_SKB_CB(skb)->tcp_flags; 1711 used = recv_actor(sk, skb); 1712 if (used < 0) { 1713 if (!copied) 1714 copied = used; 1715 break; 1716 } 1717 copied += used; 1718 1719 if (tcp_flags & TCPHDR_FIN) 1720 break; 1721 } 1722 return copied; 1723 } 1724 EXPORT_IPV6_MOD(tcp_read_skb); 1725 1726 void tcp_read_done(struct sock *sk, size_t len) 1727 { 1728 struct tcp_sock *tp = tcp_sk(sk); 1729 u32 seq = tp->copied_seq; 1730 struct sk_buff *skb; 1731 size_t left; 1732 u32 offset; 1733 1734 if (sk->sk_state == TCP_LISTEN) 1735 return; 1736 1737 left = len; 1738 while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1739 int used; 1740 1741 used = min_t(size_t, skb->len - offset, left); 1742 seq += used; 1743 left -= used; 1744 1745 if (skb->len > offset + used) 1746 break; 1747 1748 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 1749 tcp_eat_recv_skb(sk, skb); 1750 ++seq; 1751 break; 1752 } 1753 tcp_eat_recv_skb(sk, skb); 1754 } 1755 WRITE_ONCE(tp->copied_seq, seq); 1756 1757 tcp_rcv_space_adjust(sk); 1758 1759 /* Clean up data we have read: This will do ACK frames. */ 1760 if (left != len) 1761 tcp_cleanup_rbuf(sk, len - left); 1762 } 1763 EXPORT_SYMBOL(tcp_read_done); 1764 1765 int tcp_peek_len(struct socket *sock) 1766 { 1767 return tcp_inq(sock->sk); 1768 } 1769 EXPORT_IPV6_MOD(tcp_peek_len); 1770 1771 /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ 1772 int tcp_set_rcvlowat(struct sock *sk, int val) 1773 { 1774 int space, cap; 1775 1776 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1777 cap = sk->sk_rcvbuf >> 1; 1778 else 1779 cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; 1780 val = min(val, cap); 1781 WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); 1782 1783 /* Check if we need to signal EPOLLIN right now */ 1784 tcp_data_ready(sk); 1785 1786 if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1787 return 0; 1788 1789 space = tcp_space_from_win(sk, val); 1790 if (space > sk->sk_rcvbuf) { 1791 WRITE_ONCE(sk->sk_rcvbuf, space); 1792 WRITE_ONCE(tcp_sk(sk)->window_clamp, val); 1793 } 1794 return 0; 1795 } 1796 EXPORT_IPV6_MOD(tcp_set_rcvlowat); 1797 1798 void tcp_update_recv_tstamps(struct sk_buff *skb, 1799 struct scm_timestamping_internal *tss) 1800 { 1801 if (skb->tstamp) 1802 tss->ts[0] = ktime_to_timespec64(skb->tstamp); 1803 else 1804 tss->ts[0] = (struct timespec64) {0}; 1805 1806 if (skb_hwtstamps(skb)->hwtstamp) 1807 tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp); 1808 else 1809 tss->ts[2] = (struct timespec64) {0}; 1810 } 1811 1812 #ifdef CONFIG_MMU 1813 static const struct vm_operations_struct tcp_vm_ops = { 1814 }; 1815 1816 int tcp_mmap(struct file *file, struct socket *sock, 1817 struct vm_area_struct *vma) 1818 { 1819 if (vma->vm_flags & (VM_WRITE | VM_EXEC)) 1820 return -EPERM; 1821 vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC); 1822 1823 /* Instruct vm_insert_page() to not mmap_read_lock(mm) */ 1824 vm_flags_set(vma, VM_MIXEDMAP); 1825 1826 vma->vm_ops = &tcp_vm_ops; 1827 return 0; 1828 } 1829 EXPORT_IPV6_MOD(tcp_mmap); 1830 1831 static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, 1832 u32 *offset_frag) 1833 { 1834 skb_frag_t *frag; 1835 1836 if (unlikely(offset_skb >= skb->len)) 1837 return NULL; 1838 1839 offset_skb -= skb_headlen(skb); 1840 if ((int)offset_skb < 0 || skb_has_frag_list(skb)) 1841 return NULL; 1842 1843 frag = skb_shinfo(skb)->frags; 1844 while (offset_skb) { 1845 if (skb_frag_size(frag) > offset_skb) { 1846 *offset_frag = offset_skb; 1847 return frag; 1848 } 1849 offset_skb -= skb_frag_size(frag); 1850 ++frag; 1851 } 1852 *offset_frag = 0; 1853 return frag; 1854 } 1855 1856 static bool can_map_frag(const skb_frag_t *frag) 1857 { 1858 struct page *page; 1859 1860 if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag)) 1861 return false; 1862 1863 page = skb_frag_page(frag); 1864 1865 if (PageCompound(page) || page->mapping) 1866 return false; 1867 1868 return true; 1869 } 1870 1871 static int find_next_mappable_frag(const skb_frag_t *frag, 1872 int remaining_in_skb) 1873 { 1874 int offset = 0; 1875 1876 if (likely(can_map_frag(frag))) 1877 return 0; 1878 1879 while (offset < remaining_in_skb && !can_map_frag(frag)) { 1880 offset += skb_frag_size(frag); 1881 ++frag; 1882 } 1883 return offset; 1884 } 1885 1886 static void tcp_zerocopy_set_hint_for_skb(struct sock *sk, 1887 struct tcp_zerocopy_receive *zc, 1888 struct sk_buff *skb, u32 offset) 1889 { 1890 u32 frag_offset, partial_frag_remainder = 0; 1891 int mappable_offset; 1892 skb_frag_t *frag; 1893 1894 /* worst case: skip to next skb. try to improve on this case below */ 1895 zc->recv_skip_hint = skb->len - offset; 1896 1897 /* Find the frag containing this offset (and how far into that frag) */ 1898 frag = skb_advance_to_frag(skb, offset, &frag_offset); 1899 if (!frag) 1900 return; 1901 1902 if (frag_offset) { 1903 struct skb_shared_info *info = skb_shinfo(skb); 1904 1905 /* We read part of the last frag, must recvmsg() rest of skb. */ 1906 if (frag == &info->frags[info->nr_frags - 1]) 1907 return; 1908 1909 /* Else, we must at least read the remainder in this frag. 
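 * Those partial-frag bytes are included in recv_skip_hint below, so the
 * TCP_ZEROCOPY_RECEIVE caller knows how many bytes at the head of the
 * unread data cannot be mapped and must instead be copied (via
 * copybuf_address or a normal recv()).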
*/ 1910 partial_frag_remainder = skb_frag_size(frag) - frag_offset; 1911 zc->recv_skip_hint -= partial_frag_remainder; 1912 ++frag; 1913 } 1914 1915 /* partial_frag_remainder: If part way through a frag, must read rest. 1916 * mappable_offset: Bytes till next mappable frag, *not* counting bytes 1917 * in partial_frag_remainder. 1918 */ 1919 mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint); 1920 zc->recv_skip_hint = mappable_offset + partial_frag_remainder; 1921 } 1922 1923 static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 1924 int flags, struct scm_timestamping_internal *tss, 1925 int *cmsg_flags); 1926 static int receive_fallback_to_copy(struct sock *sk, 1927 struct tcp_zerocopy_receive *zc, int inq, 1928 struct scm_timestamping_internal *tss) 1929 { 1930 unsigned long copy_address = (unsigned long)zc->copybuf_address; 1931 struct msghdr msg = {}; 1932 int err; 1933 1934 zc->length = 0; 1935 zc->recv_skip_hint = 0; 1936 1937 if (copy_address != zc->copybuf_address) 1938 return -EINVAL; 1939 1940 err = import_ubuf(ITER_DEST, (void __user *)copy_address, inq, 1941 &msg.msg_iter); 1942 if (err) 1943 return err; 1944 1945 err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT, 1946 tss, &zc->msg_flags); 1947 if (err < 0) 1948 return err; 1949 1950 zc->copybuf_len = err; 1951 if (likely(zc->copybuf_len)) { 1952 struct sk_buff *skb; 1953 u32 offset; 1954 1955 skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset); 1956 if (skb) 1957 tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset); 1958 } 1959 return 0; 1960 } 1961 1962 static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc, 1963 struct sk_buff *skb, u32 copylen, 1964 u32 *offset, u32 *seq) 1965 { 1966 unsigned long copy_address = (unsigned long)zc->copybuf_address; 1967 struct msghdr msg = {}; 1968 int err; 1969 1970 if (copy_address != zc->copybuf_address) 1971 return -EINVAL; 1972 1973 err = import_ubuf(ITER_DEST, (void __user *)copy_address, copylen, 1974 &msg.msg_iter); 1975 if (err) 1976 return err; 1977 err = skb_copy_datagram_msg(skb, *offset, &msg, copylen); 1978 if (err) 1979 return err; 1980 zc->recv_skip_hint -= copylen; 1981 *offset += copylen; 1982 *seq += copylen; 1983 return (__s32)copylen; 1984 } 1985 1986 static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc, 1987 struct sock *sk, 1988 struct sk_buff *skb, 1989 u32 *seq, 1990 s32 copybuf_len, 1991 struct scm_timestamping_internal *tss) 1992 { 1993 u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint); 1994 1995 if (!copylen) 1996 return 0; 1997 /* skb is null if inq < PAGE_SIZE. */ 1998 if (skb) { 1999 offset = *seq - TCP_SKB_CB(skb)->seq; 2000 } else { 2001 skb = tcp_recv_skb(sk, *seq, &offset); 2002 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2003 tcp_update_recv_tstamps(skb, tss); 2004 zc->msg_flags |= TCP_CMSG_TS; 2005 } 2006 } 2007 2008 zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset, 2009 seq); 2010 return zc->copybuf_len < 0 ? 0 : copylen; 2011 } 2012 2013 static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma, 2014 struct page **pending_pages, 2015 unsigned long pages_remaining, 2016 unsigned long *address, 2017 u32 *length, 2018 u32 *seq, 2019 struct tcp_zerocopy_receive *zc, 2020 u32 total_bytes_to_map, 2021 int err) 2022 { 2023 /* At least one page did not map. Try zapping if we skipped earlier. 
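 * When the caller set TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT we did not
 * zap the range up front, so vm_insert_pages() can return -EBUSY for PTEs
 * still populated by a previous receive; in that case zap the part of the
 * range that has not been successfully mapped and retry the insert once
 * below.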
*/ 2024 if (err == -EBUSY && 2025 zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { 2026 u32 maybe_zap_len; 2027 2028 maybe_zap_len = total_bytes_to_map - /* All bytes to map */ 2029 *length + /* Mapped or pending */ 2030 (pages_remaining * PAGE_SIZE); /* Failed map. */ 2031 zap_page_range_single(vma, *address, maybe_zap_len, NULL); 2032 err = 0; 2033 } 2034 2035 if (!err) { 2036 unsigned long leftover_pages = pages_remaining; 2037 int bytes_mapped; 2038 2039 /* We called zap_page_range_single, try to reinsert. */ 2040 err = vm_insert_pages(vma, *address, 2041 pending_pages, 2042 &pages_remaining); 2043 bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); 2044 *seq += bytes_mapped; 2045 *address += bytes_mapped; 2046 } 2047 if (err) { 2048 /* Either we were unable to zap, OR we zapped, retried an 2049 * insert, and still had an issue. Either ways, pages_remaining 2050 * is the number of pages we were unable to map, and we unroll 2051 * some state we speculatively touched before. 2052 */ 2053 const int bytes_not_mapped = PAGE_SIZE * pages_remaining; 2054 2055 *length -= bytes_not_mapped; 2056 zc->recv_skip_hint += bytes_not_mapped; 2057 } 2058 return err; 2059 } 2060 2061 static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, 2062 struct page **pages, 2063 unsigned int pages_to_map, 2064 unsigned long *address, 2065 u32 *length, 2066 u32 *seq, 2067 struct tcp_zerocopy_receive *zc, 2068 u32 total_bytes_to_map) 2069 { 2070 unsigned long pages_remaining = pages_to_map; 2071 unsigned int pages_mapped; 2072 unsigned int bytes_mapped; 2073 int err; 2074 2075 err = vm_insert_pages(vma, *address, pages, &pages_remaining); 2076 pages_mapped = pages_to_map - (unsigned int)pages_remaining; 2077 bytes_mapped = PAGE_SIZE * pages_mapped; 2078 /* Even if vm_insert_pages fails, it may have partially succeeded in 2079 * mapping (some but not all of the pages). 2080 */ 2081 *seq += bytes_mapped; 2082 *address += bytes_mapped; 2083 2084 if (likely(!err)) 2085 return 0; 2086 2087 /* Error: maybe zap and retry + rollback state for failed inserts. */ 2088 return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped, 2089 pages_remaining, address, length, seq, zc, total_bytes_to_map, 2090 err); 2091 } 2092 2093 #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS) 2094 static void tcp_zc_finalize_rx_tstamp(struct sock *sk, 2095 struct tcp_zerocopy_receive *zc, 2096 struct scm_timestamping_internal *tss) 2097 { 2098 unsigned long msg_control_addr; 2099 struct msghdr cmsg_dummy; 2100 2101 msg_control_addr = (unsigned long)zc->msg_control; 2102 cmsg_dummy.msg_control_user = (void __user *)msg_control_addr; 2103 cmsg_dummy.msg_controllen = 2104 (__kernel_size_t)zc->msg_controllen; 2105 cmsg_dummy.msg_flags = in_compat_syscall() 2106 ? 
MSG_CMSG_COMPAT : 0; 2107 cmsg_dummy.msg_control_is_user = true; 2108 zc->msg_flags = 0; 2109 if (zc->msg_control == msg_control_addr && 2110 zc->msg_controllen == cmsg_dummy.msg_controllen) { 2111 tcp_recv_timestamp(&cmsg_dummy, sk, tss); 2112 zc->msg_control = (__u64) 2113 ((uintptr_t)cmsg_dummy.msg_control_user); 2114 zc->msg_controllen = 2115 (__u64)cmsg_dummy.msg_controllen; 2116 zc->msg_flags = (__u32)cmsg_dummy.msg_flags; 2117 } 2118 } 2119 2120 static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm, 2121 unsigned long address, 2122 bool *mmap_locked) 2123 { 2124 struct vm_area_struct *vma = lock_vma_under_rcu(mm, address); 2125 2126 if (vma) { 2127 if (vma->vm_ops != &tcp_vm_ops) { 2128 vma_end_read(vma); 2129 return NULL; 2130 } 2131 *mmap_locked = false; 2132 return vma; 2133 } 2134 2135 mmap_read_lock(mm); 2136 vma = vma_lookup(mm, address); 2137 if (!vma || vma->vm_ops != &tcp_vm_ops) { 2138 mmap_read_unlock(mm); 2139 return NULL; 2140 } 2141 *mmap_locked = true; 2142 return vma; 2143 } 2144 2145 #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 2146 static int tcp_zerocopy_receive(struct sock *sk, 2147 struct tcp_zerocopy_receive *zc, 2148 struct scm_timestamping_internal *tss) 2149 { 2150 u32 length = 0, offset, vma_len, avail_len, copylen = 0; 2151 unsigned long address = (unsigned long)zc->address; 2152 struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE]; 2153 s32 copybuf_len = zc->copybuf_len; 2154 struct tcp_sock *tp = tcp_sk(sk); 2155 const skb_frag_t *frags = NULL; 2156 unsigned int pages_to_map = 0; 2157 struct vm_area_struct *vma; 2158 struct sk_buff *skb = NULL; 2159 u32 seq = tp->copied_seq; 2160 u32 total_bytes_to_map; 2161 int inq = tcp_inq(sk); 2162 bool mmap_locked; 2163 int ret; 2164 2165 zc->copybuf_len = 0; 2166 zc->msg_flags = 0; 2167 2168 if (address & (PAGE_SIZE - 1) || address != zc->address) 2169 return -EINVAL; 2170 2171 if (sk->sk_state == TCP_LISTEN) 2172 return -ENOTCONN; 2173 2174 sock_rps_record_flow(sk); 2175 2176 if (inq && inq <= copybuf_len) 2177 return receive_fallback_to_copy(sk, zc, inq, tss); 2178 2179 if (inq < PAGE_SIZE) { 2180 zc->length = 0; 2181 zc->recv_skip_hint = inq; 2182 if (!inq && sock_flag(sk, SOCK_DONE)) 2183 return -EIO; 2184 return 0; 2185 } 2186 2187 vma = find_tcp_vma(current->mm, address, &mmap_locked); 2188 if (!vma) 2189 return -EINVAL; 2190 2191 vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); 2192 avail_len = min_t(u32, vma_len, inq); 2193 total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); 2194 if (total_bytes_to_map) { 2195 if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) 2196 zap_page_range_single(vma, address, total_bytes_to_map, 2197 NULL); 2198 zc->length = total_bytes_to_map; 2199 zc->recv_skip_hint = 0; 2200 } else { 2201 zc->length = avail_len; 2202 zc->recv_skip_hint = avail_len; 2203 } 2204 ret = 0; 2205 while (length + PAGE_SIZE <= zc->length) { 2206 int mappable_offset; 2207 struct page *page; 2208 2209 if (zc->recv_skip_hint < PAGE_SIZE) { 2210 u32 offset_frag; 2211 2212 if (skb) { 2213 if (zc->recv_skip_hint > 0) 2214 break; 2215 skb = skb->next; 2216 offset = seq - TCP_SKB_CB(skb)->seq; 2217 } else { 2218 skb = tcp_recv_skb(sk, seq, &offset); 2219 } 2220 2221 if (!skb_frags_readable(skb)) 2222 break; 2223 2224 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2225 tcp_update_recv_tstamps(skb, tss); 2226 zc->msg_flags |= TCP_CMSG_TS; 2227 } 2228 zc->recv_skip_hint = skb->len - offset; 2229 frags = skb_advance_to_frag(skb, offset, &offset_frag); 2230 if (!frags || offset_frag) 2231 break; 
2232 } 2233 2234 mappable_offset = find_next_mappable_frag(frags, 2235 zc->recv_skip_hint); 2236 if (mappable_offset) { 2237 zc->recv_skip_hint = mappable_offset; 2238 break; 2239 } 2240 page = skb_frag_page(frags); 2241 if (WARN_ON_ONCE(!page)) 2242 break; 2243 2244 prefetchw(page); 2245 pages[pages_to_map++] = page; 2246 length += PAGE_SIZE; 2247 zc->recv_skip_hint -= PAGE_SIZE; 2248 frags++; 2249 if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE || 2250 zc->recv_skip_hint < PAGE_SIZE) { 2251 /* Either full batch, or we're about to go to next skb 2252 * (and we cannot unroll failed ops across skbs). 2253 */ 2254 ret = tcp_zerocopy_vm_insert_batch(vma, pages, 2255 pages_to_map, 2256 &address, &length, 2257 &seq, zc, 2258 total_bytes_to_map); 2259 if (ret) 2260 goto out; 2261 pages_to_map = 0; 2262 } 2263 } 2264 if (pages_to_map) { 2265 ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map, 2266 &address, &length, &seq, 2267 zc, total_bytes_to_map); 2268 } 2269 out: 2270 if (mmap_locked) 2271 mmap_read_unlock(current->mm); 2272 else 2273 vma_end_read(vma); 2274 /* Try to copy straggler data. */ 2275 if (!ret) 2276 copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); 2277 2278 if (length + copylen) { 2279 WRITE_ONCE(tp->copied_seq, seq); 2280 tcp_rcv_space_adjust(sk); 2281 2282 /* Clean up data we have read: This will do ACK frames. */ 2283 tcp_recv_skb(sk, seq, &offset); 2284 tcp_cleanup_rbuf(sk, length + copylen); 2285 ret = 0; 2286 if (length == zc->length) 2287 zc->recv_skip_hint = 0; 2288 } else { 2289 if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) 2290 ret = -EIO; 2291 } 2292 zc->length = length; 2293 return ret; 2294 } 2295 #endif 2296 2297 /* Similar to __sock_recv_timestamp, but does not require an skb */ 2298 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, 2299 struct scm_timestamping_internal *tss) 2300 { 2301 int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); 2302 u32 tsflags = READ_ONCE(sk->sk_tsflags); 2303 bool has_timestamping = false; 2304 2305 if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { 2306 if (sock_flag(sk, SOCK_RCVTSTAMP)) { 2307 if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { 2308 if (new_tstamp) { 2309 struct __kernel_timespec kts = { 2310 .tv_sec = tss->ts[0].tv_sec, 2311 .tv_nsec = tss->ts[0].tv_nsec, 2312 }; 2313 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW, 2314 sizeof(kts), &kts); 2315 } else { 2316 struct __kernel_old_timespec ts_old = { 2317 .tv_sec = tss->ts[0].tv_sec, 2318 .tv_nsec = tss->ts[0].tv_nsec, 2319 }; 2320 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD, 2321 sizeof(ts_old), &ts_old); 2322 } 2323 } else { 2324 if (new_tstamp) { 2325 struct __kernel_sock_timeval stv = { 2326 .tv_sec = tss->ts[0].tv_sec, 2327 .tv_usec = tss->ts[0].tv_nsec / 1000, 2328 }; 2329 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, 2330 sizeof(stv), &stv); 2331 } else { 2332 struct __kernel_old_timeval tv = { 2333 .tv_sec = tss->ts[0].tv_sec, 2334 .tv_usec = tss->ts[0].tv_nsec / 1000, 2335 }; 2336 put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, 2337 sizeof(tv), &tv); 2338 } 2339 } 2340 } 2341 2342 if (tsflags & SOF_TIMESTAMPING_SOFTWARE && 2343 (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE || 2344 !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER))) 2345 has_timestamping = true; 2346 else 2347 tss->ts[0] = (struct timespec64) {0}; 2348 } 2349 2350 if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { 2351 if (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE && 2352 (tsflags & SOF_TIMESTAMPING_RX_HARDWARE || 2353 !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER))) 2354 
has_timestamping = true; 2355 else 2356 tss->ts[2] = (struct timespec64) {0}; 2357 } 2358 2359 if (has_timestamping) { 2360 tss->ts[1] = (struct timespec64) {0}; 2361 if (sock_flag(sk, SOCK_TSTAMP_NEW)) 2362 put_cmsg_scm_timestamping64(msg, tss); 2363 else 2364 put_cmsg_scm_timestamping(msg, tss); 2365 } 2366 } 2367 2368 static int tcp_inq_hint(struct sock *sk) 2369 { 2370 const struct tcp_sock *tp = tcp_sk(sk); 2371 u32 copied_seq = READ_ONCE(tp->copied_seq); 2372 u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); 2373 int inq; 2374 2375 inq = rcv_nxt - copied_seq; 2376 if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { 2377 lock_sock(sk); 2378 inq = tp->rcv_nxt - tp->copied_seq; 2379 release_sock(sk); 2380 } 2381 /* After receiving a FIN, tell the user-space to continue reading 2382 * by returning a non-zero inq. 2383 */ 2384 if (inq == 0 && sock_flag(sk, SOCK_DONE)) 2385 inq = 1; 2386 return inq; 2387 } 2388 2389 /* batch __xa_alloc() calls and reduce xa_lock()/xa_unlock() overhead. */ 2390 struct tcp_xa_pool { 2391 u8 max; /* max <= MAX_SKB_FRAGS */ 2392 u8 idx; /* idx <= max */ 2393 __u32 tokens[MAX_SKB_FRAGS]; 2394 netmem_ref netmems[MAX_SKB_FRAGS]; 2395 }; 2396 2397 static void tcp_xa_pool_commit_locked(struct sock *sk, struct tcp_xa_pool *p) 2398 { 2399 int i; 2400 2401 /* Commit part that has been copied to user space. */ 2402 for (i = 0; i < p->idx; i++) 2403 __xa_cmpxchg(&sk->sk_user_frags, p->tokens[i], XA_ZERO_ENTRY, 2404 (__force void *)p->netmems[i], GFP_KERNEL); 2405 /* Rollback what has been pre-allocated and is no longer needed. */ 2406 for (; i < p->max; i++) 2407 __xa_erase(&sk->sk_user_frags, p->tokens[i]); 2408 2409 p->max = 0; 2410 p->idx = 0; 2411 } 2412 2413 static void tcp_xa_pool_commit(struct sock *sk, struct tcp_xa_pool *p) 2414 { 2415 if (!p->max) 2416 return; 2417 2418 xa_lock_bh(&sk->sk_user_frags); 2419 2420 tcp_xa_pool_commit_locked(sk, p); 2421 2422 xa_unlock_bh(&sk->sk_user_frags); 2423 } 2424 2425 static int tcp_xa_pool_refill(struct sock *sk, struct tcp_xa_pool *p, 2426 unsigned int max_frags) 2427 { 2428 int err, k; 2429 2430 if (p->idx < p->max) 2431 return 0; 2432 2433 xa_lock_bh(&sk->sk_user_frags); 2434 2435 tcp_xa_pool_commit_locked(sk, p); 2436 2437 for (k = 0; k < max_frags; k++) { 2438 err = __xa_alloc(&sk->sk_user_frags, &p->tokens[k], 2439 XA_ZERO_ENTRY, xa_limit_31b, GFP_KERNEL); 2440 if (err) 2441 break; 2442 } 2443 2444 xa_unlock_bh(&sk->sk_user_frags); 2445 2446 p->max = k; 2447 p->idx = 0; 2448 return k ? 0 : err; 2449 } 2450 2451 /* On error, returns the -errno. On success, returns number of bytes sent to the 2452 * user. May not consume all of @remaining_len. 2453 */ 2454 static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb, 2455 unsigned int offset, struct msghdr *msg, 2456 int remaining_len) 2457 { 2458 struct dmabuf_cmsg dmabuf_cmsg = { 0 }; 2459 struct tcp_xa_pool tcp_xa_pool; 2460 unsigned int start; 2461 int i, copy, n; 2462 int sent = 0; 2463 int err = 0; 2464 2465 tcp_xa_pool.max = 0; 2466 tcp_xa_pool.idx = 0; 2467 do { 2468 start = skb_headlen(skb); 2469 2470 if (skb_frags_readable(skb)) { 2471 err = -ENODEV; 2472 goto out; 2473 } 2474 2475 /* Copy header. 
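 * Only the skb's linear bytes are copied into the user buffer; they are
 * announced with an SO_DEVMEM_LINEAR cmsg. The devmem frags that follow
 * are not copied at all, they are only described to the user via
 * SO_DEVMEM_DMABUF cmsgs further down.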
*/ 2476 copy = start - offset; 2477 if (copy > 0) { 2478 copy = min(copy, remaining_len); 2479 2480 n = copy_to_iter(skb->data + offset, copy, 2481 &msg->msg_iter); 2482 if (n != copy) { 2483 err = -EFAULT; 2484 goto out; 2485 } 2486 2487 offset += copy; 2488 remaining_len -= copy; 2489 2490 /* First a dmabuf_cmsg for # bytes copied to user 2491 * buffer. 2492 */ 2493 memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg)); 2494 dmabuf_cmsg.frag_size = copy; 2495 err = put_cmsg_notrunc(msg, SOL_SOCKET, 2496 SO_DEVMEM_LINEAR, 2497 sizeof(dmabuf_cmsg), 2498 &dmabuf_cmsg); 2499 if (err) 2500 goto out; 2501 2502 sent += copy; 2503 2504 if (remaining_len == 0) 2505 goto out; 2506 } 2507 2508 /* after that, send information of dmabuf pages through a 2509 * sequence of cmsg 2510 */ 2511 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2512 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2513 struct net_iov *niov; 2514 u64 frag_offset; 2515 int end; 2516 2517 /* !skb_frags_readable() should indicate that ALL the 2518 * frags in this skb are dmabuf net_iovs. We're checking 2519 * for that flag above, but also check individual frags 2520 * here. If the tcp stack is not setting 2521 * skb_frags_readable() correctly, we still don't want 2522 * to crash here. 2523 */ 2524 if (!skb_frag_net_iov(frag)) { 2525 net_err_ratelimited("Found non-dmabuf skb with net_iov"); 2526 err = -ENODEV; 2527 goto out; 2528 } 2529 2530 niov = skb_frag_net_iov(frag); 2531 if (!net_is_devmem_iov(niov)) { 2532 err = -ENODEV; 2533 goto out; 2534 } 2535 2536 end = start + skb_frag_size(frag); 2537 copy = end - offset; 2538 2539 if (copy > 0) { 2540 copy = min(copy, remaining_len); 2541 2542 frag_offset = net_iov_virtual_addr(niov) + 2543 skb_frag_off(frag) + offset - 2544 start; 2545 dmabuf_cmsg.frag_offset = frag_offset; 2546 dmabuf_cmsg.frag_size = copy; 2547 err = tcp_xa_pool_refill(sk, &tcp_xa_pool, 2548 skb_shinfo(skb)->nr_frags - i); 2549 if (err) 2550 goto out; 2551 2552 /* Will perform the exchange later */ 2553 dmabuf_cmsg.frag_token = tcp_xa_pool.tokens[tcp_xa_pool.idx]; 2554 dmabuf_cmsg.dmabuf_id = net_devmem_iov_binding_id(niov); 2555 2556 offset += copy; 2557 remaining_len -= copy; 2558 2559 err = put_cmsg_notrunc(msg, SOL_SOCKET, 2560 SO_DEVMEM_DMABUF, 2561 sizeof(dmabuf_cmsg), 2562 &dmabuf_cmsg); 2563 if (err) 2564 goto out; 2565 2566 atomic_long_inc(&niov->pp_ref_count); 2567 tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag); 2568 2569 sent += copy; 2570 2571 if (remaining_len == 0) 2572 goto out; 2573 } 2574 start = end; 2575 } 2576 2577 tcp_xa_pool_commit(sk, &tcp_xa_pool); 2578 if (!remaining_len) 2579 goto out; 2580 2581 /* if remaining_len is not satisfied yet, we need to go to the 2582 * next frag in the frag_list to satisfy remaining_len. 2583 */ 2584 skb = skb_shinfo(skb)->frag_list ?: skb->next; 2585 2586 offset = offset - start; 2587 } while (skb); 2588 2589 if (remaining_len) { 2590 err = -EFAULT; 2591 goto out; 2592 } 2593 2594 out: 2595 tcp_xa_pool_commit(sk, &tcp_xa_pool); 2596 if (!sent) 2597 sent = err; 2598 2599 return sent; 2600 } 2601 2602 /* 2603 * This routine copies from a sock struct into the user buffer. 2604 * 2605 * Technical note: in 2.3 we work on _locked_ socket, so that 2606 * tricks with *seq access order and skb->users are not required. 2607 * Probably, code can be easily improved even more. 
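 *
 * *seq below points at tp->copied_seq, or at a local peek_seq copy
 * (tp->copied_seq advanced by sk_peek_offset()) when MSG_PEEK is used.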
2608 */ 2609 2610 static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 2611 int flags, struct scm_timestamping_internal *tss, 2612 int *cmsg_flags) 2613 { 2614 struct tcp_sock *tp = tcp_sk(sk); 2615 int last_copied_dmabuf = -1; /* uninitialized */ 2616 int copied = 0; 2617 u32 peek_seq; 2618 u32 *seq; 2619 unsigned long used; 2620 int err; 2621 int target; /* Read at least this many bytes */ 2622 long timeo; 2623 struct sk_buff *skb, *last; 2624 u32 peek_offset = 0; 2625 u32 urg_hole = 0; 2626 2627 err = -ENOTCONN; 2628 if (sk->sk_state == TCP_LISTEN) 2629 goto out; 2630 2631 if (tp->recvmsg_inq) { 2632 *cmsg_flags = TCP_CMSG_INQ; 2633 msg->msg_get_inq = 1; 2634 } 2635 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 2636 2637 /* Urgent data needs to be handled specially. */ 2638 if (flags & MSG_OOB) 2639 goto recv_urg; 2640 2641 if (unlikely(tp->repair)) { 2642 err = -EPERM; 2643 if (!(flags & MSG_PEEK)) 2644 goto out; 2645 2646 if (tp->repair_queue == TCP_SEND_QUEUE) 2647 goto recv_sndq; 2648 2649 err = -EINVAL; 2650 if (tp->repair_queue == TCP_NO_QUEUE) 2651 goto out; 2652 2653 /* 'common' recv queue MSG_PEEK-ing */ 2654 } 2655 2656 seq = &tp->copied_seq; 2657 if (flags & MSG_PEEK) { 2658 peek_offset = max(sk_peek_offset(sk, flags), 0); 2659 peek_seq = tp->copied_seq + peek_offset; 2660 seq = &peek_seq; 2661 } 2662 2663 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 2664 2665 do { 2666 u32 offset; 2667 2668 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 2669 if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { 2670 if (copied) 2671 break; 2672 if (signal_pending(current)) { 2673 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 2674 break; 2675 } 2676 } 2677 2678 /* Next get a buffer. */ 2679 2680 last = skb_peek_tail(&sk->sk_receive_queue); 2681 skb_queue_walk(&sk->sk_receive_queue, skb) { 2682 last = skb; 2683 /* Now that we have two receive queues this 2684 * shouldn't happen. 2685 */ 2686 if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 2687 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", 2688 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 2689 flags)) 2690 break; 2691 2692 offset = *seq - TCP_SKB_CB(skb)->seq; 2693 if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 2694 pr_err_once("%s: found a SYN, please report !\n", __func__); 2695 offset--; 2696 } 2697 if (offset < skb->len) 2698 goto found_ok_skb; 2699 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2700 goto found_fin_ok; 2701 WARN(!(flags & MSG_PEEK), 2702 "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", 2703 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 2704 } 2705 2706 /* Well, if we have backlog, try to process it now yet. */ 2707 2708 if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) 2709 break; 2710 2711 if (copied) { 2712 if (!timeo || 2713 sk->sk_err || 2714 sk->sk_state == TCP_CLOSE || 2715 (sk->sk_shutdown & RCV_SHUTDOWN) || 2716 signal_pending(current)) 2717 break; 2718 } else { 2719 if (sock_flag(sk, SOCK_DONE)) 2720 break; 2721 2722 if (sk->sk_err) { 2723 copied = sock_error(sk); 2724 break; 2725 } 2726 2727 if (sk->sk_shutdown & RCV_SHUTDOWN) 2728 break; 2729 2730 if (sk->sk_state == TCP_CLOSE) { 2731 /* This occurs when user tries to read 2732 * from never connected socket. 
2733 */ 2734 copied = -ENOTCONN; 2735 break; 2736 } 2737 2738 if (!timeo) { 2739 copied = -EAGAIN; 2740 break; 2741 } 2742 2743 if (signal_pending(current)) { 2744 copied = sock_intr_errno(timeo); 2745 break; 2746 } 2747 } 2748 2749 if (copied >= target) { 2750 /* Do not sleep, just process backlog. */ 2751 __sk_flush_backlog(sk); 2752 } else { 2753 tcp_cleanup_rbuf(sk, copied); 2754 err = sk_wait_data(sk, &timeo, last); 2755 if (err < 0) { 2756 err = copied ? : err; 2757 goto out; 2758 } 2759 } 2760 2761 if ((flags & MSG_PEEK) && 2762 (peek_seq - peek_offset - copied - urg_hole != tp->copied_seq)) { 2763 net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 2764 current->comm, 2765 task_pid_nr(current)); 2766 peek_seq = tp->copied_seq + peek_offset; 2767 } 2768 continue; 2769 2770 found_ok_skb: 2771 /* Ok so how much can we use? */ 2772 used = skb->len - offset; 2773 if (len < used) 2774 used = len; 2775 2776 /* Do we have urgent data here? */ 2777 if (unlikely(tp->urg_data)) { 2778 u32 urg_offset = tp->urg_seq - *seq; 2779 if (urg_offset < used) { 2780 if (!urg_offset) { 2781 if (!sock_flag(sk, SOCK_URGINLINE)) { 2782 WRITE_ONCE(*seq, *seq + 1); 2783 urg_hole++; 2784 offset++; 2785 used--; 2786 if (!used) 2787 goto skip_copy; 2788 } 2789 } else 2790 used = urg_offset; 2791 } 2792 } 2793 2794 if (!(flags & MSG_TRUNC)) { 2795 if (last_copied_dmabuf != -1 && 2796 last_copied_dmabuf != !skb_frags_readable(skb)) 2797 break; 2798 2799 if (skb_frags_readable(skb)) { 2800 err = skb_copy_datagram_msg(skb, offset, msg, 2801 used); 2802 if (err) { 2803 /* Exception. Bailout! */ 2804 if (!copied) 2805 copied = -EFAULT; 2806 break; 2807 } 2808 } else { 2809 if (!(flags & MSG_SOCK_DEVMEM)) { 2810 /* dmabuf skbs can only be received 2811 * with the MSG_SOCK_DEVMEM flag. 2812 */ 2813 if (!copied) 2814 copied = -EFAULT; 2815 2816 break; 2817 } 2818 2819 err = tcp_recvmsg_dmabuf(sk, skb, offset, msg, 2820 used); 2821 if (err <= 0) { 2822 if (!copied) 2823 copied = -EFAULT; 2824 2825 break; 2826 } 2827 used = err; 2828 } 2829 } 2830 2831 last_copied_dmabuf = !skb_frags_readable(skb); 2832 2833 WRITE_ONCE(*seq, *seq + used); 2834 copied += used; 2835 len -= used; 2836 if (flags & MSG_PEEK) 2837 sk_peek_offset_fwd(sk, used); 2838 else 2839 sk_peek_offset_bwd(sk, used); 2840 tcp_rcv_space_adjust(sk); 2841 2842 skip_copy: 2843 if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { 2844 WRITE_ONCE(tp->urg_data, 0); 2845 tcp_fast_path_check(sk); 2846 } 2847 2848 if (TCP_SKB_CB(skb)->has_rxtstamp) { 2849 tcp_update_recv_tstamps(skb, tss); 2850 *cmsg_flags |= TCP_CMSG_TS; 2851 } 2852 2853 if (used + offset < skb->len) 2854 continue; 2855 2856 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2857 goto found_fin_ok; 2858 if (!(flags & MSG_PEEK)) 2859 tcp_eat_recv_skb(sk, skb); 2860 continue; 2861 2862 found_fin_ok: 2863 /* Process the FIN. */ 2864 WRITE_ONCE(*seq, *seq + 1); 2865 if (!(flags & MSG_PEEK)) 2866 tcp_eat_recv_skb(sk, skb); 2867 break; 2868 } while (len > 0); 2869 2870 /* According to UNIX98, msg_name/msg_namelen are ignored 2871 * on connected socket. I was just happy when found this 8) --ANK 2872 */ 2873 2874 /* Clean up data we have read: This will do ACK frames. 
*/ 2875 tcp_cleanup_rbuf(sk, copied); 2876 return copied; 2877 2878 out: 2879 return err; 2880 2881 recv_urg: 2882 err = tcp_recv_urg(sk, msg, len, flags); 2883 goto out; 2884 2885 recv_sndq: 2886 err = tcp_peek_sndq(sk, msg, len); 2887 goto out; 2888 } 2889 2890 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, 2891 int *addr_len) 2892 { 2893 int cmsg_flags = 0, ret; 2894 struct scm_timestamping_internal tss; 2895 2896 if (unlikely(flags & MSG_ERRQUEUE)) 2897 return inet_recv_error(sk, msg, len, addr_len); 2898 2899 if (sk_can_busy_loop(sk) && 2900 skb_queue_empty_lockless(&sk->sk_receive_queue) && 2901 sk->sk_state == TCP_ESTABLISHED) 2902 sk_busy_loop(sk, flags & MSG_DONTWAIT); 2903 2904 lock_sock(sk); 2905 ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags); 2906 release_sock(sk); 2907 2908 if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) { 2909 if (cmsg_flags & TCP_CMSG_TS) 2910 tcp_recv_timestamp(msg, sk, &tss); 2911 if (msg->msg_get_inq) { 2912 msg->msg_inq = tcp_inq_hint(sk); 2913 if (cmsg_flags & TCP_CMSG_INQ) 2914 put_cmsg(msg, SOL_TCP, TCP_CM_INQ, 2915 sizeof(msg->msg_inq), &msg->msg_inq); 2916 } 2917 } 2918 return ret; 2919 } 2920 EXPORT_IPV6_MOD(tcp_recvmsg); 2921 2922 void tcp_set_state(struct sock *sk, int state) 2923 { 2924 int oldstate = sk->sk_state; 2925 2926 /* We defined a new enum for TCP states that are exported in BPF 2927 * so as not force the internal TCP states to be frozen. The 2928 * following checks will detect if an internal state value ever 2929 * differs from the BPF value. If this ever happens, then we will 2930 * need to remap the internal value to the BPF value before calling 2931 * tcp_call_bpf_2arg. 2932 */ 2933 BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED); 2934 BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT); 2935 BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV); 2936 BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1); 2937 BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2); 2938 BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT); 2939 BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE); 2940 BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT); 2941 BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK); 2942 BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN); 2943 BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING); 2944 BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV); 2945 BUILD_BUG_ON((int)BPF_TCP_BOUND_INACTIVE != (int)TCP_BOUND_INACTIVE); 2946 BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES); 2947 2948 /* bpf uapi header bpf.h defines an anonymous enum with values 2949 * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux 2950 * is able to emit this enum in DWARF due to the above BUILD_BUG_ON. 2951 * But clang built vmlinux does not have this enum in DWARF 2952 * since clang removes the above code before generating IR/debuginfo. 2953 * Let us explicitly emit the type debuginfo to ensure the 2954 * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF 2955 * regardless of which compiler is used. 
2956 */ 2957 BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED); 2958 2959 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) 2960 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); 2961 2962 switch (state) { 2963 case TCP_ESTABLISHED: 2964 if (oldstate != TCP_ESTABLISHED) 2965 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2966 break; 2967 case TCP_CLOSE_WAIT: 2968 if (oldstate == TCP_SYN_RECV) 2969 TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2970 break; 2971 2972 case TCP_CLOSE: 2973 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 2974 TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 2975 2976 sk->sk_prot->unhash(sk); 2977 if (inet_csk(sk)->icsk_bind_hash && 2978 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 2979 inet_put_port(sk); 2980 fallthrough; 2981 default: 2982 if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT) 2983 TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2984 } 2985 2986 /* Change state AFTER socket is unhashed to avoid closed 2987 * socket sitting in hash tables. 2988 */ 2989 inet_sk_state_store(sk, state); 2990 } 2991 EXPORT_SYMBOL_GPL(tcp_set_state); 2992 2993 /* 2994 * State processing on a close. This implements the state shift for 2995 * sending our FIN frame. Note that we only send a FIN for some 2996 * states. A shutdown() may have already sent the FIN, or we may be 2997 * closed. 2998 */ 2999 3000 static const unsigned char new_state[16] = { 3001 /* current state: new state: action: */ 3002 [0 /* (Invalid) */] = TCP_CLOSE, 3003 [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 3004 [TCP_SYN_SENT] = TCP_CLOSE, 3005 [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 3006 [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 3007 [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 3008 [TCP_TIME_WAIT] = TCP_CLOSE, 3009 [TCP_CLOSE] = TCP_CLOSE, 3010 [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 3011 [TCP_LAST_ACK] = TCP_LAST_ACK, 3012 [TCP_LISTEN] = TCP_CLOSE, 3013 [TCP_CLOSING] = TCP_CLOSING, 3014 [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ 3015 }; 3016 3017 static int tcp_close_state(struct sock *sk) 3018 { 3019 int next = (int)new_state[sk->sk_state]; 3020 int ns = next & TCP_STATE_MASK; 3021 3022 tcp_set_state(sk, ns); 3023 3024 return next & TCP_ACTION_FIN; 3025 } 3026 3027 /* 3028 * Shutdown the sending side of a connection. Much like close except 3029 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 3030 */ 3031 3032 void tcp_shutdown(struct sock *sk, int how) 3033 { 3034 /* We need to grab some memory, and put together a FIN, 3035 * and then put it into the queue to be sent. 3036 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 3037 */ 3038 if (!(how & SEND_SHUTDOWN)) 3039 return; 3040 3041 /* If we've already sent a FIN, or it's a closed state, skip this. */ 3042 if ((1 << sk->sk_state) & 3043 (TCPF_ESTABLISHED | TCPF_SYN_SENT | 3044 TCPF_CLOSE_WAIT)) { 3045 /* Clear out any half completed packets. FIN if needed. 
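 * tcp_close_state() moves the socket to the next state from the
 * new_state[] table and tells us (TCP_ACTION_FIN) whether a FIN still
 * has to be sent.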
*/ 3046 if (tcp_close_state(sk)) 3047 tcp_send_fin(sk); 3048 } 3049 } 3050 EXPORT_IPV6_MOD(tcp_shutdown); 3051 3052 int tcp_orphan_count_sum(void) 3053 { 3054 int i, total = 0; 3055 3056 for_each_possible_cpu(i) 3057 total += per_cpu(tcp_orphan_count, i); 3058 3059 return max(total, 0); 3060 } 3061 3062 static int tcp_orphan_cache; 3063 static struct timer_list tcp_orphan_timer; 3064 #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) 3065 3066 static void tcp_orphan_update(struct timer_list *unused) 3067 { 3068 WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); 3069 mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 3070 } 3071 3072 static bool tcp_too_many_orphans(int shift) 3073 { 3074 return READ_ONCE(tcp_orphan_cache) << shift > 3075 READ_ONCE(sysctl_tcp_max_orphans); 3076 } 3077 3078 static bool tcp_out_of_memory(const struct sock *sk) 3079 { 3080 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 3081 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) 3082 return true; 3083 return false; 3084 } 3085 3086 bool tcp_check_oom(const struct sock *sk, int shift) 3087 { 3088 bool too_many_orphans, out_of_socket_memory; 3089 3090 too_many_orphans = tcp_too_many_orphans(shift); 3091 out_of_socket_memory = tcp_out_of_memory(sk); 3092 3093 if (too_many_orphans) 3094 net_info_ratelimited("too many orphaned sockets\n"); 3095 if (out_of_socket_memory) 3096 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); 3097 return too_many_orphans || out_of_socket_memory; 3098 } 3099 3100 void __tcp_close(struct sock *sk, long timeout) 3101 { 3102 struct sk_buff *skb; 3103 int data_was_unread = 0; 3104 int state; 3105 3106 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 3107 3108 if (sk->sk_state == TCP_LISTEN) { 3109 tcp_set_state(sk, TCP_CLOSE); 3110 3111 /* Special case. */ 3112 inet_csk_listen_stop(sk); 3113 3114 goto adjudge_to_death; 3115 } 3116 3117 /* We need to flush the recv. buffs. We do this only on the 3118 * descriptor close, not protocol-sourced closes, because the 3119 * reader process may not have drained the data yet! 3120 */ 3121 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 3122 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; 3123 3124 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 3125 len--; 3126 data_was_unread += len; 3127 __kfree_skb(skb); 3128 } 3129 3130 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ 3131 if (sk->sk_state == TCP_CLOSE) 3132 goto adjudge_to_death; 3133 3134 /* As outlined in RFC 2525, section 2.17, we send a RST here because 3135 * data was lost. To witness the awful effects of the old behavior of 3136 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 3137 * GET in an FTP client, suspend the process, wait for the client to 3138 * advertise a zero window, then kill -9 the FTP client, wheee... 3139 * Note: timeout is always zero in such a case. 3140 */ 3141 if (unlikely(tcp_sk(sk)->repair)) { 3142 sk->sk_prot->disconnect(sk, 0); 3143 } else if (data_was_unread) { 3144 /* Unread data was tossed, zap the connection. */ 3145 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 3146 tcp_set_state(sk, TCP_CLOSE); 3147 tcp_send_active_reset(sk, sk->sk_allocation, 3148 SK_RST_REASON_TCP_ABORT_ON_CLOSE); 3149 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 3150 /* Check zero linger _after_ checking for unread data. 
*/ 3151 sk->sk_prot->disconnect(sk, 0); 3152 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 3153 } else if (tcp_close_state(sk)) { 3154 /* We FIN if the application ate all the data before 3155 * zapping the connection. 3156 */ 3157 3158 /* RED-PEN. Formally speaking, we have broken TCP state 3159 * machine. State transitions: 3160 * 3161 * TCP_ESTABLISHED -> TCP_FIN_WAIT1 3162 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (it is difficult) 3163 * TCP_CLOSE_WAIT -> TCP_LAST_ACK 3164 * 3165 * are legal only when FIN has been sent (i.e. in window), 3166 * rather than queued out of window. Purists blame. 3167 * 3168 * F.e. "RFC state" is ESTABLISHED, 3169 * if Linux state is FIN-WAIT-1, but FIN is still not sent. 3170 * 3171 * The visible declinations are that sometimes 3172 * we enter time-wait state, when it is not required really 3173 * (harmless), do not send active resets, when they are 3174 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 3175 * they look as CLOSING or LAST_ACK for Linux) 3176 * Probably, I missed some more holelets. 3177 * --ANK 3178 * XXX (TFO) - To start off we don't support SYN+ACK+FIN 3179 * in a single packet! (May consider it later but will 3180 * probably need API support or TCP_CORK SYN-ACK until 3181 * data is written and socket is closed.) 3182 */ 3183 tcp_send_fin(sk); 3184 } 3185 3186 sk_stream_wait_close(sk, timeout); 3187 3188 adjudge_to_death: 3189 state = sk->sk_state; 3190 sock_hold(sk); 3191 sock_orphan(sk); 3192 3193 local_bh_disable(); 3194 bh_lock_sock(sk); 3195 /* remove backlog if any, without releasing ownership. */ 3196 __release_sock(sk); 3197 3198 this_cpu_inc(tcp_orphan_count); 3199 3200 /* Have we already been destroyed by a softirq or backlog? */ 3201 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 3202 goto out; 3203 3204 /* This is a (useful) BSD violating of the RFC. There is a 3205 * problem with TCP as specified in that the other end could 3206 * keep a socket open forever with no application left this end. 3207 * We use a 1 minute timeout (about the same as BSD) then kill 3208 * our end. If they send after that then tough - BUT: long enough 3209 * that we won't make the old 4*rto = almost no time - whoops 3210 * reset mistake. 3211 * 3212 * Nope, it was not mistake. It is really desired behaviour 3213 * f.e. on http servers, when such sockets are useless, but 3214 * consume significant resources. Let's do it with special 3215 * linger2 option. 
--ANK 3216 */ 3217 3218 if (sk->sk_state == TCP_FIN_WAIT2) { 3219 struct tcp_sock *tp = tcp_sk(sk); 3220 if (READ_ONCE(tp->linger2) < 0) { 3221 tcp_set_state(sk, TCP_CLOSE); 3222 tcp_send_active_reset(sk, GFP_ATOMIC, 3223 SK_RST_REASON_TCP_ABORT_ON_LINGER); 3224 __NET_INC_STATS(sock_net(sk), 3225 LINUX_MIB_TCPABORTONLINGER); 3226 } else { 3227 const int tmo = tcp_fin_time(sk); 3228 3229 if (tmo > TCP_TIMEWAIT_LEN) { 3230 tcp_reset_keepalive_timer(sk, 3231 tmo - TCP_TIMEWAIT_LEN); 3232 } else { 3233 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 3234 goto out; 3235 } 3236 } 3237 } 3238 if (sk->sk_state != TCP_CLOSE) { 3239 if (tcp_check_oom(sk, 0)) { 3240 tcp_set_state(sk, TCP_CLOSE); 3241 tcp_send_active_reset(sk, GFP_ATOMIC, 3242 SK_RST_REASON_TCP_ABORT_ON_MEMORY); 3243 __NET_INC_STATS(sock_net(sk), 3244 LINUX_MIB_TCPABORTONMEMORY); 3245 } else if (!check_net(sock_net(sk))) { 3246 /* Not possible to send reset; just close */ 3247 tcp_set_state(sk, TCP_CLOSE); 3248 } 3249 } 3250 3251 if (sk->sk_state == TCP_CLOSE) { 3252 struct request_sock *req; 3253 3254 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 3255 lockdep_sock_is_held(sk)); 3256 /* We could get here with a non-NULL req if the socket is 3257 * aborted (e.g., closed with unread data) before 3WHS 3258 * finishes. 3259 */ 3260 if (req) 3261 reqsk_fastopen_remove(sk, req, false); 3262 inet_csk_destroy_sock(sk); 3263 } 3264 /* Otherwise, socket is reprieved until protocol close. */ 3265 3266 out: 3267 bh_unlock_sock(sk); 3268 local_bh_enable(); 3269 } 3270 3271 void tcp_close(struct sock *sk, long timeout) 3272 { 3273 lock_sock(sk); 3274 __tcp_close(sk, timeout); 3275 release_sock(sk); 3276 if (!sk->sk_net_refcnt) 3277 inet_csk_clear_xmit_timers_sync(sk); 3278 sock_put(sk); 3279 } 3280 EXPORT_SYMBOL(tcp_close); 3281 3282 /* These states need RST on ABORT according to RFC793 */ 3283 3284 static inline bool tcp_need_reset(int state) 3285 { 3286 return (1 << state) & 3287 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 3288 TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 3289 } 3290 3291 static void tcp_rtx_queue_purge(struct sock *sk) 3292 { 3293 struct rb_node *p = rb_first(&sk->tcp_rtx_queue); 3294 3295 tcp_sk(sk)->highest_sack = NULL; 3296 while (p) { 3297 struct sk_buff *skb = rb_to_skb(p); 3298 3299 p = rb_next(p); 3300 /* Since we are deleting whole queue, no need to 3301 * list_del(&skb->tcp_tsorted_anchor) 3302 */ 3303 tcp_rtx_queue_unlink(skb, sk); 3304 tcp_wmem_free_skb(sk, skb); 3305 } 3306 } 3307 3308 void tcp_write_queue_purge(struct sock *sk) 3309 { 3310 struct sk_buff *skb; 3311 3312 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 3313 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 3314 tcp_skb_tsorted_anchor_cleanup(skb); 3315 tcp_wmem_free_skb(sk, skb); 3316 } 3317 tcp_rtx_queue_purge(sk); 3318 INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 3319 tcp_clear_all_retrans_hints(tcp_sk(sk)); 3320 tcp_sk(sk)->packets_out = 0; 3321 inet_csk(sk)->icsk_backoff = 0; 3322 } 3323 3324 int tcp_disconnect(struct sock *sk, int flags) 3325 { 3326 struct inet_sock *inet = inet_sk(sk); 3327 struct inet_connection_sock *icsk = inet_csk(sk); 3328 struct tcp_sock *tp = tcp_sk(sk); 3329 int old_state = sk->sk_state; 3330 u32 seq; 3331 3332 if (old_state != TCP_CLOSE) 3333 tcp_set_state(sk, TCP_CLOSE); 3334 3335 /* ABORT function of RFC793 */ 3336 if (old_state == TCP_LISTEN) { 3337 inet_csk_listen_stop(sk); 3338 } else if (unlikely(tp->repair)) { 3339 WRITE_ONCE(sk->sk_err, ECONNABORTED); 3340 } else if (tcp_need_reset(old_state)) { 3341 
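		/* These are the states for which RFC 793's ABORT processing
		 * requires a RST on the wire (SYN-RECEIVED, ESTABLISHED,
		 * FIN-WAIT-1/2, CLOSE-WAIT); see tcp_need_reset() above.
		 */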
tcp_send_active_reset(sk, gfp_any(), SK_RST_REASON_TCP_STATE); 3342 WRITE_ONCE(sk->sk_err, ECONNRESET); 3343 } else if (tp->snd_nxt != tp->write_seq && 3344 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) { 3345 /* The last check adjusts for discrepancy of Linux wrt. RFC 3346 * states 3347 */ 3348 tcp_send_active_reset(sk, gfp_any(), 3349 SK_RST_REASON_TCP_DISCONNECT_WITH_DATA); 3350 WRITE_ONCE(sk->sk_err, ECONNRESET); 3351 } else if (old_state == TCP_SYN_SENT) 3352 WRITE_ONCE(sk->sk_err, ECONNRESET); 3353 3354 tcp_clear_xmit_timers(sk); 3355 __skb_queue_purge(&sk->sk_receive_queue); 3356 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 3357 WRITE_ONCE(tp->urg_data, 0); 3358 sk_set_peek_off(sk, -1); 3359 tcp_write_queue_purge(sk); 3360 tcp_fastopen_active_disable_ofo_check(sk); 3361 skb_rbtree_purge(&tp->out_of_order_queue); 3362 3363 inet->inet_dport = 0; 3364 3365 inet_bhash2_reset_saddr(sk); 3366 3367 WRITE_ONCE(sk->sk_shutdown, 0); 3368 sock_reset_flag(sk, SOCK_DONE); 3369 tp->srtt_us = 0; 3370 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 3371 tp->rcv_rtt_last_tsecr = 0; 3372 3373 seq = tp->write_seq + tp->max_window + 2; 3374 if (!seq) 3375 seq = 1; 3376 WRITE_ONCE(tp->write_seq, seq); 3377 3378 icsk->icsk_backoff = 0; 3379 icsk->icsk_probes_out = 0; 3380 icsk->icsk_probes_tstamp = 0; 3381 icsk->icsk_rto = TCP_TIMEOUT_INIT; 3382 WRITE_ONCE(icsk->icsk_rto_min, TCP_RTO_MIN); 3383 WRITE_ONCE(icsk->icsk_delack_max, TCP_DELACK_MAX); 3384 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 3385 tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 3386 tp->snd_cwnd_cnt = 0; 3387 tp->is_cwnd_limited = 0; 3388 tp->max_packets_out = 0; 3389 tp->window_clamp = 0; 3390 tp->delivered = 0; 3391 tp->delivered_ce = 0; 3392 if (icsk->icsk_ca_initialized && icsk->icsk_ca_ops->release) 3393 icsk->icsk_ca_ops->release(sk); 3394 memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 3395 icsk->icsk_ca_initialized = 0; 3396 tcp_set_ca_state(sk, TCP_CA_Open); 3397 tp->is_sack_reneg = 0; 3398 tcp_clear_retrans(tp); 3399 tp->total_retrans = 0; 3400 inet_csk_delack_init(sk); 3401 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 3402 * issue in __tcp_select_window() 3403 */ 3404 icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; 3405 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 3406 __sk_dst_reset(sk); 3407 dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL))); 3408 tcp_saved_syn_free(tp); 3409 tp->compressed_ack = 0; 3410 tp->segs_in = 0; 3411 tp->segs_out = 0; 3412 tp->bytes_sent = 0; 3413 tp->bytes_acked = 0; 3414 tp->bytes_received = 0; 3415 tp->bytes_retrans = 0; 3416 tp->data_segs_in = 0; 3417 tp->data_segs_out = 0; 3418 tp->duplicate_sack[0].start_seq = 0; 3419 tp->duplicate_sack[0].end_seq = 0; 3420 tp->dsack_dups = 0; 3421 tp->reord_seen = 0; 3422 tp->retrans_out = 0; 3423 tp->sacked_out = 0; 3424 tp->tlp_high_seq = 0; 3425 tp->last_oow_ack_time = 0; 3426 tp->plb_rehash = 0; 3427 /* There's a bubble in the pipe until at least the first ACK. 
*/ 3428 tp->app_limited = ~0U; 3429 tp->rate_app_limited = 1; 3430 tp->rack.mstamp = 0; 3431 tp->rack.advanced = 0; 3432 tp->rack.reo_wnd_steps = 1; 3433 tp->rack.last_delivered = 0; 3434 tp->rack.reo_wnd_persist = 0; 3435 tp->rack.dsack_seen = 0; 3436 tp->syn_data_acked = 0; 3437 tp->syn_fastopen_child = 0; 3438 tp->rx_opt.saw_tstamp = 0; 3439 tp->rx_opt.dsack = 0; 3440 tp->rx_opt.num_sacks = 0; 3441 tp->rcv_ooopack = 0; 3442 3443 3444 /* Clean up fastopen related fields */ 3445 tcp_free_fastopen_req(tp); 3446 inet_clear_bit(DEFER_CONNECT, sk); 3447 tp->fastopen_client_fail = 0; 3448 3449 WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 3450 3451 if (sk->sk_frag.page) { 3452 put_page(sk->sk_frag.page); 3453 sk->sk_frag.page = NULL; 3454 sk->sk_frag.offset = 0; 3455 } 3456 sk_error_report(sk); 3457 return 0; 3458 } 3459 EXPORT_SYMBOL(tcp_disconnect); 3460 3461 static inline bool tcp_can_repair_sock(const struct sock *sk) 3462 { 3463 return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 3464 (sk->sk_state != TCP_LISTEN); 3465 } 3466 3467 static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) 3468 { 3469 struct tcp_repair_window opt; 3470 3471 if (!tp->repair) 3472 return -EPERM; 3473 3474 if (len != sizeof(opt)) 3475 return -EINVAL; 3476 3477 if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) 3478 return -EFAULT; 3479 3480 if (opt.max_window < opt.snd_wnd) 3481 return -EINVAL; 3482 3483 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) 3484 return -EINVAL; 3485 3486 if (after(opt.rcv_wup, tp->rcv_nxt)) 3487 return -EINVAL; 3488 3489 tp->snd_wl1 = opt.snd_wl1; 3490 tp->snd_wnd = opt.snd_wnd; 3491 tp->max_window = opt.max_window; 3492 3493 tp->rcv_wnd = opt.rcv_wnd; 3494 tp->rcv_wup = opt.rcv_wup; 3495 3496 return 0; 3497 } 3498 3499 static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, 3500 unsigned int len) 3501 { 3502 struct tcp_sock *tp = tcp_sk(sk); 3503 struct tcp_repair_opt opt; 3504 size_t offset = 0; 3505 3506 while (len >= sizeof(opt)) { 3507 if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) 3508 return -EFAULT; 3509 3510 offset += sizeof(opt); 3511 len -= sizeof(opt); 3512 3513 switch (opt.opt_code) { 3514 case TCPOPT_MSS: 3515 tp->rx_opt.mss_clamp = opt.opt_val; 3516 tcp_mtup_init(sk); 3517 break; 3518 case TCPOPT_WINDOW: 3519 { 3520 u16 snd_wscale = opt.opt_val & 0xFFFF; 3521 u16 rcv_wscale = opt.opt_val >> 16; 3522 3523 if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) 3524 return -EFBIG; 3525 3526 tp->rx_opt.snd_wscale = snd_wscale; 3527 tp->rx_opt.rcv_wscale = rcv_wscale; 3528 tp->rx_opt.wscale_ok = 1; 3529 } 3530 break; 3531 case TCPOPT_SACK_PERM: 3532 if (opt.opt_val != 0) 3533 return -EINVAL; 3534 3535 tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 3536 break; 3537 case TCPOPT_TIMESTAMP: 3538 if (opt.opt_val != 0) 3539 return -EINVAL; 3540 3541 tp->rx_opt.tstamp_ok = 1; 3542 break; 3543 } 3544 } 3545 3546 return 0; 3547 } 3548 3549 DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 3550 EXPORT_IPV6_MOD(tcp_tx_delay_enabled); 3551 3552 static void tcp_enable_tx_delay(void) 3553 { 3554 if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { 3555 static int __tcp_tx_delay_enabled = 0; 3556 3557 if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { 3558 static_branch_enable(&tcp_tx_delay_enabled); 3559 pr_info("TCP_TX_DELAY enabled\n"); 3560 } 3561 } 3562 } 3563 3564 /* When set indicates to always queue non-full frames. 
Later the user clears 3565 * this option and we transmit any pending partial frames in the queue. This is 3566 * meant to be used alongside sendfile() to get properly filled frames when the 3567 * user (for example) must write out headers with a write() call first and then 3568 * use sendfile to send out the data parts. 3569 * 3570 * TCP_CORK can be set together with TCP_NODELAY and it is stronger than 3571 * TCP_NODELAY. 3572 */ 3573 void __tcp_sock_set_cork(struct sock *sk, bool on) 3574 { 3575 struct tcp_sock *tp = tcp_sk(sk); 3576 3577 if (on) { 3578 tp->nonagle |= TCP_NAGLE_CORK; 3579 } else { 3580 tp->nonagle &= ~TCP_NAGLE_CORK; 3581 if (tp->nonagle & TCP_NAGLE_OFF) 3582 tp->nonagle |= TCP_NAGLE_PUSH; 3583 tcp_push_pending_frames(sk); 3584 } 3585 } 3586 3587 void tcp_sock_set_cork(struct sock *sk, bool on) 3588 { 3589 lock_sock(sk); 3590 __tcp_sock_set_cork(sk, on); 3591 release_sock(sk); 3592 } 3593 EXPORT_SYMBOL(tcp_sock_set_cork); 3594 3595 /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is 3596 * remembered, but it is not activated until cork is cleared. 3597 * 3598 * However, when TCP_NODELAY is set we make an explicit push, which overrides 3599 * even TCP_CORK for currently queued segments. 3600 */ 3601 void __tcp_sock_set_nodelay(struct sock *sk, bool on) 3602 { 3603 if (on) { 3604 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 3605 tcp_push_pending_frames(sk); 3606 } else { 3607 tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; 3608 } 3609 } 3610 3611 void tcp_sock_set_nodelay(struct sock *sk) 3612 { 3613 lock_sock(sk); 3614 __tcp_sock_set_nodelay(sk, true); 3615 release_sock(sk); 3616 } 3617 EXPORT_SYMBOL(tcp_sock_set_nodelay); 3618 3619 static void __tcp_sock_set_quickack(struct sock *sk, int val) 3620 { 3621 if (!val) { 3622 inet_csk_enter_pingpong_mode(sk); 3623 return; 3624 } 3625 3626 inet_csk_exit_pingpong_mode(sk); 3627 if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 3628 inet_csk_ack_scheduled(sk)) { 3629 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; 3630 tcp_cleanup_rbuf(sk, 1); 3631 if (!(val & 1)) 3632 inet_csk_enter_pingpong_mode(sk); 3633 } 3634 } 3635 3636 void tcp_sock_set_quickack(struct sock *sk, int val) 3637 { 3638 lock_sock(sk); 3639 __tcp_sock_set_quickack(sk, val); 3640 release_sock(sk); 3641 } 3642 EXPORT_SYMBOL(tcp_sock_set_quickack); 3643 3644 int tcp_sock_set_syncnt(struct sock *sk, int val) 3645 { 3646 if (val < 1 || val > MAX_TCP_SYNCNT) 3647 return -EINVAL; 3648 3649 WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val); 3650 return 0; 3651 } 3652 EXPORT_SYMBOL(tcp_sock_set_syncnt); 3653 3654 int tcp_sock_set_user_timeout(struct sock *sk, int val) 3655 { 3656 /* Cap the max time in ms TCP will retry or probe the window 3657 * before giving up and aborting (ETIMEDOUT) a connection. 
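 * A value of 0 is accepted and keeps the default behaviour, i.e. the
 * usual retransmission and keepalive limits decide when to give up.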
3658 */ 3659 if (val < 0) 3660 return -EINVAL; 3661 3662 WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val); 3663 return 0; 3664 } 3665 EXPORT_SYMBOL(tcp_sock_set_user_timeout); 3666 3667 int tcp_sock_set_keepidle_locked(struct sock *sk, int val) 3668 { 3669 struct tcp_sock *tp = tcp_sk(sk); 3670 3671 if (val < 1 || val > MAX_TCP_KEEPIDLE) 3672 return -EINVAL; 3673 3674 /* Paired with WRITE_ONCE() in keepalive_time_when() */ 3675 WRITE_ONCE(tp->keepalive_time, val * HZ); 3676 if (sock_flag(sk, SOCK_KEEPOPEN) && 3677 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 3678 u32 elapsed = keepalive_time_elapsed(tp); 3679 3680 if (tp->keepalive_time > elapsed) 3681 elapsed = tp->keepalive_time - elapsed; 3682 else 3683 elapsed = 0; 3684 tcp_reset_keepalive_timer(sk, elapsed); 3685 } 3686 3687 return 0; 3688 } 3689 3690 int tcp_sock_set_keepidle(struct sock *sk, int val) 3691 { 3692 int err; 3693 3694 lock_sock(sk); 3695 err = tcp_sock_set_keepidle_locked(sk, val); 3696 release_sock(sk); 3697 return err; 3698 } 3699 EXPORT_SYMBOL(tcp_sock_set_keepidle); 3700 3701 int tcp_sock_set_keepintvl(struct sock *sk, int val) 3702 { 3703 if (val < 1 || val > MAX_TCP_KEEPINTVL) 3704 return -EINVAL; 3705 3706 WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ); 3707 return 0; 3708 } 3709 EXPORT_SYMBOL(tcp_sock_set_keepintvl); 3710 3711 int tcp_sock_set_keepcnt(struct sock *sk, int val) 3712 { 3713 if (val < 1 || val > MAX_TCP_KEEPCNT) 3714 return -EINVAL; 3715 3716 /* Paired with READ_ONCE() in keepalive_probes() */ 3717 WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val); 3718 return 0; 3719 } 3720 EXPORT_SYMBOL(tcp_sock_set_keepcnt); 3721 3722 int tcp_set_window_clamp(struct sock *sk, int val) 3723 { 3724 u32 old_window_clamp, new_window_clamp, new_rcv_ssthresh; 3725 struct tcp_sock *tp = tcp_sk(sk); 3726 3727 if (!val) { 3728 if (sk->sk_state != TCP_CLOSE) 3729 return -EINVAL; 3730 WRITE_ONCE(tp->window_clamp, 0); 3731 return 0; 3732 } 3733 3734 old_window_clamp = tp->window_clamp; 3735 new_window_clamp = max_t(int, SOCK_MIN_RCVBUF / 2, val); 3736 3737 if (new_window_clamp == old_window_clamp) 3738 return 0; 3739 3740 WRITE_ONCE(tp->window_clamp, new_window_clamp); 3741 3742 /* Need to apply the reserved mem provisioning only 3743 * when shrinking the window clamp. 3744 */ 3745 if (new_window_clamp < old_window_clamp) { 3746 __tcp_adjust_rcv_ssthresh(sk, new_window_clamp); 3747 } else { 3748 new_rcv_ssthresh = min(tp->rcv_wnd, new_window_clamp); 3749 tp->rcv_ssthresh = max(new_rcv_ssthresh, tp->rcv_ssthresh); 3750 } 3751 return 0; 3752 } 3753 3754 /* 3755 * Socket option code for TCP. 
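 *
 * do_tcp_setsockopt() handles the string/key options (TCP_CONGESTION,
 * TCP_ULP, TCP_FASTOPEN_KEY) first and returns from their cases
 * directly, then the integer options that can be updated without the
 * socket lock, and only takes sockopt_lock_sock() for the rest.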
3756 */ 3757 int do_tcp_setsockopt(struct sock *sk, int level, int optname, 3758 sockptr_t optval, unsigned int optlen) 3759 { 3760 struct tcp_sock *tp = tcp_sk(sk); 3761 struct inet_connection_sock *icsk = inet_csk(sk); 3762 struct net *net = sock_net(sk); 3763 int val; 3764 int err = 0; 3765 3766 /* These are data/string values, all the others are ints */ 3767 switch (optname) { 3768 case TCP_CONGESTION: { 3769 char name[TCP_CA_NAME_MAX]; 3770 3771 if (optlen < 1) 3772 return -EINVAL; 3773 3774 val = strncpy_from_sockptr(name, optval, 3775 min_t(long, TCP_CA_NAME_MAX-1, optlen)); 3776 if (val < 0) 3777 return -EFAULT; 3778 name[val] = 0; 3779 3780 sockopt_lock_sock(sk); 3781 err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), 3782 sockopt_ns_capable(sock_net(sk)->user_ns, 3783 CAP_NET_ADMIN)); 3784 sockopt_release_sock(sk); 3785 return err; 3786 } 3787 case TCP_ULP: { 3788 char name[TCP_ULP_NAME_MAX]; 3789 3790 if (optlen < 1) 3791 return -EINVAL; 3792 3793 val = strncpy_from_sockptr(name, optval, 3794 min_t(long, TCP_ULP_NAME_MAX - 1, 3795 optlen)); 3796 if (val < 0) 3797 return -EFAULT; 3798 name[val] = 0; 3799 3800 sockopt_lock_sock(sk); 3801 err = tcp_set_ulp(sk, name); 3802 sockopt_release_sock(sk); 3803 return err; 3804 } 3805 case TCP_FASTOPEN_KEY: { 3806 __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; 3807 __u8 *backup_key = NULL; 3808 3809 /* Allow a backup key as well to facilitate key rotation 3810 * First key is the active one. 3811 */ 3812 if (optlen != TCP_FASTOPEN_KEY_LENGTH && 3813 optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) 3814 return -EINVAL; 3815 3816 if (copy_from_sockptr(key, optval, optlen)) 3817 return -EFAULT; 3818 3819 if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) 3820 backup_key = key + TCP_FASTOPEN_KEY_LENGTH; 3821 3822 return tcp_fastopen_reset_cipher(net, sk, key, backup_key); 3823 } 3824 default: 3825 /* fallthru */ 3826 break; 3827 } 3828 3829 if (optlen < sizeof(int)) 3830 return -EINVAL; 3831 3832 if (copy_from_sockptr(&val, optval, sizeof(val))) 3833 return -EFAULT; 3834 3835 /* Handle options that can be set without locking the socket. 
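 * Each of these only updates a single field with WRITE_ONCE(), paired
 * with READ_ONCE() at the lockless readers, so the socket lock is not
 * needed for them.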
*/ 3836 switch (optname) { 3837 case TCP_SYNCNT: 3838 return tcp_sock_set_syncnt(sk, val); 3839 case TCP_USER_TIMEOUT: 3840 return tcp_sock_set_user_timeout(sk, val); 3841 case TCP_KEEPINTVL: 3842 return tcp_sock_set_keepintvl(sk, val); 3843 case TCP_KEEPCNT: 3844 return tcp_sock_set_keepcnt(sk, val); 3845 case TCP_LINGER2: 3846 if (val < 0) 3847 WRITE_ONCE(tp->linger2, -1); 3848 else if (val > TCP_FIN_TIMEOUT_MAX / HZ) 3849 WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX); 3850 else 3851 WRITE_ONCE(tp->linger2, val * HZ); 3852 return 0; 3853 case TCP_DEFER_ACCEPT: 3854 /* Translate value in seconds to number of retransmits */ 3855 WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept, 3856 secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 3857 TCP_RTO_MAX / HZ)); 3858 return 0; 3859 case TCP_RTO_MAX_MS: 3860 if (val < MSEC_PER_SEC || val > TCP_RTO_MAX_SEC * MSEC_PER_SEC) 3861 return -EINVAL; 3862 WRITE_ONCE(inet_csk(sk)->icsk_rto_max, msecs_to_jiffies(val)); 3863 return 0; 3864 case TCP_RTO_MIN_US: { 3865 int rto_min = usecs_to_jiffies(val); 3866 3867 if (rto_min > TCP_RTO_MIN || rto_min < TCP_TIMEOUT_MIN) 3868 return -EINVAL; 3869 WRITE_ONCE(inet_csk(sk)->icsk_rto_min, rto_min); 3870 return 0; 3871 } 3872 case TCP_DELACK_MAX_US: { 3873 int delack_max = usecs_to_jiffies(val); 3874 3875 if (delack_max > TCP_DELACK_MAX || delack_max < TCP_TIMEOUT_MIN) 3876 return -EINVAL; 3877 WRITE_ONCE(inet_csk(sk)->icsk_delack_max, delack_max); 3878 return 0; 3879 } 3880 } 3881 3882 sockopt_lock_sock(sk); 3883 3884 switch (optname) { 3885 case TCP_MAXSEG: 3886 /* Values greater than interface MTU won't take effect. However 3887 * at the point when this call is done we typically don't yet 3888 * know which interface is going to be used 3889 */ 3890 if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { 3891 err = -EINVAL; 3892 break; 3893 } 3894 tp->rx_opt.user_mss = val; 3895 break; 3896 3897 case TCP_NODELAY: 3898 __tcp_sock_set_nodelay(sk, val); 3899 break; 3900 3901 case TCP_THIN_LINEAR_TIMEOUTS: 3902 if (val < 0 || val > 1) 3903 err = -EINVAL; 3904 else 3905 tp->thin_lto = val; 3906 break; 3907 3908 case TCP_THIN_DUPACK: 3909 if (val < 0 || val > 1) 3910 err = -EINVAL; 3911 break; 3912 3913 case TCP_REPAIR: 3914 if (!tcp_can_repair_sock(sk)) 3915 err = -EPERM; 3916 else if (val == TCP_REPAIR_ON) { 3917 tp->repair = 1; 3918 sk->sk_reuse = SK_FORCE_REUSE; 3919 tp->repair_queue = TCP_NO_QUEUE; 3920 } else if (val == TCP_REPAIR_OFF) { 3921 tp->repair = 0; 3922 sk->sk_reuse = SK_NO_REUSE; 3923 tcp_send_window_probe(sk); 3924 } else if (val == TCP_REPAIR_OFF_NO_WP) { 3925 tp->repair = 0; 3926 sk->sk_reuse = SK_NO_REUSE; 3927 } else 3928 err = -EINVAL; 3929 3930 break; 3931 3932 case TCP_REPAIR_QUEUE: 3933 if (!tp->repair) 3934 err = -EPERM; 3935 else if ((unsigned int)val < TCP_QUEUES_NR) 3936 tp->repair_queue = val; 3937 else 3938 err = -EINVAL; 3939 break; 3940 3941 case TCP_QUEUE_SEQ: 3942 if (sk->sk_state != TCP_CLOSE) { 3943 err = -EPERM; 3944 } else if (tp->repair_queue == TCP_SEND_QUEUE) { 3945 if (!tcp_rtx_queue_empty(sk)) 3946 err = -EPERM; 3947 else 3948 WRITE_ONCE(tp->write_seq, val); 3949 } else if (tp->repair_queue == TCP_RECV_QUEUE) { 3950 if (tp->rcv_nxt != tp->copied_seq) { 3951 err = -EPERM; 3952 } else { 3953 WRITE_ONCE(tp->rcv_nxt, val); 3954 WRITE_ONCE(tp->copied_seq, val); 3955 } 3956 } else { 3957 err = -EINVAL; 3958 } 3959 break; 3960 3961 case TCP_REPAIR_OPTIONS: 3962 if (!tp->repair) 3963 err = -EINVAL; 3964 else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) 3965 err = 
tcp_repair_options_est(sk, optval, optlen); 3966 else 3967 err = -EPERM; 3968 break; 3969 3970 case TCP_CORK: 3971 __tcp_sock_set_cork(sk, val); 3972 break; 3973 3974 case TCP_KEEPIDLE: 3975 err = tcp_sock_set_keepidle_locked(sk, val); 3976 break; 3977 case TCP_SAVE_SYN: 3978 /* 0: disable, 1: enable, 2: start from ether_header */ 3979 if (val < 0 || val > 2) 3980 err = -EINVAL; 3981 else 3982 tp->save_syn = val; 3983 break; 3984 3985 case TCP_WINDOW_CLAMP: 3986 err = tcp_set_window_clamp(sk, val); 3987 break; 3988 3989 case TCP_QUICKACK: 3990 __tcp_sock_set_quickack(sk, val); 3991 break; 3992 3993 case TCP_AO_REPAIR: 3994 if (!tcp_can_repair_sock(sk)) { 3995 err = -EPERM; 3996 break; 3997 } 3998 err = tcp_ao_set_repair(sk, optval, optlen); 3999 break; 4000 #ifdef CONFIG_TCP_AO 4001 case TCP_AO_ADD_KEY: 4002 case TCP_AO_DEL_KEY: 4003 case TCP_AO_INFO: { 4004 /* If this is the first TCP-AO setsockopt() on the socket, 4005 * sk_state has to be LISTEN or CLOSE. Allow TCP_REPAIR 4006 * in any state. 4007 */ 4008 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) 4009 goto ao_parse; 4010 if (rcu_dereference_protected(tcp_sk(sk)->ao_info, 4011 lockdep_sock_is_held(sk))) 4012 goto ao_parse; 4013 if (tp->repair) 4014 goto ao_parse; 4015 err = -EISCONN; 4016 break; 4017 ao_parse: 4018 err = tp->af_specific->ao_parse(sk, optname, optval, optlen); 4019 break; 4020 } 4021 #endif 4022 #ifdef CONFIG_TCP_MD5SIG 4023 case TCP_MD5SIG: 4024 case TCP_MD5SIG_EXT: 4025 err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 4026 break; 4027 #endif 4028 case TCP_FASTOPEN: 4029 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 4030 TCPF_LISTEN))) { 4031 tcp_fastopen_init_key_once(net); 4032 4033 fastopen_queue_tune(sk, val); 4034 } else { 4035 err = -EINVAL; 4036 } 4037 break; 4038 case TCP_FASTOPEN_CONNECT: 4039 if (val > 1 || val < 0) { 4040 err = -EINVAL; 4041 } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & 4042 TFO_CLIENT_ENABLE) { 4043 if (sk->sk_state == TCP_CLOSE) 4044 tp->fastopen_connect = val; 4045 else 4046 err = -EINVAL; 4047 } else { 4048 err = -EOPNOTSUPP; 4049 } 4050 break; 4051 case TCP_FASTOPEN_NO_COOKIE: 4052 if (val > 1 || val < 0) 4053 err = -EINVAL; 4054 else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 4055 err = -EINVAL; 4056 else 4057 tp->fastopen_no_cookie = val; 4058 break; 4059 case TCP_TIMESTAMP: 4060 if (!tp->repair) { 4061 err = -EPERM; 4062 break; 4063 } 4064 /* val is an opaque field, 4065 * and low order bit contains usec_ts enable bit. 4066 * Its a best effort, and we do not care if user makes an error. 
4067 */ 4068 tp->tcp_usec_ts = val & 1; 4069 WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts)); 4070 break; 4071 case TCP_REPAIR_WINDOW: 4072 err = tcp_repair_set_window(tp, optval, optlen); 4073 break; 4074 case TCP_NOTSENT_LOWAT: 4075 WRITE_ONCE(tp->notsent_lowat, val); 4076 sk->sk_write_space(sk); 4077 break; 4078 case TCP_INQ: 4079 if (val > 1 || val < 0) 4080 err = -EINVAL; 4081 else 4082 tp->recvmsg_inq = val; 4083 break; 4084 case TCP_TX_DELAY: 4085 if (val) 4086 tcp_enable_tx_delay(); 4087 WRITE_ONCE(tp->tcp_tx_delay, val); 4088 break; 4089 default: 4090 err = -ENOPROTOOPT; 4091 break; 4092 } 4093 4094 sockopt_release_sock(sk); 4095 return err; 4096 } 4097 4098 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 4099 unsigned int optlen) 4100 { 4101 const struct inet_connection_sock *icsk = inet_csk(sk); 4102 4103 if (level != SOL_TCP) 4104 /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 4105 return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname, 4106 optval, optlen); 4107 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 4108 } 4109 EXPORT_IPV6_MOD(tcp_setsockopt); 4110 4111 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 4112 struct tcp_info *info) 4113 { 4114 u64 stats[__TCP_CHRONO_MAX], total = 0; 4115 enum tcp_chrono i; 4116 4117 for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { 4118 stats[i] = tp->chrono_stat[i - 1]; 4119 if (i == tp->chrono_type) 4120 stats[i] += tcp_jiffies32 - tp->chrono_start; 4121 stats[i] *= USEC_PER_SEC / HZ; 4122 total += stats[i]; 4123 } 4124 4125 info->tcpi_busy_time = total; 4126 info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; 4127 info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; 4128 } 4129 4130 /* Return information about state of tcp endpoint in API format. */ 4131 void tcp_get_info(struct sock *sk, struct tcp_info *info) 4132 { 4133 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 4134 const struct inet_connection_sock *icsk = inet_csk(sk); 4135 unsigned long rate; 4136 u32 now; 4137 u64 rate64; 4138 bool slow; 4139 4140 memset(info, 0, sizeof(*info)); 4141 if (sk->sk_type != SOCK_STREAM) 4142 return; 4143 4144 info->tcpi_state = inet_sk_state_load(sk); 4145 4146 /* Report meaningful fields for all TCP states, including listeners */ 4147 rate = READ_ONCE(sk->sk_pacing_rate); 4148 rate64 = (rate != ~0UL) ? rate : ~0ULL; 4149 info->tcpi_pacing_rate = rate64; 4150 4151 rate = READ_ONCE(sk->sk_max_pacing_rate); 4152 rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 4153 info->tcpi_max_pacing_rate = rate64; 4154 4155 info->tcpi_reordering = tp->reordering; 4156 info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); 4157 4158 if (info->tcpi_state == TCP_LISTEN) { 4159 /* listeners aliased fields : 4160 * tcpi_unacked -> Number of children ready for accept() 4161 * tcpi_sacked -> max backlog 4162 */ 4163 info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); 4164 info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); 4165 return; 4166 } 4167 4168 slow = lock_sock_fast(sk); 4169 4170 info->tcpi_ca_state = icsk->icsk_ca_state; 4171 info->tcpi_retransmits = icsk->icsk_retransmits; 4172 info->tcpi_probes = icsk->icsk_probes_out; 4173 info->tcpi_backoff = icsk->icsk_backoff; 4174 4175 if (tp->rx_opt.tstamp_ok) 4176 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 4177 if (tcp_is_sack(tp)) 4178 info->tcpi_options |= TCPI_OPT_SACK; 4179 if (tp->rx_opt.wscale_ok) { 4180 info->tcpi_options |= TCPI_OPT_WSCALE; 4181 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 4182 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 4183 } 4184 4185 if (tcp_ecn_mode_any(tp)) 4186 info->tcpi_options |= TCPI_OPT_ECN; 4187 if (tp->ecn_flags & TCP_ECN_SEEN) 4188 info->tcpi_options |= TCPI_OPT_ECN_SEEN; 4189 if (tp->syn_data_acked) 4190 info->tcpi_options |= TCPI_OPT_SYN_DATA; 4191 if (tp->tcp_usec_ts) 4192 info->tcpi_options |= TCPI_OPT_USEC_TS; 4193 if (tp->syn_fastopen_child) 4194 info->tcpi_options |= TCPI_OPT_TFO_CHILD; 4195 4196 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 4197 info->tcpi_ato = jiffies_to_usecs(min_t(u32, icsk->icsk_ack.ato, 4198 tcp_delack_max(sk))); 4199 info->tcpi_snd_mss = tp->mss_cache; 4200 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 4201 4202 info->tcpi_unacked = tp->packets_out; 4203 info->tcpi_sacked = tp->sacked_out; 4204 4205 info->tcpi_lost = tp->lost_out; 4206 info->tcpi_retrans = tp->retrans_out; 4207 4208 now = tcp_jiffies32; 4209 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 4210 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 4211 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 4212 4213 info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 4214 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 4215 info->tcpi_rtt = tp->srtt_us >> 3; 4216 info->tcpi_rttvar = tp->mdev_us >> 2; 4217 info->tcpi_snd_ssthresh = tp->snd_ssthresh; 4218 info->tcpi_advmss = tp->advmss; 4219 4220 info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; 4221 info->tcpi_rcv_space = tp->rcvq_space.space; 4222 4223 info->tcpi_total_retrans = tp->total_retrans; 4224 4225 info->tcpi_bytes_acked = tp->bytes_acked; 4226 info->tcpi_bytes_received = tp->bytes_received; 4227 info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); 4228 tcp_get_info_chrono_stats(tp, info); 4229 4230 info->tcpi_segs_out = tp->segs_out; 4231 4232 /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ 4233 info->tcpi_segs_in = READ_ONCE(tp->segs_in); 4234 info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); 4235 4236 info->tcpi_min_rtt = tcp_min_rtt(tp); 4237 info->tcpi_data_segs_out = tp->data_segs_out; 4238 4239 info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 
1 : 0; 4240 rate64 = tcp_compute_delivery_rate(tp); 4241 if (rate64) 4242 info->tcpi_delivery_rate = rate64; 4243 info->tcpi_delivered = tp->delivered; 4244 info->tcpi_delivered_ce = tp->delivered_ce; 4245 info->tcpi_bytes_sent = tp->bytes_sent; 4246 info->tcpi_bytes_retrans = tp->bytes_retrans; 4247 info->tcpi_dsack_dups = tp->dsack_dups; 4248 info->tcpi_reord_seen = tp->reord_seen; 4249 info->tcpi_rcv_ooopack = tp->rcv_ooopack; 4250 info->tcpi_snd_wnd = tp->snd_wnd; 4251 info->tcpi_rcv_wnd = tp->rcv_wnd; 4252 info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash; 4253 info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; 4254 4255 info->tcpi_total_rto = tp->total_rto; 4256 info->tcpi_total_rto_recoveries = tp->total_rto_recoveries; 4257 info->tcpi_total_rto_time = tp->total_rto_time; 4258 if (tp->rto_stamp) 4259 info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp; 4260 4261 unlock_sock_fast(sk, slow); 4262 } 4263 EXPORT_SYMBOL_GPL(tcp_get_info); 4264 4265 static size_t tcp_opt_stats_get_size(void) 4266 { 4267 return 4268 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ 4269 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ 4270 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ 4271 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ 4272 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ 4273 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ 4274 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ 4275 nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ 4276 nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ 4277 nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ 4278 nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ 4279 nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ 4280 nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ 4281 nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ 4282 nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ 4283 nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ 4284 nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ 4285 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ 4286 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ 4287 nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ 4288 nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ 4289 nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ 4290 nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ 4291 nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ 4292 nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */ 4293 nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */ 4294 nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */ 4295 0; 4296 } 4297 4298 /* Returns TTL or hop limit of an incoming packet from skb. 
*/ 4299 static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb) 4300 { 4301 if (skb->protocol == htons(ETH_P_IP)) 4302 return ip_hdr(skb)->ttl; 4303 else if (skb->protocol == htons(ETH_P_IPV6)) 4304 return ipv6_hdr(skb)->hop_limit; 4305 else 4306 return 0; 4307 } 4308 4309 struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, 4310 const struct sk_buff *orig_skb, 4311 const struct sk_buff *ack_skb) 4312 { 4313 const struct tcp_sock *tp = tcp_sk(sk); 4314 struct sk_buff *stats; 4315 struct tcp_info info; 4316 unsigned long rate; 4317 u64 rate64; 4318 4319 stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); 4320 if (!stats) 4321 return NULL; 4322 4323 tcp_get_info_chrono_stats(tp, &info); 4324 nla_put_u64_64bit(stats, TCP_NLA_BUSY, 4325 info.tcpi_busy_time, TCP_NLA_PAD); 4326 nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, 4327 info.tcpi_rwnd_limited, TCP_NLA_PAD); 4328 nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, 4329 info.tcpi_sndbuf_limited, TCP_NLA_PAD); 4330 nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, 4331 tp->data_segs_out, TCP_NLA_PAD); 4332 nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, 4333 tp->total_retrans, TCP_NLA_PAD); 4334 4335 rate = READ_ONCE(sk->sk_pacing_rate); 4336 rate64 = (rate != ~0UL) ? rate : ~0ULL; 4337 nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); 4338 4339 rate64 = tcp_compute_delivery_rate(tp); 4340 nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); 4341 4342 nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); 4343 nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); 4344 nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); 4345 4346 nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); 4347 nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); 4348 nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); 4349 nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); 4350 nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); 4351 4352 nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); 4353 nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); 4354 4355 nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, 4356 TCP_NLA_PAD); 4357 nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, 4358 TCP_NLA_PAD); 4359 nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); 4360 nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); 4361 nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); 4362 nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); 4363 nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, 4364 max_t(int, 0, tp->write_seq - tp->snd_nxt)); 4365 nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, 4366 TCP_NLA_PAD); 4367 if (ack_skb) 4368 nla_put_u8(stats, TCP_NLA_TTL, 4369 tcp_skb_ttl_or_hop_limit(ack_skb)); 4370 4371 nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash); 4372 return stats; 4373 } 4374 4375 int do_tcp_getsockopt(struct sock *sk, int level, 4376 int optname, sockptr_t optval, sockptr_t optlen) 4377 { 4378 struct inet_connection_sock *icsk = inet_csk(sk); 4379 struct tcp_sock *tp = tcp_sk(sk); 4380 struct net *net = sock_net(sk); 4381 int val, len; 4382 4383 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4384 return -EFAULT; 4385 4386 if (len < 0) 4387 return -EINVAL; 4388 4389 len = min_t(unsigned int, len, sizeof(int)); 4390 4391 switch (optname) { 4392 case TCP_MAXSEG: 4393 val = tp->mss_cache; 4394 if (tp->rx_opt.user_mss && 4395 ((1 << 
sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 4396 val = tp->rx_opt.user_mss; 4397 if (tp->repair) 4398 val = tp->rx_opt.mss_clamp; 4399 break; 4400 case TCP_NODELAY: 4401 val = !!(tp->nonagle&TCP_NAGLE_OFF); 4402 break; 4403 case TCP_CORK: 4404 val = !!(tp->nonagle&TCP_NAGLE_CORK); 4405 break; 4406 case TCP_KEEPIDLE: 4407 val = keepalive_time_when(tp) / HZ; 4408 break; 4409 case TCP_KEEPINTVL: 4410 val = keepalive_intvl_when(tp) / HZ; 4411 break; 4412 case TCP_KEEPCNT: 4413 val = keepalive_probes(tp); 4414 break; 4415 case TCP_SYNCNT: 4416 val = READ_ONCE(icsk->icsk_syn_retries) ? : 4417 READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); 4418 break; 4419 case TCP_LINGER2: 4420 val = READ_ONCE(tp->linger2); 4421 if (val >= 0) 4422 val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; 4423 break; 4424 case TCP_DEFER_ACCEPT: 4425 val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept); 4426 val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ, 4427 TCP_RTO_MAX / HZ); 4428 break; 4429 case TCP_WINDOW_CLAMP: 4430 val = READ_ONCE(tp->window_clamp); 4431 break; 4432 case TCP_INFO: { 4433 struct tcp_info info; 4434 4435 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4436 return -EFAULT; 4437 4438 tcp_get_info(sk, &info); 4439 4440 len = min_t(unsigned int, len, sizeof(info)); 4441 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4442 return -EFAULT; 4443 if (copy_to_sockptr(optval, &info, len)) 4444 return -EFAULT; 4445 return 0; 4446 } 4447 case TCP_CC_INFO: { 4448 const struct tcp_congestion_ops *ca_ops; 4449 union tcp_cc_info info; 4450 size_t sz = 0; 4451 int attr; 4452 4453 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4454 return -EFAULT; 4455 4456 ca_ops = icsk->icsk_ca_ops; 4457 if (ca_ops && ca_ops->get_info) 4458 sz = ca_ops->get_info(sk, ~0U, &attr, &info); 4459 4460 len = min_t(unsigned int, len, sz); 4461 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4462 return -EFAULT; 4463 if (copy_to_sockptr(optval, &info, len)) 4464 return -EFAULT; 4465 return 0; 4466 } 4467 case TCP_QUICKACK: 4468 val = !inet_csk_in_pingpong_mode(sk); 4469 break; 4470 4471 case TCP_CONGESTION: 4472 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4473 return -EFAULT; 4474 len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 4475 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4476 return -EFAULT; 4477 if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) 4478 return -EFAULT; 4479 return 0; 4480 4481 case TCP_ULP: 4482 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4483 return -EFAULT; 4484 len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); 4485 if (!icsk->icsk_ulp_ops) { 4486 len = 0; 4487 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4488 return -EFAULT; 4489 return 0; 4490 } 4491 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4492 return -EFAULT; 4493 if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) 4494 return -EFAULT; 4495 return 0; 4496 4497 case TCP_FASTOPEN_KEY: { 4498 u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; 4499 unsigned int key_len; 4500 4501 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4502 return -EFAULT; 4503 4504 key_len = tcp_fastopen_get_cipher(net, icsk, key) * 4505 TCP_FASTOPEN_KEY_LENGTH; 4506 len = min_t(unsigned int, len, key_len); 4507 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4508 return -EFAULT; 4509 if (copy_to_sockptr(optval, key, len)) 4510 return -EFAULT; 4511 return 0; 4512 } 4513 case TCP_THIN_LINEAR_TIMEOUTS: 4514 val = tp->thin_lto; 4515 break; 4516 4517 case TCP_THIN_DUPACK: 4518 val = 0; 4519 break; 4520 4521 case TCP_REPAIR: 
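/* Report whether the socket is currently in repair mode (toggled via TCP_REPAIR in do_tcp_setsockopt() above). */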
4522 val = tp->repair; 4523 break; 4524 4525 case TCP_REPAIR_QUEUE: 4526 if (tp->repair) 4527 val = tp->repair_queue; 4528 else 4529 return -EINVAL; 4530 break; 4531 4532 case TCP_REPAIR_WINDOW: { 4533 struct tcp_repair_window opt; 4534 4535 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4536 return -EFAULT; 4537 4538 if (len != sizeof(opt)) 4539 return -EINVAL; 4540 4541 if (!tp->repair) 4542 return -EPERM; 4543 4544 opt.snd_wl1 = tp->snd_wl1; 4545 opt.snd_wnd = tp->snd_wnd; 4546 opt.max_window = tp->max_window; 4547 opt.rcv_wnd = tp->rcv_wnd; 4548 opt.rcv_wup = tp->rcv_wup; 4549 4550 if (copy_to_sockptr(optval, &opt, len)) 4551 return -EFAULT; 4552 return 0; 4553 } 4554 case TCP_QUEUE_SEQ: 4555 if (tp->repair_queue == TCP_SEND_QUEUE) 4556 val = tp->write_seq; 4557 else if (tp->repair_queue == TCP_RECV_QUEUE) 4558 val = tp->rcv_nxt; 4559 else 4560 return -EINVAL; 4561 break; 4562 4563 case TCP_USER_TIMEOUT: 4564 val = READ_ONCE(icsk->icsk_user_timeout); 4565 break; 4566 4567 case TCP_FASTOPEN: 4568 val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen); 4569 break; 4570 4571 case TCP_FASTOPEN_CONNECT: 4572 val = tp->fastopen_connect; 4573 break; 4574 4575 case TCP_FASTOPEN_NO_COOKIE: 4576 val = tp->fastopen_no_cookie; 4577 break; 4578 4579 case TCP_TX_DELAY: 4580 val = READ_ONCE(tp->tcp_tx_delay); 4581 break; 4582 4583 case TCP_TIMESTAMP: 4584 val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset); 4585 if (tp->tcp_usec_ts) 4586 val |= 1; 4587 else 4588 val &= ~1; 4589 break; 4590 case TCP_NOTSENT_LOWAT: 4591 val = READ_ONCE(tp->notsent_lowat); 4592 break; 4593 case TCP_INQ: 4594 val = tp->recvmsg_inq; 4595 break; 4596 case TCP_SAVE_SYN: 4597 val = tp->save_syn; 4598 break; 4599 case TCP_SAVED_SYN: { 4600 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4601 return -EFAULT; 4602 4603 sockopt_lock_sock(sk); 4604 if (tp->saved_syn) { 4605 if (len < tcp_saved_syn_len(tp->saved_syn)) { 4606 len = tcp_saved_syn_len(tp->saved_syn); 4607 if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4608 sockopt_release_sock(sk); 4609 return -EFAULT; 4610 } 4611 sockopt_release_sock(sk); 4612 return -EINVAL; 4613 } 4614 len = tcp_saved_syn_len(tp->saved_syn); 4615 if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4616 sockopt_release_sock(sk); 4617 return -EFAULT; 4618 } 4619 if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { 4620 sockopt_release_sock(sk); 4621 return -EFAULT; 4622 } 4623 tcp_saved_syn_free(tp); 4624 sockopt_release_sock(sk); 4625 } else { 4626 sockopt_release_sock(sk); 4627 len = 0; 4628 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4629 return -EFAULT; 4630 } 4631 return 0; 4632 } 4633 #ifdef CONFIG_MMU 4634 case TCP_ZEROCOPY_RECEIVE: { 4635 struct scm_timestamping_internal tss; 4636 struct tcp_zerocopy_receive zc = {}; 4637 int err; 4638 4639 if (copy_from_sockptr(&len, optlen, sizeof(int))) 4640 return -EFAULT; 4641 if (len < 0 || 4642 len < offsetofend(struct tcp_zerocopy_receive, length)) 4643 return -EINVAL; 4644 if (unlikely(len > sizeof(zc))) { 4645 err = check_zeroed_sockptr(optval, sizeof(zc), 4646 len - sizeof(zc)); 4647 if (err < 1) 4648 return err == 0 ? 
-EINVAL : err; 4649 len = sizeof(zc); 4650 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4651 return -EFAULT; 4652 } 4653 if (copy_from_sockptr(&zc, optval, len)) 4654 return -EFAULT; 4655 if (zc.reserved) 4656 return -EINVAL; 4657 if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) 4658 return -EINVAL; 4659 sockopt_lock_sock(sk); 4660 err = tcp_zerocopy_receive(sk, &zc, &tss); 4661 err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, 4662 &zc, &len, err); 4663 sockopt_release_sock(sk); 4664 if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) 4665 goto zerocopy_rcv_cmsg; 4666 switch (len) { 4667 case offsetofend(struct tcp_zerocopy_receive, msg_flags): 4668 goto zerocopy_rcv_cmsg; 4669 case offsetofend(struct tcp_zerocopy_receive, msg_controllen): 4670 case offsetofend(struct tcp_zerocopy_receive, msg_control): 4671 case offsetofend(struct tcp_zerocopy_receive, flags): 4672 case offsetofend(struct tcp_zerocopy_receive, copybuf_len): 4673 case offsetofend(struct tcp_zerocopy_receive, copybuf_address): 4674 case offsetofend(struct tcp_zerocopy_receive, err): 4675 goto zerocopy_rcv_sk_err; 4676 case offsetofend(struct tcp_zerocopy_receive, inq): 4677 goto zerocopy_rcv_inq; 4678 case offsetofend(struct tcp_zerocopy_receive, length): 4679 default: 4680 goto zerocopy_rcv_out; 4681 } 4682 zerocopy_rcv_cmsg: 4683 if (zc.msg_flags & TCP_CMSG_TS) 4684 tcp_zc_finalize_rx_tstamp(sk, &zc, &tss); 4685 else 4686 zc.msg_flags = 0; 4687 zerocopy_rcv_sk_err: 4688 if (!err) 4689 zc.err = sock_error(sk); 4690 zerocopy_rcv_inq: 4691 zc.inq = tcp_inq_hint(sk); 4692 zerocopy_rcv_out: 4693 if (!err && copy_to_sockptr(optval, &zc, len)) 4694 err = -EFAULT; 4695 return err; 4696 } 4697 #endif 4698 case TCP_AO_REPAIR: 4699 if (!tcp_can_repair_sock(sk)) 4700 return -EPERM; 4701 return tcp_ao_get_repair(sk, optval, optlen); 4702 case TCP_AO_GET_KEYS: 4703 case TCP_AO_INFO: { 4704 int err; 4705 4706 sockopt_lock_sock(sk); 4707 if (optname == TCP_AO_GET_KEYS) 4708 err = tcp_ao_get_mkts(sk, optval, optlen); 4709 else 4710 err = tcp_ao_get_sock_info(sk, optval, optlen); 4711 sockopt_release_sock(sk); 4712 4713 return err; 4714 } 4715 case TCP_IS_MPTCP: 4716 val = 0; 4717 break; 4718 case TCP_RTO_MAX_MS: 4719 val = jiffies_to_msecs(tcp_rto_max(sk)); 4720 break; 4721 case TCP_RTO_MIN_US: 4722 val = jiffies_to_usecs(READ_ONCE(inet_csk(sk)->icsk_rto_min)); 4723 break; 4724 case TCP_DELACK_MAX_US: 4725 val = jiffies_to_usecs(READ_ONCE(inet_csk(sk)->icsk_delack_max)); 4726 break; 4727 default: 4728 return -ENOPROTOOPT; 4729 } 4730 4731 if (copy_to_sockptr(optlen, &len, sizeof(int))) 4732 return -EFAULT; 4733 if (copy_to_sockptr(optval, &val, len)) 4734 return -EFAULT; 4735 return 0; 4736 } 4737 4738 bool tcp_bpf_bypass_getsockopt(int level, int optname) 4739 { 4740 /* TCP do_tcp_getsockopt has optimized getsockopt implementation 4741 * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE. 
4742 */ 4743 if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE) 4744 return true; 4745 4746 return false; 4747 } 4748 EXPORT_IPV6_MOD(tcp_bpf_bypass_getsockopt); 4749 4750 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 4751 int __user *optlen) 4752 { 4753 struct inet_connection_sock *icsk = inet_csk(sk); 4754 4755 if (level != SOL_TCP) 4756 /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 4757 return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname, 4758 optval, optlen); 4759 return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), 4760 USER_SOCKPTR(optlen)); 4761 } 4762 EXPORT_IPV6_MOD(tcp_getsockopt); 4763 4764 #ifdef CONFIG_TCP_MD5SIG 4765 int tcp_md5_sigpool_id = -1; 4766 EXPORT_IPV6_MOD_GPL(tcp_md5_sigpool_id); 4767 4768 int tcp_md5_alloc_sigpool(void) 4769 { 4770 size_t scratch_size; 4771 int ret; 4772 4773 scratch_size = sizeof(union tcp_md5sum_block) + sizeof(struct tcphdr); 4774 ret = tcp_sigpool_alloc_ahash("md5", scratch_size); 4775 if (ret >= 0) { 4776 /* As long as any md5 sigpool was allocated, the return 4777 * id would stay the same. Re-write the id only for the case 4778 * when previously all MD5 keys were deleted and this call 4779 * allocates the first MD5 key, which may return a different 4780 * sigpool id than was used previously. 4781 */ 4782 WRITE_ONCE(tcp_md5_sigpool_id, ret); /* Avoids the compiler potentially being smart here */ 4783 return 0; 4784 } 4785 return ret; 4786 } 4787 4788 void tcp_md5_release_sigpool(void) 4789 { 4790 tcp_sigpool_release(READ_ONCE(tcp_md5_sigpool_id)); 4791 } 4792 4793 void tcp_md5_add_sigpool(void) 4794 { 4795 tcp_sigpool_get(READ_ONCE(tcp_md5_sigpool_id)); 4796 } 4797 4798 int tcp_md5_hash_key(struct tcp_sigpool *hp, 4799 const struct tcp_md5sig_key *key) 4800 { 4801 u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ 4802 struct scatterlist sg; 4803 4804 sg_init_one(&sg, key->key, keylen); 4805 ahash_request_set_crypt(hp->req, &sg, NULL, keylen); 4806 4807 /* We use data_race() because tcp_md5_do_add() might change 4808 * key->key under us 4809 */ 4810 return data_race(crypto_ahash_update(hp->req)); 4811 } 4812 EXPORT_IPV6_MOD(tcp_md5_hash_key); 4813 4814 /* Called with rcu_read_lock() */ 4815 static enum skb_drop_reason 4816 tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, 4817 const void *saddr, const void *daddr, 4818 int family, int l3index, const __u8 *hash_location) 4819 { 4820 /* This gets called for each TCP segment that has TCP-MD5 option. 4821 * We have 3 drop cases: 4822 * o No MD5 hash and one expected. 4823 * o MD5 hash and we're not expecting one. 4824 * o MD5 hash and its wrong. 4825 */ 4826 const struct tcp_sock *tp = tcp_sk(sk); 4827 struct tcp_md5sig_key *key; 4828 u8 newhash[16]; 4829 int genhash; 4830 4831 key = tcp_md5_do_lookup(sk, l3index, saddr, family); 4832 4833 if (!key && hash_location) { 4834 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 4835 trace_tcp_hash_md5_unexpected(sk, skb); 4836 return SKB_DROP_REASON_TCP_MD5UNEXPECTED; 4837 } 4838 4839 /* Check the signature. 4840 * To support dual stack listeners, we need to handle 4841 * IPv4-mapped case. 
 */
	if (family == AF_INET)
		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
	else
		genhash = tp->af_specific->calc_md5_hash(newhash, key,
							 NULL, skb);
	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		trace_tcp_hash_md5_mismatch(sk, skb);
		return SKB_DROP_REASON_TCP_MD5FAILURE;
	}
	return SKB_NOT_DROPPED_YET;
}
#else
static inline enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
		     int family, int l3index, const __u8 *hash_location)
{
	return SKB_NOT_DROPPED_YET;
}

#endif

/* Called with rcu_read_lock() */
enum skb_drop_reason
tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
		 const struct sk_buff *skb,
		 const void *saddr, const void *daddr,
		 int family, int dif, int sdif)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const struct tcp_ao_hdr *aoh;
	const __u8 *md5_location;
	int l3index;

	/* Invalid option, or one of the auth options appears more than once */
	if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
		trace_tcp_hash_bad_header(sk, skb);
		return SKB_DROP_REASON_TCP_AUTH_HDR;
	}

	if (req) {
		if (tcp_rsk_used_ao(req) != !!aoh) {
			u8 keyid, rnext, maclen;

			if (aoh) {
				keyid = aoh->keyid;
				rnext = aoh->rnext_keyid;
				maclen = tcp_ao_hdr_maclen(aoh);
			} else {
				keyid = rnext = maclen = 0;
			}

			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
			trace_tcp_ao_handshake_failure(sk, skb, keyid, rnext, maclen);
			return SKB_DROP_REASON_TCP_AOFAILURE;
		}
	}

	/* If sdif is set, the packet ingressed via a device in an L3 domain
	 * and dif is set to the l3mdev.
	 */
	l3index = sdif ? dif : 0;

	/* Fast path: unsigned segments */
	if (likely(!md5_location && !aoh)) {
		/* Drop if there's a TCP-MD5 or TCP-AO key with any rcvid/sndid
		 * for the remote peer. On a TCP-AO established connection
		 * the last key is impossible to remove, so there's
		 * always at least one current_key.
4913 */ 4914 if (tcp_ao_required(sk, saddr, family, l3index, true)) { 4915 trace_tcp_hash_ao_required(sk, skb); 4916 return SKB_DROP_REASON_TCP_AONOTFOUND; 4917 } 4918 if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) { 4919 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 4920 trace_tcp_hash_md5_required(sk, skb); 4921 return SKB_DROP_REASON_TCP_MD5NOTFOUND; 4922 } 4923 return SKB_NOT_DROPPED_YET; 4924 } 4925 4926 if (aoh) 4927 return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh); 4928 4929 return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family, 4930 l3index, md5_location); 4931 } 4932 EXPORT_IPV6_MOD_GPL(tcp_inbound_hash); 4933 4934 void tcp_done(struct sock *sk) 4935 { 4936 struct request_sock *req; 4937 4938 /* We might be called with a new socket, after 4939 * inet_csk_prepare_forced_close() has been called 4940 * so we can not use lockdep_sock_is_held(sk) 4941 */ 4942 req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); 4943 4944 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 4945 TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 4946 4947 tcp_set_state(sk, TCP_CLOSE); 4948 tcp_clear_xmit_timers(sk); 4949 if (req) 4950 reqsk_fastopen_remove(sk, req, false); 4951 4952 WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 4953 4954 if (!sock_flag(sk, SOCK_DEAD)) 4955 sk->sk_state_change(sk); 4956 else 4957 inet_csk_destroy_sock(sk); 4958 } 4959 EXPORT_SYMBOL_GPL(tcp_done); 4960 4961 int tcp_abort(struct sock *sk, int err) 4962 { 4963 int state = inet_sk_state_load(sk); 4964 4965 if (state == TCP_NEW_SYN_RECV) { 4966 struct request_sock *req = inet_reqsk(sk); 4967 4968 local_bh_disable(); 4969 inet_csk_reqsk_queue_drop(req->rsk_listener, req); 4970 local_bh_enable(); 4971 return 0; 4972 } 4973 if (state == TCP_TIME_WAIT) { 4974 struct inet_timewait_sock *tw = inet_twsk(sk); 4975 4976 refcount_inc(&tw->tw_refcnt); 4977 local_bh_disable(); 4978 inet_twsk_deschedule_put(tw); 4979 local_bh_enable(); 4980 return 0; 4981 } 4982 4983 /* BPF context ensures sock locking. */ 4984 if (!has_current_bpf_ctx()) 4985 /* Don't race with userspace socket closes such as tcp_close. */ 4986 lock_sock(sk); 4987 4988 /* Avoid closing the same socket twice. */ 4989 if (sk->sk_state == TCP_CLOSE) { 4990 if (!has_current_bpf_ctx()) 4991 release_sock(sk); 4992 return -ENOENT; 4993 } 4994 4995 if (sk->sk_state == TCP_LISTEN) { 4996 tcp_set_state(sk, TCP_CLOSE); 4997 inet_csk_listen_stop(sk); 4998 } 4999 5000 /* Don't race with BH socket closes such as inet_csk_listen_stop. 
*/ 5001 local_bh_disable(); 5002 bh_lock_sock(sk); 5003 5004 if (tcp_need_reset(sk->sk_state)) 5005 tcp_send_active_reset(sk, GFP_ATOMIC, 5006 SK_RST_REASON_TCP_STATE); 5007 tcp_done_with_error(sk, err); 5008 5009 bh_unlock_sock(sk); 5010 local_bh_enable(); 5011 if (!has_current_bpf_ctx()) 5012 release_sock(sk); 5013 return 0; 5014 } 5015 EXPORT_SYMBOL_GPL(tcp_abort); 5016 5017 extern struct tcp_congestion_ops tcp_reno; 5018 5019 static __initdata unsigned long thash_entries; 5020 static int __init set_thash_entries(char *str) 5021 { 5022 ssize_t ret; 5023 5024 if (!str) 5025 return 0; 5026 5027 ret = kstrtoul(str, 0, &thash_entries); 5028 if (ret) 5029 return 0; 5030 5031 return 1; 5032 } 5033 __setup("thash_entries=", set_thash_entries); 5034 5035 static void __init tcp_init_mem(void) 5036 { 5037 unsigned long limit = nr_free_buffer_pages() / 16; 5038 5039 limit = max(limit, 128UL); 5040 sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */ 5041 sysctl_tcp_mem[1] = limit; /* 6.25 % */ 5042 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */ 5043 } 5044 5045 static void __init tcp_struct_check(void) 5046 { 5047 /* TX read-mostly hotpath cache lines */ 5048 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, max_window); 5049 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, rcv_ssthresh); 5050 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering); 5051 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat); 5052 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs); 5053 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint); 5054 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 32); 5055 5056 /* TXRX read-mostly hotpath cache lines */ 5057 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset); 5058 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_wnd); 5059 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, mss_cache); 5060 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_cwnd); 5061 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, prr_out); 5062 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out); 5063 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out); 5064 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio); 5065 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 32); 5066 5067 /* RX read-mostly hotpath cache lines */ 5068 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq); 5069 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rcv_tstamp); 5070 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1); 5071 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq); 5072 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us); 5073 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, retrans_out); 5074 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, advmss); 5075 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, urg_data); 5076 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, lost); 5077 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min); 5078 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue); 5079 CACHELINE_ASSERT_GROUP_MEMBER(struct 
tcp_sock, tcp_sock_read_rx, snd_ssthresh); 5080 #if IS_ENABLED(CONFIG_TLS_DEVICE) 5081 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tcp_clean_acked); 5082 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 77); 5083 #else 5084 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69); 5085 #endif 5086 5087 /* TX read-write hotpath cache lines */ 5088 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out); 5089 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, data_segs_out); 5090 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, bytes_sent); 5091 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, snd_sml); 5092 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_start); 5093 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_stat); 5094 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, write_seq); 5095 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, pushed_seq); 5096 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime); 5097 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us); 5098 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns); 5099 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq); 5100 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue); 5101 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack); 5102 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags); 5103 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 89); 5104 5105 /* TXRX read-write hotpath cache lines */ 5106 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags); 5107 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_clock_cache); 5108 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_mstamp); 5109 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt); 5110 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt); 5111 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una); 5112 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, window_clamp); 5113 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, srtt_us); 5114 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, packets_out); 5115 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_up); 5116 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered); 5117 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered_ce); 5118 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited); 5119 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd); 5120 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt); 5121 5122 /* 32bit arches with 8byte alignment on u64 fields might need padding 5123 * before tcp_clock_cache. 
5124 */ 5125 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92 + 4); 5126 5127 /* RX read-write hotpath cache lines */ 5128 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received); 5129 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, segs_in); 5130 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, data_segs_in); 5131 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_wup); 5132 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, max_packets_out); 5133 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, cwnd_usage_seq); 5134 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_delivered); 5135 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_interval_us); 5136 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_last_tsecr); 5137 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, first_tx_mstamp); 5138 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_mstamp); 5139 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_acked); 5140 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_est); 5141 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcvq_space); 5142 CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_rx, 99); 5143 } 5144 5145 void __init tcp_init(void) 5146 { 5147 int max_rshare, max_wshare, cnt; 5148 unsigned long limit; 5149 unsigned int i; 5150 5151 BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); 5152 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > 5153 sizeof_field(struct sk_buff, cb)); 5154 5155 tcp_struct_check(); 5156 5157 percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); 5158 5159 timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); 5160 mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 5161 5162 inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", 5163 thash_entries, 21, /* one slot per 2 MB*/ 5164 0, 64 * 1024); 5165 tcp_hashinfo.bind_bucket_cachep = 5166 kmem_cache_create("tcp_bind_bucket", 5167 sizeof(struct inet_bind_bucket), 0, 5168 SLAB_HWCACHE_ALIGN | SLAB_PANIC | 5169 SLAB_ACCOUNT, 5170 NULL); 5171 tcp_hashinfo.bind2_bucket_cachep = 5172 kmem_cache_create("tcp_bind2_bucket", 5173 sizeof(struct inet_bind2_bucket), 0, 5174 SLAB_HWCACHE_ALIGN | SLAB_PANIC | 5175 SLAB_ACCOUNT, 5176 NULL); 5177 5178 /* Size and allocate the main established and bind bucket 5179 * hash tables. 5180 * 5181 * The methodology is similar to that of the buffer cache. 5182 */ 5183 tcp_hashinfo.ehash = 5184 alloc_large_system_hash("TCP established", 5185 sizeof(struct inet_ehash_bucket), 5186 thash_entries, 5187 17, /* one slot per 128 KB of memory */ 5188 0, 5189 NULL, 5190 &tcp_hashinfo.ehash_mask, 5191 0, 5192 thash_entries ? 
0 : 512 * 1024); 5193 for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) 5194 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); 5195 5196 if (inet_ehash_locks_alloc(&tcp_hashinfo)) 5197 panic("TCP: failed to alloc ehash_locks"); 5198 tcp_hashinfo.bhash = 5199 alloc_large_system_hash("TCP bind", 5200 2 * sizeof(struct inet_bind_hashbucket), 5201 tcp_hashinfo.ehash_mask + 1, 5202 17, /* one slot per 128 KB of memory */ 5203 0, 5204 &tcp_hashinfo.bhash_size, 5205 NULL, 5206 0, 5207 64 * 1024); 5208 tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; 5209 tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size; 5210 for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 5211 spin_lock_init(&tcp_hashinfo.bhash[i].lock); 5212 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 5213 spin_lock_init(&tcp_hashinfo.bhash2[i].lock); 5214 INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); 5215 } 5216 5217 tcp_hashinfo.pernet = false; 5218 5219 cnt = tcp_hashinfo.ehash_mask + 1; 5220 sysctl_tcp_max_orphans = cnt / 2; 5221 5222 tcp_init_mem(); 5223 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 5224 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); 5225 max_wshare = min(4UL*1024*1024, limit); 5226 max_rshare = min(32UL*1024*1024, limit); 5227 5228 init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE; 5229 init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024; 5230 init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare); 5231 5232 init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE; 5233 init_net.ipv4.sysctl_tcp_rmem[1] = 131072; 5234 init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare); 5235 5236 pr_info("Hash tables configured (established %u bind %u)\n", 5237 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 5238 5239 tcp_v4_init(); 5240 tcp_metrics_init(); 5241 BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); 5242 tcp_tsq_work_init(); 5243 mptcp_init(); 5244 } 5245
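
/*
 * Illustrative userspace sketch (guarded out of the build): a minimal program
 * showing how the option handling above is typically exercised.
 * do_tcp_setsockopt() takes TCP_CONGESTION as a string and TCP_NODELAY as an
 * int, while do_tcp_getsockopt() fills a struct tcp_info for TCP_INFO. The
 * 127.0.0.1:8080 peer and the "cubic" name are arbitrary example values, and
 * availability of the option macros depends on the libc headers in use.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(8080),	/* example peer, assumed to be listening */
	};
	struct tcp_info info;
	socklen_t len = sizeof(info);
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	/* String-valued option: handled before the integer switch in do_tcp_setsockopt() */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", strlen("cubic")))
		perror("TCP_CONGESTION");

	/* Integer-valued option: handled under the socket lock */
	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)))
		perror("TCP_NODELAY");

	/* TCP_INFO returns the snapshot assembled by tcp_get_info() */
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0 &&
	    getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("rtt=%uus cwnd=%u retrans=%u\n",
		       info.tcpi_rtt, info.tcpi_snd_cwnd, info.tcpi_total_retrans);

	close(fd);
	return 0;
}
#endif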