// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties remove from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
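
/*
 * Illustrative walk through the states above: an active close travels
 * ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> TCP_CLOSE,
 * while the passive-closing peer travels ESTABLISHED -> CLOSE_WAIT ->
 * LAST_ACK -> TCP_CLOSE.  If both sides close simultaneously,
 * FIN_WAIT1 moves to CLOSING and then to TIME_WAIT once the final ACK
 * arrives.
 */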

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
#include <net/rps.h>

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;

	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
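
/*
 * Worked example for the two helpers above (illustrative numbers):
 * with an initial timeout of 1s and rto_max of 120s, the backoff
 * schedule is 1s, 2s, 4s, ...  retrans_to_secs(3, 1, 120) therefore
 * covers 1 + 2 + 4 = 7 seconds, and secs_to_retrans(7, 1, 120) maps
 * back to 3 retransmits.  Once the doubled timeout hits rto_max,
 * each additional retransmit adds a flat rto_max seconds.
 */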

static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}
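
/*
 * Worked example (illustrative numbers): rate_delivered = 10 packets
 * over rate_interval_us = 10000 with mss_cache = 1448 gives
 * 10 * 1448 * 1000000 / 10000 = 1448000, i.e. the delivery rate is
 * reported in bytes per second.
 */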

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	icsk->icsk_rto_min = TCP_RTO_MIN;
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;
	tp->rate_app_limited = 1;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
	tcp_scaling_ratio_init(sk);

	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}
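
/*
 * Illustrative example for tcp_tx_timestamp(): if the tail skb covers
 * sequence numbers 1000..1099 (seq = 1000, len = 100), tskey becomes
 * 1099, i.e. the requested timestamp is keyed to the last byte this
 * sendmsg() call placed in the skb.
 */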

static bool tcp_stream_is_readable(struct sock *sk, int target)
{
	if (tcp_epollin_ready(sk, target))
		return true;
	return sk_is_readable(sk);
}

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	u8 shutdown;
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1.  EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);
		u16 urg_data = READ_ONCE(tp->urg_data);

		if (unlikely(urg_data) &&
		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
		    !sock_flag(sk, SOCK_URGINLINE))
			target++;

		if (tcp_stream_is_readable(sk, target))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(shutdown & SEND_SHUTDOWN)) {
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (__sk_stream_is_writeable(sk, 1))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT &&
		   inet_test_bit(DEFER_CONNECT, sk)) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);
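
/*
 * Illustrative mask outcomes, derived from the checks above: an
 * established socket with queued data and free send space reports
 * EPOLLIN | EPOLLRDNORM | EPOLLOUT | EPOLLWRNORM; after the peer's
 * FIN (RCV_SHUTDOWN) EPOLLRDHUP is added; once both directions are
 * shut down EPOLLHUP is set as well.  With SO_RCVLOWAT set to N,
 * EPOLLIN is reported only when at least N bytes are ready to read.
 */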

int tcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = READ_ONCE(tp->urg_data) &&
		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) -
			       READ_ONCE(tp->snd_nxt);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	*karg = answ;
	return 0;
}
EXPORT_SYMBOL(tcp_ioctl);
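
/*
 * Worked example (illustrative numbers): with write_seq = 3000,
 * snd_nxt = 2500 and snd_una = 2000, SIOCOUTQ reports 1000 bytes
 * (written but not yet acknowledged) while SIOCOUTQNSD reports 500
 * bytes (written but not yet sent), e.g. from userspace:
 *
 *	int unacked, unsent;
 *
 *	ioctl(fd, SIOCOUTQ, &unacked);
 *	ioctl(fd, SIOCOUTQNSD, &unsent);
 */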

void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq     = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
	       tcp_skb_can_collapse_to(skb);
}

void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
			smp_mb__after_atomic();
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}
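
/*
 * Autocorking in practice (illustrative): an application issuing many
 * small back-to-back sendmsg() calls does not generate one packet per
 * call.  While a previously sent skb still sits in the Qdisc or NIC
 * queue (sk_wmem_alloc above the tail skb's truesize), new payload is
 * appended to the tail skb and the flush is deferred to TX
 * completion, so e.g. a burst of 1-byte writes typically coalesces
 * into a handful of close-to-size_goal packets.
 */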

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			ret = sk_wait_data(sk, &timeo, NULL);
			if (ret < 0)
				break;
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!tss.len || !timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);

struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}
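
/*
 * Worked example (illustrative numbers): with mss_now = 1448 and a
 * 64KB GSO budget (new_size_goal = 65536, window permitting),
 * gso_segs becomes 65536 / 1448 = 45 and size_goal = 45 * 1448 =
 * 65160 bytes, i.e. the goal is always a whole number of MSS-sized
 * segments.  With MSG_OOB (large_allowed == 0) the goal collapses to
 * a single MSS.
 */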

/* In some cases, sendmsg() could have added an skb to the write queue,
 * but failed adding payload on it. We need to remove it to consume less
 * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
 * epoll() users. Another reason is that tcp_write_xmit() does not like
 * finding an empty skb in the write queue.
 */
void tcp_remove_empty_skb(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		tcp_unlink_write_queue(skb, sk);
		if (tcp_write_queue_empty(sk))
			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
		tcp_wmem_free_skb(sk, skb);
	}
}

/* skb changing from pure zc to mixed, must charge zc */
static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
{
	if (unlikely(skb_zcopy_pure(skb))) {
		u32 extra = skb->truesize -
			    SKB_TRUESIZE(skb_end_offset(skb));

		if (!sk_wmem_schedule(sk, extra))
			return -ENOMEM;

		sk_mem_charge(sk, extra);
		skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
	}
	return 0;
}

int tcp_wmem_schedule(struct sock *sk, int copy)
{
	int left;

	if (likely(sk_wmem_schedule(sk, copy)))
		return copy;

	/* We could be in trouble if we have nothing queued.
	 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
	 * to guarantee some progress.
	 */
	left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued;
	if (left > 0)
		sk_forced_mem_schedule(sk, min(left, copy));
	return min(copy, sk->sk_forward_alloc);
}
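
/*
 * Worked example (illustrative numbers): if sk_wmem_schedule() fails
 * under memory pressure while tcp_wmem[0] = 4096 and sk_wmem_queued =
 * 3000, then left = 1096 bytes are force-scheduled, so a sendmsg()
 * that could otherwise make no progress still copies up to
 * min(copy, sk_forward_alloc) bytes.
 */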

void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}

int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr *uaddr = msg->msg_name;
	int err, flags;

	if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
	      TFO_CLIENT_ENABLE) ||
	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
	     uaddr->sa_family == AF_UNSPEC))
		return -EOPNOTSUPP;
	if (tp->fastopen_req)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(!tp->fastopen_req))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;
	tp->fastopen_req->size = size;
	tp->fastopen_req->uarg = uarg;

	if (inet_test_bit(DEFER_CONNECT, sk)) {
		err = tcp_connect(sk);
		/* Same failure procedure as in tcp_v4/6_connect */
		if (err) {
			tcp_set_state(sk, TCP_CLOSE);
			inet->inet_dport = 0;
			sk->sk_route_caps = 0;
		}
	}
	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, uaddr,
				    msg->msg_namelen, flags, 1);
	/* fastopen_req could already be freed in __inet_stream_connect
	 * if the connection times out or gets rst
	 */
	if (tp->fastopen_req) {
		*copied = tp->fastopen_req->copied;
		tcp_free_fastopen_req(tp);
		inet_clear_bit(DEFER_CONNECT, sk);
	}
	return err;
}
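
/*
 * Illustrative userspace view of the Fast Open path handled above
 * (assuming net.ipv4.tcp_fastopen has the client bit set):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	// carries data in the SYN, no separate connect() needed
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * or, with the TCP_FASTOPEN_CONNECT socket option (defer_connect),
 * connect() returns immediately and the first write() generates the
 * SYN together with its payload.
 */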
10928336886fSJerry Chu */ 10938336886fSJerry Chu if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 10948336886fSJerry Chu !tcp_passive_fastopen(sk)) { 1095686a5624SYuvaraja Mariappan err = sk_stream_wait_connect(sk, &timeo); 1096686a5624SYuvaraja Mariappan if (err != 0) 1097cf60af03SYuchung Cheng goto do_error; 10988336886fSJerry Chu } 10991da177e4SLinus Torvalds 1100c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) { 1101c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_RECV_QUEUE) { 1102c0e88ff0SPavel Emelyanov copied = tcp_send_rcvq(sk, msg, size); 11035924f17aSChristoph Paasch goto out_nopush; 1104c0e88ff0SPavel Emelyanov } 1105c0e88ff0SPavel Emelyanov 1106c0e88ff0SPavel Emelyanov err = -EINVAL; 1107c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE) 1108c0e88ff0SPavel Emelyanov goto out_err; 1109c0e88ff0SPavel Emelyanov 1110c0e88ff0SPavel Emelyanov /* 'common' sending to sendq */ 1111c0e88ff0SPavel Emelyanov } 1112c0e88ff0SPavel Emelyanov 1113657a0667SWillem de Bruijn sockcm_init(&sockc, sk); 1114c14ac945SSoheil Hassas Yeganeh if (msg->msg_controllen) { 1115c14ac945SSoheil Hassas Yeganeh err = sock_cmsg_send(sk, msg, &sockc); 1116c14ac945SSoheil Hassas Yeganeh if (unlikely(err)) { 1117c14ac945SSoheil Hassas Yeganeh err = -EINVAL; 1118c14ac945SSoheil Hassas Yeganeh goto out_err; 1119c14ac945SSoheil Hassas Yeganeh } 1120c14ac945SSoheil Hassas Yeganeh } 1121c14ac945SSoheil Hassas Yeganeh 11221da177e4SLinus Torvalds /* This should be in poll */ 11239cd3e072SEric Dumazet sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 11241da177e4SLinus Torvalds 11251da177e4SLinus Torvalds /* Ok commence sending. */ 11261da177e4SLinus Torvalds copied = 0; 11271da177e4SLinus Torvalds 1128d41a69f1SEric Dumazet restart: 1129d41a69f1SEric Dumazet mss_now = tcp_send_mss(sk, &size_goal, flags); 1130d41a69f1SEric Dumazet 11311da177e4SLinus Torvalds err = -EPIPE; 11321da177e4SLinus Torvalds if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 113379d8665bSEric Dumazet goto do_error; 11341da177e4SLinus Torvalds 113501e97e65SAl Viro while (msg_data_left(msg)) { 1136270a1c3dSDavid Howells ssize_t copy = 0; 11371da177e4SLinus Torvalds 1138fe067e8aSDavid S. Miller skb = tcp_write_queue_tail(sk); 113965ec6097SEric Dumazet if (skb) 114065ec6097SEric Dumazet copy = size_goal - skb->len; 11411da177e4SLinus Torvalds 1142c134ecb8SMartin KaFai Lau if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { 11433613b3dbSEric Dumazet bool first_skb; 11443613b3dbSEric Dumazet 11451da177e4SLinus Torvalds new_segment: 11461da177e4SLinus Torvalds if (!sk_stream_memory_free(sk)) 1147afb83012SSoheil Hassas Yeganeh goto wait_for_space; 11481da177e4SLinus Torvalds 11491a991488SEric Dumazet if (unlikely(process_backlog >= 16)) { 11501a991488SEric Dumazet process_backlog = 0; 11511a991488SEric Dumazet if (sk_flush_backlog(sk)) 1152d41a69f1SEric Dumazet goto restart; 1153d4011239SEric Dumazet } 115475c119afSEric Dumazet first_skb = tcp_rtx_and_write_queues_empty(sk); 11555882efffSEric Dumazet skb = tcp_stream_alloc_skb(sk, sk->sk_allocation, 11563613b3dbSEric Dumazet first_skb); 11571da177e4SLinus Torvalds if (!skb) 1158afb83012SSoheil Hassas Yeganeh goto wait_for_space; 11591da177e4SLinus Torvalds 11601a991488SEric Dumazet process_backlog++; 11611da177e4SLinus Torvalds 116204d8825cSPaolo Abeni tcp_skb_entail(sk, skb); 1163c1b4a7e6SDavid S. Miller copy = size_goal; 11649d186cacSAndrey Vagin 11659d186cacSAndrey Vagin /* All packets are restored as if they have 1166d3edd06eSEric Dumazet * already been sent. 
skb_mstamp_ns isn't set to 11679d186cacSAndrey Vagin * avoid wrong rtt estimation. 11689d186cacSAndrey Vagin */ 11699d186cacSAndrey Vagin if (tp->repair) 11709d186cacSAndrey Vagin TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; 11711da177e4SLinus Torvalds } 11721da177e4SLinus Torvalds 11731da177e4SLinus Torvalds /* Try to append data to the end of skb. */ 117401e97e65SAl Viro if (copy > msg_data_left(msg)) 117501e97e65SAl Viro copy = msg_data_left(msg); 11761da177e4SLinus Torvalds 1177270a1c3dSDavid Howells if (zc == 0) { 11785640f768SEric Dumazet bool merge = true; 11791da177e4SLinus Torvalds int i = skb_shinfo(skb)->nr_frags; 11805640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 1181761965eaSEric Dumazet 11825640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 1183afb83012SSoheil Hassas Yeganeh goto wait_for_space; 1184761965eaSEric Dumazet 11855640f768SEric Dumazet if (!skb_can_coalesce(skb, i, pfrag->page, 11865640f768SEric Dumazet pfrag->offset)) { 1187657b991aSKuniyuki Iwashima if (i >= READ_ONCE(sysctl_max_skb_frags)) { 11881da177e4SLinus Torvalds tcp_mark_push(tp, skb); 11891da177e4SLinus Torvalds goto new_segment; 11901da177e4SLinus Torvalds } 11915640f768SEric Dumazet merge = false; 11925640f768SEric Dumazet } 1193ef015786SHerbert Xu 11945640f768SEric Dumazet copy = min_t(int, copy, pfrag->size - pfrag->offset); 1195ef015786SHerbert Xu 1196eb315a7dSPavel Begunkov if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) { 1197849b425cSEric Dumazet if (tcp_downgrade_zcopy_pure(sk, skb)) 1198849b425cSEric Dumazet goto wait_for_space; 1199eb315a7dSPavel Begunkov skb_zcopy_downgrade_managed(skb); 1200eb315a7dSPavel Begunkov } 1201849b425cSEric Dumazet 1202849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy); 1203849b425cSEric Dumazet if (!copy) 1204afb83012SSoheil Hassas Yeganeh goto wait_for_space; 12051da177e4SLinus Torvalds 120657be5bdaSAl Viro err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, 12075640f768SEric Dumazet pfrag->page, 12085640f768SEric Dumazet pfrag->offset, 12095640f768SEric Dumazet copy); 12105640f768SEric Dumazet if (err) 12111da177e4SLinus Torvalds goto do_error; 12121da177e4SLinus Torvalds 12131da177e4SLinus Torvalds /* Update the skb. 
*/ 12141da177e4SLinus Torvalds if (merge) { 12159e903e08SEric Dumazet skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 12161da177e4SLinus Torvalds } else { 12175640f768SEric Dumazet skb_fill_page_desc(skb, i, pfrag->page, 12185640f768SEric Dumazet pfrag->offset, copy); 12194e33e346SEric Dumazet page_ref_inc(pfrag->page); 12201da177e4SLinus Torvalds } 12215640f768SEric Dumazet pfrag->offset += copy; 1222270a1c3dSDavid Howells } else if (zc == MSG_ZEROCOPY) { 12239b65b17dSTalal Ahmad /* First append to a fragless skb builds initial 12249b65b17dSTalal Ahmad * pure zerocopy skb 12259b65b17dSTalal Ahmad */ 12269b65b17dSTalal Ahmad if (!skb->len) 12279b65b17dSTalal Ahmad skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY; 12289b65b17dSTalal Ahmad 12299b65b17dSTalal Ahmad if (!skb_zcopy_pure(skb)) { 1230849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy); 1231849b425cSEric Dumazet if (!copy) 1232358ed624STalal Ahmad goto wait_for_space; 12339b65b17dSTalal Ahmad } 1234358ed624STalal Ahmad 1235f214f915SWillem de Bruijn err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); 1236111856c7SWillem de Bruijn if (err == -EMSGSIZE || err == -EEXIST) { 1237111856c7SWillem de Bruijn tcp_mark_push(tp, skb); 1238f214f915SWillem de Bruijn goto new_segment; 1239111856c7SWillem de Bruijn } 1240f214f915SWillem de Bruijn if (err < 0) 1241f214f915SWillem de Bruijn goto do_error; 1242f214f915SWillem de Bruijn copy = err; 1243270a1c3dSDavid Howells } else if (zc == MSG_SPLICE_PAGES) { 1244270a1c3dSDavid Howells /* Splice in data if we can; copy if we can't. */ 1245270a1c3dSDavid Howells if (tcp_downgrade_zcopy_pure(sk, skb)) 1246270a1c3dSDavid Howells goto wait_for_space; 1247270a1c3dSDavid Howells copy = tcp_wmem_schedule(sk, copy); 1248270a1c3dSDavid Howells if (!copy) 1249270a1c3dSDavid Howells goto wait_for_space; 1250270a1c3dSDavid Howells 1251270a1c3dSDavid Howells err = skb_splice_from_iter(skb, &msg->msg_iter, copy, 1252270a1c3dSDavid Howells sk->sk_allocation); 1253270a1c3dSDavid Howells if (err < 0) { 1254270a1c3dSDavid Howells if (err == -EMSGSIZE) { 1255270a1c3dSDavid Howells tcp_mark_push(tp, skb); 1256270a1c3dSDavid Howells goto new_segment; 1257270a1c3dSDavid Howells } 1258270a1c3dSDavid Howells goto do_error; 1259270a1c3dSDavid Howells } 1260270a1c3dSDavid Howells copy = err; 1261270a1c3dSDavid Howells 1262270a1c3dSDavid Howells if (!(flags & MSG_NO_SHARED_FRAGS)) 1263270a1c3dSDavid Howells skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; 1264270a1c3dSDavid Howells 1265270a1c3dSDavid Howells sk_wmem_queued_add(sk, copy); 1266270a1c3dSDavid Howells sk_mem_charge(sk, copy); 12671da177e4SLinus Torvalds } 12681da177e4SLinus Torvalds 12691da177e4SLinus Torvalds if (!copied) 12704de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 12711da177e4SLinus Torvalds 12720f317464SEric Dumazet WRITE_ONCE(tp->write_seq, tp->write_seq + copy); 12731da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq += copy; 1274cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, 0); 12751da177e4SLinus Torvalds 12761da177e4SLinus Torvalds copied += copy; 127701e97e65SAl Viro if (!msg_data_left(msg)) { 1278c134ecb8SMartin KaFai Lau if (unlikely(flags & MSG_EOR)) 1279c134ecb8SMartin KaFai Lau TCP_SKB_CB(skb)->eor = 1; 12801da177e4SLinus Torvalds goto out; 12814ed2d765SWillem de Bruijn } 12821da177e4SLinus Torvalds 128365ec6097SEric Dumazet if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) 12841da177e4SLinus Torvalds continue; 12851da177e4SLinus Torvalds 12861da177e4SLinus Torvalds if (forced_push(tp)) { 
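/* forced_push() (a helper defined earlier in this file) is expected to
 * return true once roughly half of the peer's maximum window has
 * accumulated since the last pushed byte, so a long run of sends still
 * sets PSH and flushes periodically instead of waiting for the end of
 * the message.
 */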
12871da177e4SLinus Torvalds tcp_mark_push(tp, skb); 12889e412ba7SIlpo Järvinen __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1289fe067e8aSDavid S. Miller } else if (skb == tcp_send_head(sk)) 12901da177e4SLinus Torvalds tcp_push_one(sk, mss_now); 12911da177e4SLinus Torvalds continue; 12921da177e4SLinus Torvalds 1293afb83012SSoheil Hassas Yeganeh wait_for_space: 12941da177e4SLinus Torvalds set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 129572bf4f17SEric Dumazet tcp_remove_empty_skb(sk); 1296ec342325SAndrew Vagin if (copied) 1297f54b3111SEric Dumazet tcp_push(sk, flags & ~MSG_MORE, mss_now, 1298f54b3111SEric Dumazet TCP_NAGLE_PUSH, size_goal); 12991da177e4SLinus Torvalds 1300686a5624SYuvaraja Mariappan err = sk_stream_wait_memory(sk, &timeo); 1301686a5624SYuvaraja Mariappan if (err != 0) 13021da177e4SLinus Torvalds goto do_error; 13031da177e4SLinus Torvalds 13040c54b85fSIlpo Järvinen mss_now = tcp_send_mss(sk, &size_goal, flags); 13051da177e4SLinus Torvalds } 13061da177e4SLinus Torvalds 13071da177e4SLinus Torvalds out: 1308ad02c4f5SSoheil Hassas Yeganeh if (copied) { 13094e8cc228SEric Dumazet tcp_tx_timestamp(sk, sockc.tsflags); 1310f54b3111SEric Dumazet tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1311ad02c4f5SSoheil Hassas Yeganeh } 13125924f17aSChristoph Paasch out_nopush: 1313a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1314a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf) 13158e044917SJonathan Lemon net_zcopy_put(uarg); 1316cf60af03SYuchung Cheng return copied + copied_syn; 13171da177e4SLinus Torvalds 13181da177e4SLinus Torvalds do_error: 131927728ba8SEric Dumazet tcp_remove_empty_skb(sk); 1320fdfc5c85SEric Dumazet 1321cf60af03SYuchung Cheng if (copied + copied_syn) 13221da177e4SLinus Torvalds goto out; 13231da177e4SLinus Torvalds out_err: 1324a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1325a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf) 13268e044917SJonathan Lemon net_zcopy_put_abort(uarg, true); 13271da177e4SLinus Torvalds err = sk_stream_error(sk, flags, err); 1328ce5ec440SJason Baron /* make sure we wake any epoll edge trigger waiter */ 1329216808c6SEric Dumazet if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { 1330ce5ec440SJason Baron sk->sk_write_space(sk); 1331b0f71bd3SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1332b0f71bd3SFrancis Yan } 13331da177e4SLinus Torvalds return err; 13341da177e4SLinus Torvalds } 1335774c4673SDavid S. 
Miller EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); 1336306b13ebSTom Herbert 1337306b13ebSTom Herbert int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1338306b13ebSTom Herbert { 1339306b13ebSTom Herbert int ret; 1340306b13ebSTom Herbert 1341306b13ebSTom Herbert lock_sock(sk); 1342306b13ebSTom Herbert ret = tcp_sendmsg_locked(sk, msg, size); 1343306b13ebSTom Herbert release_sock(sk); 1344306b13ebSTom Herbert 1345306b13ebSTom Herbert return ret; 1346306b13ebSTom Herbert } 13474bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sendmsg); 13481da177e4SLinus Torvalds 13491d7e4538SDavid Howells void tcp_splice_eof(struct socket *sock) 13501d7e4538SDavid Howells { 13511d7e4538SDavid Howells struct sock *sk = sock->sk; 13521d7e4538SDavid Howells struct tcp_sock *tp = tcp_sk(sk); 13531d7e4538SDavid Howells int mss_now, size_goal; 13541d7e4538SDavid Howells 13551d7e4538SDavid Howells if (!tcp_write_queue_tail(sk)) 13561d7e4538SDavid Howells return; 13571d7e4538SDavid Howells 13581d7e4538SDavid Howells lock_sock(sk); 13591d7e4538SDavid Howells mss_now = tcp_send_mss(sk, &size_goal, 0); 13601d7e4538SDavid Howells tcp_push(sk, 0, mss_now, tp->nonagle, size_goal); 13611d7e4538SDavid Howells release_sock(sk); 13621d7e4538SDavid Howells } 13631d7e4538SDavid Howells EXPORT_SYMBOL_GPL(tcp_splice_eof); 13641d7e4538SDavid Howells 13651da177e4SLinus Torvalds /* 13661da177e4SLinus Torvalds * Handle reading urgent data. BSD has very simple semantics for 13671da177e4SLinus Torvalds * this, no blocking and very strange errors 8) 13681da177e4SLinus Torvalds */ 13691da177e4SLinus Torvalds 1370377f0a08SRami Rosen static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 13711da177e4SLinus Torvalds { 13721da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 13731da177e4SLinus Torvalds 13741da177e4SLinus Torvalds /* No URG data to read. */ 13751da177e4SLinus Torvalds if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 13761da177e4SLinus Torvalds tp->urg_data == TCP_URG_READ) 13771da177e4SLinus Torvalds return -EINVAL; /* Yes this is right ! */ 13781da177e4SLinus Torvalds 13791da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 13801da177e4SLinus Torvalds return -ENOTCONN; 13811da177e4SLinus Torvalds 13821da177e4SLinus Torvalds if (tp->urg_data & TCP_URG_VALID) { 13831da177e4SLinus Torvalds int err = 0; 13841da177e4SLinus Torvalds char c = tp->urg_data; 13851da177e4SLinus Torvalds 13861da177e4SLinus Torvalds if (!(flags & MSG_PEEK)) 13877b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, TCP_URG_READ); 13881da177e4SLinus Torvalds 13891da177e4SLinus Torvalds /* Read urgent data. */ 13901da177e4SLinus Torvalds msg->msg_flags |= MSG_OOB; 13911da177e4SLinus Torvalds 13921da177e4SLinus Torvalds if (len > 0) { 13931da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) 13947eab8d9eSAl Viro err = memcpy_to_msg(msg, &c, 1); 13951da177e4SLinus Torvalds len = 1; 13961da177e4SLinus Torvalds } else 13971da177e4SLinus Torvalds msg->msg_flags |= MSG_TRUNC; 13981da177e4SLinus Torvalds 13991da177e4SLinus Torvalds return err ? -EFAULT : len; 14001da177e4SLinus Torvalds } 14011da177e4SLinus Torvalds 14021da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 14031da177e4SLinus Torvalds return 0; 14041da177e4SLinus Torvalds 14051da177e4SLinus Torvalds /* Fixed the recv(..., MSG_OOB) behaviour. 
BSD docs and 14061da177e4SLinus Torvalds * the available implementations agree in this case: 14071da177e4SLinus Torvalds * this call should never block, independent of the 14081da177e4SLinus Torvalds * blocking state of the socket. 14091da177e4SLinus Torvalds * Mike <pall@rz.uni-karlsruhe.de> 14101da177e4SLinus Torvalds */ 14111da177e4SLinus Torvalds return -EAGAIN; 14121da177e4SLinus Torvalds } 14131da177e4SLinus Torvalds 1414c0e88ff0SPavel Emelyanov static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1415c0e88ff0SPavel Emelyanov { 1416c0e88ff0SPavel Emelyanov struct sk_buff *skb; 1417c0e88ff0SPavel Emelyanov int copied = 0, err = 0; 1418c0e88ff0SPavel Emelyanov 1419c0e88ff0SPavel Emelyanov /* XXX -- need to support SO_PEEK_OFF */ 1420c0e88ff0SPavel Emelyanov 142175c119afSEric Dumazet skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 142275c119afSEric Dumazet err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 142375c119afSEric Dumazet if (err) 142475c119afSEric Dumazet return err; 142575c119afSEric Dumazet copied += skb->len; 142675c119afSEric Dumazet } 142775c119afSEric Dumazet 1428c0e88ff0SPavel Emelyanov skb_queue_walk(&sk->sk_write_queue, skb) { 142951f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1430c0e88ff0SPavel Emelyanov if (err) 1431c0e88ff0SPavel Emelyanov break; 1432c0e88ff0SPavel Emelyanov 1433c0e88ff0SPavel Emelyanov copied += skb->len; 1434c0e88ff0SPavel Emelyanov } 1435c0e88ff0SPavel Emelyanov 1436c0e88ff0SPavel Emelyanov return err ?: copied; 1437c0e88ff0SPavel Emelyanov } 1438c0e88ff0SPavel Emelyanov 14391da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user, 14401da177e4SLinus Torvalds * then send an ACK if necessary. COPIED is the number of bytes 14411da177e4SLinus Torvalds * tcp_recvmsg has given to the user so far, it speeds up the 14421da177e4SLinus Torvalds * calculation of whether or not we must ACK for the sake of 14431da177e4SLinus Torvalds * a window update. 14441da177e4SLinus Torvalds */ 1445e5c6de5fSJohn Fastabend void __tcp_cleanup_rbuf(struct sock *sk, int copied) 14461da177e4SLinus Torvalds { 14471da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1448a2a385d6SEric Dumazet bool time_to_ack = false; 14491da177e4SLinus Torvalds 1450463c84b9SArnaldo Carvalho de Melo if (inet_csk_ack_scheduled(sk)) { 1451463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1452b6b6d653SEric Dumazet 1453b6b6d653SEric Dumazet if (/* Once-per-two-segments ACK was not sent by tcp_input.c */ 1454463c84b9SArnaldo Carvalho de Melo tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 14551da177e4SLinus Torvalds /* 14561da177e4SLinus Torvalds * If this read emptied read buffer, we send ACK, if 14571da177e4SLinus Torvalds * connection is not bidirectional, user drained 14581da177e4SLinus Torvalds * receive buffer and there was a small segment 14591da177e4SLinus Torvalds * in queue. 
14601da177e4SLinus Torvalds */ 14611ef9696cSAlexey Kuznetsov (copied > 0 && 14621ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 14631ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 146431954cd8SWei Wang !inet_csk_in_pingpong_mode(sk))) && 14651ef9696cSAlexey Kuznetsov !atomic_read(&sk->sk_rmem_alloc))) 1466a2a385d6SEric Dumazet time_to_ack = true; 14671da177e4SLinus Torvalds } 14681da177e4SLinus Torvalds 14691da177e4SLinus Torvalds /* We send an ACK if we can now advertise a non-zero window 14701da177e4SLinus Torvalds * which has been raised "significantly". 14711da177e4SLinus Torvalds * 14721da177e4SLinus Torvalds * Even if window raised up to infinity, do not send window open ACK 14731da177e4SLinus Torvalds * in states, where we will not receive more. It is useless. 14741da177e4SLinus Torvalds */ 14751da177e4SLinus Torvalds if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 14761da177e4SLinus Torvalds __u32 rcv_window_now = tcp_receive_window(tp); 14771da177e4SLinus Torvalds 14781da177e4SLinus Torvalds /* Optimize, __tcp_select_window() is not cheap. */ 14791da177e4SLinus Torvalds if (2*rcv_window_now <= tp->window_clamp) { 14801da177e4SLinus Torvalds __u32 new_window = __tcp_select_window(sk); 14811da177e4SLinus Torvalds 14821da177e4SLinus Torvalds /* Send ACK now, if this read freed lots of space 14831da177e4SLinus Torvalds * in our buffer. Certainly, new_window is new window. 14841da177e4SLinus Torvalds * We can advertise it now, if it is not less than current one. 14851da177e4SLinus Torvalds * "Lots" means "at least twice" here. 14861da177e4SLinus Torvalds */ 14871da177e4SLinus Torvalds if (new_window && new_window >= 2 * rcv_window_now) 1488a2a385d6SEric Dumazet time_to_ack = true; 14891da177e4SLinus Torvalds } 14901da177e4SLinus Torvalds } 14911da177e4SLinus Torvalds if (time_to_ack) 14921da177e4SLinus Torvalds tcp_send_ack(sk); 14931da177e4SLinus Torvalds } 14941da177e4SLinus Torvalds 1495c457985aSCong Wang void tcp_cleanup_rbuf(struct sock *sk, int copied) 1496c457985aSCong Wang { 1497c457985aSCong Wang struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1498c457985aSCong Wang struct tcp_sock *tp = tcp_sk(sk); 1499c457985aSCong Wang 1500c457985aSCong Wang WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1501c457985aSCong Wang "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1502c457985aSCong Wang tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1503c457985aSCong Wang __tcp_cleanup_rbuf(sk, copied); 1504c457985aSCong Wang } 1505c457985aSCong Wang 15063df684c1SEric Dumazet static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) 15073df684c1SEric Dumazet { 1508f35f8219SEric Dumazet __skb_unlink(skb, &sk->sk_receive_queue); 15093df684c1SEric Dumazet if (likely(skb->destructor == sock_rfree)) { 15103df684c1SEric Dumazet sock_rfree(skb); 15113df684c1SEric Dumazet skb->destructor = NULL; 15123df684c1SEric Dumazet skb->sk = NULL; 151368822bdfSEric Dumazet return skb_attempt_defer_free(skb); 1514f35f8219SEric Dumazet } 1515f35f8219SEric Dumazet __kfree_skb(skb); 15163df684c1SEric Dumazet } 15173df684c1SEric Dumazet 15183f92a64eSJakub Kicinski struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 15191da177e4SLinus Torvalds { 15201da177e4SLinus Torvalds struct sk_buff *skb; 15211da177e4SLinus Torvalds u32 offset; 15221da177e4SLinus Torvalds 1523f26845b4SEric Dumazet while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 15241da177e4SLinus Torvalds offset = seq - 
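/* seq is the next byte the caller wants; TCP_SKB_CB(skb)->seq is the
 * first byte this skb carries, so the difference is how far into this
 * skb the caller already is.
 */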
TCP_SKB_CB(skb)->seq; 15259d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 15269d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__); 15271da177e4SLinus Torvalds offset--; 15289d691539SEric Dumazet } 1529e11ecddfSEric Dumazet if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { 15301da177e4SLinus Torvalds *off = offset; 15311da177e4SLinus Torvalds return skb; 15321da177e4SLinus Torvalds } 1533f26845b4SEric Dumazet /* This looks weird, but this can happen if TCP collapsing 1534f26845b4SEric Dumazet * split a fat GRO packet, while we released socket lock 1535f26845b4SEric Dumazet * in skb_splice_bits() 1536f26845b4SEric Dumazet */ 15373df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 15381da177e4SLinus Torvalds } 15391da177e4SLinus Torvalds return NULL; 15401da177e4SLinus Torvalds } 15413f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_recv_skb); 15421da177e4SLinus Torvalds 15431da177e4SLinus Torvalds /* 15441da177e4SLinus Torvalds * This routine provides an alternative to tcp_recvmsg() for routines 15451da177e4SLinus Torvalds * that would like to handle copying from skbuffs directly in 'sendfile' 15461da177e4SLinus Torvalds * fashion. 15471da177e4SLinus Torvalds * Note: 15481da177e4SLinus Torvalds * - It is assumed that the socket was locked by the caller. 15491da177e4SLinus Torvalds * - The routine does not block. 15501da177e4SLinus Torvalds * - At present, there is no support for reading OOB data 15511da177e4SLinus Torvalds * or for 'peeking' the socket using this routine 15521da177e4SLinus Torvalds * (although both would be easy to implement). 15531da177e4SLinus Torvalds */ 15541da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 15551da177e4SLinus Torvalds sk_read_actor_t recv_actor) 15561da177e4SLinus Torvalds { 15571da177e4SLinus Torvalds struct sk_buff *skb; 15581da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 15591da177e4SLinus Torvalds u32 seq = tp->copied_seq; 15601da177e4SLinus Torvalds u32 offset; 15611da177e4SLinus Torvalds int copied = 0; 15621da177e4SLinus Torvalds 15631da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) 15641da177e4SLinus Torvalds return -ENOTCONN; 15651da177e4SLinus Torvalds while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 15661da177e4SLinus Torvalds if (offset < skb->len) { 1567374e7b59SOctavian Purdila int used; 1568374e7b59SOctavian Purdila size_t len; 15691da177e4SLinus Torvalds 15701da177e4SLinus Torvalds len = skb->len - offset; 15711da177e4SLinus Torvalds /* Stop reading if we hit a patch of urgent data */ 1572b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) { 15731da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - seq; 15741da177e4SLinus Torvalds if (urg_offset < len) 15751da177e4SLinus Torvalds len = urg_offset; 15761da177e4SLinus Torvalds if (!len) 15771da177e4SLinus Torvalds break; 15781da177e4SLinus Torvalds } 15791da177e4SLinus Torvalds used = recv_actor(desc, skb, offset, len); 1580ff905b1eSEric Dumazet if (used <= 0) { 1581ddb61a57SJens Axboe if (!copied) 1582ddb61a57SJens Axboe copied = used; 1583ddb61a57SJens Axboe break; 1584e3d5ea2cSEric Dumazet } 1585e3d5ea2cSEric Dumazet if (WARN_ON_ONCE(used > len)) 1586e3d5ea2cSEric Dumazet used = len; 15871da177e4SLinus Torvalds seq += used; 15881da177e4SLinus Torvalds copied += used; 15891da177e4SLinus Torvalds offset += used; 1590e3d5ea2cSEric Dumazet 159102275a2eSWilly Tarreau /* If recv_actor drops the lock (e.g.
TCP splice 1592293ad604SOctavian Purdila * receive) the skb pointer might be invalid when 1593293ad604SOctavian Purdila * getting here: tcp_collapse might have deleted it 1594293ad604SOctavian Purdila * while aggregating skbs from the socket queue. 1595293ad604SOctavian Purdila */ 1596293ad604SOctavian Purdila skb = tcp_recv_skb(sk, seq - 1, &offset); 159702275a2eSWilly Tarreau if (!skb) 15981da177e4SLinus Torvalds break; 159902275a2eSWilly Tarreau /* TCP coalescing might have appended data to the skb. 160002275a2eSWilly Tarreau * Try to splice more frags 160102275a2eSWilly Tarreau */ 160202275a2eSWilly Tarreau if (offset + 1 != skb->len) 160302275a2eSWilly Tarreau continue; 16041da177e4SLinus Torvalds } 1605e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 16063df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 16071da177e4SLinus Torvalds ++seq; 16081da177e4SLinus Torvalds break; 16091da177e4SLinus Torvalds } 16103df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 16111da177e4SLinus Torvalds if (!desc->count) 16121da177e4SLinus Torvalds break; 16137db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 16141da177e4SLinus Torvalds } 16157db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 16161da177e4SLinus Torvalds 16171da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 16181da177e4SLinus Torvalds 16191da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */ 1620f26845b4SEric Dumazet if (copied > 0) { 1621f26845b4SEric Dumazet tcp_recv_skb(sk, seq, &offset); 16220e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied); 1623f26845b4SEric Dumazet } 16241da177e4SLinus Torvalds return copied; 16251da177e4SLinus Torvalds } 16264bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_read_sock); 16271da177e4SLinus Torvalds 1628965b57b4SCong Wang int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 162904919bedSCong Wang { 163004919bedSCong Wang struct sk_buff *skb; 163104919bedSCong Wang int copied = 0; 163204919bedSCong Wang 163304919bedSCong Wang if (sk->sk_state == TCP_LISTEN) 163404919bedSCong Wang return -ENOTCONN; 163504919bedSCong Wang 16369b7177b1SJohn Fastabend while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 1637db4192a7SCong Wang u8 tcp_flags; 1638db4192a7SCong Wang int used; 163904919bedSCong Wang 164004919bedSCong Wang __skb_unlink(skb, &sk->sk_receive_queue); 164196628951SPeilin Ye WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); 1642db4192a7SCong Wang tcp_flags = TCP_SKB_CB(skb)->tcp_flags; 1643db4192a7SCong Wang used = recv_actor(sk, skb); 1644db4192a7SCong Wang if (used < 0) { 1645db4192a7SCong Wang if (!copied) 1646db4192a7SCong Wang copied = used; 1647db4192a7SCong Wang break; 1648db4192a7SCong Wang } 1649db4192a7SCong Wang copied += used; 1650db4192a7SCong Wang 16519b7177b1SJohn Fastabend if (tcp_flags & TCPHDR_FIN) 1652db4192a7SCong Wang break; 1653db4192a7SCong Wang } 165404919bedSCong Wang return copied; 165504919bedSCong Wang } 165604919bedSCong Wang EXPORT_SYMBOL(tcp_read_skb); 165704919bedSCong Wang 16583f92a64eSJakub Kicinski void tcp_read_done(struct sock *sk, size_t len) 16593f92a64eSJakub Kicinski { 16603f92a64eSJakub Kicinski struct tcp_sock *tp = tcp_sk(sk); 16613f92a64eSJakub Kicinski u32 seq = tp->copied_seq; 16623f92a64eSJakub Kicinski struct sk_buff *skb; 16633f92a64eSJakub Kicinski size_t left; 16643f92a64eSJakub Kicinski u32 offset; 16653f92a64eSJakub Kicinski 16663f92a64eSJakub Kicinski if (sk->sk_state == TCP_LISTEN) 16673f92a64eSJakub Kicinski return; 16683f92a64eSJakub Kicinski 16693f92a64eSJakub Kicinski left = len; 
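/* Sketch of the expected calling pattern (an assumption based on the
 * tcp_read_sock()/tcp_read_done() pairing, not spelled out in this
 * file): a caller that consumed receive-queue data out of band reports
 * how much it actually used, my_ulp_peek() being a hypothetical helper:
 *
 *	lock_sock(sk);
 *	copied = my_ulp_peek(sk, budget);
 *	if (copied > 0)
 *		tcp_read_done(sk, copied);
 *	release_sock(sk);
 */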
16703f92a64eSJakub Kicinski while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 16713f92a64eSJakub Kicinski int used; 16723f92a64eSJakub Kicinski 16733f92a64eSJakub Kicinski used = min_t(size_t, skb->len - offset, left); 16743f92a64eSJakub Kicinski seq += used; 16753f92a64eSJakub Kicinski left -= used; 16763f92a64eSJakub Kicinski 16773f92a64eSJakub Kicinski if (skb->len > offset + used) 16783f92a64eSJakub Kicinski break; 16793f92a64eSJakub Kicinski 16803f92a64eSJakub Kicinski if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 16813f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb); 16823f92a64eSJakub Kicinski ++seq; 16833f92a64eSJakub Kicinski break; 16843f92a64eSJakub Kicinski } 16853f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb); 16863f92a64eSJakub Kicinski } 16873f92a64eSJakub Kicinski WRITE_ONCE(tp->copied_seq, seq); 16883f92a64eSJakub Kicinski 16893f92a64eSJakub Kicinski tcp_rcv_space_adjust(sk); 16903f92a64eSJakub Kicinski 16913f92a64eSJakub Kicinski /* Clean up data we have read: This will do ACK frames. */ 16923f92a64eSJakub Kicinski if (left != len) 16933f92a64eSJakub Kicinski tcp_cleanup_rbuf(sk, len - left); 16943f92a64eSJakub Kicinski } 16953f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_read_done); 16963f92a64eSJakub Kicinski 169732035585STom Herbert int tcp_peek_len(struct socket *sock) 169832035585STom Herbert { 169932035585STom Herbert return tcp_inq(sock->sk); 170032035585STom Herbert } 170132035585STom Herbert EXPORT_SYMBOL(tcp_peek_len); 170232035585STom Herbert 1703d1361840SEric Dumazet /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ 1704d1361840SEric Dumazet int tcp_set_rcvlowat(struct sock *sk, int val) 1705d1361840SEric Dumazet { 1706dfa2f048SEric Dumazet int space, cap; 1707867f816bSSoheil Hassas Yeganeh 1708867f816bSSoheil Hassas Yeganeh if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1709867f816bSSoheil Hassas Yeganeh cap = sk->sk_rcvbuf >> 1; 1710867f816bSSoheil Hassas Yeganeh else 171102739545SKuniyuki Iwashima cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; 1712867f816bSSoheil Hassas Yeganeh val = min(val, cap); 1713eac66402SEric Dumazet WRITE_ONCE(sk->sk_rcvlowat, val ? 
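/* Userspace view (standard SO_RCVLOWAT semantics): after e.g.
 *
 *	int lowat = 64 * 1024;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *
 * poll()/epoll should report the socket readable only once roughly
 * 64KB is queued, and the code below grows sk_rcvbuf when needed so
 * the advertised window can actually reach that watermark.
 */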
: 1); 171403f45c88SEric Dumazet 171503f45c88SEric Dumazet /* Check if we need to signal EPOLLIN right now */ 171603f45c88SEric Dumazet tcp_data_ready(sk); 171703f45c88SEric Dumazet 1718d1361840SEric Dumazet if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1719d1361840SEric Dumazet return 0; 1720d1361840SEric Dumazet 1721dfa2f048SEric Dumazet space = tcp_space_from_win(sk, val); 1722dfa2f048SEric Dumazet if (space > sk->sk_rcvbuf) { 1723dfa2f048SEric Dumazet WRITE_ONCE(sk->sk_rcvbuf, space); 1724dfa2f048SEric Dumazet tcp_sk(sk)->window_clamp = val; 1725d1361840SEric Dumazet } 1726d1361840SEric Dumazet return 0; 1727d1361840SEric Dumazet } 1728d1361840SEric Dumazet EXPORT_SYMBOL(tcp_set_rcvlowat); 1729d1361840SEric Dumazet 1730892bfd3dSFlorian Westphal void tcp_update_recv_tstamps(struct sk_buff *skb, 17317eeba170SArjun Roy struct scm_timestamping_internal *tss) 17327eeba170SArjun Roy { 17337eeba170SArjun Roy if (skb->tstamp) 17347eeba170SArjun Roy tss->ts[0] = ktime_to_timespec64(skb->tstamp); 17357eeba170SArjun Roy else 17367eeba170SArjun Roy tss->ts[0] = (struct timespec64) {0}; 17377eeba170SArjun Roy 17387eeba170SArjun Roy if (skb_hwtstamps(skb)->hwtstamp) 17397eeba170SArjun Roy tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp); 17407eeba170SArjun Roy else 17417eeba170SArjun Roy tss->ts[2] = (struct timespec64) {0}; 17427eeba170SArjun Roy } 17437eeba170SArjun Roy 174405255b82SEric Dumazet #ifdef CONFIG_MMU 1745350f6bbcSMatthew Wilcox (Oracle) static const struct vm_operations_struct tcp_vm_ops = { 174605255b82SEric Dumazet }; 174705255b82SEric Dumazet 174893ab6cc6SEric Dumazet int tcp_mmap(struct file *file, struct socket *sock, 174993ab6cc6SEric Dumazet struct vm_area_struct *vma) 175093ab6cc6SEric Dumazet { 175105255b82SEric Dumazet if (vma->vm_flags & (VM_WRITE | VM_EXEC)) 175205255b82SEric Dumazet return -EPERM; 17531c71222eSSuren Baghdasaryan vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC); 175405255b82SEric Dumazet 17553e4e28c5SMichel Lespinasse /* Instruct vm_insert_page() to not mmap_read_lock(mm) */ 17561c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_MIXEDMAP); 175705255b82SEric Dumazet 175805255b82SEric Dumazet vma->vm_ops = &tcp_vm_ops; 175905255b82SEric Dumazet return 0; 176005255b82SEric Dumazet } 176105255b82SEric Dumazet EXPORT_SYMBOL(tcp_mmap); 176205255b82SEric Dumazet 17637fba5309SArjun Roy static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, 17647fba5309SArjun Roy u32 *offset_frag) 17657fba5309SArjun Roy { 17667fba5309SArjun Roy skb_frag_t *frag; 17677fba5309SArjun Roy 176870701b83SArjun Roy if (unlikely(offset_skb >= skb->len)) 176970701b83SArjun Roy return NULL; 177070701b83SArjun Roy 17717fba5309SArjun Roy offset_skb -= skb_headlen(skb); 17727fba5309SArjun Roy if ((int)offset_skb < 0 || skb_has_frag_list(skb)) 17737fba5309SArjun Roy return NULL; 17747fba5309SArjun Roy 17757fba5309SArjun Roy frag = skb_shinfo(skb)->frags; 17767fba5309SArjun Roy while (offset_skb) { 17777fba5309SArjun Roy if (skb_frag_size(frag) > offset_skb) { 17787fba5309SArjun Roy *offset_frag = offset_skb; 17797fba5309SArjun Roy return frag; 17807fba5309SArjun Roy } 17817fba5309SArjun Roy offset_skb -= skb_frag_size(frag); 17827fba5309SArjun Roy ++frag; 17837fba5309SArjun Roy } 17847fba5309SArjun Roy *offset_frag = 0; 17857fba5309SArjun Roy return frag; 17867fba5309SArjun Roy } 17877fba5309SArjun Roy 178898917cf0SArjun Roy static bool can_map_frag(const skb_frag_t *frag) 178998917cf0SArjun Roy { 1790577e4432SEric Dumazet struct page *page; 1791577e4432SEric Dumazet 
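/* Only a frag that starts at a page boundary and covers exactly one
 * whole page can be remapped into the caller's address space; anything
 * else (partial pages, compound pages, pages with a mapping) must take
 * the copy fallback, which is what the checks below enforce.
 */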
1792577e4432SEric Dumazet if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag)) 1793577e4432SEric Dumazet return false; 1794577e4432SEric Dumazet 1795577e4432SEric Dumazet page = skb_frag_page(frag); 1796577e4432SEric Dumazet 1797577e4432SEric Dumazet if (PageCompound(page) || page->mapping) 1798577e4432SEric Dumazet return false; 1799577e4432SEric Dumazet 1800577e4432SEric Dumazet return true; 180198917cf0SArjun Roy } 180298917cf0SArjun Roy 180398917cf0SArjun Roy static int find_next_mappable_frag(const skb_frag_t *frag, 180498917cf0SArjun Roy int remaining_in_skb) 180598917cf0SArjun Roy { 180698917cf0SArjun Roy int offset = 0; 180798917cf0SArjun Roy 180898917cf0SArjun Roy if (likely(can_map_frag(frag))) 180998917cf0SArjun Roy return 0; 181098917cf0SArjun Roy 181198917cf0SArjun Roy while (offset < remaining_in_skb && !can_map_frag(frag)) { 181298917cf0SArjun Roy offset += skb_frag_size(frag); 181398917cf0SArjun Roy ++frag; 181498917cf0SArjun Roy } 181598917cf0SArjun Roy return offset; 181698917cf0SArjun Roy } 181798917cf0SArjun Roy 18180c3936d3SArjun Roy static void tcp_zerocopy_set_hint_for_skb(struct sock *sk, 18190c3936d3SArjun Roy struct tcp_zerocopy_receive *zc, 18200c3936d3SArjun Roy struct sk_buff *skb, u32 offset) 18210c3936d3SArjun Roy { 18220c3936d3SArjun Roy u32 frag_offset, partial_frag_remainder = 0; 18230c3936d3SArjun Roy int mappable_offset; 18240c3936d3SArjun Roy skb_frag_t *frag; 18250c3936d3SArjun Roy 18260c3936d3SArjun Roy /* worst case: skip to next skb. try to improve on this case below */ 18270c3936d3SArjun Roy zc->recv_skip_hint = skb->len - offset; 18280c3936d3SArjun Roy 18290c3936d3SArjun Roy /* Find the frag containing this offset (and how far into that frag) */ 18300c3936d3SArjun Roy frag = skb_advance_to_frag(skb, offset, &frag_offset); 18310c3936d3SArjun Roy if (!frag) 18320c3936d3SArjun Roy return; 18330c3936d3SArjun Roy 18340c3936d3SArjun Roy if (frag_offset) { 18350c3936d3SArjun Roy struct skb_shared_info *info = skb_shinfo(skb); 18360c3936d3SArjun Roy 18370c3936d3SArjun Roy /* We read part of the last frag, must recvmsg() rest of skb. */ 18380c3936d3SArjun Roy if (frag == &info->frags[info->nr_frags - 1]) 18390c3936d3SArjun Roy return; 18400c3936d3SArjun Roy 18410c3936d3SArjun Roy /* Else, we must at least read the remainder in this frag. */ 18420c3936d3SArjun Roy partial_frag_remainder = skb_frag_size(frag) - frag_offset; 18430c3936d3SArjun Roy zc->recv_skip_hint -= partial_frag_remainder; 18440c3936d3SArjun Roy ++frag; 18450c3936d3SArjun Roy } 18460c3936d3SArjun Roy 18470c3936d3SArjun Roy /* partial_frag_remainder: If part way through a frag, must read rest. 18480c3936d3SArjun Roy * mappable_offset: Bytes till next mappable frag, *not* counting bytes 18490c3936d3SArjun Roy * in partial_frag_remainder. 
18500c3936d3SArjun Roy */ 18510c3936d3SArjun Roy mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint); 18520c3936d3SArjun Roy zc->recv_skip_hint = mappable_offset + partial_frag_remainder; 18530c3936d3SArjun Roy } 18540c3936d3SArjun Roy 1855f21a3c48SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 1856ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss, 1857f21a3c48SArjun Roy int *cmsg_flags); 1858f21a3c48SArjun Roy static int receive_fallback_to_copy(struct sock *sk, 18597eeba170SArjun Roy struct tcp_zerocopy_receive *zc, int inq, 18607eeba170SArjun Roy struct scm_timestamping_internal *tss) 1861f21a3c48SArjun Roy { 1862f21a3c48SArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address; 1863f21a3c48SArjun Roy struct msghdr msg = {}; 18647eeba170SArjun Roy int err; 1865f21a3c48SArjun Roy 1866f21a3c48SArjun Roy zc->length = 0; 1867f21a3c48SArjun Roy zc->recv_skip_hint = 0; 1868f21a3c48SArjun Roy 1869f21a3c48SArjun Roy if (copy_address != zc->copybuf_address) 1870f21a3c48SArjun Roy return -EINVAL; 1871f21a3c48SArjun Roy 18729fd7874cSJens Axboe err = import_ubuf(ITER_DEST, (void __user *)copy_address, inq, 18739fd7874cSJens Axboe &msg.msg_iter); 1874f21a3c48SArjun Roy if (err) 1875f21a3c48SArjun Roy return err; 1876f21a3c48SArjun Roy 1877ec095263SOliver Hartkopp err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT, 18787eeba170SArjun Roy tss, &zc->msg_flags); 1879f21a3c48SArjun Roy if (err < 0) 1880f21a3c48SArjun Roy return err; 1881f21a3c48SArjun Roy 1882f21a3c48SArjun Roy zc->copybuf_len = err; 18830c3936d3SArjun Roy if (likely(zc->copybuf_len)) { 18840c3936d3SArjun Roy struct sk_buff *skb; 18850c3936d3SArjun Roy u32 offset; 18860c3936d3SArjun Roy 18870c3936d3SArjun Roy skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset); 18880c3936d3SArjun Roy if (skb) 18890c3936d3SArjun Roy tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset); 18900c3936d3SArjun Roy } 1891f21a3c48SArjun Roy return 0; 1892f21a3c48SArjun Roy } 1893f21a3c48SArjun Roy 189418fb76edSArjun Roy static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc, 189518fb76edSArjun Roy struct sk_buff *skb, u32 copylen, 189618fb76edSArjun Roy u32 *offset, u32 *seq) 189718fb76edSArjun Roy { 189818fb76edSArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address; 189918fb76edSArjun Roy struct msghdr msg = {}; 190018fb76edSArjun Roy int err; 190118fb76edSArjun Roy 190218fb76edSArjun Roy if (copy_address != zc->copybuf_address) 190318fb76edSArjun Roy return -EINVAL; 190418fb76edSArjun Roy 19059fd7874cSJens Axboe err = import_ubuf(ITER_DEST, (void __user *)copy_address, copylen, 19069fd7874cSJens Axboe &msg.msg_iter); 190718fb76edSArjun Roy if (err) 190818fb76edSArjun Roy return err; 190918fb76edSArjun Roy err = skb_copy_datagram_msg(skb, *offset, &msg, copylen); 191018fb76edSArjun Roy if (err) 191118fb76edSArjun Roy return err; 191218fb76edSArjun Roy zc->recv_skip_hint -= copylen; 191318fb76edSArjun Roy *offset += copylen; 191418fb76edSArjun Roy *seq += copylen; 191518fb76edSArjun Roy return (__s32)copylen; 191618fb76edSArjun Roy } 191718fb76edSArjun Roy 19187eeba170SArjun Roy static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc, 191918fb76edSArjun Roy struct sock *sk, 192018fb76edSArjun Roy struct sk_buff *skb, 192118fb76edSArjun Roy u32 *seq, 19227eeba170SArjun Roy s32 copybuf_len, 19237eeba170SArjun Roy struct scm_timestamping_internal *tss) 192418fb76edSArjun Roy { 192518fb76edSArjun Roy u32 offset, copylen = 
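/* Copy at most what the user copybuf can take and what the zerocopy
 * pass left behind as unmappable (recv_skip_hint).
 */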
min_t(u32, copybuf_len, zc->recv_skip_hint); 192618fb76edSArjun Roy 192718fb76edSArjun Roy if (!copylen) 192818fb76edSArjun Roy return 0; 192918fb76edSArjun Roy /* skb is null if inq < PAGE_SIZE. */ 19307eeba170SArjun Roy if (skb) { 193118fb76edSArjun Roy offset = *seq - TCP_SKB_CB(skb)->seq; 19327eeba170SArjun Roy } else { 193318fb76edSArjun Roy skb = tcp_recv_skb(sk, *seq, &offset); 19347eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) { 19357eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss); 19367eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS; 19377eeba170SArjun Roy } 19387eeba170SArjun Roy } 193918fb76edSArjun Roy 194018fb76edSArjun Roy zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset, 194118fb76edSArjun Roy seq); 194218fb76edSArjun Roy return zc->copybuf_len < 0 ? 0 : copylen; 194318fb76edSArjun Roy } 194418fb76edSArjun Roy 194594ab9eb9SArjun Roy static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma, 194694ab9eb9SArjun Roy struct page **pending_pages, 194794ab9eb9SArjun Roy unsigned long pages_remaining, 194894ab9eb9SArjun Roy unsigned long *address, 194994ab9eb9SArjun Roy u32 *length, 195094ab9eb9SArjun Roy u32 *seq, 195194ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc, 195294ab9eb9SArjun Roy u32 total_bytes_to_map, 195394ab9eb9SArjun Roy int err) 195494ab9eb9SArjun Roy { 195594ab9eb9SArjun Roy /* At least one page did not map. Try zapping if we skipped earlier. */ 195694ab9eb9SArjun Roy if (err == -EBUSY && 195794ab9eb9SArjun Roy zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { 195894ab9eb9SArjun Roy u32 maybe_zap_len; 195994ab9eb9SArjun Roy 196094ab9eb9SArjun Roy maybe_zap_len = total_bytes_to_map - /* All bytes to map */ 196194ab9eb9SArjun Roy *length + /* Mapped or pending */ 196294ab9eb9SArjun Roy (pages_remaining * PAGE_SIZE); /* Failed map. */ 1963e9adcfecSMike Kravetz zap_page_range_single(vma, *address, maybe_zap_len, NULL); 196494ab9eb9SArjun Roy err = 0; 196594ab9eb9SArjun Roy } 196694ab9eb9SArjun Roy 196794ab9eb9SArjun Roy if (!err) { 196894ab9eb9SArjun Roy unsigned long leftover_pages = pages_remaining; 196994ab9eb9SArjun Roy int bytes_mapped; 197094ab9eb9SArjun Roy 1971e9adcfecSMike Kravetz /* We called zap_page_range_single, try to reinsert. */ 197294ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, 197394ab9eb9SArjun Roy pending_pages, 197494ab9eb9SArjun Roy &pages_remaining); 197594ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); 197694ab9eb9SArjun Roy *seq += bytes_mapped; 197794ab9eb9SArjun Roy *address += bytes_mapped; 197894ab9eb9SArjun Roy } 197994ab9eb9SArjun Roy if (err) { 198094ab9eb9SArjun Roy /* Either we were unable to zap, OR we zapped, retried an 198194ab9eb9SArjun Roy * insert, and still had an issue. Either way, pages_remaining 198294ab9eb9SArjun Roy * is the number of pages we were unable to map, and we unroll 198394ab9eb9SArjun Roy * some state we speculatively touched before.
198494ab9eb9SArjun Roy */ 198594ab9eb9SArjun Roy const int bytes_not_mapped = PAGE_SIZE * pages_remaining; 198694ab9eb9SArjun Roy 198794ab9eb9SArjun Roy *length -= bytes_not_mapped; 198894ab9eb9SArjun Roy zc->recv_skip_hint += bytes_not_mapped; 198994ab9eb9SArjun Roy } 199094ab9eb9SArjun Roy return err; 199194ab9eb9SArjun Roy } 199294ab9eb9SArjun Roy 19933763a24cSArjun Roy static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, 19943763a24cSArjun Roy struct page **pages, 199594ab9eb9SArjun Roy unsigned int pages_to_map, 199694ab9eb9SArjun Roy unsigned long *address, 199794ab9eb9SArjun Roy u32 *length, 19983763a24cSArjun Roy u32 *seq, 199994ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc, 200094ab9eb9SArjun Roy u32 total_bytes_to_map) 20013763a24cSArjun Roy { 20023763a24cSArjun Roy unsigned long pages_remaining = pages_to_map; 200394ab9eb9SArjun Roy unsigned int pages_mapped; 200494ab9eb9SArjun Roy unsigned int bytes_mapped; 200594ab9eb9SArjun Roy int err; 20063763a24cSArjun Roy 200794ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, pages, &pages_remaining); 200894ab9eb9SArjun Roy pages_mapped = pages_to_map - (unsigned int)pages_remaining; 200994ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * pages_mapped; 20103763a24cSArjun Roy /* Even if vm_insert_pages fails, it may have partially succeeded in 20113763a24cSArjun Roy * mapping (some but not all of the pages). 20123763a24cSArjun Roy */ 20133763a24cSArjun Roy *seq += bytes_mapped; 201494ab9eb9SArjun Roy *address += bytes_mapped; 201594ab9eb9SArjun Roy 201694ab9eb9SArjun Roy if (likely(!err)) 201794ab9eb9SArjun Roy return 0; 201894ab9eb9SArjun Roy 201994ab9eb9SArjun Roy /* Error: maybe zap and retry + rollback state for failed inserts. */ 202094ab9eb9SArjun Roy return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped, 202194ab9eb9SArjun Roy pages_remaining, address, length, seq, zc, total_bytes_to_map, 202294ab9eb9SArjun Roy err); 20233763a24cSArjun Roy } 20243763a24cSArjun Roy 20253c5a2fd0SArjun Roy #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS) 20267eeba170SArjun Roy static void tcp_zc_finalize_rx_tstamp(struct sock *sk, 20277eeba170SArjun Roy struct tcp_zerocopy_receive *zc, 20287eeba170SArjun Roy struct scm_timestamping_internal *tss) 20297eeba170SArjun Roy { 20307eeba170SArjun Roy unsigned long msg_control_addr; 20317eeba170SArjun Roy struct msghdr cmsg_dummy; 20327eeba170SArjun Roy 20337eeba170SArjun Roy msg_control_addr = (unsigned long)zc->msg_control; 2034c39ef213SKevin Brodsky cmsg_dummy.msg_control_user = (void __user *)msg_control_addr; 20357eeba170SArjun Roy cmsg_dummy.msg_controllen = 20367eeba170SArjun Roy (__kernel_size_t)zc->msg_controllen; 20377eeba170SArjun Roy cmsg_dummy.msg_flags = in_compat_syscall() 20387eeba170SArjun Roy ? 
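/* The throwaway msghdr above lets the regular tcp_recv_timestamp()
 * cmsg machinery write SCM_TIMESTAMPING into the user buffer named by
 * zc->msg_control, just as a recvmsg() call would.
 */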
MSG_CMSG_COMPAT : 0; 2039a6f8ee58SArjun Roy cmsg_dummy.msg_control_is_user = true; 20407eeba170SArjun Roy zc->msg_flags = 0; 20417eeba170SArjun Roy if (zc->msg_control == msg_control_addr && 20427eeba170SArjun Roy zc->msg_controllen == cmsg_dummy.msg_controllen) { 20437eeba170SArjun Roy tcp_recv_timestamp(&cmsg_dummy, sk, tss); 20447eeba170SArjun Roy zc->msg_control = (__u64) 2045c39ef213SKevin Brodsky ((uintptr_t)cmsg_dummy.msg_control_user); 20467eeba170SArjun Roy zc->msg_controllen = 20477eeba170SArjun Roy (__u64)cmsg_dummy.msg_controllen; 20487eeba170SArjun Roy zc->msg_flags = (__u32)cmsg_dummy.msg_flags; 20497eeba170SArjun Roy } 20507eeba170SArjun Roy } 20517eeba170SArjun Roy 20527a7f0946SArjun Roy static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm, 20537a7f0946SArjun Roy unsigned long address, 20547a7f0946SArjun Roy bool *mmap_locked) 20557a7f0946SArjun Roy { 2056350f6bbcSMatthew Wilcox (Oracle) struct vm_area_struct *vma = lock_vma_under_rcu(mm, address); 20577a7f0946SArjun Roy 20587a7f0946SArjun Roy if (vma) { 2059350f6bbcSMatthew Wilcox (Oracle) if (vma->vm_ops != &tcp_vm_ops) { 20607a7f0946SArjun Roy vma_end_read(vma); 20617a7f0946SArjun Roy return NULL; 20627a7f0946SArjun Roy } 20637a7f0946SArjun Roy *mmap_locked = false; 20647a7f0946SArjun Roy return vma; 20657a7f0946SArjun Roy } 20667a7f0946SArjun Roy 20677a7f0946SArjun Roy mmap_read_lock(mm); 20687a7f0946SArjun Roy vma = vma_lookup(mm, address); 2069350f6bbcSMatthew Wilcox (Oracle) if (!vma || vma->vm_ops != &tcp_vm_ops) { 20707a7f0946SArjun Roy mmap_read_unlock(mm); 20717a7f0946SArjun Roy return NULL; 20727a7f0946SArjun Roy } 20737a7f0946SArjun Roy *mmap_locked = true; 20747a7f0946SArjun Roy return vma; 20757a7f0946SArjun Roy } 20767a7f0946SArjun Roy 207794ab9eb9SArjun Roy #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 207805255b82SEric Dumazet static int tcp_zerocopy_receive(struct sock *sk, 20797eeba170SArjun Roy struct tcp_zerocopy_receive *zc, 20807eeba170SArjun Roy struct scm_timestamping_internal *tss) 208105255b82SEric Dumazet { 208294ab9eb9SArjun Roy u32 length = 0, offset, vma_len, avail_len, copylen = 0; 208305255b82SEric Dumazet unsigned long address = (unsigned long)zc->address; 208494ab9eb9SArjun Roy struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE]; 208518fb76edSArjun Roy s32 copybuf_len = zc->copybuf_len; 208618fb76edSArjun Roy struct tcp_sock *tp = tcp_sk(sk); 208705255b82SEric Dumazet const skb_frag_t *frags = NULL; 208894ab9eb9SArjun Roy unsigned int pages_to_map = 0; 208905255b82SEric Dumazet struct vm_area_struct *vma; 209005255b82SEric Dumazet struct sk_buff *skb = NULL; 209118fb76edSArjun Roy u32 seq = tp->copied_seq; 209294ab9eb9SArjun Roy u32 total_bytes_to_map; 209318fb76edSArjun Roy int inq = tcp_inq(sk); 20947a7f0946SArjun Roy bool mmap_locked; 209593ab6cc6SEric Dumazet int ret; 209693ab6cc6SEric Dumazet 209718fb76edSArjun Roy zc->copybuf_len = 0; 20987eeba170SArjun Roy zc->msg_flags = 0; 209918fb76edSArjun Roy 210005255b82SEric Dumazet if (address & (PAGE_SIZE - 1) || address != zc->address) 210193ab6cc6SEric Dumazet return -EINVAL; 210293ab6cc6SEric Dumazet 210393ab6cc6SEric Dumazet if (sk->sk_state == TCP_LISTEN) 210405255b82SEric Dumazet return -ENOTCONN; 210593ab6cc6SEric Dumazet 210693ab6cc6SEric Dumazet sock_rps_record_flow(sk); 210793ab6cc6SEric Dumazet 2108f21a3c48SArjun Roy if (inq && inq <= copybuf_len) 21097eeba170SArjun Roy return receive_fallback_to_copy(sk, zc, inq, tss); 2110f21a3c48SArjun Roy 2111936ced41SArjun Roy if (inq < PAGE_SIZE) { 2112936ced41SArjun Roy zc->length = 0; 
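/* Under one page queued: nothing can be remapped, so report the
 * residue via recv_skip_hint and let the caller recvmsg() it.
 *
 * For orientation, an illustrative userspace driver of this path,
 * using the standard TCP_ZEROCOPY_RECEIVE API (CHUNK is a placeholder,
 * error handling omitted):
 *
 *	void *addr = mmap(NULL, CHUNK, PROT_READ, MAP_SHARED, fd, 0);
 *	struct tcp_zerocopy_receive zc = {
 *		.address = (__u64)(unsigned long)addr,
 *		.length  = CHUNK,
 *	};
 *	socklen_t zlen = sizeof(zc);
 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zlen);
 *
 * On return, zc.length bytes are mapped at addr and zc.recv_skip_hint
 * bytes, if any, must still be read with recvmsg().
 */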
2113936ced41SArjun Roy zc->recv_skip_hint = inq; 2114936ced41SArjun Roy if (!inq && sock_flag(sk, SOCK_DONE)) 2115936ced41SArjun Roy return -EIO; 2116936ced41SArjun Roy return 0; 2117936ced41SArjun Roy } 2118936ced41SArjun Roy 21197a7f0946SArjun Roy vma = find_tcp_vma(current->mm, address, &mmap_locked); 21207a7f0946SArjun Roy if (!vma) 2121e776af60SEric Dumazet return -EINVAL; 21227a7f0946SArjun Roy 212318fb76edSArjun Roy vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); 212418fb76edSArjun Roy avail_len = min_t(u32, vma_len, inq); 212594ab9eb9SArjun Roy total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); 212694ab9eb9SArjun Roy if (total_bytes_to_map) { 212794ab9eb9SArjun Roy if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) 2128e9adcfecSMike Kravetz zap_page_range_single(vma, address, total_bytes_to_map, 2129e9adcfecSMike Kravetz NULL); 213094ab9eb9SArjun Roy zc->length = total_bytes_to_map; 213105255b82SEric Dumazet zc->recv_skip_hint = 0; 21328f2b0293SSoheil Hassas Yeganeh } else { 213318fb76edSArjun Roy zc->length = avail_len; 213418fb76edSArjun Roy zc->recv_skip_hint = avail_len; 21358f2b0293SSoheil Hassas Yeganeh } 213605255b82SEric Dumazet ret = 0; 213705255b82SEric Dumazet while (length + PAGE_SIZE <= zc->length) { 213898917cf0SArjun Roy int mappable_offset; 213994ab9eb9SArjun Roy struct page *page; 214098917cf0SArjun Roy 214105255b82SEric Dumazet if (zc->recv_skip_hint < PAGE_SIZE) { 21427fba5309SArjun Roy u32 offset_frag; 21437fba5309SArjun Roy 214405255b82SEric Dumazet if (skb) { 21450e627190SArjun Roy if (zc->recv_skip_hint > 0) 21460e627190SArjun Roy break; 214705255b82SEric Dumazet skb = skb->next; 214805255b82SEric Dumazet offset = seq - TCP_SKB_CB(skb)->seq; 214905255b82SEric Dumazet } else { 215093ab6cc6SEric Dumazet skb = tcp_recv_skb(sk, seq, &offset); 215105255b82SEric Dumazet } 21527eeba170SArjun Roy 21537eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) { 21547eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss); 21557eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS; 21567eeba170SArjun Roy } 215705255b82SEric Dumazet zc->recv_skip_hint = skb->len - offset; 21587fba5309SArjun Roy frags = skb_advance_to_frag(skb, offset, &offset_frag); 21597fba5309SArjun Roy if (!frags || offset_frag) 216005255b82SEric Dumazet break; 216105255b82SEric Dumazet } 2162789762ceSSoheil Hassas Yeganeh 216398917cf0SArjun Roy mappable_offset = find_next_mappable_frag(frags, 216498917cf0SArjun Roy zc->recv_skip_hint); 216598917cf0SArjun Roy if (mappable_offset) { 216698917cf0SArjun Roy zc->recv_skip_hint = mappable_offset; 216705255b82SEric Dumazet break; 2168789762ceSSoheil Hassas Yeganeh } 216994ab9eb9SArjun Roy page = skb_frag_page(frags); 217094ab9eb9SArjun Roy prefetchw(page); 217194ab9eb9SArjun Roy pages[pages_to_map++] = page; 217205255b82SEric Dumazet length += PAGE_SIZE; 217305255b82SEric Dumazet zc->recv_skip_hint -= PAGE_SIZE; 217405255b82SEric Dumazet frags++; 217594ab9eb9SArjun Roy if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE || 217694ab9eb9SArjun Roy zc->recv_skip_hint < PAGE_SIZE) { 217794ab9eb9SArjun Roy /* Either full batch, or we're about to go to next skb 217894ab9eb9SArjun Roy * (and we cannot unroll failed ops across skbs). 
217994ab9eb9SArjun Roy */ 218094ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, 218194ab9eb9SArjun Roy pages_to_map, 218294ab9eb9SArjun Roy &address, &length, 218394ab9eb9SArjun Roy &seq, zc, 218494ab9eb9SArjun Roy total_bytes_to_map); 21853763a24cSArjun Roy if (ret) 21863763a24cSArjun Roy goto out; 218794ab9eb9SArjun Roy pages_to_map = 0; 21883763a24cSArjun Roy } 21893763a24cSArjun Roy } 219094ab9eb9SArjun Roy if (pages_to_map) { 219194ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map, 219294ab9eb9SArjun Roy &address, &length, &seq, 219394ab9eb9SArjun Roy zc, total_bytes_to_map); 219493ab6cc6SEric Dumazet } 219505255b82SEric Dumazet out: 21967a7f0946SArjun Roy if (mmap_locked) 2197d8ed45c5SMichel Lespinasse mmap_read_unlock(current->mm); 21987a7f0946SArjun Roy else 21997a7f0946SArjun Roy vma_end_read(vma); 220018fb76edSArjun Roy /* Try to copy straggler data. */ 220118fb76edSArjun Roy if (!ret) 22027eeba170SArjun Roy copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); 220318fb76edSArjun Roy 220418fb76edSArjun Roy if (length + copylen) { 22057db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 220693ab6cc6SEric Dumazet tcp_rcv_space_adjust(sk); 220793ab6cc6SEric Dumazet 220893ab6cc6SEric Dumazet /* Clean up data we have read: This will do ACK frames. */ 220993ab6cc6SEric Dumazet tcp_recv_skb(sk, seq, &offset); 221018fb76edSArjun Roy tcp_cleanup_rbuf(sk, length + copylen); 221193ab6cc6SEric Dumazet ret = 0; 221205255b82SEric Dumazet if (length == zc->length) 221305255b82SEric Dumazet zc->recv_skip_hint = 0; 221405255b82SEric Dumazet } else { 221505255b82SEric Dumazet if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) 221605255b82SEric Dumazet ret = -EIO; 221705255b82SEric Dumazet } 221805255b82SEric Dumazet zc->length = length; 221993ab6cc6SEric Dumazet return ret; 222093ab6cc6SEric Dumazet } 222105255b82SEric Dumazet #endif 222293ab6cc6SEric Dumazet 222398aaa913SMike Maloney /* Similar to __sock_recv_timestamp, but does not require an skb */ 2224892bfd3dSFlorian Westphal void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, 22259718475eSDeepa Dinamani struct scm_timestamping_internal *tss) 222698aaa913SMike Maloney { 2227887feae3SDeepa Dinamani int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); 222898aaa913SMike Maloney bool has_timestamping = false; 222998aaa913SMike Maloney 223098aaa913SMike Maloney if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { 223198aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMP)) { 223298aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { 2233887feae3SDeepa Dinamani if (new_tstamp) { 2234df1b4ba9SArnd Bergmann struct __kernel_timespec kts = { 2235df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2236df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec, 2237df1b4ba9SArnd Bergmann }; 2238887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW, 2239887feae3SDeepa Dinamani sizeof(kts), &kts); 2240887feae3SDeepa Dinamani } else { 2241df1b4ba9SArnd Bergmann struct __kernel_old_timespec ts_old = { 2242df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2243df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec, 2244df1b4ba9SArnd Bergmann }; 22457f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD, 22469718475eSDeepa Dinamani sizeof(ts_old), &ts_old); 2247887feae3SDeepa Dinamani } 224898aaa913SMike Maloney } else { 2249887feae3SDeepa Dinamani if (new_tstamp) { 2250df1b4ba9SArnd Bergmann struct __kernel_sock_timeval stv = { 2251df1b4ba9SArnd Bergmann .tv_sec = 
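/* Illustrative userspace trigger for this branch (standard socket
 * timestamping API): a time64-aware caller does
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP_NEW, &on, sizeof(on));
 *
 * and recvmsg() then yields an SO_TIMESTAMP_NEW control message built
 * from tss->ts[0] here.
 */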
tss->ts[0].tv_sec, 2252df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000, 2253df1b4ba9SArnd Bergmann }; 2254887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, 2255887feae3SDeepa Dinamani sizeof(stv), &stv); 2256887feae3SDeepa Dinamani } else { 2257df1b4ba9SArnd Bergmann struct __kernel_old_timeval tv = { 2258df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2259df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000, 2260df1b4ba9SArnd Bergmann }; 22617f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, 226298aaa913SMike Maloney sizeof(tv), &tv); 226398aaa913SMike Maloney } 226498aaa913SMike Maloney } 2265887feae3SDeepa Dinamani } 226698aaa913SMike Maloney 2267e3390b30SEric Dumazet if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE) 226898aaa913SMike Maloney has_timestamping = true; 226998aaa913SMike Maloney else 22709718475eSDeepa Dinamani tss->ts[0] = (struct timespec64) {0}; 227198aaa913SMike Maloney } 227298aaa913SMike Maloney 227398aaa913SMike Maloney if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { 2274e3390b30SEric Dumazet if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE) 227598aaa913SMike Maloney has_timestamping = true; 227698aaa913SMike Maloney else 22779718475eSDeepa Dinamani tss->ts[2] = (struct timespec64) {0}; 227898aaa913SMike Maloney } 227998aaa913SMike Maloney 228098aaa913SMike Maloney if (has_timestamping) { 22819718475eSDeepa Dinamani tss->ts[1] = (struct timespec64) {0}; 22829718475eSDeepa Dinamani if (sock_flag(sk, SOCK_TSTAMP_NEW)) 22839718475eSDeepa Dinamani put_cmsg_scm_timestamping64(msg, tss); 22849718475eSDeepa Dinamani else 22859718475eSDeepa Dinamani put_cmsg_scm_timestamping(msg, tss); 228698aaa913SMike Maloney } 228798aaa913SMike Maloney } 228898aaa913SMike Maloney 2289b75eba76SSoheil Hassas Yeganeh static int tcp_inq_hint(struct sock *sk) 2290b75eba76SSoheil Hassas Yeganeh { 2291b75eba76SSoheil Hassas Yeganeh const struct tcp_sock *tp = tcp_sk(sk); 2292b75eba76SSoheil Hassas Yeganeh u32 copied_seq = READ_ONCE(tp->copied_seq); 2293b75eba76SSoheil Hassas Yeganeh u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); 2294b75eba76SSoheil Hassas Yeganeh int inq; 2295b75eba76SSoheil Hassas Yeganeh 2296b75eba76SSoheil Hassas Yeganeh inq = rcv_nxt - copied_seq; 2297b75eba76SSoheil Hassas Yeganeh if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { 2298b75eba76SSoheil Hassas Yeganeh lock_sock(sk); 2299b75eba76SSoheil Hassas Yeganeh inq = tp->rcv_nxt - tp->copied_seq; 2300b75eba76SSoheil Hassas Yeganeh release_sock(sk); 2301b75eba76SSoheil Hassas Yeganeh } 23026466e715SSoheil Hassas Yeganeh /* After receiving a FIN, tell the user-space to continue reading 23036466e715SSoheil Hassas Yeganeh * by returning a non-zero inq. 23046466e715SSoheil Hassas Yeganeh */ 23056466e715SSoheil Hassas Yeganeh if (inq == 0 && sock_flag(sk, SOCK_DONE)) 23066466e715SSoheil Hassas Yeganeh inq = 1; 2307b75eba76SSoheil Hassas Yeganeh return inq; 2308b75eba76SSoheil Hassas Yeganeh } 2309b75eba76SSoheil Hassas Yeganeh 23101da177e4SLinus Torvalds /* 23111da177e4SLinus Torvalds * This routine copies from a sock struct into the user buffer. 23121da177e4SLinus Torvalds * 23131da177e4SLinus Torvalds * Technical note: in 2.3 we work on _locked_ socket, so that 23141da177e4SLinus Torvalds * tricks with *seq access order and skb->users are not required. 23151da177e4SLinus Torvalds * Probably, code can be easily improved even more. 
23161da177e4SLinus Torvalds */ 23171da177e4SLinus Torvalds 23182cd81161SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 2319ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss, 23202cd81161SArjun Roy int *cmsg_flags) 23211da177e4SLinus Torvalds { 23221da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 23231da177e4SLinus Torvalds int copied = 0; 23241da177e4SLinus Torvalds u32 peek_seq; 23251da177e4SLinus Torvalds u32 *seq; 23261da177e4SLinus Torvalds unsigned long used; 23272cd81161SArjun Roy int err; 23281da177e4SLinus Torvalds int target; /* Read at least this many bytes */ 23291da177e4SLinus Torvalds long timeo; 2330dfbafc99SSabrina Dubroca struct sk_buff *skb, *last; 233177527313SIlpo Järvinen u32 urg_hole = 0; 23321da177e4SLinus Torvalds 23331da177e4SLinus Torvalds err = -ENOTCONN; 23341da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) 23351da177e4SLinus Torvalds goto out; 23361da177e4SLinus Torvalds 2337f94fd25cSJens Axboe if (tp->recvmsg_inq) { 2338925bba24SArjun Roy *cmsg_flags = TCP_CMSG_INQ; 2339f94fd25cSJens Axboe msg->msg_get_inq = 1; 2340f94fd25cSJens Axboe } 2341ec095263SOliver Hartkopp timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 23421da177e4SLinus Torvalds 23431da177e4SLinus Torvalds /* Urgent data needs to be handled specially. */ 23441da177e4SLinus Torvalds if (flags & MSG_OOB) 23451da177e4SLinus Torvalds goto recv_urg; 23461da177e4SLinus Torvalds 2347c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) { 2348c0e88ff0SPavel Emelyanov err = -EPERM; 2349c0e88ff0SPavel Emelyanov if (!(flags & MSG_PEEK)) 2350c0e88ff0SPavel Emelyanov goto out; 2351c0e88ff0SPavel Emelyanov 2352c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE) 2353c0e88ff0SPavel Emelyanov goto recv_sndq; 2354c0e88ff0SPavel Emelyanov 2355c0e88ff0SPavel Emelyanov err = -EINVAL; 2356c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE) 2357c0e88ff0SPavel Emelyanov goto out; 2358c0e88ff0SPavel Emelyanov 2359c0e88ff0SPavel Emelyanov /* 'common' recv queue MSG_PEEK-ing */ 2360c0e88ff0SPavel Emelyanov } 2361c0e88ff0SPavel Emelyanov 23621da177e4SLinus Torvalds seq = &tp->copied_seq; 23631da177e4SLinus Torvalds if (flags & MSG_PEEK) { 23641da177e4SLinus Torvalds peek_seq = tp->copied_seq; 23651da177e4SLinus Torvalds seq = &peek_seq; 23661da177e4SLinus Torvalds } 23671da177e4SLinus Torvalds 23681da177e4SLinus Torvalds target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 23691da177e4SLinus Torvalds 23701da177e4SLinus Torvalds do { 23711da177e4SLinus Torvalds u32 offset; 23721da177e4SLinus Torvalds 23731da177e4SLinus Torvalds /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 2374b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { 23751da177e4SLinus Torvalds if (copied) 23761da177e4SLinus Torvalds break; 23771da177e4SLinus Torvalds if (signal_pending(current)) { 23781da177e4SLinus Torvalds copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 23791da177e4SLinus Torvalds break; 23801da177e4SLinus Torvalds } 23811da177e4SLinus Torvalds } 23821da177e4SLinus Torvalds 23831da177e4SLinus Torvalds /* Next get a buffer. */ 23841da177e4SLinus Torvalds 2385dfbafc99SSabrina Dubroca last = skb_peek_tail(&sk->sk_receive_queue); 238691521944SDavid S. Miller skb_queue_walk(&sk->sk_receive_queue, skb) { 2387dfbafc99SSabrina Dubroca last = skb; 23881da177e4SLinus Torvalds /* Now that we have two receive queues this 23891da177e4SLinus Torvalds * shouldn't happen. 
23901da177e4SLinus Torvalds */ 2391d792c100SIlpo Järvinen if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 2392e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", 23932af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 2394d792c100SIlpo Järvinen flags)) 23951da177e4SLinus Torvalds break; 2396d792c100SIlpo Järvinen 23971da177e4SLinus Torvalds offset = *seq - TCP_SKB_CB(skb)->seq; 23989d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 23999d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__); 24001da177e4SLinus Torvalds offset--; 24019d691539SEric Dumazet } 24021da177e4SLinus Torvalds if (offset < skb->len) 24031da177e4SLinus Torvalds goto found_ok_skb; 2404e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 24051da177e4SLinus Torvalds goto found_fin_ok; 24062af6fd8bSJoe Perches WARN(!(flags & MSG_PEEK), 2407e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", 24082af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 240991521944SDavid S. Miller } 24101da177e4SLinus Torvalds 24111da177e4SLinus Torvalds /* Well, if we have backlog, try to process it now yet. */ 24121da177e4SLinus Torvalds 24139ed498c6SEric Dumazet if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) 24141da177e4SLinus Torvalds break; 24151da177e4SLinus Torvalds 24161da177e4SLinus Torvalds if (copied) { 24178bd172b7SEric Dumazet if (!timeo || 24188bd172b7SEric Dumazet sk->sk_err || 24191da177e4SLinus Torvalds sk->sk_state == TCP_CLOSE || 24201da177e4SLinus Torvalds (sk->sk_shutdown & RCV_SHUTDOWN) || 2421518a09efSDavid S. Miller signal_pending(current)) 24221da177e4SLinus Torvalds break; 24231da177e4SLinus Torvalds } else { 24241da177e4SLinus Torvalds if (sock_flag(sk, SOCK_DONE)) 24251da177e4SLinus Torvalds break; 24261da177e4SLinus Torvalds 24271da177e4SLinus Torvalds if (sk->sk_err) { 24281da177e4SLinus Torvalds copied = sock_error(sk); 24291da177e4SLinus Torvalds break; 24301da177e4SLinus Torvalds } 24311da177e4SLinus Torvalds 24321da177e4SLinus Torvalds if (sk->sk_shutdown & RCV_SHUTDOWN) 24331da177e4SLinus Torvalds break; 24341da177e4SLinus Torvalds 24351da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE) { 24361da177e4SLinus Torvalds /* This occurs when user tries to read 24371da177e4SLinus Torvalds * from never connected socket. 24381da177e4SLinus Torvalds */ 24391da177e4SLinus Torvalds copied = -ENOTCONN; 24401da177e4SLinus Torvalds break; 24411da177e4SLinus Torvalds } 24421da177e4SLinus Torvalds 24431da177e4SLinus Torvalds if (!timeo) { 24441da177e4SLinus Torvalds copied = -EAGAIN; 24451da177e4SLinus Torvalds break; 24461da177e4SLinus Torvalds } 24471da177e4SLinus Torvalds 24481da177e4SLinus Torvalds if (signal_pending(current)) { 24491da177e4SLinus Torvalds copied = sock_intr_errno(timeo); 24501da177e4SLinus Torvalds break; 24511da177e4SLinus Torvalds } 24521da177e4SLinus Torvalds } 24531da177e4SLinus Torvalds 24541da177e4SLinus Torvalds if (copied >= target) { 24551da177e4SLinus Torvalds /* Do not sleep, just process backlog. */ 245693afcfd1SEric Dumazet __sk_flush_backlog(sk); 2457dfbafc99SSabrina Dubroca } else { 245829fbc26eSEric Dumazet tcp_cleanup_rbuf(sk, copied); 2459419ce133SPaolo Abeni err = sk_wait_data(sk, &timeo, last); 2460419ce133SPaolo Abeni if (err < 0) { 2461419ce133SPaolo Abeni err = copied ? 
: err; 2462419ce133SPaolo Abeni goto out; 2463419ce133SPaolo Abeni } 2464dfbafc99SSabrina Dubroca } 24651da177e4SLinus Torvalds 246677527313SIlpo Järvinen if ((flags & MSG_PEEK) && 246777527313SIlpo Järvinen (peek_seq - copied - urg_hole != tp->copied_seq)) { 2468e87cc472SJoe Perches net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 2469e87cc472SJoe Perches current->comm, 2470e87cc472SJoe Perches task_pid_nr(current)); 24711da177e4SLinus Torvalds peek_seq = tp->copied_seq; 24721da177e4SLinus Torvalds } 24731da177e4SLinus Torvalds continue; 24741da177e4SLinus Torvalds 24751da177e4SLinus Torvalds found_ok_skb: 24761da177e4SLinus Torvalds /* Ok so how much can we use? */ 24771da177e4SLinus Torvalds used = skb->len - offset; 24781da177e4SLinus Torvalds if (len < used) 24791da177e4SLinus Torvalds used = len; 24801da177e4SLinus Torvalds 24811da177e4SLinus Torvalds /* Do we have urgent data here? */ 2482b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) { 24831da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - *seq; 24841da177e4SLinus Torvalds if (urg_offset < used) { 24851da177e4SLinus Torvalds if (!urg_offset) { 24861da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_URGINLINE)) { 24877db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1); 248877527313SIlpo Järvinen urg_hole++; 24891da177e4SLinus Torvalds offset++; 24901da177e4SLinus Torvalds used--; 24911da177e4SLinus Torvalds if (!used) 24921da177e4SLinus Torvalds goto skip_copy; 24931da177e4SLinus Torvalds } 24941da177e4SLinus Torvalds } else 24951da177e4SLinus Torvalds used = urg_offset; 24961da177e4SLinus Torvalds } 24971da177e4SLinus Torvalds } 24981da177e4SLinus Torvalds 24991da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) { 250051f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, offset, msg, used); 25011da177e4SLinus Torvalds if (err) { 25021da177e4SLinus Torvalds /* Exception. Bailout! */ 25031da177e4SLinus Torvalds if (!copied) 25041da177e4SLinus Torvalds copied = -EFAULT; 25051da177e4SLinus Torvalds break; 25061da177e4SLinus Torvalds } 25071da177e4SLinus Torvalds } 25081da177e4SLinus Torvalds 25097db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + used); 25101da177e4SLinus Torvalds copied += used; 25111da177e4SLinus Torvalds len -= used; 25121da177e4SLinus Torvalds 25131da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 25141da177e4SLinus Torvalds 25151da177e4SLinus Torvalds skip_copy: 2516b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { 25177b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0); 251831770e34SFlorian Westphal tcp_fast_path_check(sk); 251931770e34SFlorian Westphal } 25201da177e4SLinus Torvalds 252198aaa913SMike Maloney if (TCP_SKB_CB(skb)->has_rxtstamp) { 25222cd81161SArjun Roy tcp_update_recv_tstamps(skb, tss); 2523925bba24SArjun Roy *cmsg_flags |= TCP_CMSG_TS; 252498aaa913SMike Maloney } 2525cc4de047SKelly Littlepage 2526cc4de047SKelly Littlepage if (used + offset < skb->len) 2527cc4de047SKelly Littlepage continue; 2528cc4de047SKelly Littlepage 2529e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 25301da177e4SLinus Torvalds goto found_fin_ok; 25317bced397SDan Williams if (!(flags & MSG_PEEK)) 25323df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 25331da177e4SLinus Torvalds continue; 25341da177e4SLinus Torvalds 25351da177e4SLinus Torvalds found_fin_ok: 25361da177e4SLinus Torvalds /* Process the FIN. 
*/ 25377db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1); 25387bced397SDan Williams if (!(flags & MSG_PEEK)) 25393df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 25401da177e4SLinus Torvalds break; 25411da177e4SLinus Torvalds } while (len > 0); 25421da177e4SLinus Torvalds 25431da177e4SLinus Torvalds /* According to UNIX98, msg_name/msg_namelen are ignored 25441da177e4SLinus Torvalds * on connected socket. I was just happy when found this 8) --ANK 25451da177e4SLinus Torvalds */ 25461da177e4SLinus Torvalds 25471da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */ 25480e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied); 25491da177e4SLinus Torvalds return copied; 25501da177e4SLinus Torvalds 25511da177e4SLinus Torvalds out: 25521da177e4SLinus Torvalds return err; 25531da177e4SLinus Torvalds 25541da177e4SLinus Torvalds recv_urg: 2555377f0a08SRami Rosen err = tcp_recv_urg(sk, msg, len, flags); 25561da177e4SLinus Torvalds goto out; 2557c0e88ff0SPavel Emelyanov 2558c0e88ff0SPavel Emelyanov recv_sndq: 2559c0e88ff0SPavel Emelyanov err = tcp_peek_sndq(sk, msg, len); 2560c0e88ff0SPavel Emelyanov goto out; 25611da177e4SLinus Torvalds } 25622cd81161SArjun Roy 2563ec095263SOliver Hartkopp int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, 2564ec095263SOliver Hartkopp int *addr_len) 25652cd81161SArjun Roy { 2566f94fd25cSJens Axboe int cmsg_flags = 0, ret; 25672cd81161SArjun Roy struct scm_timestamping_internal tss; 25682cd81161SArjun Roy 25692cd81161SArjun Roy if (unlikely(flags & MSG_ERRQUEUE)) 25702cd81161SArjun Roy return inet_recv_error(sk, msg, len, addr_len); 25712cd81161SArjun Roy 25722cd81161SArjun Roy if (sk_can_busy_loop(sk) && 25732cd81161SArjun Roy skb_queue_empty_lockless(&sk->sk_receive_queue) && 25742cd81161SArjun Roy sk->sk_state == TCP_ESTABLISHED) 2575ec095263SOliver Hartkopp sk_busy_loop(sk, flags & MSG_DONTWAIT); 25762cd81161SArjun Roy 25772cd81161SArjun Roy lock_sock(sk); 2578ec095263SOliver Hartkopp ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags); 25792cd81161SArjun Roy release_sock(sk); 25802cd81161SArjun Roy 2581f94fd25cSJens Axboe if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) { 2582925bba24SArjun Roy if (cmsg_flags & TCP_CMSG_TS) 25832cd81161SArjun Roy tcp_recv_timestamp(msg, sk, &tss); 2584f94fd25cSJens Axboe if (msg->msg_get_inq) { 2585f94fd25cSJens Axboe msg->msg_inq = tcp_inq_hint(sk); 2586f94fd25cSJens Axboe if (cmsg_flags & TCP_CMSG_INQ) 2587f94fd25cSJens Axboe put_cmsg(msg, SOL_TCP, TCP_CM_INQ, 2588f94fd25cSJens Axboe sizeof(msg->msg_inq), &msg->msg_inq); 25892cd81161SArjun Roy } 25902cd81161SArjun Roy } 25912cd81161SArjun Roy return ret; 25922cd81161SArjun Roy } 25934bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_recvmsg); 25941da177e4SLinus Torvalds 2595490d5046SIlpo Järvinen void tcp_set_state(struct sock *sk, int state) 2596490d5046SIlpo Järvinen { 2597490d5046SIlpo Järvinen int oldstate = sk->sk_state; 2598490d5046SIlpo Järvinen 2599d4487491SLawrence Brakmo /* We defined a new enum for TCP states that are exported in BPF 2600d4487491SLawrence Brakmo * so as not force the internal TCP states to be frozen. The 2601d4487491SLawrence Brakmo * following checks will detect if an internal state value ever 2602d4487491SLawrence Brakmo * differs from the BPF value. If this ever happens, then we will 2603d4487491SLawrence Brakmo * need to remap the internal value to the BPF value before calling 2604d4487491SLawrence Brakmo * tcp_call_bpf_2arg. 
2605d4487491SLawrence Brakmo */ 2606d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED); 2607d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT); 2608d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV); 2609d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1); 2610d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2); 2611d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT); 2612d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE); 2613d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT); 2614d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK); 2615d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN); 2616d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING); 2617d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV); 261891051f00SGuillaume Nault BUILD_BUG_ON((int)BPF_TCP_BOUND_INACTIVE != (int)TCP_BOUND_INACTIVE); 2619d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES); 2620d4487491SLawrence Brakmo 262197a19cafSYonghong Song /* bpf uapi header bpf.h defines an anonymous enum with values 262297a19cafSYonghong Song * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux 262397a19cafSYonghong Song * is able to emit this enum in DWARF due to the above BUILD_BUG_ON. 262497a19cafSYonghong Song * But clang built vmlinux does not have this enum in DWARF 262597a19cafSYonghong Song * since clang removes the above code before generating IR/debuginfo. 262697a19cafSYonghong Song * Let us explicitly emit the type debuginfo to ensure the 262797a19cafSYonghong Song * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF 262897a19cafSYonghong Song * regardless of which compiler is used. 262997a19cafSYonghong Song */ 263097a19cafSYonghong Song BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED); 263197a19cafSYonghong Song 2632d4487491SLawrence Brakmo if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) 2633d4487491SLawrence Brakmo tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); 2634e8fce239SSong Liu 2635490d5046SIlpo Järvinen switch (state) { 2636490d5046SIlpo Järvinen case TCP_ESTABLISHED: 2637490d5046SIlpo Järvinen if (oldstate != TCP_ESTABLISHED) 263881cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2639490d5046SIlpo Järvinen break; 2640490d5046SIlpo Järvinen 2641490d5046SIlpo Järvinen case TCP_CLOSE: 2642490d5046SIlpo Järvinen if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 264381cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 2644490d5046SIlpo Järvinen 2645490d5046SIlpo Järvinen sk->sk_prot->unhash(sk); 2646490d5046SIlpo Järvinen if (inet_csk(sk)->icsk_bind_hash && 2647490d5046SIlpo Järvinen !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 2648ab1e0a13SArnaldo Carvalho de Melo inet_put_port(sk); 2649a8eceea8SJoe Perches fallthrough; 2650490d5046SIlpo Järvinen default: 2651490d5046SIlpo Järvinen if (oldstate == TCP_ESTABLISHED) 265274688e48SPavel Emelyanov TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2653490d5046SIlpo Järvinen } 2654490d5046SIlpo Järvinen 2655490d5046SIlpo Järvinen /* Change state AFTER socket is unhashed to avoid closed 2656490d5046SIlpo Järvinen * socket sitting in hash tables. 
2657490d5046SIlpo Järvinen */ 2658563e0bb0SYafang Shao inet_sk_state_store(sk, state); 2659490d5046SIlpo Järvinen } 2660490d5046SIlpo Järvinen EXPORT_SYMBOL_GPL(tcp_set_state); 2661490d5046SIlpo Järvinen 26621da177e4SLinus Torvalds /* 26631da177e4SLinus Torvalds * State processing on a close. This implements the state shift for 26641da177e4SLinus Torvalds * sending our FIN frame. Note that we only send a FIN for some 26651da177e4SLinus Torvalds * states. A shutdown() may have already sent the FIN, or we may be 26661da177e4SLinus Torvalds * closed. 26671da177e4SLinus Torvalds */ 26681da177e4SLinus Torvalds 26699b5b5cffSArjan van de Ven static const unsigned char new_state[16] = { 26701da177e4SLinus Torvalds /* current state: new state: action: */ 26710980c1e3SEric Dumazet [0 /* (Invalid) */] = TCP_CLOSE, 26720980c1e3SEric Dumazet [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 26730980c1e3SEric Dumazet [TCP_SYN_SENT] = TCP_CLOSE, 26740980c1e3SEric Dumazet [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 26750980c1e3SEric Dumazet [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 26760980c1e3SEric Dumazet [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 26770980c1e3SEric Dumazet [TCP_TIME_WAIT] = TCP_CLOSE, 26780980c1e3SEric Dumazet [TCP_CLOSE] = TCP_CLOSE, 26790980c1e3SEric Dumazet [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 26800980c1e3SEric Dumazet [TCP_LAST_ACK] = TCP_LAST_ACK, 26810980c1e3SEric Dumazet [TCP_LISTEN] = TCP_CLOSE, 26820980c1e3SEric Dumazet [TCP_CLOSING] = TCP_CLOSING, 26830980c1e3SEric Dumazet [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ 26841da177e4SLinus Torvalds }; 26851da177e4SLinus Torvalds 26861da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk) 26871da177e4SLinus Torvalds { 26881da177e4SLinus Torvalds int next = (int)new_state[sk->sk_state]; 26891da177e4SLinus Torvalds int ns = next & TCP_STATE_MASK; 26901da177e4SLinus Torvalds 26911da177e4SLinus Torvalds tcp_set_state(sk, ns); 26921da177e4SLinus Torvalds 26931da177e4SLinus Torvalds return next & TCP_ACTION_FIN; 26941da177e4SLinus Torvalds } 26951da177e4SLinus Torvalds 26961da177e4SLinus Torvalds /* 26971da177e4SLinus Torvalds * Shutdown the sending side of a connection. Much like close except 26981f29b058SSatoru SATOH * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 26991da177e4SLinus Torvalds */ 27001da177e4SLinus Torvalds 27011da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how) 27021da177e4SLinus Torvalds { 27031da177e4SLinus Torvalds /* We need to grab some memory, and put together a FIN, 27041da177e4SLinus Torvalds * and then put it into the queue to be sent. 27051da177e4SLinus Torvalds * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 27061da177e4SLinus Torvalds */ 27071da177e4SLinus Torvalds if (!(how & SEND_SHUTDOWN)) 27081da177e4SLinus Torvalds return; 27091da177e4SLinus Torvalds 27101da177e4SLinus Torvalds /* If we've already sent a FIN, or it's a closed state, skip this. */ 27111da177e4SLinus Torvalds if ((1 << sk->sk_state) & 27121da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_SYN_SENT | 27131da177e4SLinus Torvalds TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { 27141da177e4SLinus Torvalds /* Clear out any half completed packets. FIN if needed. 
*/ 27151da177e4SLinus Torvalds if (tcp_close_state(sk)) 27161da177e4SLinus Torvalds tcp_send_fin(sk); 27171da177e4SLinus Torvalds } 27181da177e4SLinus Torvalds } 27194bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_shutdown); 27201da177e4SLinus Torvalds 272119757cebSEric Dumazet int tcp_orphan_count_sum(void) 272219757cebSEric Dumazet { 272319757cebSEric Dumazet int i, total = 0; 272419757cebSEric Dumazet 272519757cebSEric Dumazet for_each_possible_cpu(i) 272619757cebSEric Dumazet total += per_cpu(tcp_orphan_count, i); 272719757cebSEric Dumazet 272819757cebSEric Dumazet return max(total, 0); 272919757cebSEric Dumazet } 273019757cebSEric Dumazet 273119757cebSEric Dumazet static int tcp_orphan_cache; 273219757cebSEric Dumazet static struct timer_list tcp_orphan_timer; 273319757cebSEric Dumazet #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) 273419757cebSEric Dumazet 273519757cebSEric Dumazet static void tcp_orphan_update(struct timer_list *unused) 273619757cebSEric Dumazet { 273719757cebSEric Dumazet WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); 273819757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 273919757cebSEric Dumazet } 274019757cebSEric Dumazet 274119757cebSEric Dumazet static bool tcp_too_many_orphans(int shift) 274219757cebSEric Dumazet { 274347e6ab24SKuniyuki Iwashima return READ_ONCE(tcp_orphan_cache) << shift > 274447e6ab24SKuniyuki Iwashima READ_ONCE(sysctl_tcp_max_orphans); 274519757cebSEric Dumazet } 274619757cebSEric Dumazet 2747efcdbf24SArun Sharma bool tcp_check_oom(struct sock *sk, int shift) 2748efcdbf24SArun Sharma { 2749efcdbf24SArun Sharma bool too_many_orphans, out_of_socket_memory; 2750efcdbf24SArun Sharma 275119757cebSEric Dumazet too_many_orphans = tcp_too_many_orphans(shift); 2752efcdbf24SArun Sharma out_of_socket_memory = tcp_out_of_memory(sk); 2753efcdbf24SArun Sharma 2754e87cc472SJoe Perches if (too_many_orphans) 2755e87cc472SJoe Perches net_info_ratelimited("too many orphaned sockets\n"); 2756e87cc472SJoe Perches if (out_of_socket_memory) 2757e87cc472SJoe Perches net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); 2758efcdbf24SArun Sharma return too_many_orphans || out_of_socket_memory; 2759efcdbf24SArun Sharma } 2760efcdbf24SArun Sharma 276177c3c956SPaolo Abeni void __tcp_close(struct sock *sk, long timeout) 27621da177e4SLinus Torvalds { 27631da177e4SLinus Torvalds struct sk_buff *skb; 27641da177e4SLinus Torvalds int data_was_unread = 0; 276575c2d907SHerbert Xu int state; 27661da177e4SLinus Torvalds 2767e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 27681da177e4SLinus Torvalds 27691da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) { 27701da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 27711da177e4SLinus Torvalds 27721da177e4SLinus Torvalds /* Special case. */ 27730a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk); 27741da177e4SLinus Torvalds 27751da177e4SLinus Torvalds goto adjudge_to_death; 27761da177e4SLinus Torvalds } 27771da177e4SLinus Torvalds 27781da177e4SLinus Torvalds /* We need to flush the recv. buffs. We do this only on the 27791da177e4SLinus Torvalds * descriptor close, not protocol-sourced closes, because the 27801da177e4SLinus Torvalds * reader process may not have drained the data yet! 
27811da177e4SLinus Torvalds */ 27821da177e4SLinus Torvalds while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 2783e11ecddfSEric Dumazet u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; 2784e11ecddfSEric Dumazet 2785e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2786e11ecddfSEric Dumazet len--; 27871da177e4SLinus Torvalds data_was_unread += len; 27881da177e4SLinus Torvalds __kfree_skb(skb); 27891da177e4SLinus Torvalds } 27901da177e4SLinus Torvalds 2791565b7b2dSKonstantin Khorenko /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ 2792565b7b2dSKonstantin Khorenko if (sk->sk_state == TCP_CLOSE) 2793565b7b2dSKonstantin Khorenko goto adjudge_to_death; 2794565b7b2dSKonstantin Khorenko 279565bb723cSGerrit Renker /* As outlined in RFC 2525, section 2.17, we send a RST here because 279665bb723cSGerrit Renker * data was lost. To witness the awful effects of the old behavior of 279765bb723cSGerrit Renker * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 279865bb723cSGerrit Renker * GET in an FTP client, suspend the process, wait for the client to 279965bb723cSGerrit Renker * advertise a zero window, then kill -9 the FTP client, wheee... 280065bb723cSGerrit Renker * Note: timeout is always zero in such a case. 28011da177e4SLinus Torvalds */ 2802ee995283SPavel Emelyanov if (unlikely(tcp_sk(sk)->repair)) { 2803ee995283SPavel Emelyanov sk->sk_prot->disconnect(sk, 0); 2804ee995283SPavel Emelyanov } else if (data_was_unread) { 28051da177e4SLinus Torvalds /* Unread data was tossed, zap the connection. */ 28066aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 28071da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 2808aa133076SWu Fengguang tcp_send_active_reset(sk, sk->sk_allocation); 28091da177e4SLinus Torvalds } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 28101da177e4SLinus Torvalds /* Check zero linger _after_ checking for unread data. */ 28111da177e4SLinus Torvalds sk->sk_prot->disconnect(sk, 0); 28126aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 28131da177e4SLinus Torvalds } else if (tcp_close_state(sk)) { 28141da177e4SLinus Torvalds /* We FIN if the application ate all the data before 28151da177e4SLinus Torvalds * zapping the connection. 28161da177e4SLinus Torvalds */ 28171da177e4SLinus Torvalds 28181da177e4SLinus Torvalds /* RED-PEN. Formally speaking, we have broken TCP state 28191da177e4SLinus Torvalds * machine. State transitions: 28201da177e4SLinus Torvalds * 28211da177e4SLinus Torvalds * TCP_ESTABLISHED -> TCP_FIN_WAIT1 28221da177e4SLinus Torvalds * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) 28231da177e4SLinus Torvalds * TCP_CLOSE_WAIT -> TCP_LAST_ACK 28241da177e4SLinus Torvalds * 28251da177e4SLinus Torvalds * are legal only when FIN has been sent (i.e. in window), 28261da177e4SLinus Torvalds * rather than queued out of window. Purists blame. 28271da177e4SLinus Torvalds * 28281da177e4SLinus Torvalds * F.e. "RFC state" is ESTABLISHED, 28291da177e4SLinus Torvalds * if Linux state is FIN-WAIT-1, but FIN is still not sent. 
28301da177e4SLinus Torvalds * 28311da177e4SLinus Torvalds * The visible declinations are that sometimes 28321da177e4SLinus Torvalds * we enter time-wait state, when it is not required really 28331da177e4SLinus Torvalds * (harmless), do not send active resets, when they are 28341da177e4SLinus Torvalds * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 28351da177e4SLinus Torvalds * they look as CLOSING or LAST_ACK for Linux) 28361da177e4SLinus Torvalds * Probably, I missed some more holelets. 28371da177e4SLinus Torvalds * --ANK 28388336886fSJerry Chu * XXX (TFO) - To start off we don't support SYN+ACK+FIN 28398336886fSJerry Chu * in a single packet! (May consider it later but will 28408336886fSJerry Chu * probably need API support or TCP_CORK SYN-ACK until 28418336886fSJerry Chu * data is written and socket is closed.) 28421da177e4SLinus Torvalds */ 28431da177e4SLinus Torvalds tcp_send_fin(sk); 28441da177e4SLinus Torvalds } 28451da177e4SLinus Torvalds 28461da177e4SLinus Torvalds sk_stream_wait_close(sk, timeout); 28471da177e4SLinus Torvalds 28481da177e4SLinus Torvalds adjudge_to_death: 284975c2d907SHerbert Xu state = sk->sk_state; 285075c2d907SHerbert Xu sock_hold(sk); 285175c2d907SHerbert Xu sock_orphan(sk); 285275c2d907SHerbert Xu 28531da177e4SLinus Torvalds local_bh_disable(); 28541da177e4SLinus Torvalds bh_lock_sock(sk); 28558873c064SEric Dumazet /* remove backlog if any, without releasing ownership. */ 28568873c064SEric Dumazet __release_sock(sk); 28571da177e4SLinus Torvalds 285819757cebSEric Dumazet this_cpu_inc(tcp_orphan_count); 2859eb4dea58SHerbert Xu 286075c2d907SHerbert Xu /* Have we already been destroyed by a softirq or backlog? */ 286175c2d907SHerbert Xu if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 286275c2d907SHerbert Xu goto out; 28631da177e4SLinus Torvalds 28641da177e4SLinus Torvalds /* This is a (useful) BSD violating of the RFC. There is a 28651da177e4SLinus Torvalds * problem with TCP as specified in that the other end could 28661da177e4SLinus Torvalds * keep a socket open forever with no application left this end. 2867b10bd54cSJesper Juhl * We use a 1 minute timeout (about the same as BSD) then kill 28681da177e4SLinus Torvalds * our end. If they send after that then tough - BUT: long enough 28691da177e4SLinus Torvalds * that we won't make the old 4*rto = almost no time - whoops 28701da177e4SLinus Torvalds * reset mistake. 28711da177e4SLinus Torvalds * 28721da177e4SLinus Torvalds * Nope, it was not mistake. It is really desired behaviour 28731da177e4SLinus Torvalds * f.e. on http servers, when such sockets are useless, but 28741da177e4SLinus Torvalds * consume significant resources. Let's do it with special 28751da177e4SLinus Torvalds * linger2 option. --ANK 28761da177e4SLinus Torvalds */ 28771da177e4SLinus Torvalds 28781da177e4SLinus Torvalds if (sk->sk_state == TCP_FIN_WAIT2) { 28791da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2880a81722ddSEric Dumazet if (READ_ONCE(tp->linger2) < 0) { 28811da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 28821da177e4SLinus Torvalds tcp_send_active_reset(sk, GFP_ATOMIC); 288302a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), 2884de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONLINGER); 28851da177e4SLinus Torvalds } else { 2886463c84b9SArnaldo Carvalho de Melo const int tmo = tcp_fin_time(sk); 28871da177e4SLinus Torvalds 28881da177e4SLinus Torvalds if (tmo > TCP_TIMEWAIT_LEN) { 288952499afeSDavid S. Miller inet_csk_reset_keepalive_timer(sk, 289052499afeSDavid S. 
Miller tmo - TCP_TIMEWAIT_LEN); 28911da177e4SLinus Torvalds } else { 28921da177e4SLinus Torvalds tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 28931da177e4SLinus Torvalds goto out; 28941da177e4SLinus Torvalds } 28951da177e4SLinus Torvalds } 28961da177e4SLinus Torvalds } 28971da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 2898efcdbf24SArun Sharma if (tcp_check_oom(sk, 0)) { 28991da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 29001da177e4SLinus Torvalds tcp_send_active_reset(sk, GFP_ATOMIC); 290102a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), 2902de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONMEMORY); 29034ee806d5SDan Streetman } else if (!check_net(sock_net(sk))) { 29044ee806d5SDan Streetman /* Not possible to send reset; just close */ 29054ee806d5SDan Streetman tcp_set_state(sk, TCP_CLOSE); 29061da177e4SLinus Torvalds } 29071da177e4SLinus Torvalds } 29081da177e4SLinus Torvalds 29098336886fSJerry Chu if (sk->sk_state == TCP_CLOSE) { 2910d983ea6fSEric Dumazet struct request_sock *req; 2911d983ea6fSEric Dumazet 2912d983ea6fSEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 2913d983ea6fSEric Dumazet lockdep_sock_is_held(sk)); 29148336886fSJerry Chu /* We could get here with a non-NULL req if the socket is 29158336886fSJerry Chu * aborted (e.g., closed with unread data) before 3WHS 29168336886fSJerry Chu * finishes. 29178336886fSJerry Chu */ 291800db4124SIan Morris if (req) 29198336886fSJerry Chu reqsk_fastopen_remove(sk, req, false); 29200a5578cfSArnaldo Carvalho de Melo inet_csk_destroy_sock(sk); 29218336886fSJerry Chu } 29221da177e4SLinus Torvalds /* Otherwise, socket is reprieved until protocol close. */ 29231da177e4SLinus Torvalds 29241da177e4SLinus Torvalds out: 29251da177e4SLinus Torvalds bh_unlock_sock(sk); 29261da177e4SLinus Torvalds local_bh_enable(); 292777c3c956SPaolo Abeni } 292877c3c956SPaolo Abeni 292977c3c956SPaolo Abeni void tcp_close(struct sock *sk, long timeout) 293077c3c956SPaolo Abeni { 293177c3c956SPaolo Abeni lock_sock(sk); 293277c3c956SPaolo Abeni __tcp_close(sk, timeout); 29338873c064SEric Dumazet release_sock(sk); 29341da177e4SLinus Torvalds sock_put(sk); 29351da177e4SLinus Torvalds } 29364bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_close); 29371da177e4SLinus Torvalds 29381da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */ 29391da177e4SLinus Torvalds 2940a2a385d6SEric Dumazet static inline bool tcp_need_reset(int state) 29411da177e4SLinus Torvalds { 29421da177e4SLinus Torvalds return (1 << state) & 29431da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 2944a7150e38SEric Dumazet TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 29451da177e4SLinus Torvalds } 29461da177e4SLinus Torvalds 294775c119afSEric Dumazet static void tcp_rtx_queue_purge(struct sock *sk) 294875c119afSEric Dumazet { 294975c119afSEric Dumazet struct rb_node *p = rb_first(&sk->tcp_rtx_queue); 295075c119afSEric Dumazet 29512bec445fSEric Dumazet tcp_sk(sk)->highest_sack = NULL; 295275c119afSEric Dumazet while (p) { 295375c119afSEric Dumazet struct sk_buff *skb = rb_to_skb(p); 295475c119afSEric Dumazet 295575c119afSEric Dumazet p = rb_next(p); 295675c119afSEric Dumazet /* Since we are deleting whole queue, no need to 295775c119afSEric Dumazet * list_del(&skb->tcp_tsorted_anchor) 295875c119afSEric Dumazet */ 295975c119afSEric Dumazet tcp_rtx_queue_unlink(skb, sk); 296003271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 296175c119afSEric Dumazet } 296275c119afSEric Dumazet } 296375c119afSEric Dumazet 2964ac3f09baSEric Dumazet void 
tcp_write_queue_purge(struct sock *sk) 2965ac3f09baSEric Dumazet { 2966ac3f09baSEric Dumazet struct sk_buff *skb; 2967ac3f09baSEric Dumazet 2968ac3f09baSEric Dumazet tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 2969ac3f09baSEric Dumazet while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 2970ac3f09baSEric Dumazet tcp_skb_tsorted_anchor_cleanup(skb); 297103271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 2972ac3f09baSEric Dumazet } 297375c119afSEric Dumazet tcp_rtx_queue_purge(sk); 2974ac3f09baSEric Dumazet INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 2975ac3f09baSEric Dumazet tcp_clear_all_retrans_hints(tcp_sk(sk)); 2976bffd168cSSoheil Hassas Yeganeh tcp_sk(sk)->packets_out = 0; 297704c03114SEric Dumazet inet_csk(sk)->icsk_backoff = 0; 2978ac3f09baSEric Dumazet } 2979ac3f09baSEric Dumazet 29801da177e4SLinus Torvalds int tcp_disconnect(struct sock *sk, int flags) 29811da177e4SLinus Torvalds { 29821da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk); 2983463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 29841da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 29851da177e4SLinus Torvalds int old_state = sk->sk_state; 29860f317464SEric Dumazet u32 seq; 29871da177e4SLinus Torvalds 29881da177e4SLinus Torvalds if (old_state != TCP_CLOSE) 29891da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 29901da177e4SLinus Torvalds 29911da177e4SLinus Torvalds /* ABORT function of RFC793 */ 29921da177e4SLinus Torvalds if (old_state == TCP_LISTEN) { 29930a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk); 2994ee995283SPavel Emelyanov } else if (unlikely(tp->repair)) { 2995e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNABORTED); 29961da177e4SLinus Torvalds } else if (tcp_need_reset(old_state) || 29971da177e4SLinus Torvalds (tp->snd_nxt != tp->write_seq && 29981da177e4SLinus Torvalds (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 2999caa20d9aSStephen Hemminger /* The last check adjusts for discrepancy of Linux wrt. RFC 30001da177e4SLinus Torvalds * states 30011da177e4SLinus Torvalds */ 30021da177e4SLinus Torvalds tcp_send_active_reset(sk, gfp_any()); 3003e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET); 3004a7150e38SEric Dumazet } else if (old_state == TCP_SYN_SENT) 3005e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET); 30061da177e4SLinus Torvalds 30071da177e4SLinus Torvalds tcp_clear_xmit_timers(sk); 30081da177e4SLinus Torvalds __skb_queue_purge(&sk->sk_receive_queue); 30097db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 30107b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0); 3011fe067e8aSDavid S. 
Miller tcp_write_queue_purge(sk); 3012cf1ef3f0SWei Wang tcp_fastopen_active_disable_ofo_check(sk); 30139f5afeaeSYaogong Wang skb_rbtree_purge(&tp->out_of_order_queue); 30141da177e4SLinus Torvalds 3015c720c7e8SEric Dumazet inet->inet_dport = 0; 30161da177e4SLinus Torvalds 3017e0833d1fSKuniyuki Iwashima inet_bhash2_reset_saddr(sk); 30181da177e4SLinus Torvalds 3019e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, 0); 30201da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE); 3021740b0f18SEric Dumazet tp->srtt_us = 0; 3022b9e2e689SEric Dumazet tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 30233f6c65d6SWei Wang tp->rcv_rtt_last_tsecr = 0; 30240f317464SEric Dumazet 30250f317464SEric Dumazet seq = tp->write_seq + tp->max_window + 2; 30260f317464SEric Dumazet if (!seq) 30270f317464SEric Dumazet seq = 1; 30280f317464SEric Dumazet WRITE_ONCE(tp->write_seq, seq); 30290f317464SEric Dumazet 3030463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 30316687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 30329d9b1ee0SEnke Chen icsk->icsk_probes_tstamp = 0; 30336a408147SEric Dumazet icsk->icsk_rto = TCP_TIMEOUT_INIT; 3034ca584ba0SMartin KaFai Lau icsk->icsk_rto_min = TCP_RTO_MIN; 30352b8ee4f0SMartin KaFai Lau icsk->icsk_delack_max = TCP_DELACK_MAX; 30360b6a05c1SIlpo Järvinen tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 303740570375SEric Dumazet tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 30381da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 3039f4ce91ceSNeal Cardwell tp->is_cwnd_limited = 0; 3040f4ce91ceSNeal Cardwell tp->max_packets_out = 0; 30411fdf475aSEric Dumazet tp->window_clamp = 0; 30422fbdd562SEric Dumazet tp->delivered = 0; 3043e21db6f6SYuchung Cheng tp->delivered_ce = 0; 3044ce69e563SChristoph Paasch if (icsk->icsk_ca_ops->release) 3045ce69e563SChristoph Paasch icsk->icsk_ca_ops->release(sk); 3046ce69e563SChristoph Paasch memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 30478919a9b3SNeal Cardwell icsk->icsk_ca_initialized = 0; 30486687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 3049d4761754SYousuk Seung tp->is_sack_reneg = 0; 30501da177e4SLinus Torvalds tcp_clear_retrans(tp); 3051c13c48c0SEric Dumazet tp->total_retrans = 0; 3052463c84b9SArnaldo Carvalho de Melo inet_csk_delack_init(sk); 3053499350a5SWei Wang /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 3054499350a5SWei Wang * issue in __tcp_select_window() 3055499350a5SWei Wang */ 3056499350a5SWei Wang icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; 3057b40b4f79SSrinivas Aji memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 30581da177e4SLinus Torvalds __sk_dst_reset(sk); 30598f905c0eSEric Dumazet dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); 306017c3060bSEric Dumazet tcp_saved_syn_free(tp); 30615d9f4262SEric Dumazet tp->compressed_ack = 0; 3062784f8344SEric Dumazet tp->segs_in = 0; 3063784f8344SEric Dumazet tp->segs_out = 0; 3064ba113c3aSWei Wang tp->bytes_sent = 0; 3065e858faf5SChristoph Paasch tp->bytes_acked = 0; 3066e858faf5SChristoph Paasch tp->bytes_received = 0; 3067fb31c9b9SWei Wang tp->bytes_retrans = 0; 3068db7ffee6SEric Dumazet tp->data_segs_in = 0; 3069db7ffee6SEric Dumazet tp->data_segs_out = 0; 30707788174eSYuchung Cheng tp->duplicate_sack[0].start_seq = 0; 30717788174eSYuchung Cheng tp->duplicate_sack[0].end_seq = 0; 30727e10b655SWei Wang tp->dsack_dups = 0; 30737ec65372SWei Wang tp->reord_seen = 0; 30745c701549SEric Dumazet tp->retrans_out = 0; 30755c701549SEric Dumazet tp->sacked_out = 0; 30765c701549SEric Dumazet tp->tlp_high_seq = 0; 30775c701549SEric Dumazet 
tp->last_oow_ack_time = 0; 307829c1c446SMubashir Adnan Qureshi tp->plb_rehash = 0; 30796cda8b74SEric Dumazet /* There's a bubble in the pipe until at least the first ACK. */ 30806cda8b74SEric Dumazet tp->app_limited = ~0U; 3081300b655dSDavid Morley tp->rate_app_limited = 1; 3082792c4354SEric Dumazet tp->rack.mstamp = 0; 3083792c4354SEric Dumazet tp->rack.advanced = 0; 3084792c4354SEric Dumazet tp->rack.reo_wnd_steps = 1; 3085792c4354SEric Dumazet tp->rack.last_delivered = 0; 3086792c4354SEric Dumazet tp->rack.reo_wnd_persist = 0; 3087792c4354SEric Dumazet tp->rack.dsack_seen = 0; 30886bcdc40dSEric Dumazet tp->syn_data_acked = 0; 30896bcdc40dSEric Dumazet tp->rx_opt.saw_tstamp = 0; 30906bcdc40dSEric Dumazet tp->rx_opt.dsack = 0; 30916bcdc40dSEric Dumazet tp->rx_opt.num_sacks = 0; 3092f9af2dbbSThomas Higdon tp->rcv_ooopack = 0; 30936cda8b74SEric Dumazet 30941da177e4SLinus Torvalds 30957db92362SWei Wang /* Clean up fastopen related fields */ 30967db92362SWei Wang tcp_free_fastopen_req(tp); 309708e39c0dSEric Dumazet inet_clear_bit(DEFER_CONNECT, sk); 309848027478SJason Baron tp->fastopen_client_fail = 0; 30997db92362SWei Wang 3100c720c7e8SEric Dumazet WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 31011da177e4SLinus Torvalds 31029b42d55aSLi RongQing if (sk->sk_frag.page) { 31039b42d55aSLi RongQing put_page(sk->sk_frag.page); 31049b42d55aSLi RongQing sk->sk_frag.page = NULL; 31059b42d55aSLi RongQing sk->sk_frag.offset = 0; 31069b42d55aSLi RongQing } 3107e3ae2365SAlexander Aring sk_error_report(sk); 3108a01512b1SYueHaibing return 0; 31091da177e4SLinus Torvalds } 31104bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_disconnect); 31111da177e4SLinus Torvalds 3112a2a385d6SEric Dumazet static inline bool tcp_can_repair_sock(const struct sock *sk) 3113ee995283SPavel Emelyanov { 3114cb388e7eSMartin KaFai Lau return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 3115319b0534SAndrey Vagin (sk->sk_state != TCP_LISTEN); 3116ee995283SPavel Emelyanov } 3117ee995283SPavel Emelyanov 3118d38d2b00SChristoph Hellwig static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) 3119b1ed4c4fSAndrey Vagin { 3120b1ed4c4fSAndrey Vagin struct tcp_repair_window opt; 3121b1ed4c4fSAndrey Vagin 3122b1ed4c4fSAndrey Vagin if (!tp->repair) 3123b1ed4c4fSAndrey Vagin return -EPERM; 3124b1ed4c4fSAndrey Vagin 3125b1ed4c4fSAndrey Vagin if (len != sizeof(opt)) 3126b1ed4c4fSAndrey Vagin return -EINVAL; 3127b1ed4c4fSAndrey Vagin 3128d38d2b00SChristoph Hellwig if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) 3129b1ed4c4fSAndrey Vagin return -EFAULT; 3130b1ed4c4fSAndrey Vagin 3131b1ed4c4fSAndrey Vagin if (opt.max_window < opt.snd_wnd) 3132b1ed4c4fSAndrey Vagin return -EINVAL; 3133b1ed4c4fSAndrey Vagin 3134b1ed4c4fSAndrey Vagin if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) 3135b1ed4c4fSAndrey Vagin return -EINVAL; 3136b1ed4c4fSAndrey Vagin 3137b1ed4c4fSAndrey Vagin if (after(opt.rcv_wup, tp->rcv_nxt)) 3138b1ed4c4fSAndrey Vagin return -EINVAL; 3139b1ed4c4fSAndrey Vagin 3140b1ed4c4fSAndrey Vagin tp->snd_wl1 = opt.snd_wl1; 3141b1ed4c4fSAndrey Vagin tp->snd_wnd = opt.snd_wnd; 3142b1ed4c4fSAndrey Vagin tp->max_window = opt.max_window; 3143b1ed4c4fSAndrey Vagin 3144b1ed4c4fSAndrey Vagin tp->rcv_wnd = opt.rcv_wnd; 3145b1ed4c4fSAndrey Vagin tp->rcv_wup = opt.rcv_wup; 3146b1ed4c4fSAndrey Vagin 3147b1ed4c4fSAndrey Vagin return 0; 3148b1ed4c4fSAndrey Vagin } 3149b1ed4c4fSAndrey Vagin 3150d38d2b00SChristoph Hellwig static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, 3151d38d2b00SChristoph Hellwig 
unsigned int len) 3152b139ba4eSPavel Emelyanov { 315315e56515SDouglas Caetano dos Santos struct tcp_sock *tp = tcp_sk(sk); 3154de248a75SPavel Emelyanov struct tcp_repair_opt opt; 3155d3c48151SChristoph Hellwig size_t offset = 0; 3156b139ba4eSPavel Emelyanov 3157de248a75SPavel Emelyanov while (len >= sizeof(opt)) { 3158d3c48151SChristoph Hellwig if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) 3159b139ba4eSPavel Emelyanov return -EFAULT; 3160b139ba4eSPavel Emelyanov 3161d3c48151SChristoph Hellwig offset += sizeof(opt); 3162de248a75SPavel Emelyanov len -= sizeof(opt); 3163b139ba4eSPavel Emelyanov 3164de248a75SPavel Emelyanov switch (opt.opt_code) { 3165de248a75SPavel Emelyanov case TCPOPT_MSS: 3166de248a75SPavel Emelyanov tp->rx_opt.mss_clamp = opt.opt_val; 316715e56515SDouglas Caetano dos Santos tcp_mtup_init(sk); 3168b139ba4eSPavel Emelyanov break; 3169de248a75SPavel Emelyanov case TCPOPT_WINDOW: 3170bc26ccd8SAndrey Vagin { 3171bc26ccd8SAndrey Vagin u16 snd_wscale = opt.opt_val & 0xFFFF; 3172bc26ccd8SAndrey Vagin u16 rcv_wscale = opt.opt_val >> 16; 3173bc26ccd8SAndrey Vagin 3174589c49cbSGao Feng if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) 3175b139ba4eSPavel Emelyanov return -EFBIG; 3176b139ba4eSPavel Emelyanov 3177bc26ccd8SAndrey Vagin tp->rx_opt.snd_wscale = snd_wscale; 3178bc26ccd8SAndrey Vagin tp->rx_opt.rcv_wscale = rcv_wscale; 3179bc26ccd8SAndrey Vagin tp->rx_opt.wscale_ok = 1; 3180bc26ccd8SAndrey Vagin } 3181b139ba4eSPavel Emelyanov break; 3182b139ba4eSPavel Emelyanov case TCPOPT_SACK_PERM: 3183de248a75SPavel Emelyanov if (opt.opt_val != 0) 3184de248a75SPavel Emelyanov return -EINVAL; 3185de248a75SPavel Emelyanov 3186b139ba4eSPavel Emelyanov tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 3187b139ba4eSPavel Emelyanov break; 3188b139ba4eSPavel Emelyanov case TCPOPT_TIMESTAMP: 3189de248a75SPavel Emelyanov if (opt.opt_val != 0) 3190de248a75SPavel Emelyanov return -EINVAL; 3191de248a75SPavel Emelyanov 3192b139ba4eSPavel Emelyanov tp->rx_opt.tstamp_ok = 1; 3193b139ba4eSPavel Emelyanov break; 3194b139ba4eSPavel Emelyanov } 3195b139ba4eSPavel Emelyanov } 3196b139ba4eSPavel Emelyanov 3197b139ba4eSPavel Emelyanov return 0; 3198b139ba4eSPavel Emelyanov } 3199b139ba4eSPavel Emelyanov 3200a842fe14SEric Dumazet DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 3201a842fe14SEric Dumazet EXPORT_SYMBOL(tcp_tx_delay_enabled); 3202a842fe14SEric Dumazet 3203a842fe14SEric Dumazet static void tcp_enable_tx_delay(void) 3204a842fe14SEric Dumazet { 3205a842fe14SEric Dumazet if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { 3206a842fe14SEric Dumazet static int __tcp_tx_delay_enabled = 0; 3207a842fe14SEric Dumazet 3208a842fe14SEric Dumazet if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { 3209a842fe14SEric Dumazet static_branch_enable(&tcp_tx_delay_enabled); 3210a842fe14SEric Dumazet pr_info("TCP_TX_DELAY enabled\n"); 3211a842fe14SEric Dumazet } 3212a842fe14SEric Dumazet } 3213a842fe14SEric Dumazet } 3214a842fe14SEric Dumazet 3215db10538aSChristoph Hellwig /* When set indicates to always queue non-full frames. Later the user clears 3216db10538aSChristoph Hellwig * this option and we transmit any pending partial frames in the queue. This is 3217db10538aSChristoph Hellwig * meant to be used alongside sendfile() to get properly filled frames when the 3218db10538aSChristoph Hellwig * user (for example) must write out headers with a write() call first and then 3219db10538aSChristoph Hellwig * use sendfile to send out the data parts. 
3220db10538aSChristoph Hellwig * 3221db10538aSChristoph Hellwig * TCP_CORK can be set together with TCP_NODELAY and it is stronger than 3222db10538aSChristoph Hellwig * TCP_NODELAY. 3223db10538aSChristoph Hellwig */ 32246fadaa56SMaxim Galaganov void __tcp_sock_set_cork(struct sock *sk, bool on) 3225db10538aSChristoph Hellwig { 3226db10538aSChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk); 3227db10538aSChristoph Hellwig 3228db10538aSChristoph Hellwig if (on) { 3229db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_CORK; 3230db10538aSChristoph Hellwig } else { 3231db10538aSChristoph Hellwig tp->nonagle &= ~TCP_NAGLE_CORK; 3232db10538aSChristoph Hellwig if (tp->nonagle & TCP_NAGLE_OFF) 3233db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_PUSH; 3234db10538aSChristoph Hellwig tcp_push_pending_frames(sk); 3235db10538aSChristoph Hellwig } 3236db10538aSChristoph Hellwig } 3237db10538aSChristoph Hellwig 3238db10538aSChristoph Hellwig void tcp_sock_set_cork(struct sock *sk, bool on) 3239db10538aSChristoph Hellwig { 3240db10538aSChristoph Hellwig lock_sock(sk); 3241db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, on); 3242db10538aSChristoph Hellwig release_sock(sk); 3243db10538aSChristoph Hellwig } 3244db10538aSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_cork); 3245db10538aSChristoph Hellwig 324612abc5eeSChristoph Hellwig /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is 324712abc5eeSChristoph Hellwig * remembered, but it is not activated until cork is cleared. 324812abc5eeSChristoph Hellwig * 324912abc5eeSChristoph Hellwig * However, when TCP_NODELAY is set we make an explicit push, which overrides 325012abc5eeSChristoph Hellwig * even TCP_CORK for currently queued segments. 325112abc5eeSChristoph Hellwig */ 32526fadaa56SMaxim Galaganov void __tcp_sock_set_nodelay(struct sock *sk, bool on) 325312abc5eeSChristoph Hellwig { 325412abc5eeSChristoph Hellwig if (on) { 325512abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 325612abc5eeSChristoph Hellwig tcp_push_pending_frames(sk); 325712abc5eeSChristoph Hellwig } else { 325812abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; 325912abc5eeSChristoph Hellwig } 326012abc5eeSChristoph Hellwig } 326112abc5eeSChristoph Hellwig 326212abc5eeSChristoph Hellwig void tcp_sock_set_nodelay(struct sock *sk) 326312abc5eeSChristoph Hellwig { 326412abc5eeSChristoph Hellwig lock_sock(sk); 326512abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, true); 326612abc5eeSChristoph Hellwig release_sock(sk); 326712abc5eeSChristoph Hellwig } 326812abc5eeSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_nodelay); 326912abc5eeSChristoph Hellwig 3270ddd061b8SChristoph Hellwig static void __tcp_sock_set_quickack(struct sock *sk, int val) 3271ddd061b8SChristoph Hellwig { 3272ddd061b8SChristoph Hellwig if (!val) { 3273ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk); 3274ddd061b8SChristoph Hellwig return; 3275ddd061b8SChristoph Hellwig } 3276ddd061b8SChristoph Hellwig 3277ddd061b8SChristoph Hellwig inet_csk_exit_pingpong_mode(sk); 3278ddd061b8SChristoph Hellwig if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 3279ddd061b8SChristoph Hellwig inet_csk_ack_scheduled(sk)) { 3280ddd061b8SChristoph Hellwig inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; 3281ddd061b8SChristoph Hellwig tcp_cleanup_rbuf(sk, 1); 3282ddd061b8SChristoph Hellwig if (!(val & 1)) 3283ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk); 3284ddd061b8SChristoph Hellwig } 3285ddd061b8SChristoph Hellwig } 
3286ddd061b8SChristoph Hellwig 3287ddd061b8SChristoph Hellwig void tcp_sock_set_quickack(struct sock *sk, int val) 3288ddd061b8SChristoph Hellwig { 3289ddd061b8SChristoph Hellwig lock_sock(sk); 3290ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val); 3291ddd061b8SChristoph Hellwig release_sock(sk); 3292ddd061b8SChristoph Hellwig } 3293ddd061b8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_quickack); 3294ddd061b8SChristoph Hellwig 3295557eadfcSChristoph Hellwig int tcp_sock_set_syncnt(struct sock *sk, int val) 3296557eadfcSChristoph Hellwig { 3297557eadfcSChristoph Hellwig if (val < 1 || val > MAX_TCP_SYNCNT) 3298557eadfcSChristoph Hellwig return -EINVAL; 3299557eadfcSChristoph Hellwig 33003a037f0fSEric Dumazet WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val); 3301557eadfcSChristoph Hellwig return 0; 3302557eadfcSChristoph Hellwig } 3303557eadfcSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_syncnt); 3304557eadfcSChristoph Hellwig 3305d58f2e15SEric Dumazet int tcp_sock_set_user_timeout(struct sock *sk, int val) 3306c488aeadSChristoph Hellwig { 3307d58f2e15SEric Dumazet /* Cap the max time in ms TCP will retry or probe the window 3308d58f2e15SEric Dumazet * before giving up and aborting (ETIMEDOUT) a connection. 3309d58f2e15SEric Dumazet */ 3310d58f2e15SEric Dumazet if (val < 0) 3311d58f2e15SEric Dumazet return -EINVAL; 3312d58f2e15SEric Dumazet 331326023e91SEric Dumazet WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val); 3314d58f2e15SEric Dumazet return 0; 3315c488aeadSChristoph Hellwig } 3316c488aeadSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_user_timeout); 3317c488aeadSChristoph Hellwig 3318aad4a0a9SDmitry Yakunin int tcp_sock_set_keepidle_locked(struct sock *sk, int val) 331971c48eb8SChristoph Hellwig { 332071c48eb8SChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk); 332171c48eb8SChristoph Hellwig 332271c48eb8SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPIDLE) 332371c48eb8SChristoph Hellwig return -EINVAL; 332471c48eb8SChristoph Hellwig 33254164245cSEric Dumazet /* Paired with WRITE_ONCE() in keepalive_time_when() */ 33264164245cSEric Dumazet WRITE_ONCE(tp->keepalive_time, val * HZ); 332771c48eb8SChristoph Hellwig if (sock_flag(sk, SOCK_KEEPOPEN) && 332871c48eb8SChristoph Hellwig !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 332971c48eb8SChristoph Hellwig u32 elapsed = keepalive_time_elapsed(tp); 333071c48eb8SChristoph Hellwig 333171c48eb8SChristoph Hellwig if (tp->keepalive_time > elapsed) 333271c48eb8SChristoph Hellwig elapsed = tp->keepalive_time - elapsed; 333371c48eb8SChristoph Hellwig else 333471c48eb8SChristoph Hellwig elapsed = 0; 333571c48eb8SChristoph Hellwig inet_csk_reset_keepalive_timer(sk, elapsed); 333671c48eb8SChristoph Hellwig } 333771c48eb8SChristoph Hellwig 333871c48eb8SChristoph Hellwig return 0; 333971c48eb8SChristoph Hellwig } 334071c48eb8SChristoph Hellwig 334171c48eb8SChristoph Hellwig int tcp_sock_set_keepidle(struct sock *sk, int val) 334271c48eb8SChristoph Hellwig { 334371c48eb8SChristoph Hellwig int err; 334471c48eb8SChristoph Hellwig 334571c48eb8SChristoph Hellwig lock_sock(sk); 3346aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val); 334771c48eb8SChristoph Hellwig release_sock(sk); 334871c48eb8SChristoph Hellwig return err; 334971c48eb8SChristoph Hellwig } 335071c48eb8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepidle); 335171c48eb8SChristoph Hellwig 3352d41ecaacSChristoph Hellwig int tcp_sock_set_keepintvl(struct sock *sk, int val) 3353d41ecaacSChristoph Hellwig { 3354d41ecaacSChristoph Hellwig if (val < 1 || val > 
MAX_TCP_KEEPINTVL) 3355d41ecaacSChristoph Hellwig return -EINVAL; 3356d41ecaacSChristoph Hellwig 33575ecf9d4fSEric Dumazet WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ); 3358d41ecaacSChristoph Hellwig return 0; 3359d41ecaacSChristoph Hellwig } 3360d41ecaacSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepintvl); 3361d41ecaacSChristoph Hellwig 3362480aeb96SChristoph Hellwig int tcp_sock_set_keepcnt(struct sock *sk, int val) 3363480aeb96SChristoph Hellwig { 3364480aeb96SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPCNT) 3365480aeb96SChristoph Hellwig return -EINVAL; 3366480aeb96SChristoph Hellwig 33676e5e1de6SEric Dumazet /* Paired with READ_ONCE() in keepalive_probes() */ 33686e5e1de6SEric Dumazet WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val); 3369480aeb96SChristoph Hellwig return 0; 3370480aeb96SChristoph Hellwig } 3371480aeb96SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepcnt); 3372480aeb96SChristoph Hellwig 3373cb811109SPrankur gupta int tcp_set_window_clamp(struct sock *sk, int val) 3374cb811109SPrankur gupta { 3375cb811109SPrankur gupta struct tcp_sock *tp = tcp_sk(sk); 3376cb811109SPrankur gupta 3377cb811109SPrankur gupta if (!val) { 3378cb811109SPrankur gupta if (sk->sk_state != TCP_CLOSE) 3379cb811109SPrankur gupta return -EINVAL; 3380cb811109SPrankur gupta tp->window_clamp = 0; 3381cb811109SPrankur gupta } else { 338258d3aadeSPaolo Abeni u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp; 338358d3aadeSPaolo Abeni u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 3384cb811109SPrankur gupta SOCK_MIN_RCVBUF / 2 : val; 338558d3aadeSPaolo Abeni 338658d3aadeSPaolo Abeni if (new_window_clamp == old_window_clamp) 338758d3aadeSPaolo Abeni return 0; 338858d3aadeSPaolo Abeni 338958d3aadeSPaolo Abeni tp->window_clamp = new_window_clamp; 339058d3aadeSPaolo Abeni if (new_window_clamp < old_window_clamp) { 339158d3aadeSPaolo Abeni /* need to apply the reserved mem provisioning only 339258d3aadeSPaolo Abeni * when shrinking the window clamp 339358d3aadeSPaolo Abeni */ 339458d3aadeSPaolo Abeni __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp); 339558d3aadeSPaolo Abeni 339658d3aadeSPaolo Abeni } else { 339758d3aadeSPaolo Abeni new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); 339858d3aadeSPaolo Abeni tp->rcv_ssthresh = max(new_rcv_ssthresh, 339958d3aadeSPaolo Abeni tp->rcv_ssthresh); 340058d3aadeSPaolo Abeni } 3401cb811109SPrankur gupta } 3402cb811109SPrankur gupta return 0; 3403cb811109SPrankur gupta } 3404cb811109SPrankur gupta 34051da177e4SLinus Torvalds /* 34061da177e4SLinus Torvalds * Socket option code for TCP. 
34071da177e4SLinus Torvalds */ 34080c751f70SMartin KaFai Lau int do_tcp_setsockopt(struct sock *sk, int level, int optname, 3409d38d2b00SChristoph Hellwig sockptr_t optval, unsigned int optlen) 34101da177e4SLinus Torvalds { 34111da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 3412463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 34131e579caaSNikolay Borisov struct net *net = sock_net(sk); 34141da177e4SLinus Torvalds int val; 34151da177e4SLinus Torvalds int err = 0; 34161da177e4SLinus Torvalds 3417e56fb50fSWilliam Allen Simpson /* These are data/string values, all the others are ints */ 3418e56fb50fSWilliam Allen Simpson switch (optname) { 3419e56fb50fSWilliam Allen Simpson case TCP_CONGESTION: { 34205f8ef48dSStephen Hemminger char name[TCP_CA_NAME_MAX]; 34215f8ef48dSStephen Hemminger 34225f8ef48dSStephen Hemminger if (optlen < 1) 34235f8ef48dSStephen Hemminger return -EINVAL; 34245f8ef48dSStephen Hemminger 3425d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval, 34264fdb78d3SAndrew Morton min_t(long, TCP_CA_NAME_MAX-1, optlen)); 34275f8ef48dSStephen Hemminger if (val < 0) 34285f8ef48dSStephen Hemminger return -EFAULT; 34295f8ef48dSStephen Hemminger name[val] = 0; 34305f8ef48dSStephen Hemminger 3431cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 343284e5a0f2SMartin KaFai Lau err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), 3433cb388e7eSMartin KaFai Lau sockopt_ns_capable(sock_net(sk)->user_ns, 34348d650cdeSEric Dumazet CAP_NET_ADMIN)); 3435cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 34365f8ef48dSStephen Hemminger return err; 34375f8ef48dSStephen Hemminger } 3438734942ccSDave Watson case TCP_ULP: { 3439734942ccSDave Watson char name[TCP_ULP_NAME_MAX]; 3440734942ccSDave Watson 3441734942ccSDave Watson if (optlen < 1) 3442734942ccSDave Watson return -EINVAL; 3443734942ccSDave Watson 3444d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval, 3445734942ccSDave Watson min_t(long, TCP_ULP_NAME_MAX - 1, 3446734942ccSDave Watson optlen)); 3447734942ccSDave Watson if (val < 0) 3448734942ccSDave Watson return -EFAULT; 3449734942ccSDave Watson name[val] = 0; 3450734942ccSDave Watson 3451cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 3452734942ccSDave Watson err = tcp_set_ulp(sk, name); 3453cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 3454734942ccSDave Watson return err; 3455734942ccSDave Watson } 34561fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: { 34570f1ce023SJason Baron __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; 34580f1ce023SJason Baron __u8 *backup_key = NULL; 34591fba70e5SYuchung Cheng 34600f1ce023SJason Baron /* Allow a backup key as well to facilitate key rotation 34610f1ce023SJason Baron * First key is the active one. 
34620f1ce023SJason Baron */ 34630f1ce023SJason Baron if (optlen != TCP_FASTOPEN_KEY_LENGTH && 34640f1ce023SJason Baron optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) 34651fba70e5SYuchung Cheng return -EINVAL; 34661fba70e5SYuchung Cheng 3467d38d2b00SChristoph Hellwig if (copy_from_sockptr(key, optval, optlen)) 34681fba70e5SYuchung Cheng return -EFAULT; 34691fba70e5SYuchung Cheng 34700f1ce023SJason Baron if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) 34710f1ce023SJason Baron backup_key = key + TCP_FASTOPEN_KEY_LENGTH; 34720f1ce023SJason Baron 3473438ac880SArd Biesheuvel return tcp_fastopen_reset_cipher(net, sk, key, backup_key); 34741fba70e5SYuchung Cheng } 3475e56fb50fSWilliam Allen Simpson default: 3476e56fb50fSWilliam Allen Simpson /* fallthru */ 3477e56fb50fSWilliam Allen Simpson break; 3478ccbd6a5aSJoe Perches } 34795f8ef48dSStephen Hemminger 34801da177e4SLinus Torvalds if (optlen < sizeof(int)) 34811da177e4SLinus Torvalds return -EINVAL; 34821da177e4SLinus Torvalds 3483d38d2b00SChristoph Hellwig if (copy_from_sockptr(&val, optval, sizeof(val))) 34841da177e4SLinus Torvalds return -EFAULT; 34851da177e4SLinus Torvalds 3486d44fd4a7SEric Dumazet /* Handle options that can be set without locking the socket. */ 3487d44fd4a7SEric Dumazet switch (optname) { 3488d44fd4a7SEric Dumazet case TCP_SYNCNT: 3489d44fd4a7SEric Dumazet return tcp_sock_set_syncnt(sk, val); 3490d58f2e15SEric Dumazet case TCP_USER_TIMEOUT: 3491d58f2e15SEric Dumazet return tcp_sock_set_user_timeout(sk, val); 34926fd70a6bSEric Dumazet case TCP_KEEPINTVL: 34936fd70a6bSEric Dumazet return tcp_sock_set_keepintvl(sk, val); 349484485080SEric Dumazet case TCP_KEEPCNT: 349584485080SEric Dumazet return tcp_sock_set_keepcnt(sk, val); 3496a81722ddSEric Dumazet case TCP_LINGER2: 3497a81722ddSEric Dumazet if (val < 0) 3498a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, -1); 3499a81722ddSEric Dumazet else if (val > TCP_FIN_TIMEOUT_MAX / HZ) 3500a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX); 3501a81722ddSEric Dumazet else 3502a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, val * HZ); 3503a81722ddSEric Dumazet return 0; 35046e97ba55SEric Dumazet case TCP_DEFER_ACCEPT: 35056e97ba55SEric Dumazet /* Translate value in seconds to number of retransmits */ 35066e97ba55SEric Dumazet WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept, 35076e97ba55SEric Dumazet secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 35086e97ba55SEric Dumazet TCP_RTO_MAX / HZ)); 35096e97ba55SEric Dumazet return 0; 3510d44fd4a7SEric Dumazet } 3511d44fd4a7SEric Dumazet 3512cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 35131da177e4SLinus Torvalds 35141da177e4SLinus Torvalds switch (optname) { 35151da177e4SLinus Torvalds case TCP_MAXSEG: 35161da177e4SLinus Torvalds /* Values greater than interface MTU won't take effect. 
However 35171da177e4SLinus Torvalds * at the point when this call is done we typically don't yet 3518a777f715SRohit Chavan * know which interface is going to be used 3519a777f715SRohit Chavan */ 3520cfc62d87SGao Feng if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { 35211da177e4SLinus Torvalds err = -EINVAL; 35221da177e4SLinus Torvalds break; 35231da177e4SLinus Torvalds } 35241da177e4SLinus Torvalds tp->rx_opt.user_mss = val; 35251da177e4SLinus Torvalds break; 35261da177e4SLinus Torvalds 35271da177e4SLinus Torvalds case TCP_NODELAY: 352812abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, val); 35291da177e4SLinus Torvalds break; 35301da177e4SLinus Torvalds 353136e31b0aSAndreas Petlund case TCP_THIN_LINEAR_TIMEOUTS: 353236e31b0aSAndreas Petlund if (val < 0 || val > 1) 353336e31b0aSAndreas Petlund err = -EINVAL; 353436e31b0aSAndreas Petlund else 353536e31b0aSAndreas Petlund tp->thin_lto = val; 353636e31b0aSAndreas Petlund break; 353736e31b0aSAndreas Petlund 35387e380175SAndreas Petlund case TCP_THIN_DUPACK: 35397e380175SAndreas Petlund if (val < 0 || val > 1) 35407e380175SAndreas Petlund err = -EINVAL; 35417e380175SAndreas Petlund break; 35427e380175SAndreas Petlund 3543ee995283SPavel Emelyanov case TCP_REPAIR: 3544ee995283SPavel Emelyanov if (!tcp_can_repair_sock(sk)) 3545ee995283SPavel Emelyanov err = -EPERM; 354631048d7aSStefan Baranoff else if (val == TCP_REPAIR_ON) { 3547ee995283SPavel Emelyanov tp->repair = 1; 3548ee995283SPavel Emelyanov sk->sk_reuse = SK_FORCE_REUSE; 3549ee995283SPavel Emelyanov tp->repair_queue = TCP_NO_QUEUE; 355031048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF) { 3551ee995283SPavel Emelyanov tp->repair = 0; 3552ee995283SPavel Emelyanov sk->sk_reuse = SK_NO_REUSE; 3553ee995283SPavel Emelyanov tcp_send_window_probe(sk); 355431048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF_NO_WP) { 355531048d7aSStefan Baranoff tp->repair = 0; 355631048d7aSStefan Baranoff sk->sk_reuse = SK_NO_REUSE; 3557ee995283SPavel Emelyanov } else 3558ee995283SPavel Emelyanov err = -EINVAL; 3559ee995283SPavel Emelyanov 3560ee995283SPavel Emelyanov break; 3561ee995283SPavel Emelyanov 3562ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE: 3563ee995283SPavel Emelyanov if (!tp->repair) 3564ee995283SPavel Emelyanov err = -EPERM; 3565bf2acc94SEric Dumazet else if ((unsigned int)val < TCP_QUEUES_NR) 3566ee995283SPavel Emelyanov tp->repair_queue = val; 3567ee995283SPavel Emelyanov else 3568ee995283SPavel Emelyanov err = -EINVAL; 3569ee995283SPavel Emelyanov break; 3570ee995283SPavel Emelyanov 3571ee995283SPavel Emelyanov case TCP_QUEUE_SEQ: 35728811f4a9SEric Dumazet if (sk->sk_state != TCP_CLOSE) { 3573ee995283SPavel Emelyanov err = -EPERM; 35748811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_SEND_QUEUE) { 35758811f4a9SEric Dumazet if (!tcp_rtx_queue_empty(sk)) 35768811f4a9SEric Dumazet err = -EPERM; 35778811f4a9SEric Dumazet else 35780f317464SEric Dumazet WRITE_ONCE(tp->write_seq, val); 35798811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_RECV_QUEUE) { 35808811f4a9SEric Dumazet if (tp->rcv_nxt != tp->copied_seq) { 35818811f4a9SEric Dumazet err = -EPERM; 35828811f4a9SEric Dumazet } else { 3583dba7d9b8SEric Dumazet WRITE_ONCE(tp->rcv_nxt, val); 35846cd6cbf5SEric Dumazet WRITE_ONCE(tp->copied_seq, val); 35856cd6cbf5SEric Dumazet } 35868811f4a9SEric Dumazet } else { 3587ee995283SPavel Emelyanov err = -EINVAL; 35888811f4a9SEric Dumazet } 3589ee995283SPavel Emelyanov break; 3590ee995283SPavel Emelyanov 3591b139ba4eSPavel Emelyanov case TCP_REPAIR_OPTIONS: 
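		/* Remembered TCP options can only be restored while the socket
		 * is in repair mode, and only before any payload has been sent
		 * (see the bytes_sent check below).
		 */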
3592b139ba4eSPavel Emelyanov if (!tp->repair) 3593b139ba4eSPavel Emelyanov err = -EINVAL; 35940c175da7SLu Wei else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) 3595d38d2b00SChristoph Hellwig err = tcp_repair_options_est(sk, optval, optlen); 3596b139ba4eSPavel Emelyanov else 3597b139ba4eSPavel Emelyanov err = -EPERM; 3598b139ba4eSPavel Emelyanov break; 3599b139ba4eSPavel Emelyanov 36001da177e4SLinus Torvalds case TCP_CORK: 3601db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, val); 36021da177e4SLinus Torvalds break; 36031da177e4SLinus Torvalds 36041da177e4SLinus Torvalds case TCP_KEEPIDLE: 3605aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val); 36061da177e4SLinus Torvalds break; 3607cd8ae852SEric Dumazet case TCP_SAVE_SYN: 3608267cf9faSMartin KaFai Lau /* 0: disable, 1: enable, 2: start from ether_header */ 3609267cf9faSMartin KaFai Lau if (val < 0 || val > 2) 3610cd8ae852SEric Dumazet err = -EINVAL; 3611cd8ae852SEric Dumazet else 3612cd8ae852SEric Dumazet tp->save_syn = val; 3613cd8ae852SEric Dumazet break; 3614cd8ae852SEric Dumazet 36151da177e4SLinus Torvalds case TCP_WINDOW_CLAMP: 3616cb811109SPrankur gupta err = tcp_set_window_clamp(sk, val); 36171da177e4SLinus Torvalds break; 36181da177e4SLinus Torvalds 36191da177e4SLinus Torvalds case TCP_QUICKACK: 3620ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val); 36211da177e4SLinus Torvalds break; 36221da177e4SLinus Torvalds 3623faadfabaSDmitry Safonov case TCP_AO_REPAIR: 3624965c00e4SDmitry Safonov if (!tcp_can_repair_sock(sk)) { 3625965c00e4SDmitry Safonov err = -EPERM; 3626965c00e4SDmitry Safonov break; 3627965c00e4SDmitry Safonov } 3628faadfabaSDmitry Safonov err = tcp_ao_set_repair(sk, optval, optlen); 3629faadfabaSDmitry Safonov break; 36304954f17dSDmitry Safonov #ifdef CONFIG_TCP_AO 36314954f17dSDmitry Safonov case TCP_AO_ADD_KEY: 36324954f17dSDmitry Safonov case TCP_AO_DEL_KEY: 36334954f17dSDmitry Safonov case TCP_AO_INFO: { 36344954f17dSDmitry Safonov /* If this is the first TCP-AO setsockopt() on the socket, 3635faadfabaSDmitry Safonov * sk_state has to be LISTEN or CLOSE. Allow TCP_REPAIR 3636faadfabaSDmitry Safonov * in any state. 
36374954f17dSDmitry Safonov */ 3638faadfabaSDmitry Safonov if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) 3639faadfabaSDmitry Safonov goto ao_parse; 3640faadfabaSDmitry Safonov if (rcu_dereference_protected(tcp_sk(sk)->ao_info, 36414954f17dSDmitry Safonov lockdep_sock_is_held(sk))) 3642faadfabaSDmitry Safonov goto ao_parse; 3643faadfabaSDmitry Safonov if (tp->repair) 3644faadfabaSDmitry Safonov goto ao_parse; 36454954f17dSDmitry Safonov err = -EISCONN; 36464954f17dSDmitry Safonov break; 3647faadfabaSDmitry Safonov ao_parse: 3648faadfabaSDmitry Safonov err = tp->af_specific->ao_parse(sk, optname, optval, optlen); 3649faadfabaSDmitry Safonov break; 36504954f17dSDmitry Safonov } 36514954f17dSDmitry Safonov #endif 3652cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 3653cfb6eeb4SYOSHIFUJI Hideaki case TCP_MD5SIG: 36548917a777SIvan Delalande case TCP_MD5SIG_EXT: 3655d38d2b00SChristoph Hellwig err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 3656cfb6eeb4SYOSHIFUJI Hideaki break; 3657cfb6eeb4SYOSHIFUJI Hideaki #endif 36588336886fSJerry Chu case TCP_FASTOPEN: 36598336886fSJerry Chu if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 3660dfea2aa6SChristoph Paasch TCPF_LISTEN))) { 366143713848SHaishuang Yan tcp_fastopen_init_key_once(net); 3662dfea2aa6SChristoph Paasch 36630536fcc0SEric Dumazet fastopen_queue_tune(sk, val); 3664dfea2aa6SChristoph Paasch } else { 36658336886fSJerry Chu err = -EINVAL; 3666dfea2aa6SChristoph Paasch } 36678336886fSJerry Chu break; 366819f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT: 366919f6d3f3SWei Wang if (val > 1 || val < 0) { 367019f6d3f3SWei Wang err = -EINVAL; 36715a542133SKuniyuki Iwashima } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & 36725a542133SKuniyuki Iwashima TFO_CLIENT_ENABLE) { 367319f6d3f3SWei Wang if (sk->sk_state == TCP_CLOSE) 367419f6d3f3SWei Wang tp->fastopen_connect = val; 367519f6d3f3SWei Wang else 367619f6d3f3SWei Wang err = -EINVAL; 367719f6d3f3SWei Wang } else { 367819f6d3f3SWei Wang err = -EOPNOTSUPP; 367919f6d3f3SWei Wang } 368019f6d3f3SWei Wang break; 368171c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE: 368271c02379SChristoph Paasch if (val > 1 || val < 0) 368371c02379SChristoph Paasch err = -EINVAL; 368471c02379SChristoph Paasch else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 368571c02379SChristoph Paasch err = -EINVAL; 368671c02379SChristoph Paasch else 368771c02379SChristoph Paasch tp->fastopen_no_cookie = val; 368871c02379SChristoph Paasch break; 368993be6ce0SAndrey Vagin case TCP_TIMESTAMP: 3690614e8316SEric Dumazet if (!tp->repair) { 369193be6ce0SAndrey Vagin err = -EPERM; 3692614e8316SEric Dumazet break; 3693614e8316SEric Dumazet } 3694614e8316SEric Dumazet /* val is an opaque field, 3695614e8316SEric Dumazet * and the low order bit contains the usec_ts enable bit. 3696614e8316SEric Dumazet * It's a best effort, and we do not care if the user makes an error.
3697614e8316SEric Dumazet */ 3698614e8316SEric Dumazet tp->tcp_usec_ts = val & 1; 3699614e8316SEric Dumazet WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts)); 370093be6ce0SAndrey Vagin break; 3701b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: 3702b1ed4c4fSAndrey Vagin err = tcp_repair_set_window(tp, optval, optlen); 3703b1ed4c4fSAndrey Vagin break; 3704c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT: 37051aeb87bcSEric Dumazet WRITE_ONCE(tp->notsent_lowat, val); 3706c9bee3b7SEric Dumazet sk->sk_write_space(sk); 3707c9bee3b7SEric Dumazet break; 3708b75eba76SSoheil Hassas Yeganeh case TCP_INQ: 3709b75eba76SSoheil Hassas Yeganeh if (val > 1 || val < 0) 3710b75eba76SSoheil Hassas Yeganeh err = -EINVAL; 3711b75eba76SSoheil Hassas Yeganeh else 3712b75eba76SSoheil Hassas Yeganeh tp->recvmsg_inq = val; 3713b75eba76SSoheil Hassas Yeganeh break; 3714a842fe14SEric Dumazet case TCP_TX_DELAY: 3715a842fe14SEric Dumazet if (val) 3716a842fe14SEric Dumazet tcp_enable_tx_delay(); 3717348b81b6SEric Dumazet WRITE_ONCE(tp->tcp_tx_delay, val); 3718a842fe14SEric Dumazet break; 37191da177e4SLinus Torvalds default: 37201da177e4SLinus Torvalds err = -ENOPROTOOPT; 37211da177e4SLinus Torvalds break; 37223ff50b79SStephen Hemminger } 37233ff50b79SStephen Hemminger 3724cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 37251da177e4SLinus Torvalds return err; 37261da177e4SLinus Torvalds } 37271da177e4SLinus Torvalds 3728a7b75c5aSChristoph Hellwig int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 3729b7058842SDavid S. Miller unsigned int optlen) 37303fdadf7dSDmitry Mishin { 3731cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 37323fdadf7dSDmitry Mishin 37333fdadf7dSDmitry Mishin if (level != SOL_TCP) 3734f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 3735f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname, 37363fdadf7dSDmitry Mishin optval, optlen); 3737a7b75c5aSChristoph Hellwig return do_tcp_setsockopt(sk, level, optname, optval, optlen); 37383fdadf7dSDmitry Mishin } 37394bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_setsockopt); 37403fdadf7dSDmitry Mishin 3741efd90174SFrancis Yan static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 3742efd90174SFrancis Yan struct tcp_info *info) 3743efd90174SFrancis Yan { 3744efd90174SFrancis Yan u64 stats[__TCP_CHRONO_MAX], total = 0; 3745efd90174SFrancis Yan enum tcp_chrono i; 3746efd90174SFrancis Yan 3747efd90174SFrancis Yan for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { 3748efd90174SFrancis Yan stats[i] = tp->chrono_stat[i - 1]; 3749efd90174SFrancis Yan if (i == tp->chrono_type) 3750628174ccSEric Dumazet stats[i] += tcp_jiffies32 - tp->chrono_start; 3751efd90174SFrancis Yan stats[i] *= USEC_PER_SEC / HZ; 3752efd90174SFrancis Yan total += stats[i]; 3753efd90174SFrancis Yan } 3754efd90174SFrancis Yan 3755efd90174SFrancis Yan info->tcpi_busy_time = total; 3756efd90174SFrancis Yan info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; 3757efd90174SFrancis Yan info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; 3758efd90174SFrancis Yan } 3759efd90174SFrancis Yan 37601da177e4SLinus Torvalds /* Return information about state of tcp endpoint in API format. 
*/ 37610df48c26SEric Dumazet void tcp_get_info(struct sock *sk, struct tcp_info *info) 37621da177e4SLinus Torvalds { 376335ac838aSCraig Gallek const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 3764463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 376576a9ebe8SEric Dumazet unsigned long rate; 37660263598cSWei Wang u32 now; 3767ff5d7497SEric Dumazet u64 rate64; 376867db3e4bSEric Dumazet bool slow; 37691da177e4SLinus Torvalds 37701da177e4SLinus Torvalds memset(info, 0, sizeof(*info)); 377135ac838aSCraig Gallek if (sk->sk_type != SOCK_STREAM) 377235ac838aSCraig Gallek return; 37731da177e4SLinus Torvalds 3774986ffdfdSYafang Shao info->tcpi_state = inet_sk_state_load(sk); 377500fd38d9SEric Dumazet 3776ccbf3bfaSEric Dumazet /* Report meaningful fields for all TCP states, including listeners */ 3777ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_pacing_rate); 377876a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL; 3779f522a5fcSEric Dumazet info->tcpi_pacing_rate = rate64; 3780ccbf3bfaSEric Dumazet 3781ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_max_pacing_rate); 378276a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL; 3783f522a5fcSEric Dumazet info->tcpi_max_pacing_rate = rate64; 3784ccbf3bfaSEric Dumazet 3785ccbf3bfaSEric Dumazet info->tcpi_reordering = tp->reordering; 378640570375SEric Dumazet info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); 3787ccbf3bfaSEric Dumazet 3788ccbf3bfaSEric Dumazet if (info->tcpi_state == TCP_LISTEN) { 3789ccbf3bfaSEric Dumazet /* listeners aliased fields : 3790ccbf3bfaSEric Dumazet * tcpi_unacked -> Number of children ready for accept() 3791ccbf3bfaSEric Dumazet * tcpi_sacked -> max backlog 3792ccbf3bfaSEric Dumazet */ 3793288efe86SEric Dumazet info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); 3794099ecf59SEric Dumazet info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); 3795ccbf3bfaSEric Dumazet return; 3796ccbf3bfaSEric Dumazet } 3797b369e7fdSEric Dumazet 3798b369e7fdSEric Dumazet slow = lock_sock_fast(sk); 3799b369e7fdSEric Dumazet 38006687e988SArnaldo Carvalho de Melo info->tcpi_ca_state = icsk->icsk_ca_state; 3801463c84b9SArnaldo Carvalho de Melo info->tcpi_retransmits = icsk->icsk_retransmits; 38026687e988SArnaldo Carvalho de Melo info->tcpi_probes = icsk->icsk_probes_out; 3803463c84b9SArnaldo Carvalho de Melo info->tcpi_backoff = icsk->icsk_backoff; 38041da177e4SLinus Torvalds 38051da177e4SLinus Torvalds if (tp->rx_opt.tstamp_ok) 38061da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 3807e60402d0SIlpo Järvinen if (tcp_is_sack(tp)) 38081da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_SACK; 38091da177e4SLinus Torvalds if (tp->rx_opt.wscale_ok) { 38101da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_WSCALE; 38111da177e4SLinus Torvalds info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 38121da177e4SLinus Torvalds info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 38131da177e4SLinus Torvalds } 38141da177e4SLinus Torvalds 38151da177e4SLinus Torvalds if (tp->ecn_flags & TCP_ECN_OK) 38161da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_ECN; 3817b5c5693bSEric Dumazet if (tp->ecn_flags & TCP_ECN_SEEN) 3818b5c5693bSEric Dumazet info->tcpi_options |= TCPI_OPT_ECN_SEEN; 38196f73601eSYuchung Cheng if (tp->syn_data_acked) 38206f73601eSYuchung Cheng info->tcpi_options |= TCPI_OPT_SYN_DATA; 3821a77a0f5cSEric Dumazet if (tp->tcp_usec_ts) 3822a77a0f5cSEric Dumazet info->tcpi_options |= TCPI_OPT_USEC_TS; 38231da177e4SLinus Torvalds 3824463c84b9SArnaldo Carvalho de Melo 
info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 382595b9a87cSDavid Morley info->tcpi_ato = jiffies_to_usecs(min_t(u32, icsk->icsk_ack.ato, 3826bbf80d71SEric Dumazet tcp_delack_max(sk))); 3827c1b4a7e6SDavid S. Miller info->tcpi_snd_mss = tp->mss_cache; 3828463c84b9SArnaldo Carvalho de Melo info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 38291da177e4SLinus Torvalds 38301da177e4SLinus Torvalds info->tcpi_unacked = tp->packets_out; 38311da177e4SLinus Torvalds info->tcpi_sacked = tp->sacked_out; 3832ccbf3bfaSEric Dumazet 38331da177e4SLinus Torvalds info->tcpi_lost = tp->lost_out; 38341da177e4SLinus Torvalds info->tcpi_retrans = tp->retrans_out; 38351da177e4SLinus Torvalds 3836d635fbe2SEric Dumazet now = tcp_jiffies32; 38371da177e4SLinus Torvalds info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 3838463c84b9SArnaldo Carvalho de Melo info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 38391da177e4SLinus Torvalds info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 38401da177e4SLinus Torvalds 3841d83d8461SArnaldo Carvalho de Melo info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 38421da177e4SLinus Torvalds info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 3843740b0f18SEric Dumazet info->tcpi_rtt = tp->srtt_us >> 3; 3844740b0f18SEric Dumazet info->tcpi_rttvar = tp->mdev_us >> 2; 38451da177e4SLinus Torvalds info->tcpi_snd_ssthresh = tp->snd_ssthresh; 38461da177e4SLinus Torvalds info->tcpi_advmss = tp->advmss; 38471da177e4SLinus Torvalds 3848645f4c6fSEric Dumazet info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; 38491da177e4SLinus Torvalds info->tcpi_rcv_space = tp->rcvq_space.space; 38501da177e4SLinus Torvalds 38511da177e4SLinus Torvalds info->tcpi_total_retrans = tp->total_retrans; 3852977cb0ecSEric Dumazet 3853f522a5fcSEric Dumazet info->tcpi_bytes_acked = tp->bytes_acked; 3854f522a5fcSEric Dumazet info->tcpi_bytes_received = tp->bytes_received; 385567db3e4bSEric Dumazet info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); 3856efd90174SFrancis Yan tcp_get_info_chrono_stats(tp, info); 385767db3e4bSEric Dumazet 38582efd055cSMarcelo Ricardo Leitner info->tcpi_segs_out = tp->segs_out; 38590307a0b7SEric Dumazet 38600307a0b7SEric Dumazet /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ 38610307a0b7SEric Dumazet info->tcpi_segs_in = READ_ONCE(tp->segs_in); 38620307a0b7SEric Dumazet info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); 3863cd9b2660SEric Dumazet 3864cd9b2660SEric Dumazet info->tcpi_min_rtt = tcp_min_rtt(tp); 3865a44d6eacSMartin KaFai Lau info->tcpi_data_segs_out = tp->data_segs_out; 3866eb8329e0SYuchung Cheng 3867eb8329e0SYuchung Cheng info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 
1 : 0; 38680263598cSWei Wang rate64 = tcp_compute_delivery_rate(tp); 38690263598cSWei Wang if (rate64) 3870f522a5fcSEric Dumazet info->tcpi_delivery_rate = rate64; 3871feb5f2ecSYuchung Cheng info->tcpi_delivered = tp->delivered; 3872feb5f2ecSYuchung Cheng info->tcpi_delivered_ce = tp->delivered_ce; 3873ba113c3aSWei Wang info->tcpi_bytes_sent = tp->bytes_sent; 3874fb31c9b9SWei Wang info->tcpi_bytes_retrans = tp->bytes_retrans; 38757e10b655SWei Wang info->tcpi_dsack_dups = tp->dsack_dups; 38767ec65372SWei Wang info->tcpi_reord_seen = tp->reord_seen; 3877f9af2dbbSThomas Higdon info->tcpi_rcv_ooopack = tp->rcv_ooopack; 38788f7baad7SThomas Higdon info->tcpi_snd_wnd = tp->snd_wnd; 387971fc7047SMubashir Adnan Qureshi info->tcpi_rcv_wnd = tp->rcv_wnd; 388071fc7047SMubashir Adnan Qureshi info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash; 388148027478SJason Baron info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; 38823868ab0fSAananth V 38833868ab0fSAananth V info->tcpi_total_rto = tp->total_rto; 38843868ab0fSAananth V info->tcpi_total_rto_recoveries = tp->total_rto_recoveries; 38853868ab0fSAananth V info->tcpi_total_rto_time = tp->total_rto_time; 38862a7c8d29SEric Dumazet if (tp->rto_stamp) 38872a7c8d29SEric Dumazet info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp; 38883868ab0fSAananth V 3889b369e7fdSEric Dumazet unlock_sock_fast(sk, slow); 38901da177e4SLinus Torvalds } 38911da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info); 38921da177e4SLinus Torvalds 3893984988aaSWei Wang static size_t tcp_opt_stats_get_size(void) 3894984988aaSWei Wang { 3895984988aaSWei Wang return 3896984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ 3897984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ 3898984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ 3899984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ 3900984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ 3901984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ 3902984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ 3903984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ 3904984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ 3905984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ 3906984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ 3907984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ 3908984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ 3909984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ 3910984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ 3911984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ 3912984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ 3913ba113c3aSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ 3914fb31c9b9SWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ 39157e10b655SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ 39167ec65372SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ 3917e8bd8fcaSYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ 391832efcc06SAbdul Kabbani nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ 3919e08ab0b3SYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ 
392048040793SYousuk Seung nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */ 3921e7ed11eeSYousuk Seung nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */ 392229c1c446SMubashir Adnan Qureshi nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */ 3923984988aaSWei Wang 0; 3924984988aaSWei Wang } 3925984988aaSWei Wang 3926e7ed11eeSYousuk Seung /* Returns TTL or hop limit of an incoming packet from skb. */ 3927e7ed11eeSYousuk Seung static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb) 3928e7ed11eeSYousuk Seung { 3929e7ed11eeSYousuk Seung if (skb->protocol == htons(ETH_P_IP)) 3930e7ed11eeSYousuk Seung return ip_hdr(skb)->ttl; 3931e7ed11eeSYousuk Seung else if (skb->protocol == htons(ETH_P_IPV6)) 3932e7ed11eeSYousuk Seung return ipv6_hdr(skb)->hop_limit; 3933e7ed11eeSYousuk Seung else 3934e7ed11eeSYousuk Seung return 0; 3935e7ed11eeSYousuk Seung } 3936e7ed11eeSYousuk Seung 393748040793SYousuk Seung struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, 3938e7ed11eeSYousuk Seung const struct sk_buff *orig_skb, 3939e7ed11eeSYousuk Seung const struct sk_buff *ack_skb) 39401c885808SFrancis Yan { 39411c885808SFrancis Yan const struct tcp_sock *tp = tcp_sk(sk); 39421c885808SFrancis Yan struct sk_buff *stats; 39431c885808SFrancis Yan struct tcp_info info; 394476a9ebe8SEric Dumazet unsigned long rate; 3945bb7c19f9SWei Wang u64 rate64; 39461c885808SFrancis Yan 3947984988aaSWei Wang stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); 39481c885808SFrancis Yan if (!stats) 39491c885808SFrancis Yan return NULL; 39501c885808SFrancis Yan 39511c885808SFrancis Yan tcp_get_info_chrono_stats(tp, &info); 39521c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_BUSY, 39531c885808SFrancis Yan info.tcpi_busy_time, TCP_NLA_PAD); 39541c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, 39551c885808SFrancis Yan info.tcpi_rwnd_limited, TCP_NLA_PAD); 39561c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, 39571c885808SFrancis Yan info.tcpi_sndbuf_limited, TCP_NLA_PAD); 39587e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, 39597e98102fSYuchung Cheng tp->data_segs_out, TCP_NLA_PAD); 39607e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, 39617e98102fSYuchung Cheng tp->total_retrans, TCP_NLA_PAD); 3962bb7c19f9SWei Wang 3963bb7c19f9SWei Wang rate = READ_ONCE(sk->sk_pacing_rate); 396476a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 3965bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); 3966bb7c19f9SWei Wang 3967bb7c19f9SWei Wang rate64 = tcp_compute_delivery_rate(tp); 3968bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); 3969bb7c19f9SWei Wang 397040570375SEric Dumazet nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); 3971bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); 3972bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); 3973bb7c19f9SWei Wang 3974bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); 3975bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); 39767156d194SYousuk Seung nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); 3977feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); 3978feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); 397987ecc95dSPriyaranjan Jha 398087ecc95dSPriyaranjan Jha nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); 3981be631892SPriyaranjan Jha nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); 3982feb5f2ecSYuchung Cheng 3983ba113c3aSWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, 3984ba113c3aSWei Wang TCP_NLA_PAD); 3985fb31c9b9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, 3986fb31c9b9SWei Wang TCP_NLA_PAD); 39877e10b655SWei Wang nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); 39887ec65372SWei Wang nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); 3989e8bd8fcaSYousuk Seung nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); 399032efcc06SAbdul Kabbani nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); 3991e08ab0b3SYousuk Seung nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, 3992e08ab0b3SYousuk Seung max_t(int, 0, tp->write_seq - tp->snd_nxt)); 399348040793SYousuk Seung nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, 399448040793SYousuk Seung TCP_NLA_PAD); 3995e7ed11eeSYousuk Seung if (ack_skb) 3996e7ed11eeSYousuk Seung nla_put_u8(stats, TCP_NLA_TTL, 3997e7ed11eeSYousuk Seung tcp_skb_ttl_or_hop_limit(ack_skb)); 3998ba113c3aSWei Wang 399929c1c446SMubashir Adnan Qureshi nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash); 40001c885808SFrancis Yan return stats; 40011c885808SFrancis Yan } 40021c885808SFrancis Yan 4003273b7f0fSMartin KaFai Lau int do_tcp_getsockopt(struct sock *sk, int level, 400434704ef0SMartin KaFai Lau int optname, sockptr_t optval, sockptr_t optlen) 40051da177e4SLinus Torvalds { 4006295f7324SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 40071da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 40086fa25166SNikolay Borisov struct net *net = sock_net(sk); 40091da177e4SLinus Torvalds int val, len; 40101da177e4SLinus Torvalds 401134704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 40121da177e4SLinus Torvalds return -EFAULT; 40131da177e4SLinus Torvalds 40141da177e4SLinus Torvalds if (len < 0) 40151da177e4SLinus Torvalds return -EINVAL; 40161da177e4SLinus Torvalds 4017716edc97SGavrilov Ilia len = min_t(unsigned int, len, sizeof(int)); 4018716edc97SGavrilov Ilia 40191da177e4SLinus Torvalds switch (optname) { 40201da177e4SLinus Torvalds case TCP_MAXSEG: 4021c1b4a7e6SDavid S. 
Miller val = tp->mss_cache; 402234dfde4aSCambda Zhu if (tp->rx_opt.user_mss && 402334dfde4aSCambda Zhu ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 40241da177e4SLinus Torvalds val = tp->rx_opt.user_mss; 40255e6a3ce6SPavel Emelyanov if (tp->repair) 40265e6a3ce6SPavel Emelyanov val = tp->rx_opt.mss_clamp; 40271da177e4SLinus Torvalds break; 40281da177e4SLinus Torvalds case TCP_NODELAY: 40291da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_OFF); 40301da177e4SLinus Torvalds break; 40311da177e4SLinus Torvalds case TCP_CORK: 40321da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_CORK); 40331da177e4SLinus Torvalds break; 40341da177e4SLinus Torvalds case TCP_KEEPIDLE: 4035df19a626SEric Dumazet val = keepalive_time_when(tp) / HZ; 40361da177e4SLinus Torvalds break; 40371da177e4SLinus Torvalds case TCP_KEEPINTVL: 4038df19a626SEric Dumazet val = keepalive_intvl_when(tp) / HZ; 40391da177e4SLinus Torvalds break; 40401da177e4SLinus Torvalds case TCP_KEEPCNT: 4041df19a626SEric Dumazet val = keepalive_probes(tp); 40421da177e4SLinus Torvalds break; 40431da177e4SLinus Torvalds case TCP_SYNCNT: 40443a037f0fSEric Dumazet val = READ_ONCE(icsk->icsk_syn_retries) ? : 404520a3b1c0SKuniyuki Iwashima READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); 40461da177e4SLinus Torvalds break; 40471da177e4SLinus Torvalds case TCP_LINGER2: 40489df5335cSEric Dumazet val = READ_ONCE(tp->linger2); 40491da177e4SLinus Torvalds if (val >= 0) 405039e24435SKuniyuki Iwashima val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; 40511da177e4SLinus Torvalds break; 40521da177e4SLinus Torvalds case TCP_DEFER_ACCEPT: 4053ae488c74SEric Dumazet val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept); 4054ae488c74SEric Dumazet val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ, 4055ae488c74SEric Dumazet TCP_RTO_MAX / HZ); 40561da177e4SLinus Torvalds break; 40571da177e4SLinus Torvalds case TCP_WINDOW_CLAMP: 40581da177e4SLinus Torvalds val = tp->window_clamp; 40591da177e4SLinus Torvalds break; 40601da177e4SLinus Torvalds case TCP_INFO: { 40611da177e4SLinus Torvalds struct tcp_info info; 40621da177e4SLinus Torvalds 406334704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 40641da177e4SLinus Torvalds return -EFAULT; 40651da177e4SLinus Torvalds 40661da177e4SLinus Torvalds tcp_get_info(sk, &info); 40671da177e4SLinus Torvalds 40681da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(info)); 406934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 40701da177e4SLinus Torvalds return -EFAULT; 407134704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len)) 40721da177e4SLinus Torvalds return -EFAULT; 40731da177e4SLinus Torvalds return 0; 40741da177e4SLinus Torvalds } 40756e9250f5SEric Dumazet case TCP_CC_INFO: { 40766e9250f5SEric Dumazet const struct tcp_congestion_ops *ca_ops; 40776e9250f5SEric Dumazet union tcp_cc_info info; 40786e9250f5SEric Dumazet size_t sz = 0; 40796e9250f5SEric Dumazet int attr; 40806e9250f5SEric Dumazet 408134704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 40826e9250f5SEric Dumazet return -EFAULT; 40836e9250f5SEric Dumazet 40846e9250f5SEric Dumazet ca_ops = icsk->icsk_ca_ops; 40856e9250f5SEric Dumazet if (ca_ops && ca_ops->get_info) 40866e9250f5SEric Dumazet sz = ca_ops->get_info(sk, ~0U, &attr, &info); 40876e9250f5SEric Dumazet 40886e9250f5SEric Dumazet len = min_t(unsigned int, len, sz); 408934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 40906e9250f5SEric Dumazet return -EFAULT; 
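		/* Copy out only as much congestion-control state as the module
		 * produced and the caller's buffer can hold (len was clamped
		 * above).
		 */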
409134704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len)) 40926e9250f5SEric Dumazet return -EFAULT; 40936e9250f5SEric Dumazet return 0; 40946e9250f5SEric Dumazet } 40951da177e4SLinus Torvalds case TCP_QUICKACK: 409631954cd8SWei Wang val = !inet_csk_in_pingpong_mode(sk); 40971da177e4SLinus Torvalds break; 40985f8ef48dSStephen Hemminger 40995f8ef48dSStephen Hemminger case TCP_CONGESTION: 410034704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 41015f8ef48dSStephen Hemminger return -EFAULT; 41025f8ef48dSStephen Hemminger len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 410334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 41045f8ef48dSStephen Hemminger return -EFAULT; 410534704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) 41065f8ef48dSStephen Hemminger return -EFAULT; 41075f8ef48dSStephen Hemminger return 0; 4108e56fb50fSWilliam Allen Simpson 4109734942ccSDave Watson case TCP_ULP: 411034704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 4111734942ccSDave Watson return -EFAULT; 4112734942ccSDave Watson len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); 4113d97af30fSDave Watson if (!icsk->icsk_ulp_ops) { 411434704ef0SMartin KaFai Lau len = 0; 411534704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4116d97af30fSDave Watson return -EFAULT; 4117d97af30fSDave Watson return 0; 4118d97af30fSDave Watson } 411934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4120734942ccSDave Watson return -EFAULT; 412134704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) 4122734942ccSDave Watson return -EFAULT; 4123734942ccSDave Watson return 0; 4124734942ccSDave Watson 41251fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: { 4126f19008e6SJason Baron u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; 4127f19008e6SJason Baron unsigned int key_len; 41281fba70e5SYuchung Cheng 412934704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 41301fba70e5SYuchung Cheng return -EFAULT; 41311fba70e5SYuchung Cheng 4132f19008e6SJason Baron key_len = tcp_fastopen_get_cipher(net, icsk, key) * 41330f1ce023SJason Baron TCP_FASTOPEN_KEY_LENGTH; 41340f1ce023SJason Baron len = min_t(unsigned int, len, key_len); 413534704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 41361fba70e5SYuchung Cheng return -EFAULT; 413734704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, key, len)) 41381fba70e5SYuchung Cheng return -EFAULT; 41391fba70e5SYuchung Cheng return 0; 41401fba70e5SYuchung Cheng } 41413c0fef0bSJosh Hunt case TCP_THIN_LINEAR_TIMEOUTS: 41423c0fef0bSJosh Hunt val = tp->thin_lto; 41433c0fef0bSJosh Hunt break; 41444a7f6009SYuchung Cheng 41453c0fef0bSJosh Hunt case TCP_THIN_DUPACK: 41464a7f6009SYuchung Cheng val = 0; 41473c0fef0bSJosh Hunt break; 4148dca43c75SJerry Chu 4149ee995283SPavel Emelyanov case TCP_REPAIR: 4150ee995283SPavel Emelyanov val = tp->repair; 4151ee995283SPavel Emelyanov break; 4152ee995283SPavel Emelyanov 4153ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE: 4154ee995283SPavel Emelyanov if (tp->repair) 4155ee995283SPavel Emelyanov val = tp->repair_queue; 4156ee995283SPavel Emelyanov else 4157ee995283SPavel Emelyanov return -EINVAL; 4158ee995283SPavel Emelyanov break; 4159ee995283SPavel Emelyanov 4160b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: { 4161b1ed4c4fSAndrey Vagin struct tcp_repair_window opt; 4162b1ed4c4fSAndrey Vagin 416334704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, 
sizeof(int))) 4164b1ed4c4fSAndrey Vagin return -EFAULT; 4165b1ed4c4fSAndrey Vagin 4166b1ed4c4fSAndrey Vagin if (len != sizeof(opt)) 4167b1ed4c4fSAndrey Vagin return -EINVAL; 4168b1ed4c4fSAndrey Vagin 4169b1ed4c4fSAndrey Vagin if (!tp->repair) 4170b1ed4c4fSAndrey Vagin return -EPERM; 4171b1ed4c4fSAndrey Vagin 4172b1ed4c4fSAndrey Vagin opt.snd_wl1 = tp->snd_wl1; 4173b1ed4c4fSAndrey Vagin opt.snd_wnd = tp->snd_wnd; 4174b1ed4c4fSAndrey Vagin opt.max_window = tp->max_window; 4175b1ed4c4fSAndrey Vagin opt.rcv_wnd = tp->rcv_wnd; 4176b1ed4c4fSAndrey Vagin opt.rcv_wup = tp->rcv_wup; 4177b1ed4c4fSAndrey Vagin 417834704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &opt, len)) 4179b1ed4c4fSAndrey Vagin return -EFAULT; 4180b1ed4c4fSAndrey Vagin return 0; 4181b1ed4c4fSAndrey Vagin } 4182ee995283SPavel Emelyanov case TCP_QUEUE_SEQ: 4183ee995283SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE) 4184ee995283SPavel Emelyanov val = tp->write_seq; 4185ee995283SPavel Emelyanov else if (tp->repair_queue == TCP_RECV_QUEUE) 4186ee995283SPavel Emelyanov val = tp->rcv_nxt; 4187ee995283SPavel Emelyanov else 4188ee995283SPavel Emelyanov return -EINVAL; 4189ee995283SPavel Emelyanov break; 4190ee995283SPavel Emelyanov 4191dca43c75SJerry Chu case TCP_USER_TIMEOUT: 419226023e91SEric Dumazet val = READ_ONCE(icsk->icsk_user_timeout); 4193dca43c75SJerry Chu break; 41941536e285SKenjiro Nakayama 41951536e285SKenjiro Nakayama case TCP_FASTOPEN: 419670f360ddSEric Dumazet val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen); 41971536e285SKenjiro Nakayama break; 41981536e285SKenjiro Nakayama 419919f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT: 420019f6d3f3SWei Wang val = tp->fastopen_connect; 420119f6d3f3SWei Wang break; 420219f6d3f3SWei Wang 420371c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE: 420471c02379SChristoph Paasch val = tp->fastopen_no_cookie; 420571c02379SChristoph Paasch break; 420671c02379SChristoph Paasch 4207a842fe14SEric Dumazet case TCP_TX_DELAY: 4208348b81b6SEric Dumazet val = READ_ONCE(tp->tcp_tx_delay); 4209a842fe14SEric Dumazet break; 4210a842fe14SEric Dumazet 421193be6ce0SAndrey Vagin case TCP_TIMESTAMP: 4212614e8316SEric Dumazet val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset); 4213614e8316SEric Dumazet if (tp->tcp_usec_ts) 4214614e8316SEric Dumazet val |= 1; 4215614e8316SEric Dumazet else 4216614e8316SEric Dumazet val &= ~1; 421793be6ce0SAndrey Vagin break; 4218c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT: 42191aeb87bcSEric Dumazet val = READ_ONCE(tp->notsent_lowat); 4220c9bee3b7SEric Dumazet break; 4221b75eba76SSoheil Hassas Yeganeh case TCP_INQ: 4222b75eba76SSoheil Hassas Yeganeh val = tp->recvmsg_inq; 4223b75eba76SSoheil Hassas Yeganeh break; 4224cd8ae852SEric Dumazet case TCP_SAVE_SYN: 4225cd8ae852SEric Dumazet val = tp->save_syn; 4226cd8ae852SEric Dumazet break; 4227cd8ae852SEric Dumazet case TCP_SAVED_SYN: { 422834704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 4229cd8ae852SEric Dumazet return -EFAULT; 4230cd8ae852SEric Dumazet 4231d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk); 4232cd8ae852SEric Dumazet if (tp->saved_syn) { 423370a217f1SMartin KaFai Lau if (len < tcp_saved_syn_len(tp->saved_syn)) { 423434704ef0SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn); 423534704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4236d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4237aea0929eSEric B Munson return -EFAULT; 4238aea0929eSEric B Munson } 4239d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 
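				/* Short buffer: the required length was reported
				 * through optlen above; the saved SYN is kept so
				 * the caller can retry with a larger buffer.
				 */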
4240aea0929eSEric B Munson return -EINVAL; 4241aea0929eSEric B Munson } 424270a217f1SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn); 424334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4244d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4245cd8ae852SEric Dumazet return -EFAULT; 4246cd8ae852SEric Dumazet } 424734704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { 4248d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4249cd8ae852SEric Dumazet return -EFAULT; 4250cd8ae852SEric Dumazet } 4251cd8ae852SEric Dumazet tcp_saved_syn_free(tp); 4252d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4253cd8ae852SEric Dumazet } else { 4254d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4255cd8ae852SEric Dumazet len = 0; 425634704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4257cd8ae852SEric Dumazet return -EFAULT; 4258cd8ae852SEric Dumazet } 4259cd8ae852SEric Dumazet return 0; 4260cd8ae852SEric Dumazet } 426105255b82SEric Dumazet #ifdef CONFIG_MMU 426205255b82SEric Dumazet case TCP_ZEROCOPY_RECEIVE: { 42637eeba170SArjun Roy struct scm_timestamping_internal tss; 4264e0fecb28SArjun Roy struct tcp_zerocopy_receive zc = {}; 426505255b82SEric Dumazet int err; 426605255b82SEric Dumazet 426734704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 426805255b82SEric Dumazet return -EFAULT; 42692107d45fSArjun Roy if (len < 0 || 42702107d45fSArjun Roy len < offsetofend(struct tcp_zerocopy_receive, length)) 427105255b82SEric Dumazet return -EINVAL; 42723c5a2fd0SArjun Roy if (unlikely(len > sizeof(zc))) { 427334704ef0SMartin KaFai Lau err = check_zeroed_sockptr(optval, sizeof(zc), 42743c5a2fd0SArjun Roy len - sizeof(zc)); 42753c5a2fd0SArjun Roy if (err < 1) 42763c5a2fd0SArjun Roy return err == 0 ? 
-EINVAL : err; 4277c8856c05SArjun Roy len = sizeof(zc); 427834704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 42790b7f41f6SArjun Roy return -EFAULT; 42800b7f41f6SArjun Roy } 428134704ef0SMartin KaFai Lau if (copy_from_sockptr(&zc, optval, len)) 428205255b82SEric Dumazet return -EFAULT; 42833c5a2fd0SArjun Roy if (zc.reserved) 42843c5a2fd0SArjun Roy return -EINVAL; 42853c5a2fd0SArjun Roy if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) 42863c5a2fd0SArjun Roy return -EINVAL; 4287d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk); 42887eeba170SArjun Roy err = tcp_zerocopy_receive(sk, &zc, &tss); 42899cacf81fSStanislav Fomichev err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, 42909cacf81fSStanislav Fomichev &zc, &len, err); 4291d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 42927eeba170SArjun Roy if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) 42937eeba170SArjun Roy goto zerocopy_rcv_cmsg; 4294c8856c05SArjun Roy switch (len) { 42957eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_flags): 42967eeba170SArjun Roy goto zerocopy_rcv_cmsg; 42977eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_controllen): 42987eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_control): 42997eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, flags): 43007eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_len): 43017eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_address): 430233946518SArjun Roy case offsetofend(struct tcp_zerocopy_receive, err): 430333946518SArjun Roy goto zerocopy_rcv_sk_err; 4304c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, inq): 4305c8856c05SArjun Roy goto zerocopy_rcv_inq; 4306c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, length): 4307c8856c05SArjun Roy default: 4308c8856c05SArjun Roy goto zerocopy_rcv_out; 4309c8856c05SArjun Roy } 43107eeba170SArjun Roy zerocopy_rcv_cmsg: 43117eeba170SArjun Roy if (zc.msg_flags & TCP_CMSG_TS) 43127eeba170SArjun Roy tcp_zc_finalize_rx_tstamp(sk, &zc, &tss); 43137eeba170SArjun Roy else 43147eeba170SArjun Roy zc.msg_flags = 0; 431533946518SArjun Roy zerocopy_rcv_sk_err: 431633946518SArjun Roy if (!err) 431733946518SArjun Roy zc.err = sock_error(sk); 4318c8856c05SArjun Roy zerocopy_rcv_inq: 4319c8856c05SArjun Roy zc.inq = tcp_inq_hint(sk); 4320c8856c05SArjun Roy zerocopy_rcv_out: 432134704ef0SMartin KaFai Lau if (!err && copy_to_sockptr(optval, &zc, len)) 432205255b82SEric Dumazet err = -EFAULT; 432305255b82SEric Dumazet return err; 432405255b82SEric Dumazet } 432505255b82SEric Dumazet #endif 4326faadfabaSDmitry Safonov case TCP_AO_REPAIR: 4327965c00e4SDmitry Safonov if (!tcp_can_repair_sock(sk)) 4328965c00e4SDmitry Safonov return -EPERM; 4329faadfabaSDmitry Safonov return tcp_ao_get_repair(sk, optval, optlen); 4330ef84703aSDmitry Safonov case TCP_AO_GET_KEYS: 4331ef84703aSDmitry Safonov case TCP_AO_INFO: { 4332ef84703aSDmitry Safonov int err; 4333ef84703aSDmitry Safonov 4334ef84703aSDmitry Safonov sockopt_lock_sock(sk); 4335ef84703aSDmitry Safonov if (optname == TCP_AO_GET_KEYS) 4336ef84703aSDmitry Safonov err = tcp_ao_get_mkts(sk, optval, optlen); 4337ef84703aSDmitry Safonov else 4338ef84703aSDmitry Safonov err = tcp_ao_get_sock_info(sk, optval, optlen); 4339ef84703aSDmitry Safonov sockopt_release_sock(sk); 4340ef84703aSDmitry Safonov 4341ef84703aSDmitry Safonov return err; 4342ef84703aSDmitry Safonov } 43431da177e4SLinus Torvalds default: 43441da177e4SLinus 
Torvalds return -ENOPROTOOPT; 43453ff50b79SStephen Hemminger } 43461da177e4SLinus Torvalds 434734704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 43481da177e4SLinus Torvalds return -EFAULT; 434934704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &val, len)) 43501da177e4SLinus Torvalds return -EFAULT; 43511da177e4SLinus Torvalds return 0; 43521da177e4SLinus Torvalds } 43531da177e4SLinus Torvalds 43549cacf81fSStanislav Fomichev bool tcp_bpf_bypass_getsockopt(int level, int optname) 43559cacf81fSStanislav Fomichev { 43569cacf81fSStanislav Fomichev /* TCP do_tcp_getsockopt has optimized getsockopt implementation 43579cacf81fSStanislav Fomichev * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE. 43589cacf81fSStanislav Fomichev */ 43599cacf81fSStanislav Fomichev if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE) 43609cacf81fSStanislav Fomichev return true; 43619cacf81fSStanislav Fomichev 43629cacf81fSStanislav Fomichev return false; 43639cacf81fSStanislav Fomichev } 43649cacf81fSStanislav Fomichev EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt); 43659cacf81fSStanislav Fomichev 43663fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 43673fdadf7dSDmitry Mishin int __user *optlen) 43683fdadf7dSDmitry Mishin { 43693fdadf7dSDmitry Mishin struct inet_connection_sock *icsk = inet_csk(sk); 43703fdadf7dSDmitry Mishin 43713fdadf7dSDmitry Mishin if (level != SOL_TCP) 4372f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 4373f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname, 43743fdadf7dSDmitry Mishin optval, optlen); 437534704ef0SMartin KaFai Lau return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), 437634704ef0SMartin KaFai Lau USER_SOCKPTR(optlen)); 43773fdadf7dSDmitry Mishin } 43784bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_getsockopt); 43793fdadf7dSDmitry Mishin 4380cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 43818c73b263SDmitry Safonov int tcp_md5_sigpool_id = -1; 43828c73b263SDmitry Safonov EXPORT_SYMBOL_GPL(tcp_md5_sigpool_id); 4383cfb6eeb4SYOSHIFUJI Hideaki 43848c73b263SDmitry Safonov int tcp_md5_alloc_sigpool(void) 4385cfb6eeb4SYOSHIFUJI Hideaki { 43868c73b263SDmitry Safonov size_t scratch_size; 43878c73b263SDmitry Safonov int ret; 4388cfb6eeb4SYOSHIFUJI Hideaki 43898c73b263SDmitry Safonov scratch_size = sizeof(union tcp_md5sum_block) + sizeof(struct tcphdr); 43908c73b263SDmitry Safonov ret = tcp_sigpool_alloc_ahash("md5", scratch_size); 43918c73b263SDmitry Safonov if (ret >= 0) { 43928c73b263SDmitry Safonov /* As long as any md5 sigpool was allocated, the return 43938c73b263SDmitry Safonov * id would stay the same. Re-write the id only for the case 43948c73b263SDmitry Safonov * when previously all MD5 keys were deleted and this call 43958c73b263SDmitry Safonov * allocates the first MD5 key, which may return a different 43968c73b263SDmitry Safonov * sigpool id than was used previously. 
397271cea17eSEric Dumazet */ 43988c73b263SDmitry Safonov WRITE_ONCE(tcp_md5_sigpool_id, ret); /* Pairs with the READ_ONCE() readers below */ 439949a72dfbSAdam Langley return 0; 440049a72dfbSAdam Langley } 44018c73b263SDmitry Safonov return ret; 44028c73b263SDmitry Safonov } 440349a72dfbSAdam Langley 44048c73b263SDmitry Safonov void tcp_md5_release_sigpool(void) 44058c73b263SDmitry Safonov { 44068c73b263SDmitry Safonov tcp_sigpool_release(READ_ONCE(tcp_md5_sigpool_id)); 44078c73b263SDmitry Safonov } 44088c73b263SDmitry Safonov 44098c73b263SDmitry Safonov void tcp_md5_add_sigpool(void) 44108c73b263SDmitry Safonov { 44118c73b263SDmitry Safonov tcp_sigpool_get(READ_ONCE(tcp_md5_sigpool_id)); 44128c73b263SDmitry Safonov } 44138c73b263SDmitry Safonov 44148c73b263SDmitry Safonov int tcp_md5_hash_key(struct tcp_sigpool *hp, 44158c73b263SDmitry Safonov const struct tcp_md5sig_key *key) 441649a72dfbSAdam Langley { 4417e6ced831SEric Dumazet u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ 441849a72dfbSAdam Langley struct scatterlist sg; 441949a72dfbSAdam Langley 44206a2febecSEric Dumazet sg_init_one(&sg, key->key, keylen); 44218c73b263SDmitry Safonov ahash_request_set_crypt(hp->req, &sg, NULL, keylen); 4422e6ced831SEric Dumazet 44238c73b263SDmitry Safonov /* We use data_race() because tcp_md5_do_add() might change 44248c73b263SDmitry Safonov * key->key under us 44258c73b263SDmitry Safonov */ 44268c73b263SDmitry Safonov return data_race(crypto_ahash_update(hp->req)); 442749a72dfbSAdam Langley } 442849a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_key); 442949a72dfbSAdam Langley 44307bbb765bSDmitry Safonov /* Called with rcu_read_lock() */ 44311330b6efSJakub Kicinski enum skb_drop_reason 44321330b6efSJakub Kicinski tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, 44337bbb765bSDmitry Safonov const void *saddr, const void *daddr, 44340a3a8090SDmitry Safonov int family, int l3index, const __u8 *hash_location) 44357bbb765bSDmitry Safonov { 44360a3a8090SDmitry Safonov /* This gets called for each TCP segment that has a TCP-MD5 option. 44377bbb765bSDmitry Safonov * We have 3 drop cases: 44387bbb765bSDmitry Safonov * o No MD5 hash and one expected. 44397bbb765bSDmitry Safonov * o MD5 hash and we're not expecting one. 44407bbb765bSDmitry Safonov * o MD5 hash and it's wrong. 44417bbb765bSDmitry Safonov */ 4442e9d9da91SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 44430a3a8090SDmitry Safonov struct tcp_md5sig_key *key; 44447bbb765bSDmitry Safonov u8 newhash[16]; 44450a3a8090SDmitry Safonov int genhash; 44467bbb765bSDmitry Safonov 44470a3a8090SDmitry Safonov key = tcp_md5_do_lookup(sk, l3index, saddr, family); 44487bbb765bSDmitry Safonov 44490a3a8090SDmitry Safonov if (!key && hash_location) { 44507bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 44512717b5adSDmitry Safonov tcp_hash_fail("Unexpected MD5 Hash found", family, skb, ""); 44521330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5UNEXPECTED; 44537bbb765bSDmitry Safonov } 44547bbb765bSDmitry Safonov 4455e62d2e11SEric Dumazet /* Check the signature. 4456e62d2e11SEric Dumazet * To support dual stack listeners, we need to handle 4457e62d2e11SEric Dumazet * the IPv4-mapped case.
4458e62d2e11SEric Dumazet */ 4459e62d2e11SEric Dumazet if (family == AF_INET) 44600a3a8090SDmitry Safonov genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb); 4461e62d2e11SEric Dumazet else 44620a3a8090SDmitry Safonov genhash = tp->af_specific->calc_md5_hash(newhash, key, 44637bbb765bSDmitry Safonov NULL, skb); 44647bbb765bSDmitry Safonov if (genhash || memcmp(hash_location, newhash, 16) != 0) { 44657bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); 44667bbb765bSDmitry Safonov if (family == AF_INET) { 44672717b5adSDmitry Safonov tcp_hash_fail("MD5 Hash failed", AF_INET, skb, "%s L3 index %d", 44687bbb765bSDmitry Safonov genhash ? "tcp_v4_calc_md5_hash failed" 44697bbb765bSDmitry Safonov : "", l3index); 44707bbb765bSDmitry Safonov } else { 44712717b5adSDmitry Safonov if (genhash) { 44722717b5adSDmitry Safonov tcp_hash_fail("MD5 Hash failed", 44732717b5adSDmitry Safonov AF_INET6, skb, "L3 index %d", 44742717b5adSDmitry Safonov l3index); 44752717b5adSDmitry Safonov } else { 44762717b5adSDmitry Safonov tcp_hash_fail("MD5 Hash mismatch", 44772717b5adSDmitry Safonov AF_INET6, skb, "L3 index %d", 44782717b5adSDmitry Safonov l3index); 44792717b5adSDmitry Safonov } 44807bbb765bSDmitry Safonov } 44811330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5FAILURE; 44827bbb765bSDmitry Safonov } 44831330b6efSJakub Kicinski return SKB_NOT_DROPPED_YET; 44847bbb765bSDmitry Safonov } 44857bbb765bSDmitry Safonov EXPORT_SYMBOL(tcp_inbound_md5_hash); 44867bbb765bSDmitry Safonov 4487cfb6eeb4SYOSHIFUJI Hideaki #endif 4488cfb6eeb4SYOSHIFUJI Hideaki 44894ac02babSAndi Kleen void tcp_done(struct sock *sk) 44904ac02babSAndi Kleen { 4491d983ea6fSEric Dumazet struct request_sock *req; 44928336886fSJerry Chu 4493cab209e5SEric Dumazet /* We might be called with a new socket, after 4494cab209e5SEric Dumazet * inet_csk_prepare_forced_close() has been called 4495cab209e5SEric Dumazet * so we can not use lockdep_sock_is_held(sk) 4496cab209e5SEric Dumazet */ 4497cab209e5SEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); 44984ac02babSAndi Kleen 44994ac02babSAndi Kleen if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 4500c10d9310SEric Dumazet TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 45014ac02babSAndi Kleen 45024ac02babSAndi Kleen tcp_set_state(sk, TCP_CLOSE); 45034ac02babSAndi Kleen tcp_clear_xmit_timers(sk); 450400db4124SIan Morris if (req) 45058336886fSJerry Chu reqsk_fastopen_remove(sk, req, false); 45064ac02babSAndi Kleen 4507e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 45084ac02babSAndi Kleen 45094ac02babSAndi Kleen if (!sock_flag(sk, SOCK_DEAD)) 45104ac02babSAndi Kleen sk->sk_state_change(sk); 45114ac02babSAndi Kleen else 45124ac02babSAndi Kleen inet_csk_destroy_sock(sk); 45134ac02babSAndi Kleen } 45144ac02babSAndi Kleen EXPORT_SYMBOL_GPL(tcp_done); 45154ac02babSAndi Kleen 4516c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err) 4517c1e64e29SLorenzo Colitti { 4518af9784d0SEric Dumazet int state = inet_sk_state_load(sk); 4519af9784d0SEric Dumazet 4520af9784d0SEric Dumazet if (state == TCP_NEW_SYN_RECV) { 452107f6f4a3SEric Dumazet struct request_sock *req = inet_reqsk(sk); 452207f6f4a3SEric Dumazet 452307f6f4a3SEric Dumazet local_bh_disable(); 4524acc2cf4eSLorenzo Colitti inet_csk_reqsk_queue_drop(req->rsk_listener, req); 452507f6f4a3SEric Dumazet local_bh_enable(); 452607f6f4a3SEric Dumazet return 0; 452707f6f4a3SEric Dumazet } 4528af9784d0SEric Dumazet if (state == TCP_TIME_WAIT) { 4529af9784d0SEric Dumazet 
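	/* Added note (ours, not in the original source): a TIME_WAIT
	 * socket is not a full socket and cannot go through tcp_done();
	 * the branch below instead pins it with a reference and cancels
	 * its timewait timer via inet_twsk_deschedule_put(), which is
	 * what finally frees it.
	 */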
struct inet_timewait_sock *tw = inet_twsk(sk); 4530af9784d0SEric Dumazet 4531af9784d0SEric Dumazet refcount_inc(&tw->tw_refcnt); 4532af9784d0SEric Dumazet local_bh_disable(); 4533af9784d0SEric Dumazet inet_twsk_deschedule_put(tw); 4534af9784d0SEric Dumazet local_bh_enable(); 4535af9784d0SEric Dumazet return 0; 4536c1e64e29SLorenzo Colitti } 4537c1e64e29SLorenzo Colitti 45384ddbcb88SAditi Ghag /* BPF context ensures sock locking. */ 45394ddbcb88SAditi Ghag if (!has_current_bpf_ctx()) 4540c1e64e29SLorenzo Colitti /* Don't race with userspace socket closes such as tcp_close. */ 4541c1e64e29SLorenzo Colitti lock_sock(sk); 4542c1e64e29SLorenzo Colitti 45432010b93eSLorenzo Colitti if (sk->sk_state == TCP_LISTEN) { 45442010b93eSLorenzo Colitti tcp_set_state(sk, TCP_CLOSE); 45452010b93eSLorenzo Colitti inet_csk_listen_stop(sk); 45462010b93eSLorenzo Colitti } 45472010b93eSLorenzo Colitti 4548c1e64e29SLorenzo Colitti /* Don't race with BH socket closes such as inet_csk_listen_stop. */ 4549c1e64e29SLorenzo Colitti local_bh_disable(); 4550c1e64e29SLorenzo Colitti bh_lock_sock(sk); 4551c1e64e29SLorenzo Colitti 4552c1e64e29SLorenzo Colitti if (!sock_flag(sk, SOCK_DEAD)) { 4553e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, err); 4554c1e64e29SLorenzo Colitti /* This barrier is coupled with smp_rmb() in tcp_poll() */ 4555c1e64e29SLorenzo Colitti smp_wmb(); 4556e3ae2365SAlexander Aring sk_error_report(sk); 4557c1e64e29SLorenzo Colitti if (tcp_need_reset(sk->sk_state)) 4558c1e64e29SLorenzo Colitti tcp_send_active_reset(sk, GFP_ATOMIC); 4559c1e64e29SLorenzo Colitti tcp_done(sk); 4560c1e64e29SLorenzo Colitti } 4561c1e64e29SLorenzo Colitti 4562c1e64e29SLorenzo Colitti bh_unlock_sock(sk); 4563c1e64e29SLorenzo Colitti local_bh_enable(); 4564e05836acSSoheil Hassas Yeganeh tcp_write_queue_purge(sk); 45654ddbcb88SAditi Ghag if (!has_current_bpf_ctx()) 4566c1e64e29SLorenzo Colitti release_sock(sk); 4567c1e64e29SLorenzo Colitti return 0; 4568c1e64e29SLorenzo Colitti } 4569c1e64e29SLorenzo Colitti EXPORT_SYMBOL_GPL(tcp_abort); 4570c1e64e29SLorenzo Colitti 45715f8ef48dSStephen Hemminger extern struct tcp_congestion_ops tcp_reno; 45721da177e4SLinus Torvalds 45731da177e4SLinus Torvalds static __initdata unsigned long thash_entries; 45741da177e4SLinus Torvalds static int __init set_thash_entries(char *str) 45751da177e4SLinus Torvalds { 4576413c27d8SEldad Zack ssize_t ret; 4577413c27d8SEldad Zack 45781da177e4SLinus Torvalds if (!str) 45791da177e4SLinus Torvalds return 0; 4580413c27d8SEldad Zack 4581413c27d8SEldad Zack ret = kstrtoul(str, 0, &thash_entries); 4582413c27d8SEldad Zack if (ret) 4583413c27d8SEldad Zack return 0; 4584413c27d8SEldad Zack 45851da177e4SLinus Torvalds return 1; 45861da177e4SLinus Torvalds } 45871da177e4SLinus Torvalds __setup("thash_entries=", set_thash_entries); 45881da177e4SLinus Torvalds 458947d7a88cSFabian Frederick static void __init tcp_init_mem(void) 45904acb4190SGlauber Costa { 4591b66e91ccSEric Dumazet unsigned long limit = nr_free_buffer_pages() / 16; 4592b66e91ccSEric Dumazet 45934acb4190SGlauber Costa limit = max(limit, 128UL); 4594b66e91ccSEric Dumazet sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */ 4595b66e91ccSEric Dumazet sysctl_tcp_mem[1] = limit; /* 6.25 % */ 4596b66e91ccSEric Dumazet sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */ 45974acb4190SGlauber Costa } 45984acb4190SGlauber Costa 4599d5fed5adSCoco Li static void __init tcp_struct_check(void) 4600d5fed5adSCoco Li { 4601d5fed5adSCoco Li /* TX read-mostly hotpath cache lines */ 4602d5fed5adSCoco Li 
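	/* Added note (ours, not in the original source): each assertion
	 * below is a compile-time check. Roughly, the macros verify that
	 * a member sits between the __cacheline_group_begin__##GROUP and
	 * __cacheline_group_end__##GROUP markers placed inside struct
	 * tcp_sock, along the lines of:
	 *
	 *	BUILD_BUG_ON(offsetof(struct tcp_sock, max_window) <
	 *		     offsetof(struct tcp_sock,
	 *			      __cacheline_group_begin__tcp_sock_read_tx));
	 *
	 * so a field accidentally moved out of its hot-path group breaks
	 * the build instead of silently regressing cache locality.
	 */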
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, max_window); 4603d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, rcv_ssthresh); 4604d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering); 4605d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat); 4606d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs); 4607d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, lost_skb_hint); 4608d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint); 4609d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 40); 4610d5fed5adSCoco Li 4611d5fed5adSCoco Li /* TXRX read-mostly hotpath cache lines */ 4612d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset); 4613d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_wnd); 4614d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, mss_cache); 4615d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_cwnd); 4616d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, prr_out); 4617d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out); 4618d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out); 4619119ff048SEric Dumazet CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio); 4620119ff048SEric Dumazet CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 32); 4621d5fed5adSCoco Li 4622d5fed5adSCoco Li /* RX read-mostly hotpath cache lines */ 4623d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq); 4624d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rcv_tstamp); 4625d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1); 4626d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq); 4627d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us); 4628d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, retrans_out); 4629d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, advmss); 4630d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, urg_data); 4631d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, lost); 4632d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min); 4633d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue); 4634d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh); 4635d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69); 4636d5fed5adSCoco Li 4637d5fed5adSCoco Li /* TX read-write hotpath cache lines */ 4638d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out); 4639d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, data_segs_out); 4640d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, bytes_sent); 4641d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct 
tcp_sock, tcp_sock_write_tx, snd_sml); 4642d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_start); 4643d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_stat); 4644d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, write_seq); 4645d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, pushed_seq); 4646d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime); 4647d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us); 4648d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns); 4649d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_clock_cache); 4650d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_mstamp); 4651d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq); 4652d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue); 4653d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack); 4654d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags); 4655345a6e26SEric Dumazet CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 105); 4656d5fed5adSCoco Li 4657d5fed5adSCoco Li /* TXRX read-write hotpath cache lines */ 4658d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags); 4659d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt); 4660d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt); 4661d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una); 4662d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, window_clamp); 4663d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, srtt_us); 4664d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, packets_out); 4665d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_up); 4666d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered); 4667d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered_ce); 4668d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited); 4669d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd); 4670d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt); 4671d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 76); 4672d5fed5adSCoco Li 4673d5fed5adSCoco Li /* RX read-write hotpath cache lines */ 4674d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received); 4675d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, segs_in); 4676d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, data_segs_in); 4677d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_wup); 4678d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, max_packets_out); 4679d5fed5adSCoco Li 
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, cwnd_usage_seq); 4680d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_delivered); 4681d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_interval_us); 4682d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_last_tsecr); 4683d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, first_tx_mstamp); 4684d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_mstamp); 4685d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_acked); 4686d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_est); 4687d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcvq_space); 4688d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_rx, 99); 4689d5fed5adSCoco Li } 4690d5fed5adSCoco Li 46911da177e4SLinus Torvalds void __init tcp_init(void) 46921da177e4SLinus Torvalds { 4693b49960a0SEric Dumazet int max_rshare, max_wshare, cnt; 4694b2d3ea4aSEric Dumazet unsigned long limit; 4695074b8517SDimitri Sivanich unsigned int i; 46961da177e4SLinus Torvalds 46973b4929f6SEric Dumazet BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); 4698b2d3ea4aSEric Dumazet BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > 4699c593642cSPankaj Bharadiya sizeof_field(struct sk_buff, cb)); 47001da177e4SLinus Torvalds 4701d5fed5adSCoco Li tcp_struct_check(); 4702d5fed5adSCoco Li 4703908c7f19STejun Heo percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); 470419757cebSEric Dumazet 470519757cebSEric Dumazet timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); 470619757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 470719757cebSEric Dumazet 470827da6d37SMartin KaFai Lau inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", 470927da6d37SMartin KaFai Lau thash_entries, 21, /* one slot per 2 MB*/ 471027da6d37SMartin KaFai Lau 0, 64 * 1024); 47116e04e021SArnaldo Carvalho de Melo tcp_hashinfo.bind_bucket_cachep = 47126e04e021SArnaldo Carvalho de Melo kmem_cache_create("tcp_bind_bucket", 47136e04e021SArnaldo Carvalho de Melo sizeof(struct inet_bind_bucket), 0, 4714990c74e3SVasily Averin SLAB_HWCACHE_ALIGN | SLAB_PANIC | 4715990c74e3SVasily Averin SLAB_ACCOUNT, 4716990c74e3SVasily Averin NULL); 471728044fc1SJoanne Koong tcp_hashinfo.bind2_bucket_cachep = 471828044fc1SJoanne Koong kmem_cache_create("tcp_bind2_bucket", 471928044fc1SJoanne Koong sizeof(struct inet_bind2_bucket), 0, 472028044fc1SJoanne Koong SLAB_HWCACHE_ALIGN | SLAB_PANIC | 472128044fc1SJoanne Koong SLAB_ACCOUNT, 472228044fc1SJoanne Koong NULL); 47231da177e4SLinus Torvalds 47241da177e4SLinus Torvalds /* Size and allocate the main established and bind bucket 47251da177e4SLinus Torvalds * hash tables. 47261da177e4SLinus Torvalds * 47271da177e4SLinus Torvalds * The methodology is similar to that of the buffer cache. 
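 *
 * Added sizing example (ours, not part of the original comment): with
 * the scale of 17 used below, i.e. one slot per 128 KiB of low memory,
 * a machine with 16 GiB ends up with about
 *
 *	(16 << 30) >> 17 = 131072
 *
 * established-hash slots; auto-sizing is capped at 512 * 1024 entries,
 * and "thash_entries=" on the kernel command line overrides it
 * entirely.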
47281da177e4SLinus Torvalds */ 47296e04e021SArnaldo Carvalho de Melo tcp_hashinfo.ehash = 47301da177e4SLinus Torvalds alloc_large_system_hash("TCP established", 47310f7ff927SArnaldo Carvalho de Melo sizeof(struct inet_ehash_bucket), 47321da177e4SLinus Torvalds thash_entries, 4733fd90b29dSEric Dumazet 17, /* one slot per 128 KB of memory */ 47349e950efaSJohn Heffner 0, 47351da177e4SLinus Torvalds NULL, 4736f373b53bSEric Dumazet &tcp_hashinfo.ehash_mask, 473731fe62b9STim Bird 0, 47380ccfe618SJean Delvare thash_entries ? 0 : 512 * 1024); 473905dbc7b5SEric Dumazet for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) 47403ab5aee7SEric Dumazet INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); 474105dbc7b5SEric Dumazet 4742230140cfSEric Dumazet if (inet_ehash_locks_alloc(&tcp_hashinfo)) 4743230140cfSEric Dumazet panic("TCP: failed to alloc ehash_locks"); 47446e04e021SArnaldo Carvalho de Melo tcp_hashinfo.bhash = 4745593d1ebeSJoanne Koong alloc_large_system_hash("TCP bind", 474628044fc1SJoanne Koong 2 * sizeof(struct inet_bind_hashbucket), 4747f373b53bSEric Dumazet tcp_hashinfo.ehash_mask + 1, 4748fd90b29dSEric Dumazet 17, /* one slot per 128 KB of memory */ 47499e950efaSJohn Heffner 0, 47506e04e021SArnaldo Carvalho de Melo &tcp_hashinfo.bhash_size, 47511da177e4SLinus Torvalds NULL, 475231fe62b9STim Bird 0, 47531da177e4SLinus Torvalds 64 * 1024); 4754074b8517SDimitri Sivanich tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; 475528044fc1SJoanne Koong tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size; 47566e04e021SArnaldo Carvalho de Melo for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 47576e04e021SArnaldo Carvalho de Melo spin_lock_init(&tcp_hashinfo.bhash[i].lock); 47586e04e021SArnaldo Carvalho de Melo INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 475928044fc1SJoanne Koong spin_lock_init(&tcp_hashinfo.bhash2[i].lock); 476028044fc1SJoanne Koong INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); 47611da177e4SLinus Torvalds } 47621da177e4SLinus Torvalds 4763d1e5e640SKuniyuki Iwashima tcp_hashinfo.pernet = false; 4764c5ed63d6SEric Dumazet 4765c5ed63d6SEric Dumazet cnt = tcp_hashinfo.ehash_mask + 1; 4766c5ed63d6SEric Dumazet sysctl_tcp_max_orphans = cnt / 2; 47671da177e4SLinus Torvalds 4768a4fe34bfSEric W. Biederman tcp_init_mem(); 4769c43b874dSJason Wang /* Set per-socket limits to no more than 1/128 the pressure threshold */ 47705fb84b14SEric Dumazet limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); 4771b49960a0SEric Dumazet max_wshare = min(4UL*1024*1024, limit); 4772b49960a0SEric Dumazet max_rshare = min(6UL*1024*1024, limit); 47737b4f4b5eSJohn Heffner 4774100fdd1fSEric Dumazet init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE; 4775356d1833SEric Dumazet init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024; 4776356d1833SEric Dumazet init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare); 47777b4f4b5eSJohn Heffner 4778100fdd1fSEric Dumazet init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE; 4779a337531bSYuchung Cheng init_net.ipv4.sysctl_tcp_rmem[1] = 131072; 4780a337531bSYuchung Cheng init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare); 47811da177e4SLinus Torvalds 4782afd46503SJoe Perches pr_info("Hash tables configured (established %u bind %u)\n", 4783f373b53bSEric Dumazet tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 4784317a76f9SStephen Hemminger 47851946e672SHaishuang Yan tcp_v4_init(); 478651c5d0c4SDavid S. 
Miller tcp_metrics_init(); 478755d8694fSFlorian Westphal BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); 478846d3ceabSEric Dumazet tcp_tasklet_init(); 4789f870fa0bSMat Martineau mptcp_init(); 47901da177e4SLinus Torvalds } 4791
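/* Worked example (ours, not part of the kernel source): on a machine
 * where nr_free_buffer_pages() returns 4194304 pages of 4 KiB (~16 GiB),
 * tcp_init_mem() above yields, in pages:
 *
 *	limit             = 4194304 / 16   = 262144
 *	sysctl_tcp_mem[0] = limit / 4 * 3  = 196608	(~4.68 % of pages)
 *	sysctl_tcp_mem[1] = limit          = 262144	(~6.25 %)
 *	sysctl_tcp_mem[2] = tcp_mem[0] * 2 = 393216	(~9.37 %)
 */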