// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken,
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
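/*
 * A condensed sketch of the common paths through the states above
 * (a simplification for orientation; simultaneous open/close and
 * error transitions are omitted):
 *
 *	active  open:  CLOSE -> SYN_SENT -> ESTABLISHED
 *	passive open:  LISTEN -> SYN_RECV -> ESTABLISHED
 *	local  close:  ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 ->
 *		       TIME_WAIT -> CLOSE
 *	remote close:  ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE
 */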
#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/proto_memory.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/rstreason.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
#include <net/hotdata.h>
#include <trace/events/tcp.h>
#include <net/rps.h>

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

DEFINE_PER_CPU(u32, tcp_tw_isn);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non-atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;

	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}
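/* Worked example for secs_to_retrans() above (illustrative,
 * unit-agnostic numbers): seconds = 7, timeout = 1 and rto_max = 120
 * yield res = 3, since the accumulated backoff periods double as
 * 1 + 2 + 4 = 7.
 */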
/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

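	/* Illustrative numbers: rate = 10 delivered packets,
	 * mss_cache = 1448 and intv = 10000 usec would give
	 * rate64 = 10 * 1448 * USEC_PER_SEC / 10000 = 1448000 bytes/sec.
	 */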
	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int rto_min_us;

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us);
	icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us);
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;
	tp->rate_app_limited = 1;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
	tcp_scaling_ratio_init(sk);

	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}

static bool tcp_stream_is_readable(struct sock *sk, int target)
{
	if (tcp_epollin_ready(sk, target))
		return true;
	return sk_is_readable(sk);
}

/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
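/* Caller-level summary of the checks below (a paraphrase, not an
 * additional contract): EPOLLIN is reported once sock_rcvlowat() bytes
 * are queued, EPOLLOUT while stream write space is available, and
 * EPOLLHUP only after both directions have been shut down.
 */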
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	u8 shutdown;
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. EPOLLHUP is an _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making poll() on write()
	 * impossible in state CLOSE_WAIT. One solution is evident --- to
	 * set EPOLLHUP if and only if shutdown has been made in both
	 * directions. Actually, it is interesting to look at how Solaris
	 * and DUX solve this dilemma. I would prefer, if EPOLLHUP were
	 * maskable, then we could set it on SND_SHUTDOWN. BTW examples
	 * given in Stevens' books assume exactly this behaviour, it
	 * explains why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
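	/* Note on the readability test below: "target" is the
	 * SO_RCVLOWAT watermark, bumped by one byte when the urgent byte
	 * sits at copied_seq and is not read inline, since that byte is
	 * skipped rather than returned to the reader.
	 */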
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);
		u16 urg_data = READ_ONCE(tp->urg_data);

		if (unlikely(urg_data) &&
		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
		    !sock_flag(sk, SOCK_URGINLINE))
			target++;

		if (tcp_stream_is_readable(sk, target))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(shutdown & SEND_SHUTDOWN)) {
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (__sk_stream_is_writeable(sk, 1))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT &&
		   inet_test_bit(DEFER_CONNECT, sk)) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_done_with_error() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);

int tcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
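	/* SIOCATMARK answers "is the next byte to read at the urgent
	 * mark?". A typical (illustrative) userspace pattern is
	 * ioctl(fd, SIOCATMARK, &flag) while draining data ahead of an
	 * out-of-band byte.
	 */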
	case SIOCATMARK:
		answ = READ_ONCE(tp->urg_data) &&
		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) -
			       READ_ONCE(tp->snd_nxt);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	*karg = answ;
	return 0;
}
EXPORT_SYMBOL(tcp_ioctl);

void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues:
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure ack (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
	       tcp_skb_can_collapse_to(skb);
}
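/* Illustrative effect of the test above: with tcp_autocorking enabled,
 * a burst of small sendmsg() calls issued while a previous packet still
 * sits in the Qdisc or NIC queue is coalesced into the tail skb, and
 * transmission resumes from TX completion handling.
 */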
void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
			smp_mb__after_atomic();
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}
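/* Illustrative userspace pairing (not kernel code): applications reach
 * this path with something like
 *
 *	splice(tcp_fd, NULL, pipe_wr_fd, NULL, len, SPLICE_F_MOVE);
 *
 * moving received payload into a pipe without copying it through user
 * memory.
 */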
/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			ret = sk_wait_data(sk, &timeo, NULL);
			if (ret < 0)
				break;
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!tss.len || !timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);

struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
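	/* Only pay for a divide to recompute gso_segs when the cached
	 * goal has drifted by at least one MSS in either direction.
	 */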
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

/* In some cases, sendmsg() could have added an skb to the write queue,
 * but failed to add payload to it. We need to remove it to consume less
 * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
 * epoll() users. Another reason is that tcp_write_xmit() does not like
 * finding an empty skb in the write queue.
 */
void tcp_remove_empty_skb(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		tcp_unlink_write_queue(skb, sk);
		if (tcp_write_queue_empty(sk))
			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
		tcp_wmem_free_skb(sk, skb);
	}
}

/* skb changing from pure zc to mixed, must charge zc */
static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
{
	if (unlikely(skb_zcopy_pure(skb))) {
		u32 extra = skb->truesize -
			    SKB_TRUESIZE(skb_end_offset(skb));

		if (!sk_wmem_schedule(sk, extra))
			return -ENOMEM;

		sk_mem_charge(sk, extra);
		skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
	}
	return 0;
}

int tcp_wmem_schedule(struct sock *sk, int copy)
{
	int left;

	if (likely(sk_wmem_schedule(sk, copy)))
		return copy;

	/* We could be in trouble if we have nothing queued.
	 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
	 * to guarantee some progress.
	 */
	left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued;
	if (left > 0)
		sk_forced_mem_schedule(sk, min(left, copy));
	return min(copy, sk->sk_forward_alloc);
}
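/* Illustrative numbers for the fallback above: with tcp_wmem[0] = 4096
 * and sk_wmem_queued = 1024, up to min(3072, copy) bytes get
 * force-charged so a blocked sender can still make some progress.
 */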
void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}

int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr *uaddr = msg->msg_name;
	int err, flags;

	if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
	      TFO_CLIENT_ENABLE) ||
	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
	     uaddr->sa_family == AF_UNSPEC))
		return -EOPNOTSUPP;
	if (tp->fastopen_req)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(!tp->fastopen_req))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;
	tp->fastopen_req->size = size;
	tp->fastopen_req->uarg = uarg;

	if (inet_test_bit(DEFER_CONNECT, sk)) {
		err = tcp_connect(sk);
		/* Same failure procedure as in tcp_v4/6_connect */
		if (err) {
			tcp_set_state(sk, TCP_CLOSE);
			inet->inet_dport = 0;
			sk->sk_route_caps = 0;
		}
	}
	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, uaddr,
				    msg->msg_namelen, flags, 1);
	/* fastopen_req could already be freed in __inet_stream_connect
	 * if the connection times out or gets rst
	 */
	if (tp->fastopen_req) {
		*copied = tp->fastopen_req->copied;
		tcp_free_fastopen_req(tp);
		inet_clear_bit(DEFER_CONNECT, sk);
	}
	return err;
}
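/* Illustrative userspace triggers (not kernel code) for the Fast Open
 * path above: either
 *
 *	sendto(fd, buf, len, MSG_FASTOPEN, (struct sockaddr *)&addr, alen);
 *
 * or setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one))
 * followed by connect() and a regular send(), which takes the
 * DEFER_CONNECT branch.
 */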
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;
	struct sockcm_cookie sockc;
	int flags, err, copied = 0;
	int mss_now = 0, size_goal, copied_syn = 0;
	int process_backlog = 0;
	int zc = 0;
	long timeo;

	flags = msg->msg_flags;

	if ((flags & MSG_ZEROCOPY) && size) {
		if (msg->msg_ubuf) {
			uarg = msg->msg_ubuf;
			if (sk->sk_route_caps & NETIF_F_SG)
				zc = MSG_ZEROCOPY;
		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
			skb = tcp_write_queue_tail(sk);
			uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
			if (!uarg) {
				err = -ENOBUFS;
				goto out_err;
			}
			if (sk->sk_route_caps & NETIF_F_SG)
				zc = MSG_ZEROCOPY;
			else
				uarg_to_msgzc(uarg)->zerocopy = 0;
		}
	} else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) {
		if (sk->sk_route_caps & NETIF_F_SG)
			zc = MSG_SPLICE_PAGES;
	}

	if (unlikely(flags & MSG_FASTOPEN ||
		     inet_test_bit(DEFER_CONNECT, sk)) &&
	    !tp->repair) {
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
		if (err == -EINPROGRESS && copied_syn > 0)
			goto out;
		else if (err)
			goto out_err;
	}

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	tcp_rate_check_app_limited(sk); /* is sending application-limited? */

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
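	 * (tcp_passive_fastopen() is true for such a SYN_RECV fastopen
	 * server socket, which is what lets it skip the wait below.)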
11018336886fSJerry Chu */ 11028336886fSJerry Chu if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 11038336886fSJerry Chu !tcp_passive_fastopen(sk)) { 1104686a5624SYuvaraja Mariappan err = sk_stream_wait_connect(sk, &timeo); 1105686a5624SYuvaraja Mariappan if (err != 0) 1106cf60af03SYuchung Cheng goto do_error; 11078336886fSJerry Chu } 11081da177e4SLinus Torvalds 1109c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) { 1110c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_RECV_QUEUE) { 1111c0e88ff0SPavel Emelyanov copied = tcp_send_rcvq(sk, msg, size); 11125924f17aSChristoph Paasch goto out_nopush; 1113c0e88ff0SPavel Emelyanov } 1114c0e88ff0SPavel Emelyanov 1115c0e88ff0SPavel Emelyanov err = -EINVAL; 1116c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE) 1117c0e88ff0SPavel Emelyanov goto out_err; 1118c0e88ff0SPavel Emelyanov 1119c0e88ff0SPavel Emelyanov /* 'common' sending to sendq */ 1120c0e88ff0SPavel Emelyanov } 1121c0e88ff0SPavel Emelyanov 1122657a0667SWillem de Bruijn sockcm_init(&sockc, sk); 1123c14ac945SSoheil Hassas Yeganeh if (msg->msg_controllen) { 1124c14ac945SSoheil Hassas Yeganeh err = sock_cmsg_send(sk, msg, &sockc); 1125c14ac945SSoheil Hassas Yeganeh if (unlikely(err)) { 1126c14ac945SSoheil Hassas Yeganeh err = -EINVAL; 1127c14ac945SSoheil Hassas Yeganeh goto out_err; 1128c14ac945SSoheil Hassas Yeganeh } 1129c14ac945SSoheil Hassas Yeganeh } 1130c14ac945SSoheil Hassas Yeganeh 11311da177e4SLinus Torvalds /* This should be in poll */ 11329cd3e072SEric Dumazet sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 11331da177e4SLinus Torvalds 11341da177e4SLinus Torvalds /* Ok commence sending. */ 11351da177e4SLinus Torvalds copied = 0; 11361da177e4SLinus Torvalds 1137d41a69f1SEric Dumazet restart: 1138d41a69f1SEric Dumazet mss_now = tcp_send_mss(sk, &size_goal, flags); 1139d41a69f1SEric Dumazet 11401da177e4SLinus Torvalds err = -EPIPE; 11411da177e4SLinus Torvalds if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 114279d8665bSEric Dumazet goto do_error; 11431da177e4SLinus Torvalds 114401e97e65SAl Viro while (msg_data_left(msg)) { 1145270a1c3dSDavid Howells ssize_t copy = 0; 11461da177e4SLinus Torvalds 1147fe067e8aSDavid S. Miller skb = tcp_write_queue_tail(sk); 114865ec6097SEric Dumazet if (skb) 114965ec6097SEric Dumazet copy = size_goal - skb->len; 11501da177e4SLinus Torvalds 1151c134ecb8SMartin KaFai Lau if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { 11523613b3dbSEric Dumazet bool first_skb; 11533613b3dbSEric Dumazet 11541da177e4SLinus Torvalds new_segment: 11551da177e4SLinus Torvalds if (!sk_stream_memory_free(sk)) 1156afb83012SSoheil Hassas Yeganeh goto wait_for_space; 11571da177e4SLinus Torvalds 11581a991488SEric Dumazet if (unlikely(process_backlog >= 16)) { 11591a991488SEric Dumazet process_backlog = 0; 11601a991488SEric Dumazet if (sk_flush_backlog(sk)) 1161d41a69f1SEric Dumazet goto restart; 1162d4011239SEric Dumazet } 116375c119afSEric Dumazet first_skb = tcp_rtx_and_write_queues_empty(sk); 11645882efffSEric Dumazet skb = tcp_stream_alloc_skb(sk, sk->sk_allocation, 11653613b3dbSEric Dumazet first_skb); 11661da177e4SLinus Torvalds if (!skb) 1167afb83012SSoheil Hassas Yeganeh goto wait_for_space; 11681da177e4SLinus Torvalds 11691a991488SEric Dumazet process_backlog++; 11701da177e4SLinus Torvalds 1171a535d594SJakub Kicinski #ifdef CONFIG_SKB_DECRYPTED 1172a535d594SJakub Kicinski skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED); 1173a535d594SJakub Kicinski #endif 117404d8825cSPaolo Abeni tcp_skb_entail(sk, skb); 1175c1b4a7e6SDavid S. 
Miller copy = size_goal; 11769d186cacSAndrey Vagin 11779d186cacSAndrey Vagin /* All packets are restored as if they have 1178d3edd06eSEric Dumazet * already been sent. skb_mstamp_ns isn't set to 11799d186cacSAndrey Vagin * avoid wrong rtt estimation. 11809d186cacSAndrey Vagin */ 11819d186cacSAndrey Vagin if (tp->repair) 11829d186cacSAndrey Vagin TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; 11831da177e4SLinus Torvalds } 11841da177e4SLinus Torvalds 11851da177e4SLinus Torvalds /* Try to append data to the end of skb. */ 118601e97e65SAl Viro if (copy > msg_data_left(msg)) 118701e97e65SAl Viro copy = msg_data_left(msg); 11881da177e4SLinus Torvalds 1189270a1c3dSDavid Howells if (zc == 0) { 11905640f768SEric Dumazet bool merge = true; 11911da177e4SLinus Torvalds int i = skb_shinfo(skb)->nr_frags; 11925640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 1193761965eaSEric Dumazet 11945640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 1195afb83012SSoheil Hassas Yeganeh goto wait_for_space; 1196761965eaSEric Dumazet 11975640f768SEric Dumazet if (!skb_can_coalesce(skb, i, pfrag->page, 11985640f768SEric Dumazet pfrag->offset)) { 1199a86a0661SEric Dumazet if (i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) { 12001da177e4SLinus Torvalds tcp_mark_push(tp, skb); 12011da177e4SLinus Torvalds goto new_segment; 12021da177e4SLinus Torvalds } 12035640f768SEric Dumazet merge = false; 12045640f768SEric Dumazet } 1205ef015786SHerbert Xu 12065640f768SEric Dumazet copy = min_t(int, copy, pfrag->size - pfrag->offset); 1207ef015786SHerbert Xu 1208eb315a7dSPavel Begunkov if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) { 1209849b425cSEric Dumazet if (tcp_downgrade_zcopy_pure(sk, skb)) 1210849b425cSEric Dumazet goto wait_for_space; 1211eb315a7dSPavel Begunkov skb_zcopy_downgrade_managed(skb); 1212eb315a7dSPavel Begunkov } 1213849b425cSEric Dumazet 1214849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy); 1215849b425cSEric Dumazet if (!copy) 1216afb83012SSoheil Hassas Yeganeh goto wait_for_space; 12171da177e4SLinus Torvalds 121857be5bdaSAl Viro err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, 12195640f768SEric Dumazet pfrag->page, 12205640f768SEric Dumazet pfrag->offset, 12215640f768SEric Dumazet copy); 12225640f768SEric Dumazet if (err) 12231da177e4SLinus Torvalds goto do_error; 12241da177e4SLinus Torvalds 12251da177e4SLinus Torvalds /* Update the skb. 
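 * Either grow the fragment we coalesced with, or attach the freshly
 * filled page as a new fragment and take a page reference for it.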
*/ 12261da177e4SLinus Torvalds if (merge) { 12279e903e08SEric Dumazet skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 12281da177e4SLinus Torvalds } else { 12295640f768SEric Dumazet skb_fill_page_desc(skb, i, pfrag->page, 12305640f768SEric Dumazet pfrag->offset, copy); 12314e33e346SEric Dumazet page_ref_inc(pfrag->page); 12321da177e4SLinus Torvalds } 12335640f768SEric Dumazet pfrag->offset += copy; 1234270a1c3dSDavid Howells } else if (zc == MSG_ZEROCOPY) { 12359b65b17dSTalal Ahmad /* First append to a fragless skb builds initial 12369b65b17dSTalal Ahmad * pure zerocopy skb 12379b65b17dSTalal Ahmad */ 12389b65b17dSTalal Ahmad if (!skb->len) 12399b65b17dSTalal Ahmad skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY; 12409b65b17dSTalal Ahmad 12419b65b17dSTalal Ahmad if (!skb_zcopy_pure(skb)) { 1242849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy); 1243849b425cSEric Dumazet if (!copy) 1244358ed624STalal Ahmad goto wait_for_space; 12459b65b17dSTalal Ahmad } 1246358ed624STalal Ahmad 1247f214f915SWillem de Bruijn err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); 1248111856c7SWillem de Bruijn if (err == -EMSGSIZE || err == -EEXIST) { 1249111856c7SWillem de Bruijn tcp_mark_push(tp, skb); 1250f214f915SWillem de Bruijn goto new_segment; 1251111856c7SWillem de Bruijn } 1252f214f915SWillem de Bruijn if (err < 0) 1253f214f915SWillem de Bruijn goto do_error; 1254f214f915SWillem de Bruijn copy = err; 1255270a1c3dSDavid Howells } else if (zc == MSG_SPLICE_PAGES) { 1256270a1c3dSDavid Howells /* Splice in data if we can; copy if we can't. */ 1257270a1c3dSDavid Howells if (tcp_downgrade_zcopy_pure(sk, skb)) 1258270a1c3dSDavid Howells goto wait_for_space; 1259270a1c3dSDavid Howells copy = tcp_wmem_schedule(sk, copy); 1260270a1c3dSDavid Howells if (!copy) 1261270a1c3dSDavid Howells goto wait_for_space; 1262270a1c3dSDavid Howells 1263270a1c3dSDavid Howells err = skb_splice_from_iter(skb, &msg->msg_iter, copy, 1264270a1c3dSDavid Howells sk->sk_allocation); 1265270a1c3dSDavid Howells if (err < 0) { 1266270a1c3dSDavid Howells if (err == -EMSGSIZE) { 1267270a1c3dSDavid Howells tcp_mark_push(tp, skb); 1268270a1c3dSDavid Howells goto new_segment; 1269270a1c3dSDavid Howells } 1270270a1c3dSDavid Howells goto do_error; 1271270a1c3dSDavid Howells } 1272270a1c3dSDavid Howells copy = err; 1273270a1c3dSDavid Howells 1274270a1c3dSDavid Howells if (!(flags & MSG_NO_SHARED_FRAGS)) 1275270a1c3dSDavid Howells skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; 1276270a1c3dSDavid Howells 1277270a1c3dSDavid Howells sk_wmem_queued_add(sk, copy); 1278270a1c3dSDavid Howells sk_mem_charge(sk, copy); 12791da177e4SLinus Torvalds } 12801da177e4SLinus Torvalds 12811da177e4SLinus Torvalds if (!copied) 12824de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 12831da177e4SLinus Torvalds 12840f317464SEric Dumazet WRITE_ONCE(tp->write_seq, tp->write_seq + copy); 12851da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq += copy; 1286cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, 0); 12871da177e4SLinus Torvalds 12881da177e4SLinus Torvalds copied += copy; 128901e97e65SAl Viro if (!msg_data_left(msg)) { 1290c134ecb8SMartin KaFai Lau if (unlikely(flags & MSG_EOR)) 1291c134ecb8SMartin KaFai Lau TCP_SKB_CB(skb)->eor = 1; 12921da177e4SLinus Torvalds goto out; 12934ed2d765SWillem de Bruijn } 12941da177e4SLinus Torvalds 129565ec6097SEric Dumazet if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) 12961da177e4SLinus Torvalds continue; 12971da177e4SLinus Torvalds 12981da177e4SLinus Torvalds if (forced_push(tp)) { 
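			/* More than half of the largest window we have seen has
			 * accumulated since the last push: mark the skb and flush
			 * everything out, overriding Nagle for this burst.
			 */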
12991da177e4SLinus Torvalds tcp_mark_push(tp, skb); 13009e412ba7SIlpo Järvinen __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1301fe067e8aSDavid S. Miller } else if (skb == tcp_send_head(sk)) 13021da177e4SLinus Torvalds tcp_push_one(sk, mss_now); 13031da177e4SLinus Torvalds continue; 13041da177e4SLinus Torvalds 1305afb83012SSoheil Hassas Yeganeh wait_for_space: 13061da177e4SLinus Torvalds set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 130772bf4f17SEric Dumazet tcp_remove_empty_skb(sk); 1308ec342325SAndrew Vagin if (copied) 1309f54b3111SEric Dumazet tcp_push(sk, flags & ~MSG_MORE, mss_now, 1310f54b3111SEric Dumazet TCP_NAGLE_PUSH, size_goal); 13111da177e4SLinus Torvalds 1312686a5624SYuvaraja Mariappan err = sk_stream_wait_memory(sk, &timeo); 1313686a5624SYuvaraja Mariappan if (err != 0) 13141da177e4SLinus Torvalds goto do_error; 13151da177e4SLinus Torvalds 13160c54b85fSIlpo Järvinen mss_now = tcp_send_mss(sk, &size_goal, flags); 13171da177e4SLinus Torvalds } 13181da177e4SLinus Torvalds 13191da177e4SLinus Torvalds out: 1320ad02c4f5SSoheil Hassas Yeganeh if (copied) { 13214e8cc228SEric Dumazet tcp_tx_timestamp(sk, sockc.tsflags); 1322f54b3111SEric Dumazet tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1323ad02c4f5SSoheil Hassas Yeganeh } 13245924f17aSChristoph Paasch out_nopush: 1325a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1326a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf) 13278e044917SJonathan Lemon net_zcopy_put(uarg); 1328cf60af03SYuchung Cheng return copied + copied_syn; 13291da177e4SLinus Torvalds 13301da177e4SLinus Torvalds do_error: 133127728ba8SEric Dumazet tcp_remove_empty_skb(sk); 1332fdfc5c85SEric Dumazet 1333cf60af03SYuchung Cheng if (copied + copied_syn) 13341da177e4SLinus Torvalds goto out; 13351da177e4SLinus Torvalds out_err: 1336a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1337a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf) 13388e044917SJonathan Lemon net_zcopy_put_abort(uarg, true); 13391da177e4SLinus Torvalds err = sk_stream_error(sk, flags, err); 1340ce5ec440SJason Baron /* make sure we wake any epoll edge trigger waiter */ 1341216808c6SEric Dumazet if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { 1342ce5ec440SJason Baron sk->sk_write_space(sk); 1343b0f71bd3SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1344b0f71bd3SFrancis Yan } 13451da177e4SLinus Torvalds return err; 13461da177e4SLinus Torvalds } 1347774c4673SDavid S. 
Miller EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); 1348306b13ebSTom Herbert 1349306b13ebSTom Herbert int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1350306b13ebSTom Herbert { 1351306b13ebSTom Herbert int ret; 1352306b13ebSTom Herbert 1353306b13ebSTom Herbert lock_sock(sk); 1354306b13ebSTom Herbert ret = tcp_sendmsg_locked(sk, msg, size); 1355306b13ebSTom Herbert release_sock(sk); 1356306b13ebSTom Herbert 1357306b13ebSTom Herbert return ret; 1358306b13ebSTom Herbert } 13594bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sendmsg); 13601da177e4SLinus Torvalds 13611d7e4538SDavid Howells void tcp_splice_eof(struct socket *sock) 13621d7e4538SDavid Howells { 13631d7e4538SDavid Howells struct sock *sk = sock->sk; 13641d7e4538SDavid Howells struct tcp_sock *tp = tcp_sk(sk); 13651d7e4538SDavid Howells int mss_now, size_goal; 13661d7e4538SDavid Howells 13671d7e4538SDavid Howells if (!tcp_write_queue_tail(sk)) 13681d7e4538SDavid Howells return; 13691d7e4538SDavid Howells 13701d7e4538SDavid Howells lock_sock(sk); 13711d7e4538SDavid Howells mss_now = tcp_send_mss(sk, &size_goal, 0); 13721d7e4538SDavid Howells tcp_push(sk, 0, mss_now, tp->nonagle, size_goal); 13731d7e4538SDavid Howells release_sock(sk); 13741d7e4538SDavid Howells } 13751d7e4538SDavid Howells EXPORT_SYMBOL_GPL(tcp_splice_eof); 13761d7e4538SDavid Howells 13771da177e4SLinus Torvalds /* 13781da177e4SLinus Torvalds * Handle reading urgent data. BSD has very simple semantics for 13791da177e4SLinus Torvalds * this, no blocking and very strange errors 8) 13801da177e4SLinus Torvalds */ 13811da177e4SLinus Torvalds 1382377f0a08SRami Rosen static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 13831da177e4SLinus Torvalds { 13841da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 13851da177e4SLinus Torvalds 13861da177e4SLinus Torvalds /* No URG data to read. */ 13871da177e4SLinus Torvalds if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 13881da177e4SLinus Torvalds tp->urg_data == TCP_URG_READ) 13891da177e4SLinus Torvalds return -EINVAL; /* Yes this is right ! */ 13901da177e4SLinus Torvalds 13911da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 13921da177e4SLinus Torvalds return -ENOTCONN; 13931da177e4SLinus Torvalds 13941da177e4SLinus Torvalds if (tp->urg_data & TCP_URG_VALID) { 13951da177e4SLinus Torvalds int err = 0; 13961da177e4SLinus Torvalds char c = tp->urg_data; 13971da177e4SLinus Torvalds 13981da177e4SLinus Torvalds if (!(flags & MSG_PEEK)) 13997b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, TCP_URG_READ); 14001da177e4SLinus Torvalds 14011da177e4SLinus Torvalds /* Read urgent data. */ 14021da177e4SLinus Torvalds msg->msg_flags |= MSG_OOB; 14031da177e4SLinus Torvalds 14041da177e4SLinus Torvalds if (len > 0) { 14051da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) 14067eab8d9eSAl Viro err = memcpy_to_msg(msg, &c, 1); 14071da177e4SLinus Torvalds len = 1; 14081da177e4SLinus Torvalds } else 14091da177e4SLinus Torvalds msg->msg_flags |= MSG_TRUNC; 14101da177e4SLinus Torvalds 14111da177e4SLinus Torvalds return err ? -EFAULT : len; 14121da177e4SLinus Torvalds } 14131da177e4SLinus Torvalds 14141da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 14151da177e4SLinus Torvalds return 0; 14161da177e4SLinus Torvalds 14171da177e4SLinus Torvalds /* Fixed the recv(..., MSG_OOB) behaviour. 
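 * (As an illustrative userspace sketch, with SO_OOBINLINE left off the
 * lone urgent byte is fetched via recv(fd, &c, 1, MSG_OOB).)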
BSD docs and 14181da177e4SLinus Torvalds * the available implementations agree in this case: 14191da177e4SLinus Torvalds * this call should never block, independent of the 14201da177e4SLinus Torvalds * blocking state of the socket. 14211da177e4SLinus Torvalds * Mike <pall@rz.uni-karlsruhe.de> 14221da177e4SLinus Torvalds */ 14231da177e4SLinus Torvalds return -EAGAIN; 14241da177e4SLinus Torvalds } 14251da177e4SLinus Torvalds 1426c0e88ff0SPavel Emelyanov static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1427c0e88ff0SPavel Emelyanov { 1428c0e88ff0SPavel Emelyanov struct sk_buff *skb; 1429c0e88ff0SPavel Emelyanov int copied = 0, err = 0; 1430c0e88ff0SPavel Emelyanov 143175c119afSEric Dumazet skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 143275c119afSEric Dumazet err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 143375c119afSEric Dumazet if (err) 143475c119afSEric Dumazet return err; 143575c119afSEric Dumazet copied += skb->len; 143675c119afSEric Dumazet } 143775c119afSEric Dumazet 1438c0e88ff0SPavel Emelyanov skb_queue_walk(&sk->sk_write_queue, skb) { 143951f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1440c0e88ff0SPavel Emelyanov if (err) 1441c0e88ff0SPavel Emelyanov break; 1442c0e88ff0SPavel Emelyanov 1443c0e88ff0SPavel Emelyanov copied += skb->len; 1444c0e88ff0SPavel Emelyanov } 1445c0e88ff0SPavel Emelyanov 1446c0e88ff0SPavel Emelyanov return err ?: copied; 1447c0e88ff0SPavel Emelyanov } 1448c0e88ff0SPavel Emelyanov 14491da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user, 14501da177e4SLinus Torvalds * then send an ACK if necessary. COPIED is the number of bytes 14511da177e4SLinus Torvalds * tcp_recvmsg has given to the user so far; it speeds up the 14521da177e4SLinus Torvalds * calculation of whether or not we must ACK for the sake of 14531da177e4SLinus Torvalds * a window update. 14541da177e4SLinus Torvalds */ 1455e5c6de5fSJohn Fastabend void __tcp_cleanup_rbuf(struct sock *sk, int copied) 14561da177e4SLinus Torvalds { 14571da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1458a2a385d6SEric Dumazet bool time_to_ack = false; 14591da177e4SLinus Torvalds 1460463c84b9SArnaldo Carvalho de Melo if (inet_csk_ack_scheduled(sk)) { 1461463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1462b6b6d653SEric Dumazet 1463b6b6d653SEric Dumazet if (/* Once-per-two-segments ACK was not sent by tcp_input.c */ 1464463c84b9SArnaldo Carvalho de Melo tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 14651da177e4SLinus Torvalds /* 14661da177e4SLinus Torvalds * If this read emptied the read buffer, we send an ACK if the 14671da177e4SLinus Torvalds * connection is not bidirectional, the user drained the 14681da177e4SLinus Torvalds * receive buffer, and there was a small segment 14691da177e4SLinus Torvalds * in the queue. 14701da177e4SLinus Torvalds */ 14711ef9696cSAlexey Kuznetsov (copied > 0 && 14721ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 14731ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 147431954cd8SWei Wang !inet_csk_in_pingpong_mode(sk))) && 14751ef9696cSAlexey Kuznetsov !atomic_read(&sk->sk_rmem_alloc))) 1476a2a385d6SEric Dumazet time_to_ack = true; 14771da177e4SLinus Torvalds } 14781da177e4SLinus Torvalds 14791da177e4SLinus Torvalds /* We send an ACK if we can now advertise a non-zero window 14801da177e4SLinus Torvalds * which has been raised "significantly".
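 * ("Significantly" is made precise below: the newly selected window
 * must be at least twice the one currently advertised.)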
14811da177e4SLinus Torvalds * 14821da177e4SLinus Torvalds * Even if the window is raised up to infinity, do not send a window open ACK 14831da177e4SLinus Torvalds * in states where we will not receive more. It is useless. 14841da177e4SLinus Torvalds */ 14851da177e4SLinus Torvalds if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 14861da177e4SLinus Torvalds __u32 rcv_window_now = tcp_receive_window(tp); 14871da177e4SLinus Torvalds 14881da177e4SLinus Torvalds /* Optimize, __tcp_select_window() is not cheap. */ 14891da177e4SLinus Torvalds if (2*rcv_window_now <= tp->window_clamp) { 14901da177e4SLinus Torvalds __u32 new_window = __tcp_select_window(sk); 14911da177e4SLinus Torvalds 14921da177e4SLinus Torvalds /* Send ACK now if this read freed lots of space 14931da177e4SLinus Torvalds * in our buffer. Certainly, new_window is the new window. 14941da177e4SLinus Torvalds * We can advertise it now if it is not less than the current one. 14951da177e4SLinus Torvalds * "Lots" means "at least twice" here. 14961da177e4SLinus Torvalds */ 14971da177e4SLinus Torvalds if (new_window && new_window >= 2 * rcv_window_now) 1498a2a385d6SEric Dumazet time_to_ack = true; 14991da177e4SLinus Torvalds } 15001da177e4SLinus Torvalds } 15011da177e4SLinus Torvalds if (time_to_ack) 15021da177e4SLinus Torvalds tcp_send_ack(sk); 15031da177e4SLinus Torvalds } 15041da177e4SLinus Torvalds 1505c457985aSCong Wang void tcp_cleanup_rbuf(struct sock *sk, int copied) 1506c457985aSCong Wang { 1507c457985aSCong Wang struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1508c457985aSCong Wang struct tcp_sock *tp = tcp_sk(sk); 1509c457985aSCong Wang 1510c457985aSCong Wang WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1511c457985aSCong Wang "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1512c457985aSCong Wang tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1513c457985aSCong Wang __tcp_cleanup_rbuf(sk, copied); 1514c457985aSCong Wang } 1515c457985aSCong Wang 15163df684c1SEric Dumazet static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) 15173df684c1SEric Dumazet { 1518f35f8219SEric Dumazet __skb_unlink(skb, &sk->sk_receive_queue); 15193df684c1SEric Dumazet if (likely(skb->destructor == sock_rfree)) { 15203df684c1SEric Dumazet sock_rfree(skb); 15213df684c1SEric Dumazet skb->destructor = NULL; 15223df684c1SEric Dumazet skb->sk = NULL; 152368822bdfSEric Dumazet return skb_attempt_defer_free(skb); 1524f35f8219SEric Dumazet } 1525f35f8219SEric Dumazet __kfree_skb(skb); 15263df684c1SEric Dumazet } 15273df684c1SEric Dumazet 15283f92a64eSJakub Kicinski struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 15291da177e4SLinus Torvalds { 15301da177e4SLinus Torvalds struct sk_buff *skb; 15311da177e4SLinus Torvalds u32 offset; 15321da177e4SLinus Torvalds 1533f26845b4SEric Dumazet while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 15341da177e4SLinus Torvalds offset = seq - TCP_SKB_CB(skb)->seq; 15359d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 15369d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__); 15371da177e4SLinus Torvalds offset--; 15389d691539SEric Dumazet } 1539e11ecddfSEric Dumazet if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { 15401da177e4SLinus Torvalds *off = offset; 15411da177e4SLinus Torvalds return skb; 15421da177e4SLinus Torvalds } 1543f26845b4SEric Dumazet /* This looks weird, but this can happen if TCP collapsing 1544f26845b4SEric Dumazet * split a fat GRO packet while we released the
socket lock 1545f26845b4SEric Dumazet * in skb_splice_bits() 1546f26845b4SEric Dumazet */ 15473df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 15481da177e4SLinus Torvalds } 15491da177e4SLinus Torvalds return NULL; 15501da177e4SLinus Torvalds } 15513f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_recv_skb); 15521da177e4SLinus Torvalds 15531da177e4SLinus Torvalds /* 15541da177e4SLinus Torvalds * This routine provides an alternative to tcp_recvmsg() for routines 15551da177e4SLinus Torvalds * that would like to handle copying from skbuffs directly in 'sendfile' 15561da177e4SLinus Torvalds * fashion. 15571da177e4SLinus Torvalds * Note: 15581da177e4SLinus Torvalds * - It is assumed that the socket was locked by the caller. 15591da177e4SLinus Torvalds * - The routine does not block. 15601da177e4SLinus Torvalds * - At present, there is no support for reading OOB data 15611da177e4SLinus Torvalds * or for 'peeking' the socket using this routine 15621da177e4SLinus Torvalds * (although both would be easy to implement). 15631da177e4SLinus Torvalds */ 15641da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 15651da177e4SLinus Torvalds sk_read_actor_t recv_actor) 15661da177e4SLinus Torvalds { 15671da177e4SLinus Torvalds struct sk_buff *skb; 15681da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 15691da177e4SLinus Torvalds u32 seq = tp->copied_seq; 15701da177e4SLinus Torvalds u32 offset; 15711da177e4SLinus Torvalds int copied = 0; 15721da177e4SLinus Torvalds 15731da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) 15741da177e4SLinus Torvalds return -ENOTCONN; 15751da177e4SLinus Torvalds while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 15761da177e4SLinus Torvalds if (offset < skb->len) { 1577374e7b59SOctavian Purdila int used; 1578374e7b59SOctavian Purdila size_t len; 15791da177e4SLinus Torvalds 15801da177e4SLinus Torvalds len = skb->len - offset; 15811da177e4SLinus Torvalds /* Stop reading if we hit a patch of urgent data */ 1582b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) { 15831da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - seq; 15841da177e4SLinus Torvalds if (urg_offset < len) 15851da177e4SLinus Torvalds len = urg_offset; 15861da177e4SLinus Torvalds if (!len) 15871da177e4SLinus Torvalds break; 15881da177e4SLinus Torvalds } 15891da177e4SLinus Torvalds used = recv_actor(desc, skb, offset, len); 1590ff905b1eSEric Dumazet if (used <= 0) { 1591ddb61a57SJens Axboe if (!copied) 1592ddb61a57SJens Axboe copied = used; 1593ddb61a57SJens Axboe break; 1594e3d5ea2cSEric Dumazet } 1595e3d5ea2cSEric Dumazet if (WARN_ON_ONCE(used > len)) 1596e3d5ea2cSEric Dumazet used = len; 15971da177e4SLinus Torvalds seq += used; 15981da177e4SLinus Torvalds copied += used; 15991da177e4SLinus Torvalds offset += used; 1600e3d5ea2cSEric Dumazet 160102275a2eSWilly Tarreau /* If recv_actor drops the lock (e.g. TCP splice 1602293ad604SOctavian Purdila * receive) the skb pointer might be invalid when 1603293ad604SOctavian Purdila * getting here: tcp_collapse might have deleted it 1604293ad604SOctavian Purdila * while aggregating skbs from the socket queue. 1605293ad604SOctavian Purdila */ 1606293ad604SOctavian Purdila skb = tcp_recv_skb(sk, seq - 1, &offset); 160702275a2eSWilly Tarreau if (!skb) 16081da177e4SLinus Torvalds break; 160902275a2eSWilly Tarreau /* TCP coalescing might have appended data to the skb. 
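 * (we looked the skb up again at seq - 1, so offset + 1 == skb->len
 * means nothing was appended beyond what we already consumed).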
161002275a2eSWilly Tarreau * Try to splice more frags 161102275a2eSWilly Tarreau */ 161202275a2eSWilly Tarreau if (offset + 1 != skb->len) 161302275a2eSWilly Tarreau continue; 16141da177e4SLinus Torvalds } 1615e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 16163df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 16171da177e4SLinus Torvalds ++seq; 16181da177e4SLinus Torvalds break; 16191da177e4SLinus Torvalds } 16203df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 16211da177e4SLinus Torvalds if (!desc->count) 16221da177e4SLinus Torvalds break; 16237db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 16241da177e4SLinus Torvalds } 16257db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 16261da177e4SLinus Torvalds 16271da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 16281da177e4SLinus Torvalds 16291da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */ 1630f26845b4SEric Dumazet if (copied > 0) { 1631f26845b4SEric Dumazet tcp_recv_skb(sk, seq, &offset); 16320e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied); 1633f26845b4SEric Dumazet } 16341da177e4SLinus Torvalds return copied; 16351da177e4SLinus Torvalds } 16364bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_read_sock); 16371da177e4SLinus Torvalds 1638965b57b4SCong Wang int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 163904919bedSCong Wang { 164004919bedSCong Wang struct sk_buff *skb; 164104919bedSCong Wang int copied = 0; 164204919bedSCong Wang 164304919bedSCong Wang if (sk->sk_state == TCP_LISTEN) 164404919bedSCong Wang return -ENOTCONN; 164504919bedSCong Wang 16469b7177b1SJohn Fastabend while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 1647db4192a7SCong Wang u8 tcp_flags; 1648db4192a7SCong Wang int used; 164904919bedSCong Wang 165004919bedSCong Wang __skb_unlink(skb, &sk->sk_receive_queue); 165196628951SPeilin Ye WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); 1652db4192a7SCong Wang tcp_flags = TCP_SKB_CB(skb)->tcp_flags; 1653db4192a7SCong Wang used = recv_actor(sk, skb); 1654db4192a7SCong Wang if (used < 0) { 1655db4192a7SCong Wang if (!copied) 1656db4192a7SCong Wang copied = used; 1657db4192a7SCong Wang break; 1658db4192a7SCong Wang } 1659db4192a7SCong Wang copied += used; 1660db4192a7SCong Wang 16619b7177b1SJohn Fastabend if (tcp_flags & TCPHDR_FIN) 1662db4192a7SCong Wang break; 1663db4192a7SCong Wang } 166404919bedSCong Wang return copied; 166504919bedSCong Wang } 166604919bedSCong Wang EXPORT_SYMBOL(tcp_read_skb); 166704919bedSCong Wang 16683f92a64eSJakub Kicinski void tcp_read_done(struct sock *sk, size_t len) 16693f92a64eSJakub Kicinski { 16703f92a64eSJakub Kicinski struct tcp_sock *tp = tcp_sk(sk); 16713f92a64eSJakub Kicinski u32 seq = tp->copied_seq; 16723f92a64eSJakub Kicinski struct sk_buff *skb; 16733f92a64eSJakub Kicinski size_t left; 16743f92a64eSJakub Kicinski u32 offset; 16753f92a64eSJakub Kicinski 16763f92a64eSJakub Kicinski if (sk->sk_state == TCP_LISTEN) 16773f92a64eSJakub Kicinski return; 16783f92a64eSJakub Kicinski 16793f92a64eSJakub Kicinski left = len; 16803f92a64eSJakub Kicinski while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 16813f92a64eSJakub Kicinski int used; 16823f92a64eSJakub Kicinski 16833f92a64eSJakub Kicinski used = min_t(size_t, skb->len - offset, left); 16843f92a64eSJakub Kicinski seq += used; 16853f92a64eSJakub Kicinski left -= used; 16863f92a64eSJakub Kicinski 16873f92a64eSJakub Kicinski if (skb->len > offset + used) 16883f92a64eSJakub Kicinski break; 16893f92a64eSJakub Kicinski 16903f92a64eSJakub Kicinski if 
(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 16913f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb); 16923f92a64eSJakub Kicinski ++seq; 16933f92a64eSJakub Kicinski break; 16943f92a64eSJakub Kicinski } 16953f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb); 16963f92a64eSJakub Kicinski } 16973f92a64eSJakub Kicinski WRITE_ONCE(tp->copied_seq, seq); 16983f92a64eSJakub Kicinski 16993f92a64eSJakub Kicinski tcp_rcv_space_adjust(sk); 17003f92a64eSJakub Kicinski 17013f92a64eSJakub Kicinski /* Clean up data we have read: This will do ACK frames. */ 17023f92a64eSJakub Kicinski if (left != len) 17033f92a64eSJakub Kicinski tcp_cleanup_rbuf(sk, len - left); 17043f92a64eSJakub Kicinski } 17053f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_read_done); 17063f92a64eSJakub Kicinski 170732035585STom Herbert int tcp_peek_len(struct socket *sock) 170832035585STom Herbert { 170932035585STom Herbert return tcp_inq(sock->sk); 171032035585STom Herbert } 171132035585STom Herbert EXPORT_SYMBOL(tcp_peek_len); 171232035585STom Herbert 1713d1361840SEric Dumazet /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ 1714d1361840SEric Dumazet int tcp_set_rcvlowat(struct sock *sk, int val) 1715d1361840SEric Dumazet { 1716dfa2f048SEric Dumazet int space, cap; 1717867f816bSSoheil Hassas Yeganeh 1718867f816bSSoheil Hassas Yeganeh if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1719867f816bSSoheil Hassas Yeganeh cap = sk->sk_rcvbuf >> 1; 1720867f816bSSoheil Hassas Yeganeh else 172102739545SKuniyuki Iwashima cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; 1722867f816bSSoheil Hassas Yeganeh val = min(val, cap); 1723eac66402SEric Dumazet WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); 172403f45c88SEric Dumazet 172503f45c88SEric Dumazet /* Check if we need to signal EPOLLIN right now */ 172603f45c88SEric Dumazet tcp_data_ready(sk); 172703f45c88SEric Dumazet 1728d1361840SEric Dumazet if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1729d1361840SEric Dumazet return 0; 1730d1361840SEric Dumazet 1731dfa2f048SEric Dumazet space = tcp_space_from_win(sk, val); 1732dfa2f048SEric Dumazet if (space > sk->sk_rcvbuf) { 1733dfa2f048SEric Dumazet WRITE_ONCE(sk->sk_rcvbuf, space); 1734f410cbeaSEric Dumazet WRITE_ONCE(tcp_sk(sk)->window_clamp, val); 1735d1361840SEric Dumazet } 1736d1361840SEric Dumazet return 0; 1737d1361840SEric Dumazet } 1738d1361840SEric Dumazet EXPORT_SYMBOL(tcp_set_rcvlowat); 1739d1361840SEric Dumazet 1740892bfd3dSFlorian Westphal void tcp_update_recv_tstamps(struct sk_buff *skb, 17417eeba170SArjun Roy struct scm_timestamping_internal *tss) 17427eeba170SArjun Roy { 17437eeba170SArjun Roy if (skb->tstamp) 17447eeba170SArjun Roy tss->ts[0] = ktime_to_timespec64(skb->tstamp); 17457eeba170SArjun Roy else 17467eeba170SArjun Roy tss->ts[0] = (struct timespec64) {0}; 17477eeba170SArjun Roy 17487eeba170SArjun Roy if (skb_hwtstamps(skb)->hwtstamp) 17497eeba170SArjun Roy tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp); 17507eeba170SArjun Roy else 17517eeba170SArjun Roy tss->ts[2] = (struct timespec64) {0}; 17527eeba170SArjun Roy } 17537eeba170SArjun Roy 175405255b82SEric Dumazet #ifdef CONFIG_MMU 1755350f6bbcSMatthew Wilcox (Oracle) static const struct vm_operations_struct tcp_vm_ops = { 175605255b82SEric Dumazet }; 175705255b82SEric Dumazet 175893ab6cc6SEric Dumazet int tcp_mmap(struct file *file, struct socket *sock, 175993ab6cc6SEric Dumazet struct vm_area_struct *vma) 176093ab6cc6SEric Dumazet { 176105255b82SEric Dumazet if (vma->vm_flags & (VM_WRITE | VM_EXEC)) 176205255b82SEric Dumazet return -EPERM; 17631c71222eSSuren 
Baghdasaryan vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC); 176405255b82SEric Dumazet 17653e4e28c5SMichel Lespinasse /* Instruct vm_insert_page() to not mmap_read_lock(mm) */ 17661c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_MIXEDMAP); 176705255b82SEric Dumazet 176805255b82SEric Dumazet vma->vm_ops = &tcp_vm_ops; 176905255b82SEric Dumazet return 0; 177005255b82SEric Dumazet } 177105255b82SEric Dumazet EXPORT_SYMBOL(tcp_mmap); 177205255b82SEric Dumazet 17737fba5309SArjun Roy static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, 17747fba5309SArjun Roy u32 *offset_frag) 17757fba5309SArjun Roy { 17767fba5309SArjun Roy skb_frag_t *frag; 17777fba5309SArjun Roy 177870701b83SArjun Roy if (unlikely(offset_skb >= skb->len)) 177970701b83SArjun Roy return NULL; 178070701b83SArjun Roy 17817fba5309SArjun Roy offset_skb -= skb_headlen(skb); 17827fba5309SArjun Roy if ((int)offset_skb < 0 || skb_has_frag_list(skb)) 17837fba5309SArjun Roy return NULL; 17847fba5309SArjun Roy 17857fba5309SArjun Roy frag = skb_shinfo(skb)->frags; 17867fba5309SArjun Roy while (offset_skb) { 17877fba5309SArjun Roy if (skb_frag_size(frag) > offset_skb) { 17887fba5309SArjun Roy *offset_frag = offset_skb; 17897fba5309SArjun Roy return frag; 17907fba5309SArjun Roy } 17917fba5309SArjun Roy offset_skb -= skb_frag_size(frag); 17927fba5309SArjun Roy ++frag; 17937fba5309SArjun Roy } 17947fba5309SArjun Roy *offset_frag = 0; 17957fba5309SArjun Roy return frag; 17967fba5309SArjun Roy } 17977fba5309SArjun Roy 179898917cf0SArjun Roy static bool can_map_frag(const skb_frag_t *frag) 179998917cf0SArjun Roy { 1800577e4432SEric Dumazet struct page *page; 1801577e4432SEric Dumazet 1802577e4432SEric Dumazet if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag)) 1803577e4432SEric Dumazet return false; 1804577e4432SEric Dumazet 1805577e4432SEric Dumazet page = skb_frag_page(frag); 1806577e4432SEric Dumazet 1807577e4432SEric Dumazet if (PageCompound(page) || page->mapping) 1808577e4432SEric Dumazet return false; 1809577e4432SEric Dumazet 1810577e4432SEric Dumazet return true; 181198917cf0SArjun Roy } 181298917cf0SArjun Roy 181398917cf0SArjun Roy static int find_next_mappable_frag(const skb_frag_t *frag, 181498917cf0SArjun Roy int remaining_in_skb) 181598917cf0SArjun Roy { 181698917cf0SArjun Roy int offset = 0; 181798917cf0SArjun Roy 181898917cf0SArjun Roy if (likely(can_map_frag(frag))) 181998917cf0SArjun Roy return 0; 182098917cf0SArjun Roy 182198917cf0SArjun Roy while (offset < remaining_in_skb && !can_map_frag(frag)) { 182298917cf0SArjun Roy offset += skb_frag_size(frag); 182398917cf0SArjun Roy ++frag; 182498917cf0SArjun Roy } 182598917cf0SArjun Roy return offset; 182698917cf0SArjun Roy } 182798917cf0SArjun Roy 18280c3936d3SArjun Roy static void tcp_zerocopy_set_hint_for_skb(struct sock *sk, 18290c3936d3SArjun Roy struct tcp_zerocopy_receive *zc, 18300c3936d3SArjun Roy struct sk_buff *skb, u32 offset) 18310c3936d3SArjun Roy { 18320c3936d3SArjun Roy u32 frag_offset, partial_frag_remainder = 0; 18330c3936d3SArjun Roy int mappable_offset; 18340c3936d3SArjun Roy skb_frag_t *frag; 18350c3936d3SArjun Roy 18360c3936d3SArjun Roy /* worst case: skip to next skb. 
try to improve on this case below */ 18370c3936d3SArjun Roy zc->recv_skip_hint = skb->len - offset; 18380c3936d3SArjun Roy 18390c3936d3SArjun Roy /* Find the frag containing this offset (and how far into that frag) */ 18400c3936d3SArjun Roy frag = skb_advance_to_frag(skb, offset, &frag_offset); 18410c3936d3SArjun Roy if (!frag) 18420c3936d3SArjun Roy return; 18430c3936d3SArjun Roy 18440c3936d3SArjun Roy if (frag_offset) { 18450c3936d3SArjun Roy struct skb_shared_info *info = skb_shinfo(skb); 18460c3936d3SArjun Roy 18470c3936d3SArjun Roy /* We read part of the last frag, must recvmsg() rest of skb. */ 18480c3936d3SArjun Roy if (frag == &info->frags[info->nr_frags - 1]) 18490c3936d3SArjun Roy return; 18500c3936d3SArjun Roy 18510c3936d3SArjun Roy /* Else, we must at least read the remainder in this frag. */ 18520c3936d3SArjun Roy partial_frag_remainder = skb_frag_size(frag) - frag_offset; 18530c3936d3SArjun Roy zc->recv_skip_hint -= partial_frag_remainder; 18540c3936d3SArjun Roy ++frag; 18550c3936d3SArjun Roy } 18560c3936d3SArjun Roy 18570c3936d3SArjun Roy /* partial_frag_remainder: If part way through a frag, must read rest. 18580c3936d3SArjun Roy * mappable_offset: Bytes till next mappable frag, *not* counting bytes 18590c3936d3SArjun Roy * in partial_frag_remainder. 18600c3936d3SArjun Roy */ 18610c3936d3SArjun Roy mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint); 18620c3936d3SArjun Roy zc->recv_skip_hint = mappable_offset + partial_frag_remainder; 18630c3936d3SArjun Roy } 18640c3936d3SArjun Roy 1865f21a3c48SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 1866ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss, 1867f21a3c48SArjun Roy int *cmsg_flags); 1868f21a3c48SArjun Roy static int receive_fallback_to_copy(struct sock *sk, 18697eeba170SArjun Roy struct tcp_zerocopy_receive *zc, int inq, 18707eeba170SArjun Roy struct scm_timestamping_internal *tss) 1871f21a3c48SArjun Roy { 1872f21a3c48SArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address; 1873f21a3c48SArjun Roy struct msghdr msg = {}; 18747eeba170SArjun Roy int err; 1875f21a3c48SArjun Roy 1876f21a3c48SArjun Roy zc->length = 0; 1877f21a3c48SArjun Roy zc->recv_skip_hint = 0; 1878f21a3c48SArjun Roy 1879f21a3c48SArjun Roy if (copy_address != zc->copybuf_address) 1880f21a3c48SArjun Roy return -EINVAL; 1881f21a3c48SArjun Roy 18829fd7874cSJens Axboe err = import_ubuf(ITER_DEST, (void __user *)copy_address, inq, 18839fd7874cSJens Axboe &msg.msg_iter); 1884f21a3c48SArjun Roy if (err) 1885f21a3c48SArjun Roy return err; 1886f21a3c48SArjun Roy 1887ec095263SOliver Hartkopp err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT, 18887eeba170SArjun Roy tss, &zc->msg_flags); 1889f21a3c48SArjun Roy if (err < 0) 1890f21a3c48SArjun Roy return err; 1891f21a3c48SArjun Roy 1892f21a3c48SArjun Roy zc->copybuf_len = err; 18930c3936d3SArjun Roy if (likely(zc->copybuf_len)) { 18940c3936d3SArjun Roy struct sk_buff *skb; 18950c3936d3SArjun Roy u32 offset; 18960c3936d3SArjun Roy 18970c3936d3SArjun Roy skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset); 18980c3936d3SArjun Roy if (skb) 18990c3936d3SArjun Roy tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset); 19000c3936d3SArjun Roy } 1901f21a3c48SArjun Roy return 0; 1902f21a3c48SArjun Roy } 1903f21a3c48SArjun Roy 190418fb76edSArjun Roy static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc, 190518fb76edSArjun Roy struct sk_buff *skb, u32 copylen, 190618fb76edSArjun Roy u32 *offset, u32 *seq) 
190718fb76edSArjun Roy { 190818fb76edSArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address; 190918fb76edSArjun Roy struct msghdr msg = {}; 191018fb76edSArjun Roy int err; 191118fb76edSArjun Roy 191218fb76edSArjun Roy if (copy_address != zc->copybuf_address) 191318fb76edSArjun Roy return -EINVAL; 191418fb76edSArjun Roy 19159fd7874cSJens Axboe err = import_ubuf(ITER_DEST, (void __user *)copy_address, copylen, 19169fd7874cSJens Axboe &msg.msg_iter); 191718fb76edSArjun Roy if (err) 191818fb76edSArjun Roy return err; 191918fb76edSArjun Roy err = skb_copy_datagram_msg(skb, *offset, &msg, copylen); 192018fb76edSArjun Roy if (err) 192118fb76edSArjun Roy return err; 192218fb76edSArjun Roy zc->recv_skip_hint -= copylen; 192318fb76edSArjun Roy *offset += copylen; 192418fb76edSArjun Roy *seq += copylen; 192518fb76edSArjun Roy return (__s32)copylen; 192618fb76edSArjun Roy } 192718fb76edSArjun Roy 19287eeba170SArjun Roy static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc, 192918fb76edSArjun Roy struct sock *sk, 193018fb76edSArjun Roy struct sk_buff *skb, 193118fb76edSArjun Roy u32 *seq, 19327eeba170SArjun Roy s32 copybuf_len, 19337eeba170SArjun Roy struct scm_timestamping_internal *tss) 193418fb76edSArjun Roy { 193518fb76edSArjun Roy u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint); 193618fb76edSArjun Roy 193718fb76edSArjun Roy if (!copylen) 193818fb76edSArjun Roy return 0; 193918fb76edSArjun Roy /* skb is null if inq < PAGE_SIZE. */ 19407eeba170SArjun Roy if (skb) { 194118fb76edSArjun Roy offset = *seq - TCP_SKB_CB(skb)->seq; 19427eeba170SArjun Roy } else { 194318fb76edSArjun Roy skb = tcp_recv_skb(sk, *seq, &offset); 19447eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) { 19457eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss); 19467eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS; 19477eeba170SArjun Roy } 19487eeba170SArjun Roy } 194918fb76edSArjun Roy 195018fb76edSArjun Roy zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset, 195118fb76edSArjun Roy seq); 195218fb76edSArjun Roy return zc->copybuf_len < 0 ? 0 : copylen; 195318fb76edSArjun Roy } 195418fb76edSArjun Roy 195594ab9eb9SArjun Roy static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma, 195694ab9eb9SArjun Roy struct page **pending_pages, 195794ab9eb9SArjun Roy unsigned long pages_remaining, 195894ab9eb9SArjun Roy unsigned long *address, 195994ab9eb9SArjun Roy u32 *length, 196094ab9eb9SArjun Roy u32 *seq, 196194ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc, 196294ab9eb9SArjun Roy u32 total_bytes_to_map, 196394ab9eb9SArjun Roy int err) 196494ab9eb9SArjun Roy { 196594ab9eb9SArjun Roy /* At least one page did not map. Try zapping if we skipped earlier. */ 196694ab9eb9SArjun Roy if (err == -EBUSY && 196794ab9eb9SArjun Roy zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { 196894ab9eb9SArjun Roy u32 maybe_zap_len; 196994ab9eb9SArjun Roy 197094ab9eb9SArjun Roy maybe_zap_len = total_bytes_to_map - /* All bytes to map */ 197194ab9eb9SArjun Roy *length + /* Mapped or pending */ 197294ab9eb9SArjun Roy (pages_remaining * PAGE_SIZE); /* Failed map. */ 1973e9adcfecSMike Kravetz zap_page_range_single(vma, *address, maybe_zap_len, NULL); 197494ab9eb9SArjun Roy err = 0; 197594ab9eb9SArjun Roy } 197694ab9eb9SArjun Roy 197794ab9eb9SArjun Roy if (!err) { 197894ab9eb9SArjun Roy unsigned long leftover_pages = pages_remaining; 197994ab9eb9SArjun Roy int bytes_mapped; 198094ab9eb9SArjun Roy 1981e9adcfecSMike Kravetz /* We called zap_page_range_single, try to reinsert. 
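 * (vm_insert_pages() will not overwrite a PTE that is already present,
 * so the zap above is what gives this retry a chance to succeed).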
*/ 198294ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, 198394ab9eb9SArjun Roy pending_pages, 198494ab9eb9SArjun Roy &pages_remaining); 198594ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); 198694ab9eb9SArjun Roy *seq += bytes_mapped; 198794ab9eb9SArjun Roy *address += bytes_mapped; 198894ab9eb9SArjun Roy } 198994ab9eb9SArjun Roy if (err) { 199094ab9eb9SArjun Roy /* Either we were unable to zap, OR we zapped, retried an 199194ab9eb9SArjun Roy * insert, and still had an issue. Either way, pages_remaining 199294ab9eb9SArjun Roy * is the number of pages we were unable to map, and we unroll 199394ab9eb9SArjun Roy * some state we speculatively touched before. 199494ab9eb9SArjun Roy */ 199594ab9eb9SArjun Roy const int bytes_not_mapped = PAGE_SIZE * pages_remaining; 199694ab9eb9SArjun Roy 199794ab9eb9SArjun Roy *length -= bytes_not_mapped; 199894ab9eb9SArjun Roy zc->recv_skip_hint += bytes_not_mapped; 199994ab9eb9SArjun Roy } 200094ab9eb9SArjun Roy return err; 200194ab9eb9SArjun Roy } 200294ab9eb9SArjun Roy 20033763a24cSArjun Roy static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, 20043763a24cSArjun Roy struct page **pages, 200594ab9eb9SArjun Roy unsigned int pages_to_map, 200694ab9eb9SArjun Roy unsigned long *address, 200794ab9eb9SArjun Roy u32 *length, 20083763a24cSArjun Roy u32 *seq, 200994ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc, 201094ab9eb9SArjun Roy u32 total_bytes_to_map) 20113763a24cSArjun Roy { 20123763a24cSArjun Roy unsigned long pages_remaining = pages_to_map; 201394ab9eb9SArjun Roy unsigned int pages_mapped; 201494ab9eb9SArjun Roy unsigned int bytes_mapped; 201594ab9eb9SArjun Roy int err; 20163763a24cSArjun Roy 201794ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, pages, &pages_remaining); 201894ab9eb9SArjun Roy pages_mapped = pages_to_map - (unsigned int)pages_remaining; 201994ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * pages_mapped; 20203763a24cSArjun Roy /* Even if vm_insert_pages fails, it may have partially succeeded in 20213763a24cSArjun Roy * mapping (some but not all of the pages). 20223763a24cSArjun Roy */ 20233763a24cSArjun Roy *seq += bytes_mapped; 202494ab9eb9SArjun Roy *address += bytes_mapped; 202594ab9eb9SArjun Roy 202694ab9eb9SArjun Roy if (likely(!err)) 202794ab9eb9SArjun Roy return 0; 202894ab9eb9SArjun Roy 202994ab9eb9SArjun Roy /* Error: maybe zap and retry + rollback state for failed inserts. */ 203094ab9eb9SArjun Roy return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped, 203194ab9eb9SArjun Roy pages_remaining, address, length, seq, zc, total_bytes_to_map, 203294ab9eb9SArjun Roy err); 20333763a24cSArjun Roy } 20343763a24cSArjun Roy 20353c5a2fd0SArjun Roy #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS) 20367eeba170SArjun Roy static void tcp_zc_finalize_rx_tstamp(struct sock *sk, 20377eeba170SArjun Roy struct tcp_zerocopy_receive *zc, 20387eeba170SArjun Roy struct scm_timestamping_internal *tss) 20397eeba170SArjun Roy { 20407eeba170SArjun Roy unsigned long msg_control_addr; 20417eeba170SArjun Roy struct msghdr cmsg_dummy; 20427eeba170SArjun Roy 20437eeba170SArjun Roy msg_control_addr = (unsigned long)zc->msg_control; 2044c39ef213SKevin Brodsky cmsg_dummy.msg_control_user = (void __user *)msg_control_addr; 20457eeba170SArjun Roy cmsg_dummy.msg_controllen = 20467eeba170SArjun Roy (__kernel_size_t)zc->msg_controllen; 20477eeba170SArjun Roy cmsg_dummy.msg_flags = in_compat_syscall() ?
MSG_CMSG_COMPAT : 0; 2049a6f8ee58SArjun Roy cmsg_dummy.msg_control_is_user = true; 20507eeba170SArjun Roy zc->msg_flags = 0; 20517eeba170SArjun Roy if (zc->msg_control == msg_control_addr && 20527eeba170SArjun Roy zc->msg_controllen == cmsg_dummy.msg_controllen) { 20537eeba170SArjun Roy tcp_recv_timestamp(&cmsg_dummy, sk, tss); 20547eeba170SArjun Roy zc->msg_control = (__u64) 2055c39ef213SKevin Brodsky ((uintptr_t)cmsg_dummy.msg_control_user); 20567eeba170SArjun Roy zc->msg_controllen = 20577eeba170SArjun Roy (__u64)cmsg_dummy.msg_controllen; 20587eeba170SArjun Roy zc->msg_flags = (__u32)cmsg_dummy.msg_flags; 20597eeba170SArjun Roy } 20607eeba170SArjun Roy } 20617eeba170SArjun Roy 20627a7f0946SArjun Roy static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm, 20637a7f0946SArjun Roy unsigned long address, 20647a7f0946SArjun Roy bool *mmap_locked) 20657a7f0946SArjun Roy { 2066350f6bbcSMatthew Wilcox (Oracle) struct vm_area_struct *vma = lock_vma_under_rcu(mm, address); 20677a7f0946SArjun Roy 20687a7f0946SArjun Roy if (vma) { 2069350f6bbcSMatthew Wilcox (Oracle) if (vma->vm_ops != &tcp_vm_ops) { 20707a7f0946SArjun Roy vma_end_read(vma); 20717a7f0946SArjun Roy return NULL; 20727a7f0946SArjun Roy } 20737a7f0946SArjun Roy *mmap_locked = false; 20747a7f0946SArjun Roy return vma; 20757a7f0946SArjun Roy } 20767a7f0946SArjun Roy 20777a7f0946SArjun Roy mmap_read_lock(mm); 20787a7f0946SArjun Roy vma = vma_lookup(mm, address); 2079350f6bbcSMatthew Wilcox (Oracle) if (!vma || vma->vm_ops != &tcp_vm_ops) { 20807a7f0946SArjun Roy mmap_read_unlock(mm); 20817a7f0946SArjun Roy return NULL; 20827a7f0946SArjun Roy } 20837a7f0946SArjun Roy *mmap_locked = true; 20847a7f0946SArjun Roy return vma; 20857a7f0946SArjun Roy } 20867a7f0946SArjun Roy 208794ab9eb9SArjun Roy #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 208805255b82SEric Dumazet static int tcp_zerocopy_receive(struct sock *sk, 20897eeba170SArjun Roy struct tcp_zerocopy_receive *zc, 20907eeba170SArjun Roy struct scm_timestamping_internal *tss) 209105255b82SEric Dumazet { 209294ab9eb9SArjun Roy u32 length = 0, offset, vma_len, avail_len, copylen = 0; 209305255b82SEric Dumazet unsigned long address = (unsigned long)zc->address; 209494ab9eb9SArjun Roy struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE]; 209518fb76edSArjun Roy s32 copybuf_len = zc->copybuf_len; 209618fb76edSArjun Roy struct tcp_sock *tp = tcp_sk(sk); 209705255b82SEric Dumazet const skb_frag_t *frags = NULL; 209894ab9eb9SArjun Roy unsigned int pages_to_map = 0; 209905255b82SEric Dumazet struct vm_area_struct *vma; 210005255b82SEric Dumazet struct sk_buff *skb = NULL; 210118fb76edSArjun Roy u32 seq = tp->copied_seq; 210294ab9eb9SArjun Roy u32 total_bytes_to_map; 210318fb76edSArjun Roy int inq = tcp_inq(sk); 21047a7f0946SArjun Roy bool mmap_locked; 210593ab6cc6SEric Dumazet int ret; 210693ab6cc6SEric Dumazet 210718fb76edSArjun Roy zc->copybuf_len = 0; 21087eeba170SArjun Roy zc->msg_flags = 0; 210918fb76edSArjun Roy 211005255b82SEric Dumazet if (address & (PAGE_SIZE - 1) || address != zc->address) 211193ab6cc6SEric Dumazet return -EINVAL; 211293ab6cc6SEric Dumazet 211393ab6cc6SEric Dumazet if (sk->sk_state == TCP_LISTEN) 211405255b82SEric Dumazet return -ENOTCONN; 211593ab6cc6SEric Dumazet 211693ab6cc6SEric Dumazet sock_rps_record_flow(sk); 211793ab6cc6SEric Dumazet 2118f21a3c48SArjun Roy if (inq && inq <= copybuf_len) 21197eeba170SArjun Roy return receive_fallback_to_copy(sk, zc, inq, tss); 2120f21a3c48SArjun Roy 2121936ced41SArjun Roy if (inq < PAGE_SIZE) { 2122936ced41SArjun Roy zc->length = 0; 
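		/* Less than a page is queued: mapping is not worth the setup
		 * cost, so report the residue via recv_skip_hint and let the
		 * caller collect it with a regular recvmsg().
		 */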
2123936ced41SArjun Roy zc->recv_skip_hint = inq; 2124936ced41SArjun Roy if (!inq && sock_flag(sk, SOCK_DONE)) 2125936ced41SArjun Roy return -EIO; 2126936ced41SArjun Roy return 0; 2127936ced41SArjun Roy } 2128936ced41SArjun Roy 21297a7f0946SArjun Roy vma = find_tcp_vma(current->mm, address, &mmap_locked); 21307a7f0946SArjun Roy if (!vma) 2131e776af60SEric Dumazet return -EINVAL; 21327a7f0946SArjun Roy 213318fb76edSArjun Roy vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); 213418fb76edSArjun Roy avail_len = min_t(u32, vma_len, inq); 213594ab9eb9SArjun Roy total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); 213694ab9eb9SArjun Roy if (total_bytes_to_map) { 213794ab9eb9SArjun Roy if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) 2138e9adcfecSMike Kravetz zap_page_range_single(vma, address, total_bytes_to_map, 2139e9adcfecSMike Kravetz NULL); 214094ab9eb9SArjun Roy zc->length = total_bytes_to_map; 214105255b82SEric Dumazet zc->recv_skip_hint = 0; 21428f2b0293SSoheil Hassas Yeganeh } else { 214318fb76edSArjun Roy zc->length = avail_len; 214418fb76edSArjun Roy zc->recv_skip_hint = avail_len; 21458f2b0293SSoheil Hassas Yeganeh } 214605255b82SEric Dumazet ret = 0; 214705255b82SEric Dumazet while (length + PAGE_SIZE <= zc->length) { 214898917cf0SArjun Roy int mappable_offset; 214994ab9eb9SArjun Roy struct page *page; 215098917cf0SArjun Roy 215105255b82SEric Dumazet if (zc->recv_skip_hint < PAGE_SIZE) { 21527fba5309SArjun Roy u32 offset_frag; 21537fba5309SArjun Roy 215405255b82SEric Dumazet if (skb) { 21550e627190SArjun Roy if (zc->recv_skip_hint > 0) 21560e627190SArjun Roy break; 215705255b82SEric Dumazet skb = skb->next; 215805255b82SEric Dumazet offset = seq - TCP_SKB_CB(skb)->seq; 215905255b82SEric Dumazet } else { 216093ab6cc6SEric Dumazet skb = tcp_recv_skb(sk, seq, &offset); 216105255b82SEric Dumazet } 21627eeba170SArjun Roy 21637eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) { 21647eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss); 21657eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS; 21667eeba170SArjun Roy } 216705255b82SEric Dumazet zc->recv_skip_hint = skb->len - offset; 21687fba5309SArjun Roy frags = skb_advance_to_frag(skb, offset, &offset_frag); 21697fba5309SArjun Roy if (!frags || offset_frag) 217005255b82SEric Dumazet break; 217105255b82SEric Dumazet } 2172789762ceSSoheil Hassas Yeganeh 217398917cf0SArjun Roy mappable_offset = find_next_mappable_frag(frags, 217498917cf0SArjun Roy zc->recv_skip_hint); 217598917cf0SArjun Roy if (mappable_offset) { 217698917cf0SArjun Roy zc->recv_skip_hint = mappable_offset; 217705255b82SEric Dumazet break; 2178789762ceSSoheil Hassas Yeganeh } 217994ab9eb9SArjun Roy page = skb_frag_page(frags); 2180*9f6b619eSMina Almasry if (WARN_ON_ONCE(!page)) 2181*9f6b619eSMina Almasry break; 2182*9f6b619eSMina Almasry 218394ab9eb9SArjun Roy prefetchw(page); 218494ab9eb9SArjun Roy pages[pages_to_map++] = page; 218505255b82SEric Dumazet length += PAGE_SIZE; 218605255b82SEric Dumazet zc->recv_skip_hint -= PAGE_SIZE; 218705255b82SEric Dumazet frags++; 218894ab9eb9SArjun Roy if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE || 218994ab9eb9SArjun Roy zc->recv_skip_hint < PAGE_SIZE) { 219094ab9eb9SArjun Roy /* Either full batch, or we're about to go to next skb 219194ab9eb9SArjun Roy * (and we cannot unroll failed ops across skbs). 
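 * A full batch is TCP_ZEROCOPY_PAGE_BATCH_SIZE pages, i.e. 32 pages
 * (128KB with 4KB pages) per vm_insert_pages() call.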
219294ab9eb9SArjun Roy */ 219394ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, 219494ab9eb9SArjun Roy pages_to_map, 219594ab9eb9SArjun Roy &address, &length, 219694ab9eb9SArjun Roy &seq, zc, 219794ab9eb9SArjun Roy total_bytes_to_map); 21983763a24cSArjun Roy if (ret) 21993763a24cSArjun Roy goto out; 220094ab9eb9SArjun Roy pages_to_map = 0; 22013763a24cSArjun Roy } 22023763a24cSArjun Roy } 220394ab9eb9SArjun Roy if (pages_to_map) { 220494ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map, 220594ab9eb9SArjun Roy &address, &length, &seq, 220694ab9eb9SArjun Roy zc, total_bytes_to_map); 220793ab6cc6SEric Dumazet } 220805255b82SEric Dumazet out: 22097a7f0946SArjun Roy if (mmap_locked) 2210d8ed45c5SMichel Lespinasse mmap_read_unlock(current->mm); 22117a7f0946SArjun Roy else 22127a7f0946SArjun Roy vma_end_read(vma); 221318fb76edSArjun Roy /* Try to copy straggler data. */ 221418fb76edSArjun Roy if (!ret) 22157eeba170SArjun Roy copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); 221618fb76edSArjun Roy 221718fb76edSArjun Roy if (length + copylen) { 22187db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 221993ab6cc6SEric Dumazet tcp_rcv_space_adjust(sk); 222093ab6cc6SEric Dumazet 222193ab6cc6SEric Dumazet /* Clean up data we have read: This will do ACK frames. */ 222293ab6cc6SEric Dumazet tcp_recv_skb(sk, seq, &offset); 222318fb76edSArjun Roy tcp_cleanup_rbuf(sk, length + copylen); 222493ab6cc6SEric Dumazet ret = 0; 222505255b82SEric Dumazet if (length == zc->length) 222605255b82SEric Dumazet zc->recv_skip_hint = 0; 222705255b82SEric Dumazet } else { 222805255b82SEric Dumazet if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) 222905255b82SEric Dumazet ret = -EIO; 223005255b82SEric Dumazet } 223105255b82SEric Dumazet zc->length = length; 223293ab6cc6SEric Dumazet return ret; 223393ab6cc6SEric Dumazet } 223405255b82SEric Dumazet #endif 223593ab6cc6SEric Dumazet 223698aaa913SMike Maloney /* Similar to __sock_recv_timestamp, but does not require an skb */ 2237892bfd3dSFlorian Westphal void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, 22389718475eSDeepa Dinamani struct scm_timestamping_internal *tss) 223998aaa913SMike Maloney { 2240887feae3SDeepa Dinamani int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); 2241be8e9eb3SJason Xing u32 tsflags = READ_ONCE(sk->sk_tsflags); 224298aaa913SMike Maloney bool has_timestamping = false; 224398aaa913SMike Maloney 224498aaa913SMike Maloney if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { 224598aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMP)) { 224698aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { 2247887feae3SDeepa Dinamani if (new_tstamp) { 2248df1b4ba9SArnd Bergmann struct __kernel_timespec kts = { 2249df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2250df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec, 2251df1b4ba9SArnd Bergmann }; 2252887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW, 2253887feae3SDeepa Dinamani sizeof(kts), &kts); 2254887feae3SDeepa Dinamani } else { 2255df1b4ba9SArnd Bergmann struct __kernel_old_timespec ts_old = { 2256df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2257df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec, 2258df1b4ba9SArnd Bergmann }; 22597f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD, 22609718475eSDeepa Dinamani sizeof(ts_old), &ts_old); 2261887feae3SDeepa Dinamani } 226298aaa913SMike Maloney } else { 2263887feae3SDeepa Dinamani if (new_tstamp) { 2264df1b4ba9SArnd Bergmann struct 
__kernel_sock_timeval stv = { 2265df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2266df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000, 2267df1b4ba9SArnd Bergmann }; 2268887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, 2269887feae3SDeepa Dinamani sizeof(stv), &stv); 2270887feae3SDeepa Dinamani } else { 2271df1b4ba9SArnd Bergmann struct __kernel_old_timeval tv = { 2272df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2273df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000, 2274df1b4ba9SArnd Bergmann }; 22757f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, 227698aaa913SMike Maloney sizeof(tv), &tv); 227798aaa913SMike Maloney } 227898aaa913SMike Maloney } 2279887feae3SDeepa Dinamani } 228098aaa913SMike Maloney 2281be8e9eb3SJason Xing if (tsflags & SOF_TIMESTAMPING_SOFTWARE && 2282be8e9eb3SJason Xing (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE || 2283be8e9eb3SJason Xing !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER))) 228498aaa913SMike Maloney has_timestamping = true; 228598aaa913SMike Maloney else 22869718475eSDeepa Dinamani tss->ts[0] = (struct timespec64) {0}; 228798aaa913SMike Maloney } 228898aaa913SMike Maloney 228998aaa913SMike Maloney if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { 2290be8e9eb3SJason Xing if (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE && 2291be8e9eb3SJason Xing (tsflags & SOF_TIMESTAMPING_RX_HARDWARE || 2292be8e9eb3SJason Xing !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER))) 229398aaa913SMike Maloney has_timestamping = true; 229498aaa913SMike Maloney else 22959718475eSDeepa Dinamani tss->ts[2] = (struct timespec64) {0}; 229698aaa913SMike Maloney } 229798aaa913SMike Maloney 229898aaa913SMike Maloney if (has_timestamping) { 22999718475eSDeepa Dinamani tss->ts[1] = (struct timespec64) {0}; 23009718475eSDeepa Dinamani if (sock_flag(sk, SOCK_TSTAMP_NEW)) 23019718475eSDeepa Dinamani put_cmsg_scm_timestamping64(msg, tss); 23029718475eSDeepa Dinamani else 23039718475eSDeepa Dinamani put_cmsg_scm_timestamping(msg, tss); 230498aaa913SMike Maloney } 230598aaa913SMike Maloney } 230698aaa913SMike Maloney 2307b75eba76SSoheil Hassas Yeganeh static int tcp_inq_hint(struct sock *sk) 2308b75eba76SSoheil Hassas Yeganeh { 2309b75eba76SSoheil Hassas Yeganeh const struct tcp_sock *tp = tcp_sk(sk); 2310b75eba76SSoheil Hassas Yeganeh u32 copied_seq = READ_ONCE(tp->copied_seq); 2311b75eba76SSoheil Hassas Yeganeh u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); 2312b75eba76SSoheil Hassas Yeganeh int inq; 2313b75eba76SSoheil Hassas Yeganeh 2314b75eba76SSoheil Hassas Yeganeh inq = rcv_nxt - copied_seq; 2315b75eba76SSoheil Hassas Yeganeh if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { 2316b75eba76SSoheil Hassas Yeganeh lock_sock(sk); 2317b75eba76SSoheil Hassas Yeganeh inq = tp->rcv_nxt - tp->copied_seq; 2318b75eba76SSoheil Hassas Yeganeh release_sock(sk); 2319b75eba76SSoheil Hassas Yeganeh } 23206466e715SSoheil Hassas Yeganeh /* After receiving a FIN, tell the user-space to continue reading 23216466e715SSoheil Hassas Yeganeh * by returning a non-zero inq. 23226466e715SSoheil Hassas Yeganeh */ 23236466e715SSoheil Hassas Yeganeh if (inq == 0 && sock_flag(sk, SOCK_DONE)) 23246466e715SSoheil Hassas Yeganeh inq = 1; 2325b75eba76SSoheil Hassas Yeganeh return inq; 2326b75eba76SSoheil Hassas Yeganeh } 2327b75eba76SSoheil Hassas Yeganeh 23281da177e4SLinus Torvalds /* 23291da177e4SLinus Torvalds * This routine copies from a sock struct into the user buffer. 
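 * It also handles MSG_PEEK, urgent data, receive timestamps and the
 * TCP_INQ in-queue hint along the way.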
23301da177e4SLinus Torvalds * 23311da177e4SLinus Torvalds * Technical note: in 2.3 we work on _locked_ socket, so that 23321da177e4SLinus Torvalds * tricks with *seq access order and skb->users are not required. 23331da177e4SLinus Torvalds * Probably, code can be easily improved even more. 23341da177e4SLinus Torvalds */ 23351da177e4SLinus Torvalds 23362cd81161SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 2337ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss, 23382cd81161SArjun Roy int *cmsg_flags) 23391da177e4SLinus Torvalds { 23401da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 23411da177e4SLinus Torvalds int copied = 0; 23421da177e4SLinus Torvalds u32 peek_seq; 23431da177e4SLinus Torvalds u32 *seq; 23441da177e4SLinus Torvalds unsigned long used; 23452cd81161SArjun Roy int err; 23461da177e4SLinus Torvalds int target; /* Read at least this many bytes */ 23471da177e4SLinus Torvalds long timeo; 2348dfbafc99SSabrina Dubroca struct sk_buff *skb, *last; 234905ea4916SJon Maloy u32 peek_offset = 0; 235077527313SIlpo Järvinen u32 urg_hole = 0; 23511da177e4SLinus Torvalds 23521da177e4SLinus Torvalds err = -ENOTCONN; 23531da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) 23541da177e4SLinus Torvalds goto out; 23551da177e4SLinus Torvalds 2356f94fd25cSJens Axboe if (tp->recvmsg_inq) { 2357925bba24SArjun Roy *cmsg_flags = TCP_CMSG_INQ; 2358f94fd25cSJens Axboe msg->msg_get_inq = 1; 2359f94fd25cSJens Axboe } 2360ec095263SOliver Hartkopp timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 23611da177e4SLinus Torvalds 23621da177e4SLinus Torvalds /* Urgent data needs to be handled specially. */ 23631da177e4SLinus Torvalds if (flags & MSG_OOB) 23641da177e4SLinus Torvalds goto recv_urg; 23651da177e4SLinus Torvalds 2366c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) { 2367c0e88ff0SPavel Emelyanov err = -EPERM; 2368c0e88ff0SPavel Emelyanov if (!(flags & MSG_PEEK)) 2369c0e88ff0SPavel Emelyanov goto out; 2370c0e88ff0SPavel Emelyanov 2371c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE) 2372c0e88ff0SPavel Emelyanov goto recv_sndq; 2373c0e88ff0SPavel Emelyanov 2374c0e88ff0SPavel Emelyanov err = -EINVAL; 2375c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE) 2376c0e88ff0SPavel Emelyanov goto out; 2377c0e88ff0SPavel Emelyanov 2378c0e88ff0SPavel Emelyanov /* 'common' recv queue MSG_PEEK-ing */ 2379c0e88ff0SPavel Emelyanov } 2380c0e88ff0SPavel Emelyanov 23811da177e4SLinus Torvalds seq = &tp->copied_seq; 23821da177e4SLinus Torvalds if (flags & MSG_PEEK) { 238305ea4916SJon Maloy peek_offset = max(sk_peek_offset(sk, flags), 0); 238405ea4916SJon Maloy peek_seq = tp->copied_seq + peek_offset; 23851da177e4SLinus Torvalds seq = &peek_seq; 23861da177e4SLinus Torvalds } 23871da177e4SLinus Torvalds 23881da177e4SLinus Torvalds target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 23891da177e4SLinus Torvalds 23901da177e4SLinus Torvalds do { 23911da177e4SLinus Torvalds u32 offset; 23921da177e4SLinus Torvalds 23931da177e4SLinus Torvalds /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 2394b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { 23951da177e4SLinus Torvalds if (copied) 23961da177e4SLinus Torvalds break; 23971da177e4SLinus Torvalds if (signal_pending(current)) { 23981da177e4SLinus Torvalds copied = timeo ? 
sock_intr_errno(timeo) : -EAGAIN; 23991da177e4SLinus Torvalds break; 24001da177e4SLinus Torvalds } 24011da177e4SLinus Torvalds } 24021da177e4SLinus Torvalds 24031da177e4SLinus Torvalds /* Next get a buffer. */ 24041da177e4SLinus Torvalds 2405dfbafc99SSabrina Dubroca last = skb_peek_tail(&sk->sk_receive_queue); 240691521944SDavid S. Miller skb_queue_walk(&sk->sk_receive_queue, skb) { 2407dfbafc99SSabrina Dubroca last = skb; 24081da177e4SLinus Torvalds /* Now that we have two receive queues this 24091da177e4SLinus Torvalds * shouldn't happen. 24101da177e4SLinus Torvalds */ 2411d792c100SIlpo Järvinen if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 2412e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", 24132af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 2414d792c100SIlpo Järvinen flags)) 24151da177e4SLinus Torvalds break; 2416d792c100SIlpo Järvinen 24171da177e4SLinus Torvalds offset = *seq - TCP_SKB_CB(skb)->seq; 24189d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 24199d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__); 24201da177e4SLinus Torvalds offset--; 24219d691539SEric Dumazet } 24221da177e4SLinus Torvalds if (offset < skb->len) 24231da177e4SLinus Torvalds goto found_ok_skb; 2424e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 24251da177e4SLinus Torvalds goto found_fin_ok; 24262af6fd8bSJoe Perches WARN(!(flags & MSG_PEEK), 2427e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", 24282af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 242991521944SDavid S. Miller } 24301da177e4SLinus Torvalds 24311da177e4SLinus Torvalds /* Well, if we have backlog, try to process it now yet. */ 24321da177e4SLinus Torvalds 24339ed498c6SEric Dumazet if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) 24341da177e4SLinus Torvalds break; 24351da177e4SLinus Torvalds 24361da177e4SLinus Torvalds if (copied) { 24378bd172b7SEric Dumazet if (!timeo || 24388bd172b7SEric Dumazet sk->sk_err || 24391da177e4SLinus Torvalds sk->sk_state == TCP_CLOSE || 24401da177e4SLinus Torvalds (sk->sk_shutdown & RCV_SHUTDOWN) || 2441518a09efSDavid S. Miller signal_pending(current)) 24421da177e4SLinus Torvalds break; 24431da177e4SLinus Torvalds } else { 24441da177e4SLinus Torvalds if (sock_flag(sk, SOCK_DONE)) 24451da177e4SLinus Torvalds break; 24461da177e4SLinus Torvalds 24471da177e4SLinus Torvalds if (sk->sk_err) { 24481da177e4SLinus Torvalds copied = sock_error(sk); 24491da177e4SLinus Torvalds break; 24501da177e4SLinus Torvalds } 24511da177e4SLinus Torvalds 24521da177e4SLinus Torvalds if (sk->sk_shutdown & RCV_SHUTDOWN) 24531da177e4SLinus Torvalds break; 24541da177e4SLinus Torvalds 24551da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE) { 24561da177e4SLinus Torvalds /* This occurs when user tries to read 24571da177e4SLinus Torvalds * from never connected socket. 
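 * (e.g. read() on a socket() that was never connect()ed)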
24581da177e4SLinus Torvalds */ 24591da177e4SLinus Torvalds copied = -ENOTCONN; 24601da177e4SLinus Torvalds break; 24611da177e4SLinus Torvalds } 24621da177e4SLinus Torvalds 24631da177e4SLinus Torvalds if (!timeo) { 24641da177e4SLinus Torvalds copied = -EAGAIN; 24651da177e4SLinus Torvalds break; 24661da177e4SLinus Torvalds } 24671da177e4SLinus Torvalds 24681da177e4SLinus Torvalds if (signal_pending(current)) { 24691da177e4SLinus Torvalds copied = sock_intr_errno(timeo); 24701da177e4SLinus Torvalds break; 24711da177e4SLinus Torvalds } 24721da177e4SLinus Torvalds } 24731da177e4SLinus Torvalds 24741da177e4SLinus Torvalds if (copied >= target) { 24751da177e4SLinus Torvalds /* Do not sleep, just process backlog. */ 247693afcfd1SEric Dumazet __sk_flush_backlog(sk); 2477dfbafc99SSabrina Dubroca } else { 247829fbc26eSEric Dumazet tcp_cleanup_rbuf(sk, copied); 2479419ce133SPaolo Abeni err = sk_wait_data(sk, &timeo, last); 2480419ce133SPaolo Abeni if (err < 0) { 2481419ce133SPaolo Abeni err = copied ? : err; 2482419ce133SPaolo Abeni goto out; 2483419ce133SPaolo Abeni } 2484dfbafc99SSabrina Dubroca } 24851da177e4SLinus Torvalds 248677527313SIlpo Järvinen if ((flags & MSG_PEEK) && 248705ea4916SJon Maloy (peek_seq - peek_offset - copied - urg_hole != tp->copied_seq)) { 2488e87cc472SJoe Perches net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 2489e87cc472SJoe Perches current->comm, 2490e87cc472SJoe Perches task_pid_nr(current)); 249105ea4916SJon Maloy peek_seq = tp->copied_seq + peek_offset; 24921da177e4SLinus Torvalds } 24931da177e4SLinus Torvalds continue; 24941da177e4SLinus Torvalds 24951da177e4SLinus Torvalds found_ok_skb: 24961da177e4SLinus Torvalds /* Ok so how much can we use? */ 24971da177e4SLinus Torvalds used = skb->len - offset; 24981da177e4SLinus Torvalds if (len < used) 24991da177e4SLinus Torvalds used = len; 25001da177e4SLinus Torvalds 25011da177e4SLinus Torvalds /* Do we have urgent data here? */ 2502b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) { 25031da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - *seq; 25041da177e4SLinus Torvalds if (urg_offset < used) { 25051da177e4SLinus Torvalds if (!urg_offset) { 25061da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_URGINLINE)) { 25077db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1); 250877527313SIlpo Järvinen urg_hole++; 25091da177e4SLinus Torvalds offset++; 25101da177e4SLinus Torvalds used--; 25111da177e4SLinus Torvalds if (!used) 25121da177e4SLinus Torvalds goto skip_copy; 25131da177e4SLinus Torvalds } 25141da177e4SLinus Torvalds } else 25151da177e4SLinus Torvalds used = urg_offset; 25161da177e4SLinus Torvalds } 25171da177e4SLinus Torvalds } 25181da177e4SLinus Torvalds 25191da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) { 252051f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, offset, msg, used); 25211da177e4SLinus Torvalds if (err) { 25221da177e4SLinus Torvalds /* Exception. Bailout! 
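 * skb_copy_datagram_msg() faulted on user memory: report what was
 * already copied, or -EFAULT if nothing was.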
*/ 25231da177e4SLinus Torvalds if (!copied) 25241da177e4SLinus Torvalds copied = -EFAULT; 25251da177e4SLinus Torvalds break; 25261da177e4SLinus Torvalds } 25271da177e4SLinus Torvalds } 25281da177e4SLinus Torvalds 25297db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + used); 25301da177e4SLinus Torvalds copied += used; 25311da177e4SLinus Torvalds len -= used; 253205ea4916SJon Maloy if (flags & MSG_PEEK) 253305ea4916SJon Maloy sk_peek_offset_fwd(sk, used); 253405ea4916SJon Maloy else 253505ea4916SJon Maloy sk_peek_offset_bwd(sk, used); 25361da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 25371da177e4SLinus Torvalds 25381da177e4SLinus Torvalds skip_copy: 2539b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { 25407b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0); 254131770e34SFlorian Westphal tcp_fast_path_check(sk); 254231770e34SFlorian Westphal } 25431da177e4SLinus Torvalds 254498aaa913SMike Maloney if (TCP_SKB_CB(skb)->has_rxtstamp) { 25452cd81161SArjun Roy tcp_update_recv_tstamps(skb, tss); 2546925bba24SArjun Roy *cmsg_flags |= TCP_CMSG_TS; 254798aaa913SMike Maloney } 2548cc4de047SKelly Littlepage 2549cc4de047SKelly Littlepage if (used + offset < skb->len) 2550cc4de047SKelly Littlepage continue; 2551cc4de047SKelly Littlepage 2552e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 25531da177e4SLinus Torvalds goto found_fin_ok; 25547bced397SDan Williams if (!(flags & MSG_PEEK)) 25553df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 25561da177e4SLinus Torvalds continue; 25571da177e4SLinus Torvalds 25581da177e4SLinus Torvalds found_fin_ok: 25591da177e4SLinus Torvalds /* Process the FIN. */ 25607db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1); 25617bced397SDan Williams if (!(flags & MSG_PEEK)) 25623df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 25631da177e4SLinus Torvalds break; 25641da177e4SLinus Torvalds } while (len > 0); 25651da177e4SLinus Torvalds 25661da177e4SLinus Torvalds /* According to UNIX98, msg_name/msg_namelen are ignored 25671da177e4SLinus Torvalds * on connected socket. I was just happy when found this 8) --ANK 25681da177e4SLinus Torvalds */ 25691da177e4SLinus Torvalds 25701da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. 
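 * tcp_cleanup_rbuf() may emit the ACK / window update that the freed
 * receive-queue space now allows.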
*/ 25710e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied); 25721da177e4SLinus Torvalds return copied; 25731da177e4SLinus Torvalds 25741da177e4SLinus Torvalds out: 25751da177e4SLinus Torvalds return err; 25761da177e4SLinus Torvalds 25771da177e4SLinus Torvalds recv_urg: 2578377f0a08SRami Rosen err = tcp_recv_urg(sk, msg, len, flags); 25791da177e4SLinus Torvalds goto out; 2580c0e88ff0SPavel Emelyanov 2581c0e88ff0SPavel Emelyanov recv_sndq: 2582c0e88ff0SPavel Emelyanov err = tcp_peek_sndq(sk, msg, len); 2583c0e88ff0SPavel Emelyanov goto out; 25841da177e4SLinus Torvalds } 25852cd81161SArjun Roy 2586ec095263SOliver Hartkopp int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, 2587ec095263SOliver Hartkopp int *addr_len) 25882cd81161SArjun Roy { 2589f94fd25cSJens Axboe int cmsg_flags = 0, ret; 25902cd81161SArjun Roy struct scm_timestamping_internal tss; 25912cd81161SArjun Roy 25922cd81161SArjun Roy if (unlikely(flags & MSG_ERRQUEUE)) 25932cd81161SArjun Roy return inet_recv_error(sk, msg, len, addr_len); 25942cd81161SArjun Roy 25952cd81161SArjun Roy if (sk_can_busy_loop(sk) && 25962cd81161SArjun Roy skb_queue_empty_lockless(&sk->sk_receive_queue) && 25972cd81161SArjun Roy sk->sk_state == TCP_ESTABLISHED) 2598ec095263SOliver Hartkopp sk_busy_loop(sk, flags & MSG_DONTWAIT); 25992cd81161SArjun Roy 26002cd81161SArjun Roy lock_sock(sk); 2601ec095263SOliver Hartkopp ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags); 26022cd81161SArjun Roy release_sock(sk); 26032cd81161SArjun Roy 2604f94fd25cSJens Axboe if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) { 2605925bba24SArjun Roy if (cmsg_flags & TCP_CMSG_TS) 26062cd81161SArjun Roy tcp_recv_timestamp(msg, sk, &tss); 2607f94fd25cSJens Axboe if (msg->msg_get_inq) { 2608f94fd25cSJens Axboe msg->msg_inq = tcp_inq_hint(sk); 2609f94fd25cSJens Axboe if (cmsg_flags & TCP_CMSG_INQ) 2610f94fd25cSJens Axboe put_cmsg(msg, SOL_TCP, TCP_CM_INQ, 2611f94fd25cSJens Axboe sizeof(msg->msg_inq), &msg->msg_inq); 26122cd81161SArjun Roy } 26132cd81161SArjun Roy } 26142cd81161SArjun Roy return ret; 26152cd81161SArjun Roy } 26164bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_recvmsg); 26171da177e4SLinus Torvalds 2618490d5046SIlpo Järvinen void tcp_set_state(struct sock *sk, int state) 2619490d5046SIlpo Järvinen { 2620490d5046SIlpo Järvinen int oldstate = sk->sk_state; 2621490d5046SIlpo Järvinen 2622d4487491SLawrence Brakmo /* We defined a new enum for TCP states that are exported in BPF 2623d4487491SLawrence Brakmo * so as not force the internal TCP states to be frozen. The 2624d4487491SLawrence Brakmo * following checks will detect if an internal state value ever 2625d4487491SLawrence Brakmo * differs from the BPF value. If this ever happens, then we will 2626d4487491SLawrence Brakmo * need to remap the internal value to the BPF value before calling 2627d4487491SLawrence Brakmo * tcp_call_bpf_2arg. 
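 * The BUILD_BUG_ON() checks below turn any such divergence into a
 * compile-time error.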
2628d4487491SLawrence Brakmo */ 2629d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED); 2630d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT); 2631d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV); 2632d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1); 2633d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2); 2634d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT); 2635d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE); 2636d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT); 2637d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK); 2638d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN); 2639d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING); 2640d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV); 264191051f00SGuillaume Nault BUILD_BUG_ON((int)BPF_TCP_BOUND_INACTIVE != (int)TCP_BOUND_INACTIVE); 2642d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES); 2643d4487491SLawrence Brakmo 264497a19cafSYonghong Song /* bpf uapi header bpf.h defines an anonymous enum with values 264597a19cafSYonghong Song * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux 264697a19cafSYonghong Song * is able to emit this enum in DWARF due to the above BUILD_BUG_ON. 264797a19cafSYonghong Song * But clang built vmlinux does not have this enum in DWARF 264897a19cafSYonghong Song * since clang removes the above code before generating IR/debuginfo. 264997a19cafSYonghong Song * Let us explicitly emit the type debuginfo to ensure the 265097a19cafSYonghong Song * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF 265197a19cafSYonghong Song * regardless of which compiler is used. 
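 * (BTF_TYPE_EMIT_ENUM() is a no-op use of the value; its only effect
 * is to keep the enum type alive in the debug info.)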
265297a19cafSYonghong Song */ 265397a19cafSYonghong Song BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED); 265497a19cafSYonghong Song 2655d4487491SLawrence Brakmo if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) 2656d4487491SLawrence Brakmo tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); 2657e8fce239SSong Liu 2658490d5046SIlpo Järvinen switch (state) { 2659490d5046SIlpo Järvinen case TCP_ESTABLISHED: 2660490d5046SIlpo Järvinen if (oldstate != TCP_ESTABLISHED) 266181cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2662490d5046SIlpo Järvinen break; 2663a46d0ea5SJason Xing case TCP_CLOSE_WAIT: 2664a46d0ea5SJason Xing if (oldstate == TCP_SYN_RECV) 2665a46d0ea5SJason Xing TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2666a46d0ea5SJason Xing break; 2667490d5046SIlpo Järvinen 2668490d5046SIlpo Järvinen case TCP_CLOSE: 2669490d5046SIlpo Järvinen if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 267081cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 2671490d5046SIlpo Järvinen 2672490d5046SIlpo Järvinen sk->sk_prot->unhash(sk); 2673490d5046SIlpo Järvinen if (inet_csk(sk)->icsk_bind_hash && 2674490d5046SIlpo Järvinen !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 2675ab1e0a13SArnaldo Carvalho de Melo inet_put_port(sk); 2676a8eceea8SJoe Perches fallthrough; 2677490d5046SIlpo Järvinen default: 2678a46d0ea5SJason Xing if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT) 267974688e48SPavel Emelyanov TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2680490d5046SIlpo Järvinen } 2681490d5046SIlpo Järvinen 2682490d5046SIlpo Järvinen /* Change state AFTER socket is unhashed to avoid closed 2683490d5046SIlpo Järvinen * socket sitting in hash tables. 2684490d5046SIlpo Järvinen */ 2685563e0bb0SYafang Shao inet_sk_state_store(sk, state); 2686490d5046SIlpo Järvinen } 2687490d5046SIlpo Järvinen EXPORT_SYMBOL_GPL(tcp_set_state); 2688490d5046SIlpo Järvinen 26891da177e4SLinus Torvalds /* 26901da177e4SLinus Torvalds * State processing on a close. This implements the state shift for 26911da177e4SLinus Torvalds * sending our FIN frame. Note that we only send a FIN for some 26921da177e4SLinus Torvalds * states. A shutdown() may have already sent the FIN, or we may be 26931da177e4SLinus Torvalds * closed. 26941da177e4SLinus Torvalds */ 26951da177e4SLinus Torvalds 26969b5b5cffSArjan van de Ven static const unsigned char new_state[16] = { 26971da177e4SLinus Torvalds /* current state: new state: action: */ 26980980c1e3SEric Dumazet [0 /* (Invalid) */] = TCP_CLOSE, 26990980c1e3SEric Dumazet [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 27000980c1e3SEric Dumazet [TCP_SYN_SENT] = TCP_CLOSE, 27010980c1e3SEric Dumazet [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 27020980c1e3SEric Dumazet [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 27030980c1e3SEric Dumazet [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 27040980c1e3SEric Dumazet [TCP_TIME_WAIT] = TCP_CLOSE, 27050980c1e3SEric Dumazet [TCP_CLOSE] = TCP_CLOSE, 27060980c1e3SEric Dumazet [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 27070980c1e3SEric Dumazet [TCP_LAST_ACK] = TCP_LAST_ACK, 27080980c1e3SEric Dumazet [TCP_LISTEN] = TCP_CLOSE, 27090980c1e3SEric Dumazet [TCP_CLOSING] = TCP_CLOSING, 27100980c1e3SEric Dumazet [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! 
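 * TCP_NEW_SYN_RECV is only carried by request sockets, which never
 * reach tcp_close_state().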
*/ 27111da177e4SLinus Torvalds }; 27121da177e4SLinus Torvalds 27131da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk) 27141da177e4SLinus Torvalds { 27151da177e4SLinus Torvalds int next = (int)new_state[sk->sk_state]; 27161da177e4SLinus Torvalds int ns = next & TCP_STATE_MASK; 27171da177e4SLinus Torvalds 27181da177e4SLinus Torvalds tcp_set_state(sk, ns); 27191da177e4SLinus Torvalds 27201da177e4SLinus Torvalds return next & TCP_ACTION_FIN; 27211da177e4SLinus Torvalds } 27221da177e4SLinus Torvalds 27231da177e4SLinus Torvalds /* 27241da177e4SLinus Torvalds * Shutdown the sending side of a connection. Much like close except 27251f29b058SSatoru SATOH * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 27261da177e4SLinus Torvalds */ 27271da177e4SLinus Torvalds 27281da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how) 27291da177e4SLinus Torvalds { 27301da177e4SLinus Torvalds /* We need to grab some memory, and put together a FIN, 27311da177e4SLinus Torvalds * and then put it into the queue to be sent. 27321da177e4SLinus Torvalds * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 27331da177e4SLinus Torvalds */ 27341da177e4SLinus Torvalds if (!(how & SEND_SHUTDOWN)) 27351da177e4SLinus Torvalds return; 27361da177e4SLinus Torvalds 27371da177e4SLinus Torvalds /* If we've already sent a FIN, or it's a closed state, skip this. */ 27381da177e4SLinus Torvalds if ((1 << sk->sk_state) & 27391da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_SYN_SENT | 274094062790SEric Dumazet TCPF_CLOSE_WAIT)) { 27411da177e4SLinus Torvalds /* Clear out any half completed packets. FIN if needed. */ 27421da177e4SLinus Torvalds if (tcp_close_state(sk)) 27431da177e4SLinus Torvalds tcp_send_fin(sk); 27441da177e4SLinus Torvalds } 27451da177e4SLinus Torvalds } 27464bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_shutdown); 27471da177e4SLinus Torvalds 274819757cebSEric Dumazet int tcp_orphan_count_sum(void) 274919757cebSEric Dumazet { 275019757cebSEric Dumazet int i, total = 0; 275119757cebSEric Dumazet 275219757cebSEric Dumazet for_each_possible_cpu(i) 275319757cebSEric Dumazet total += per_cpu(tcp_orphan_count, i); 275419757cebSEric Dumazet 275519757cebSEric Dumazet return max(total, 0); 275619757cebSEric Dumazet } 275719757cebSEric Dumazet 275819757cebSEric Dumazet static int tcp_orphan_cache; 275919757cebSEric Dumazet static struct timer_list tcp_orphan_timer; 276019757cebSEric Dumazet #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) 276119757cebSEric Dumazet 276219757cebSEric Dumazet static void tcp_orphan_update(struct timer_list *unused) 276319757cebSEric Dumazet { 276419757cebSEric Dumazet WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); 276519757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 276619757cebSEric Dumazet } 276719757cebSEric Dumazet 276819757cebSEric Dumazet static bool tcp_too_many_orphans(int shift) 276919757cebSEric Dumazet { 277047e6ab24SKuniyuki Iwashima return READ_ONCE(tcp_orphan_cache) << shift > 277147e6ab24SKuniyuki Iwashima READ_ONCE(sysctl_tcp_max_orphans); 277219757cebSEric Dumazet } 277319757cebSEric Dumazet 2774dda4d96aSEric Dumazet static bool tcp_out_of_memory(const struct sock *sk) 2775dda4d96aSEric Dumazet { 2776dda4d96aSEric Dumazet if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 2777dda4d96aSEric Dumazet sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) 2778dda4d96aSEric Dumazet return true; 2779dda4d96aSEric Dumazet return false; 2780dda4d96aSEric Dumazet } 2781dda4d96aSEric Dumazet 2782dda4d96aSEric 
Dumazet bool tcp_check_oom(const struct sock *sk, int shift) 2783efcdbf24SArun Sharma { 2784efcdbf24SArun Sharma bool too_many_orphans, out_of_socket_memory; 2785efcdbf24SArun Sharma 278619757cebSEric Dumazet too_many_orphans = tcp_too_many_orphans(shift); 2787efcdbf24SArun Sharma out_of_socket_memory = tcp_out_of_memory(sk); 2788efcdbf24SArun Sharma 2789e87cc472SJoe Perches if (too_many_orphans) 2790e87cc472SJoe Perches net_info_ratelimited("too many orphaned sockets\n"); 2791e87cc472SJoe Perches if (out_of_socket_memory) 2792e87cc472SJoe Perches net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); 2793efcdbf24SArun Sharma return too_many_orphans || out_of_socket_memory; 2794efcdbf24SArun Sharma } 2795efcdbf24SArun Sharma 279677c3c956SPaolo Abeni void __tcp_close(struct sock *sk, long timeout) 27971da177e4SLinus Torvalds { 27981da177e4SLinus Torvalds struct sk_buff *skb; 27991da177e4SLinus Torvalds int data_was_unread = 0; 280075c2d907SHerbert Xu int state; 28011da177e4SLinus Torvalds 2802e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 28031da177e4SLinus Torvalds 28041da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) { 28051da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 28061da177e4SLinus Torvalds 28071da177e4SLinus Torvalds /* Special case. */ 28080a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk); 28091da177e4SLinus Torvalds 28101da177e4SLinus Torvalds goto adjudge_to_death; 28111da177e4SLinus Torvalds } 28121da177e4SLinus Torvalds 28131da177e4SLinus Torvalds /* We need to flush the recv. buffs. We do this only on the 28141da177e4SLinus Torvalds * descriptor close, not protocol-sourced closes, because the 28151da177e4SLinus Torvalds * reader process may not have drained the data yet! 28161da177e4SLinus Torvalds */ 28171da177e4SLinus Torvalds while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 2818e11ecddfSEric Dumazet u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; 2819e11ecddfSEric Dumazet 2820e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2821e11ecddfSEric Dumazet len--; 28221da177e4SLinus Torvalds data_was_unread += len; 28231da177e4SLinus Torvalds __kfree_skb(skb); 28241da177e4SLinus Torvalds } 28251da177e4SLinus Torvalds 2826565b7b2dSKonstantin Khorenko /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ 2827565b7b2dSKonstantin Khorenko if (sk->sk_state == TCP_CLOSE) 2828565b7b2dSKonstantin Khorenko goto adjudge_to_death; 2829565b7b2dSKonstantin Khorenko 283065bb723cSGerrit Renker /* As outlined in RFC 2525, section 2.17, we send a RST here because 283165bb723cSGerrit Renker * data was lost. To witness the awful effects of the old behavior of 283265bb723cSGerrit Renker * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 283365bb723cSGerrit Renker * GET in an FTP client, suspend the process, wait for the client to 283465bb723cSGerrit Renker * advertise a zero window, then kill -9 the FTP client, wheee... 283565bb723cSGerrit Renker * Note: timeout is always zero in such a case. 28361da177e4SLinus Torvalds */ 2837ee995283SPavel Emelyanov if (unlikely(tcp_sk(sk)->repair)) { 2838ee995283SPavel Emelyanov sk->sk_prot->disconnect(sk, 0); 2839ee995283SPavel Emelyanov } else if (data_was_unread) { 28401da177e4SLinus Torvalds /* Unread data was tossed, zap the connection. 
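 * (RST rather than FIN, per the RFC 2525 rationale above; accounted
 * in LINUX_MIB_TCPABORTONCLOSE)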
*/ 28416aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 28421da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 28435691276bSJason Xing tcp_send_active_reset(sk, sk->sk_allocation, 284490c36325SJason Xing SK_RST_REASON_TCP_ABORT_ON_CLOSE); 28451da177e4SLinus Torvalds } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 28461da177e4SLinus Torvalds /* Check zero linger _after_ checking for unread data. */ 28471da177e4SLinus Torvalds sk->sk_prot->disconnect(sk, 0); 28486aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 28491da177e4SLinus Torvalds } else if (tcp_close_state(sk)) { 28501da177e4SLinus Torvalds /* We FIN if the application ate all the data before 28511da177e4SLinus Torvalds * zapping the connection. 28521da177e4SLinus Torvalds */ 28531da177e4SLinus Torvalds 28541da177e4SLinus Torvalds /* RED-PEN. Formally speaking, we have broken TCP state 28551da177e4SLinus Torvalds * machine. State transitions: 28561da177e4SLinus Torvalds * 28571da177e4SLinus Torvalds * TCP_ESTABLISHED -> TCP_FIN_WAIT1 285894062790SEric Dumazet * TCP_SYN_RECV -> TCP_FIN_WAIT1 (it is difficult) 28591da177e4SLinus Torvalds * TCP_CLOSE_WAIT -> TCP_LAST_ACK 28601da177e4SLinus Torvalds * 28611da177e4SLinus Torvalds * are legal only when FIN has been sent (i.e. in window), 28621da177e4SLinus Torvalds * rather than queued out of window. Purists blame. 28631da177e4SLinus Torvalds * 28641da177e4SLinus Torvalds * F.e. "RFC state" is ESTABLISHED, 28651da177e4SLinus Torvalds * if Linux state is FIN-WAIT-1, but FIN is still not sent. 28661da177e4SLinus Torvalds * 28671da177e4SLinus Torvalds * The visible declinations are that sometimes 28681da177e4SLinus Torvalds * we enter time-wait state, when it is not required really 28691da177e4SLinus Torvalds * (harmless), do not send active resets, when they are 28701da177e4SLinus Torvalds * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 28711da177e4SLinus Torvalds * they look as CLOSING or LAST_ACK for Linux) 28721da177e4SLinus Torvalds * Probably, I missed some more holelets. 28731da177e4SLinus Torvalds * --ANK 28748336886fSJerry Chu * XXX (TFO) - To start off we don't support SYN+ACK+FIN 28758336886fSJerry Chu * in a single packet! (May consider it later but will 28768336886fSJerry Chu * probably need API support or TCP_CORK SYN-ACK until 28778336886fSJerry Chu * data is written and socket is closed.) 28781da177e4SLinus Torvalds */ 28791da177e4SLinus Torvalds tcp_send_fin(sk); 28801da177e4SLinus Torvalds } 28811da177e4SLinus Torvalds 28821da177e4SLinus Torvalds sk_stream_wait_close(sk, timeout); 28831da177e4SLinus Torvalds 28841da177e4SLinus Torvalds adjudge_to_death: 288575c2d907SHerbert Xu state = sk->sk_state; 288675c2d907SHerbert Xu sock_hold(sk); 288775c2d907SHerbert Xu sock_orphan(sk); 288875c2d907SHerbert Xu 28891da177e4SLinus Torvalds local_bh_disable(); 28901da177e4SLinus Torvalds bh_lock_sock(sk); 28918873c064SEric Dumazet /* remove backlog if any, without releasing ownership. */ 28928873c064SEric Dumazet __release_sock(sk); 28931da177e4SLinus Torvalds 289419757cebSEric Dumazet this_cpu_inc(tcp_orphan_count); 2895eb4dea58SHerbert Xu 289675c2d907SHerbert Xu /* Have we already been destroyed by a softirq or backlog? */ 289775c2d907SHerbert Xu if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 289875c2d907SHerbert Xu goto out; 28991da177e4SLinus Torvalds 29001da177e4SLinus Torvalds /* This is a (useful) BSD violating of the RFC. 
There is a 29011da177e4SLinus Torvalds * problem with TCP as specified in that the other end could 29021da177e4SLinus Torvalds * keep a socket open forever with no application left this end. 2903b10bd54cSJesper Juhl * We use a 1 minute timeout (about the same as BSD) then kill 29041da177e4SLinus Torvalds * our end. If they send after that then tough - BUT: long enough 29051da177e4SLinus Torvalds * that we won't make the old 4*rto = almost no time - whoops 29061da177e4SLinus Torvalds * reset mistake. 29071da177e4SLinus Torvalds * 29081da177e4SLinus Torvalds * Nope, it was not mistake. It is really desired behaviour 29091da177e4SLinus Torvalds * f.e. on http servers, when such sockets are useless, but 29101da177e4SLinus Torvalds * consume significant resources. Let's do it with special 29111da177e4SLinus Torvalds * linger2 option. --ANK 29121da177e4SLinus Torvalds */ 29131da177e4SLinus Torvalds 29141da177e4SLinus Torvalds if (sk->sk_state == TCP_FIN_WAIT2) { 29151da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2916a81722ddSEric Dumazet if (READ_ONCE(tp->linger2) < 0) { 29171da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 29185691276bSJason Xing tcp_send_active_reset(sk, GFP_ATOMIC, 2919edc92b48SJason Xing SK_RST_REASON_TCP_ABORT_ON_LINGER); 292002a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), 2921de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONLINGER); 29221da177e4SLinus Torvalds } else { 2923463c84b9SArnaldo Carvalho de Melo const int tmo = tcp_fin_time(sk); 29241da177e4SLinus Torvalds 29251da177e4SLinus Torvalds if (tmo > TCP_TIMEWAIT_LEN) { 292652499afeSDavid S. Miller inet_csk_reset_keepalive_timer(sk, 292752499afeSDavid S. Miller tmo - TCP_TIMEWAIT_LEN); 29281da177e4SLinus Torvalds } else { 29291da177e4SLinus Torvalds tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 29301da177e4SLinus Torvalds goto out; 29311da177e4SLinus Torvalds } 29321da177e4SLinus Torvalds } 29331da177e4SLinus Torvalds } 29341da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 2935efcdbf24SArun Sharma if (tcp_check_oom(sk, 0)) { 29361da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 29375691276bSJason Xing tcp_send_active_reset(sk, GFP_ATOMIC, 29388407994fSJason Xing SK_RST_REASON_TCP_ABORT_ON_MEMORY); 293902a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), 2940de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONMEMORY); 29414ee806d5SDan Streetman } else if (!check_net(sock_net(sk))) { 29424ee806d5SDan Streetman /* Not possible to send reset; just close */ 29434ee806d5SDan Streetman tcp_set_state(sk, TCP_CLOSE); 29441da177e4SLinus Torvalds } 29451da177e4SLinus Torvalds } 29461da177e4SLinus Torvalds 29478336886fSJerry Chu if (sk->sk_state == TCP_CLOSE) { 2948d983ea6fSEric Dumazet struct request_sock *req; 2949d983ea6fSEric Dumazet 2950d983ea6fSEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 2951d983ea6fSEric Dumazet lockdep_sock_is_held(sk)); 29528336886fSJerry Chu /* We could get here with a non-NULL req if the socket is 29538336886fSJerry Chu * aborted (e.g., closed with unread data) before 3WHS 29548336886fSJerry Chu * finishes. 29558336886fSJerry Chu */ 295600db4124SIan Morris if (req) 29578336886fSJerry Chu reqsk_fastopen_remove(sk, req, false); 29580a5578cfSArnaldo Carvalho de Melo inet_csk_destroy_sock(sk); 29598336886fSJerry Chu } 29601da177e4SLinus Torvalds /* Otherwise, socket is reprieved until protocol close. 
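 * Its timers (e.g. the FIN-WAIT-2 keepalive timer armed above) will
 * eventually destroy the orphan if the peer never completes the close.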
*/ 29611da177e4SLinus Torvalds 29621da177e4SLinus Torvalds out: 29631da177e4SLinus Torvalds bh_unlock_sock(sk); 29641da177e4SLinus Torvalds local_bh_enable(); 296577c3c956SPaolo Abeni } 296677c3c956SPaolo Abeni 296777c3c956SPaolo Abeni void tcp_close(struct sock *sk, long timeout) 296877c3c956SPaolo Abeni { 296977c3c956SPaolo Abeni lock_sock(sk); 297077c3c956SPaolo Abeni __tcp_close(sk, timeout); 29718873c064SEric Dumazet release_sock(sk); 2972151c9c72SEric Dumazet if (!sk->sk_net_refcnt) 2973151c9c72SEric Dumazet inet_csk_clear_xmit_timers_sync(sk); 29741da177e4SLinus Torvalds sock_put(sk); 29751da177e4SLinus Torvalds } 29764bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_close); 29771da177e4SLinus Torvalds 29781da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */ 29791da177e4SLinus Torvalds 2980a2a385d6SEric Dumazet static inline bool tcp_need_reset(int state) 29811da177e4SLinus Torvalds { 29821da177e4SLinus Torvalds return (1 << state) & 29831da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 2984a7150e38SEric Dumazet TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 29851da177e4SLinus Torvalds } 29861da177e4SLinus Torvalds 298775c119afSEric Dumazet static void tcp_rtx_queue_purge(struct sock *sk) 298875c119afSEric Dumazet { 298975c119afSEric Dumazet struct rb_node *p = rb_first(&sk->tcp_rtx_queue); 299075c119afSEric Dumazet 29912bec445fSEric Dumazet tcp_sk(sk)->highest_sack = NULL; 299275c119afSEric Dumazet while (p) { 299375c119afSEric Dumazet struct sk_buff *skb = rb_to_skb(p); 299475c119afSEric Dumazet 299575c119afSEric Dumazet p = rb_next(p); 299675c119afSEric Dumazet /* Since we are deleting whole queue, no need to 299775c119afSEric Dumazet * list_del(&skb->tcp_tsorted_anchor) 299875c119afSEric Dumazet */ 299975c119afSEric Dumazet tcp_rtx_queue_unlink(skb, sk); 300003271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 300175c119afSEric Dumazet } 300275c119afSEric Dumazet } 300375c119afSEric Dumazet 3004ac3f09baSEric Dumazet void tcp_write_queue_purge(struct sock *sk) 3005ac3f09baSEric Dumazet { 3006ac3f09baSEric Dumazet struct sk_buff *skb; 3007ac3f09baSEric Dumazet 3008ac3f09baSEric Dumazet tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 3009ac3f09baSEric Dumazet while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 3010ac3f09baSEric Dumazet tcp_skb_tsorted_anchor_cleanup(skb); 301103271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 3012ac3f09baSEric Dumazet } 301375c119afSEric Dumazet tcp_rtx_queue_purge(sk); 3014ac3f09baSEric Dumazet INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 3015ac3f09baSEric Dumazet tcp_clear_all_retrans_hints(tcp_sk(sk)); 3016bffd168cSSoheil Hassas Yeganeh tcp_sk(sk)->packets_out = 0; 301704c03114SEric Dumazet inet_csk(sk)->icsk_backoff = 0; 3018ac3f09baSEric Dumazet } 3019ac3f09baSEric Dumazet 30201da177e4SLinus Torvalds int tcp_disconnect(struct sock *sk, int flags) 30211da177e4SLinus Torvalds { 30221da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk); 3023463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 30241da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 30251da177e4SLinus Torvalds int old_state = sk->sk_state; 30260f317464SEric Dumazet u32 seq; 30271da177e4SLinus Torvalds 30281da177e4SLinus Torvalds if (old_state != TCP_CLOSE) 30291da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 30301da177e4SLinus Torvalds 30311da177e4SLinus Torvalds /* ABORT function of RFC793 */ 30321da177e4SLinus Torvalds if (old_state == TCP_LISTEN) { 30330a5578cfSArnaldo Carvalho de Melo 
inet_csk_listen_stop(sk); 3034ee995283SPavel Emelyanov } else if (unlikely(tp->repair)) { 3035e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNABORTED); 3036edefba66SJason Xing } else if (tcp_need_reset(old_state)) { 3037edefba66SJason Xing tcp_send_active_reset(sk, gfp_any(), SK_RST_REASON_TCP_STATE); 3038edefba66SJason Xing WRITE_ONCE(sk->sk_err, ECONNRESET); 3039edefba66SJason Xing } else if (tp->snd_nxt != tp->write_seq && 3040edefba66SJason Xing (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) { 3041caa20d9aSStephen Hemminger /* The last check adjusts for discrepancy of Linux wrt. RFC 30421da177e4SLinus Torvalds * states 30431da177e4SLinus Torvalds */ 3044c026c656SJason Xing tcp_send_active_reset(sk, gfp_any(), 3045c026c656SJason Xing SK_RST_REASON_TCP_DISCONNECT_WITH_DATA); 3046e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET); 3047a7150e38SEric Dumazet } else if (old_state == TCP_SYN_SENT) 3048e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET); 30491da177e4SLinus Torvalds 30501da177e4SLinus Torvalds tcp_clear_xmit_timers(sk); 30511da177e4SLinus Torvalds __skb_queue_purge(&sk->sk_receive_queue); 30527db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 30537b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0); 305405ea4916SJon Maloy sk_set_peek_off(sk, -1); 3055fe067e8aSDavid S. Miller tcp_write_queue_purge(sk); 3056cf1ef3f0SWei Wang tcp_fastopen_active_disable_ofo_check(sk); 30579f5afeaeSYaogong Wang skb_rbtree_purge(&tp->out_of_order_queue); 30581da177e4SLinus Torvalds 3059c720c7e8SEric Dumazet inet->inet_dport = 0; 30601da177e4SLinus Torvalds 3061e0833d1fSKuniyuki Iwashima inet_bhash2_reset_saddr(sk); 30621da177e4SLinus Torvalds 3063e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, 0); 30641da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE); 3065740b0f18SEric Dumazet tp->srtt_us = 0; 3066b9e2e689SEric Dumazet tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 30673f6c65d6SWei Wang tp->rcv_rtt_last_tsecr = 0; 30680f317464SEric Dumazet 30690f317464SEric Dumazet seq = tp->write_seq + tp->max_window + 2; 30700f317464SEric Dumazet if (!seq) 30710f317464SEric Dumazet seq = 1; 30720f317464SEric Dumazet WRITE_ONCE(tp->write_seq, seq); 30730f317464SEric Dumazet 3074463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 30756687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 30769d9b1ee0SEnke Chen icsk->icsk_probes_tstamp = 0; 30776a408147SEric Dumazet icsk->icsk_rto = TCP_TIMEOUT_INIT; 3078ca584ba0SMartin KaFai Lau icsk->icsk_rto_min = TCP_RTO_MIN; 30792b8ee4f0SMartin KaFai Lau icsk->icsk_delack_max = TCP_DELACK_MAX; 30800b6a05c1SIlpo Järvinen tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 308140570375SEric Dumazet tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 30821da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 3083f4ce91ceSNeal Cardwell tp->is_cwnd_limited = 0; 3084f4ce91ceSNeal Cardwell tp->max_packets_out = 0; 30851fdf475aSEric Dumazet tp->window_clamp = 0; 30862fbdd562SEric Dumazet tp->delivered = 0; 3087e21db6f6SYuchung Cheng tp->delivered_ce = 0; 3088ce69e563SChristoph Paasch if (icsk->icsk_ca_ops->release) 3089ce69e563SChristoph Paasch icsk->icsk_ca_ops->release(sk); 3090ce69e563SChristoph Paasch memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 30918919a9b3SNeal Cardwell icsk->icsk_ca_initialized = 0; 30926687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 3093d4761754SYousuk Seung tp->is_sack_reneg = 0; 30941da177e4SLinus Torvalds tcp_clear_retrans(tp); 3095c13c48c0SEric Dumazet tp->total_retrans = 0; 3096463c84b9SArnaldo Carvalho de 
Melo inet_csk_delack_init(sk); 3097499350a5SWei Wang /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 3098499350a5SWei Wang * issue in __tcp_select_window() 3099499350a5SWei Wang */ 3100499350a5SWei Wang icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; 3101b40b4f79SSrinivas Aji memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 31021da177e4SLinus Torvalds __sk_dst_reset(sk); 3103b4cb4a13SEric Dumazet dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL))); 310417c3060bSEric Dumazet tcp_saved_syn_free(tp); 31055d9f4262SEric Dumazet tp->compressed_ack = 0; 3106784f8344SEric Dumazet tp->segs_in = 0; 3107784f8344SEric Dumazet tp->segs_out = 0; 3108ba113c3aSWei Wang tp->bytes_sent = 0; 3109e858faf5SChristoph Paasch tp->bytes_acked = 0; 3110e858faf5SChristoph Paasch tp->bytes_received = 0; 3111fb31c9b9SWei Wang tp->bytes_retrans = 0; 3112db7ffee6SEric Dumazet tp->data_segs_in = 0; 3113db7ffee6SEric Dumazet tp->data_segs_out = 0; 31147788174eSYuchung Cheng tp->duplicate_sack[0].start_seq = 0; 31157788174eSYuchung Cheng tp->duplicate_sack[0].end_seq = 0; 31167e10b655SWei Wang tp->dsack_dups = 0; 31177ec65372SWei Wang tp->reord_seen = 0; 31185c701549SEric Dumazet tp->retrans_out = 0; 31195c701549SEric Dumazet tp->sacked_out = 0; 31205c701549SEric Dumazet tp->tlp_high_seq = 0; 31215c701549SEric Dumazet tp->last_oow_ack_time = 0; 312229c1c446SMubashir Adnan Qureshi tp->plb_rehash = 0; 31236cda8b74SEric Dumazet /* There's a bubble in the pipe until at least the first ACK. */ 31246cda8b74SEric Dumazet tp->app_limited = ~0U; 3125300b655dSDavid Morley tp->rate_app_limited = 1; 3126792c4354SEric Dumazet tp->rack.mstamp = 0; 3127792c4354SEric Dumazet tp->rack.advanced = 0; 3128792c4354SEric Dumazet tp->rack.reo_wnd_steps = 1; 3129792c4354SEric Dumazet tp->rack.last_delivered = 0; 3130792c4354SEric Dumazet tp->rack.reo_wnd_persist = 0; 3131792c4354SEric Dumazet tp->rack.dsack_seen = 0; 31326bcdc40dSEric Dumazet tp->syn_data_acked = 0; 31336bcdc40dSEric Dumazet tp->rx_opt.saw_tstamp = 0; 31346bcdc40dSEric Dumazet tp->rx_opt.dsack = 0; 31356bcdc40dSEric Dumazet tp->rx_opt.num_sacks = 0; 3136f9af2dbbSThomas Higdon tp->rcv_ooopack = 0; 31376cda8b74SEric Dumazet 31381da177e4SLinus Torvalds 31397db92362SWei Wang /* Clean up fastopen related fields */ 31407db92362SWei Wang tcp_free_fastopen_req(tp); 314108e39c0dSEric Dumazet inet_clear_bit(DEFER_CONNECT, sk); 314248027478SJason Baron tp->fastopen_client_fail = 0; 31437db92362SWei Wang 3144c720c7e8SEric Dumazet WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 31451da177e4SLinus Torvalds 31469b42d55aSLi RongQing if (sk->sk_frag.page) { 31479b42d55aSLi RongQing put_page(sk->sk_frag.page); 31489b42d55aSLi RongQing sk->sk_frag.page = NULL; 31499b42d55aSLi RongQing sk->sk_frag.offset = 0; 31509b42d55aSLi RongQing } 3151e3ae2365SAlexander Aring sk_error_report(sk); 3152a01512b1SYueHaibing return 0; 31531da177e4SLinus Torvalds } 31544bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_disconnect); 31551da177e4SLinus Torvalds 3156a2a385d6SEric Dumazet static inline bool tcp_can_repair_sock(const struct sock *sk) 3157ee995283SPavel Emelyanov { 3158cb388e7eSMartin KaFai Lau return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 3159319b0534SAndrey Vagin (sk->sk_state != TCP_LISTEN); 3160ee995283SPavel Emelyanov } 3161ee995283SPavel Emelyanov 3162d38d2b00SChristoph Hellwig static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) 3163b1ed4c4fSAndrey Vagin { 3164b1ed4c4fSAndrey Vagin struct tcp_repair_window opt; 3165b1ed4c4fSAndrey Vagin 3166b1ed4c4fSAndrey 
Vagin if (!tp->repair) 3167b1ed4c4fSAndrey Vagin return -EPERM; 3168b1ed4c4fSAndrey Vagin 3169b1ed4c4fSAndrey Vagin if (len != sizeof(opt)) 3170b1ed4c4fSAndrey Vagin return -EINVAL; 3171b1ed4c4fSAndrey Vagin 3172d38d2b00SChristoph Hellwig if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) 3173b1ed4c4fSAndrey Vagin return -EFAULT; 3174b1ed4c4fSAndrey Vagin 3175b1ed4c4fSAndrey Vagin if (opt.max_window < opt.snd_wnd) 3176b1ed4c4fSAndrey Vagin return -EINVAL; 3177b1ed4c4fSAndrey Vagin 3178b1ed4c4fSAndrey Vagin if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) 3179b1ed4c4fSAndrey Vagin return -EINVAL; 3180b1ed4c4fSAndrey Vagin 3181b1ed4c4fSAndrey Vagin if (after(opt.rcv_wup, tp->rcv_nxt)) 3182b1ed4c4fSAndrey Vagin return -EINVAL; 3183b1ed4c4fSAndrey Vagin 3184b1ed4c4fSAndrey Vagin tp->snd_wl1 = opt.snd_wl1; 3185b1ed4c4fSAndrey Vagin tp->snd_wnd = opt.snd_wnd; 3186b1ed4c4fSAndrey Vagin tp->max_window = opt.max_window; 3187b1ed4c4fSAndrey Vagin 3188b1ed4c4fSAndrey Vagin tp->rcv_wnd = opt.rcv_wnd; 3189b1ed4c4fSAndrey Vagin tp->rcv_wup = opt.rcv_wup; 3190b1ed4c4fSAndrey Vagin 3191b1ed4c4fSAndrey Vagin return 0; 3192b1ed4c4fSAndrey Vagin } 3193b1ed4c4fSAndrey Vagin 3194d38d2b00SChristoph Hellwig static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, 3195d38d2b00SChristoph Hellwig unsigned int len) 3196b139ba4eSPavel Emelyanov { 319715e56515SDouglas Caetano dos Santos struct tcp_sock *tp = tcp_sk(sk); 3198de248a75SPavel Emelyanov struct tcp_repair_opt opt; 3199d3c48151SChristoph Hellwig size_t offset = 0; 3200b139ba4eSPavel Emelyanov 3201de248a75SPavel Emelyanov while (len >= sizeof(opt)) { 3202d3c48151SChristoph Hellwig if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) 3203b139ba4eSPavel Emelyanov return -EFAULT; 3204b139ba4eSPavel Emelyanov 3205d3c48151SChristoph Hellwig offset += sizeof(opt); 3206de248a75SPavel Emelyanov len -= sizeof(opt); 3207b139ba4eSPavel Emelyanov 3208de248a75SPavel Emelyanov switch (opt.opt_code) { 3209de248a75SPavel Emelyanov case TCPOPT_MSS: 3210de248a75SPavel Emelyanov tp->rx_opt.mss_clamp = opt.opt_val; 321115e56515SDouglas Caetano dos Santos tcp_mtup_init(sk); 3212b139ba4eSPavel Emelyanov break; 3213de248a75SPavel Emelyanov case TCPOPT_WINDOW: 3214bc26ccd8SAndrey Vagin { 3215bc26ccd8SAndrey Vagin u16 snd_wscale = opt.opt_val & 0xFFFF; 3216bc26ccd8SAndrey Vagin u16 rcv_wscale = opt.opt_val >> 16; 3217bc26ccd8SAndrey Vagin 3218589c49cbSGao Feng if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) 3219b139ba4eSPavel Emelyanov return -EFBIG; 3220b139ba4eSPavel Emelyanov 3221bc26ccd8SAndrey Vagin tp->rx_opt.snd_wscale = snd_wscale; 3222bc26ccd8SAndrey Vagin tp->rx_opt.rcv_wscale = rcv_wscale; 3223bc26ccd8SAndrey Vagin tp->rx_opt.wscale_ok = 1; 3224bc26ccd8SAndrey Vagin } 3225b139ba4eSPavel Emelyanov break; 3226b139ba4eSPavel Emelyanov case TCPOPT_SACK_PERM: 3227de248a75SPavel Emelyanov if (opt.opt_val != 0) 3228de248a75SPavel Emelyanov return -EINVAL; 3229de248a75SPavel Emelyanov 3230b139ba4eSPavel Emelyanov tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 3231b139ba4eSPavel Emelyanov break; 3232b139ba4eSPavel Emelyanov case TCPOPT_TIMESTAMP: 3233de248a75SPavel Emelyanov if (opt.opt_val != 0) 3234de248a75SPavel Emelyanov return -EINVAL; 3235de248a75SPavel Emelyanov 3236b139ba4eSPavel Emelyanov tp->rx_opt.tstamp_ok = 1; 3237b139ba4eSPavel Emelyanov break; 3238b139ba4eSPavel Emelyanov } 3239b139ba4eSPavel Emelyanov } 3240b139ba4eSPavel Emelyanov 3241b139ba4eSPavel Emelyanov return 0; 3242b139ba4eSPavel Emelyanov } 3243b139ba4eSPavel Emelyanov 
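/*
 * Editor's sketch, not part of the kernel source: a minimal user-space
 * illustration of the checkpoint side of the TCP_REPAIR_WINDOW state
 * validated by tcp_repair_set_window() above. It assumes glibc plus
 * <linux/tcp.h> (which provides struct tcp_repair_window), a caller
 * with CAP_NET_ADMIN, and a connected socket fd; the helper name
 * checkpoint_tcp_window() is hypothetical.
 */
#include <errno.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <linux/tcp.h>		/* TCP_REPAIR, TCP_REPAIR_WINDOW */

static int checkpoint_tcp_window(int fd, struct tcp_repair_window *win)
{
	socklen_t len = sizeof(*win);
	int on = 1, off = 0, err = 0;

	/* Repair mode freezes the socket and unlocks the repair-only
	 * socket options; without it the kernel returns -EPERM, as in
	 * tcp_repair_set_window() above.
	 */
	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)))
		return -errno;

	if (getsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, win, &len))
		err = -errno;

	/* Leave repair mode; normal TCP processing resumes. */
	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
	return err;
}
/*
 * On restore, the same struct is written back with
 * setsockopt(TCP_REPAIR_WINDOW) while in repair mode; the sanity
 * checks it must pass (max_window >= snd_wnd, rcv_wup not ahead of
 * rcv_nxt, ...) are exactly the ones in tcp_repair_set_window().
 */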
3244a842fe14SEric Dumazet DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 3245a842fe14SEric Dumazet EXPORT_SYMBOL(tcp_tx_delay_enabled); 3246a842fe14SEric Dumazet 3247a842fe14SEric Dumazet static void tcp_enable_tx_delay(void) 3248a842fe14SEric Dumazet { 3249a842fe14SEric Dumazet if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { 3250a842fe14SEric Dumazet static int __tcp_tx_delay_enabled = 0; 3251a842fe14SEric Dumazet 3252a842fe14SEric Dumazet if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { 3253a842fe14SEric Dumazet static_branch_enable(&tcp_tx_delay_enabled); 3254a842fe14SEric Dumazet pr_info("TCP_TX_DELAY enabled\n"); 3255a842fe14SEric Dumazet } 3256a842fe14SEric Dumazet } 3257a842fe14SEric Dumazet } 3258a842fe14SEric Dumazet 3259db10538aSChristoph Hellwig /* When set indicates to always queue non-full frames. Later the user clears 3260db10538aSChristoph Hellwig * this option and we transmit any pending partial frames in the queue. This is 3261db10538aSChristoph Hellwig * meant to be used alongside sendfile() to get properly filled frames when the 3262db10538aSChristoph Hellwig * user (for example) must write out headers with a write() call first and then 3263db10538aSChristoph Hellwig * use sendfile to send out the data parts. 3264db10538aSChristoph Hellwig * 3265db10538aSChristoph Hellwig * TCP_CORK can be set together with TCP_NODELAY and it is stronger than 3266db10538aSChristoph Hellwig * TCP_NODELAY. 3267db10538aSChristoph Hellwig */ 32686fadaa56SMaxim Galaganov void __tcp_sock_set_cork(struct sock *sk, bool on) 3269db10538aSChristoph Hellwig { 3270db10538aSChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk); 3271db10538aSChristoph Hellwig 3272db10538aSChristoph Hellwig if (on) { 3273db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_CORK; 3274db10538aSChristoph Hellwig } else { 3275db10538aSChristoph Hellwig tp->nonagle &= ~TCP_NAGLE_CORK; 3276db10538aSChristoph Hellwig if (tp->nonagle & TCP_NAGLE_OFF) 3277db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_PUSH; 3278db10538aSChristoph Hellwig tcp_push_pending_frames(sk); 3279db10538aSChristoph Hellwig } 3280db10538aSChristoph Hellwig } 3281db10538aSChristoph Hellwig 3282db10538aSChristoph Hellwig void tcp_sock_set_cork(struct sock *sk, bool on) 3283db10538aSChristoph Hellwig { 3284db10538aSChristoph Hellwig lock_sock(sk); 3285db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, on); 3286db10538aSChristoph Hellwig release_sock(sk); 3287db10538aSChristoph Hellwig } 3288db10538aSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_cork); 3289db10538aSChristoph Hellwig 329012abc5eeSChristoph Hellwig /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is 329112abc5eeSChristoph Hellwig * remembered, but it is not activated until cork is cleared. 329212abc5eeSChristoph Hellwig * 329312abc5eeSChristoph Hellwig * However, when TCP_NODELAY is set we make an explicit push, which overrides 329412abc5eeSChristoph Hellwig * even TCP_CORK for currently queued segments. 
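 * Illustrative user-space sequence (a sketch; fd is a hypothetical
 * corked, connected socket):
 *   setsockopt(fd, IPPROTO_TCP, TCP_CORK, &one, sizeof(one));
 *   write(fd, hdr, hdr_len);      // held back: cork queues partial frames
 *   setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 *                                 // explicit push: flushes despite the cork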
329512abc5eeSChristoph Hellwig */ 32966fadaa56SMaxim Galaganov void __tcp_sock_set_nodelay(struct sock *sk, bool on) 329712abc5eeSChristoph Hellwig { 329812abc5eeSChristoph Hellwig if (on) { 329912abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 330012abc5eeSChristoph Hellwig tcp_push_pending_frames(sk); 330112abc5eeSChristoph Hellwig } else { 330212abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; 330312abc5eeSChristoph Hellwig } 330412abc5eeSChristoph Hellwig } 330512abc5eeSChristoph Hellwig 330612abc5eeSChristoph Hellwig void tcp_sock_set_nodelay(struct sock *sk) 330712abc5eeSChristoph Hellwig { 330812abc5eeSChristoph Hellwig lock_sock(sk); 330912abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, true); 331012abc5eeSChristoph Hellwig release_sock(sk); 331112abc5eeSChristoph Hellwig } 331212abc5eeSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_nodelay); 331312abc5eeSChristoph Hellwig 3314ddd061b8SChristoph Hellwig static void __tcp_sock_set_quickack(struct sock *sk, int val) 3315ddd061b8SChristoph Hellwig { 3316ddd061b8SChristoph Hellwig if (!val) { 3317ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk); 3318ddd061b8SChristoph Hellwig return; 3319ddd061b8SChristoph Hellwig } 3320ddd061b8SChristoph Hellwig 3321ddd061b8SChristoph Hellwig inet_csk_exit_pingpong_mode(sk); 3322ddd061b8SChristoph Hellwig if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 3323ddd061b8SChristoph Hellwig inet_csk_ack_scheduled(sk)) { 3324ddd061b8SChristoph Hellwig inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; 3325ddd061b8SChristoph Hellwig tcp_cleanup_rbuf(sk, 1); 3326ddd061b8SChristoph Hellwig if (!(val & 1)) 3327ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk); 3328ddd061b8SChristoph Hellwig } 3329ddd061b8SChristoph Hellwig } 3330ddd061b8SChristoph Hellwig 3331ddd061b8SChristoph Hellwig void tcp_sock_set_quickack(struct sock *sk, int val) 3332ddd061b8SChristoph Hellwig { 3333ddd061b8SChristoph Hellwig lock_sock(sk); 3334ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val); 3335ddd061b8SChristoph Hellwig release_sock(sk); 3336ddd061b8SChristoph Hellwig } 3337ddd061b8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_quickack); 3338ddd061b8SChristoph Hellwig 3339557eadfcSChristoph Hellwig int tcp_sock_set_syncnt(struct sock *sk, int val) 3340557eadfcSChristoph Hellwig { 3341557eadfcSChristoph Hellwig if (val < 1 || val > MAX_TCP_SYNCNT) 3342557eadfcSChristoph Hellwig return -EINVAL; 3343557eadfcSChristoph Hellwig 33443a037f0fSEric Dumazet WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val); 3345557eadfcSChristoph Hellwig return 0; 3346557eadfcSChristoph Hellwig } 3347557eadfcSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_syncnt); 3348557eadfcSChristoph Hellwig 3349d58f2e15SEric Dumazet int tcp_sock_set_user_timeout(struct sock *sk, int val) 3350c488aeadSChristoph Hellwig { 3351d58f2e15SEric Dumazet /* Cap the max time in ms TCP will retry or probe the window 3352d58f2e15SEric Dumazet * before giving up and aborting (ETIMEDOUT) a connection. 
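 * A value of 0 keeps the kernel defaults (the usual retransmission
 * and probe limits decide when to give up).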
3353d58f2e15SEric Dumazet */ 3354d58f2e15SEric Dumazet if (val < 0) 3355d58f2e15SEric Dumazet return -EINVAL; 3356d58f2e15SEric Dumazet 335726023e91SEric Dumazet WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val); 3358d58f2e15SEric Dumazet return 0; 3359c488aeadSChristoph Hellwig } 3360c488aeadSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_user_timeout); 3361c488aeadSChristoph Hellwig 3362aad4a0a9SDmitry Yakunin int tcp_sock_set_keepidle_locked(struct sock *sk, int val) 336371c48eb8SChristoph Hellwig { 336471c48eb8SChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk); 336571c48eb8SChristoph Hellwig 336671c48eb8SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPIDLE) 336771c48eb8SChristoph Hellwig return -EINVAL; 336871c48eb8SChristoph Hellwig 33694164245cSEric Dumazet /* Paired with WRITE_ONCE() in keepalive_time_when() */ 33704164245cSEric Dumazet WRITE_ONCE(tp->keepalive_time, val * HZ); 337171c48eb8SChristoph Hellwig if (sock_flag(sk, SOCK_KEEPOPEN) && 337271c48eb8SChristoph Hellwig !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 337371c48eb8SChristoph Hellwig u32 elapsed = keepalive_time_elapsed(tp); 337471c48eb8SChristoph Hellwig 337571c48eb8SChristoph Hellwig if (tp->keepalive_time > elapsed) 337671c48eb8SChristoph Hellwig elapsed = tp->keepalive_time - elapsed; 337771c48eb8SChristoph Hellwig else 337871c48eb8SChristoph Hellwig elapsed = 0; 337971c48eb8SChristoph Hellwig inet_csk_reset_keepalive_timer(sk, elapsed); 338071c48eb8SChristoph Hellwig } 338171c48eb8SChristoph Hellwig 338271c48eb8SChristoph Hellwig return 0; 338371c48eb8SChristoph Hellwig } 338471c48eb8SChristoph Hellwig 338571c48eb8SChristoph Hellwig int tcp_sock_set_keepidle(struct sock *sk, int val) 338671c48eb8SChristoph Hellwig { 338771c48eb8SChristoph Hellwig int err; 338871c48eb8SChristoph Hellwig 338971c48eb8SChristoph Hellwig lock_sock(sk); 3390aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val); 339171c48eb8SChristoph Hellwig release_sock(sk); 339271c48eb8SChristoph Hellwig return err; 339371c48eb8SChristoph Hellwig } 339471c48eb8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepidle); 339571c48eb8SChristoph Hellwig 3396d41ecaacSChristoph Hellwig int tcp_sock_set_keepintvl(struct sock *sk, int val) 3397d41ecaacSChristoph Hellwig { 3398d41ecaacSChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPINTVL) 3399d41ecaacSChristoph Hellwig return -EINVAL; 3400d41ecaacSChristoph Hellwig 34015ecf9d4fSEric Dumazet WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ); 3402d41ecaacSChristoph Hellwig return 0; 3403d41ecaacSChristoph Hellwig } 3404d41ecaacSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepintvl); 3405d41ecaacSChristoph Hellwig 3406480aeb96SChristoph Hellwig int tcp_sock_set_keepcnt(struct sock *sk, int val) 3407480aeb96SChristoph Hellwig { 3408480aeb96SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPCNT) 3409480aeb96SChristoph Hellwig return -EINVAL; 3410480aeb96SChristoph Hellwig 34116e5e1de6SEric Dumazet /* Paired with READ_ONCE() in keepalive_probes() */ 34126e5e1de6SEric Dumazet WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val); 3413480aeb96SChristoph Hellwig return 0; 3414480aeb96SChristoph Hellwig } 3415480aeb96SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepcnt); 3416480aeb96SChristoph Hellwig 3417cb811109SPrankur gupta int tcp_set_window_clamp(struct sock *sk, int val) 3418cb811109SPrankur gupta { 3419cb811109SPrankur gupta struct tcp_sock *tp = tcp_sk(sk); 3420cb811109SPrankur gupta 3421cb811109SPrankur gupta if (!val) { 3422cb811109SPrankur gupta if (sk->sk_state != TCP_CLOSE) 
3423cb811109SPrankur gupta return -EINVAL; 3424f410cbeaSEric Dumazet WRITE_ONCE(tp->window_clamp, 0); 3425cb811109SPrankur gupta } else { 342658d3aadeSPaolo Abeni u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp; 342758d3aadeSPaolo Abeni u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 3428cb811109SPrankur gupta SOCK_MIN_RCVBUF / 2 : val; 342958d3aadeSPaolo Abeni 343058d3aadeSPaolo Abeni if (new_window_clamp == old_window_clamp) 343158d3aadeSPaolo Abeni return 0; 343258d3aadeSPaolo Abeni 3433f410cbeaSEric Dumazet WRITE_ONCE(tp->window_clamp, new_window_clamp); 343458d3aadeSPaolo Abeni if (new_window_clamp < old_window_clamp) { 343558d3aadeSPaolo Abeni /* need to apply the reserved mem provisioning only 343658d3aadeSPaolo Abeni * when shrinking the window clamp 343758d3aadeSPaolo Abeni */ 343858d3aadeSPaolo Abeni __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp); 343958d3aadeSPaolo Abeni 344058d3aadeSPaolo Abeni } else { 344158d3aadeSPaolo Abeni new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); 344258d3aadeSPaolo Abeni tp->rcv_ssthresh = max(new_rcv_ssthresh, 344358d3aadeSPaolo Abeni tp->rcv_ssthresh); 344458d3aadeSPaolo Abeni } 3445cb811109SPrankur gupta } 3446cb811109SPrankur gupta return 0; 3447cb811109SPrankur gupta } 3448cb811109SPrankur gupta 34491da177e4SLinus Torvalds /* 34501da177e4SLinus Torvalds * Socket option code for TCP. 34511da177e4SLinus Torvalds */ 34520c751f70SMartin KaFai Lau int do_tcp_setsockopt(struct sock *sk, int level, int optname, 3453d38d2b00SChristoph Hellwig sockptr_t optval, unsigned int optlen) 34541da177e4SLinus Torvalds { 34551da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 3456463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 34571e579caaSNikolay Borisov struct net *net = sock_net(sk); 34581da177e4SLinus Torvalds int val; 34591da177e4SLinus Torvalds int err = 0; 34601da177e4SLinus Torvalds 3461e56fb50fSWilliam Allen Simpson /* These are data/string values, all the others are ints */ 3462e56fb50fSWilliam Allen Simpson switch (optname) { 3463e56fb50fSWilliam Allen Simpson case TCP_CONGESTION: { 34645f8ef48dSStephen Hemminger char name[TCP_CA_NAME_MAX]; 34655f8ef48dSStephen Hemminger 34665f8ef48dSStephen Hemminger if (optlen < 1) 34675f8ef48dSStephen Hemminger return -EINVAL; 34685f8ef48dSStephen Hemminger 3469d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval, 34704fdb78d3SAndrew Morton min_t(long, TCP_CA_NAME_MAX-1, optlen)); 34715f8ef48dSStephen Hemminger if (val < 0) 34725f8ef48dSStephen Hemminger return -EFAULT; 34735f8ef48dSStephen Hemminger name[val] = 0; 34745f8ef48dSStephen Hemminger 3475cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 347684e5a0f2SMartin KaFai Lau err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), 3477cb388e7eSMartin KaFai Lau sockopt_ns_capable(sock_net(sk)->user_ns, 34788d650cdeSEric Dumazet CAP_NET_ADMIN)); 3479cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 34805f8ef48dSStephen Hemminger return err; 34815f8ef48dSStephen Hemminger } 3482734942ccSDave Watson case TCP_ULP: { 3483734942ccSDave Watson char name[TCP_ULP_NAME_MAX]; 3484734942ccSDave Watson 3485734942ccSDave Watson if (optlen < 1) 3486734942ccSDave Watson return -EINVAL; 3487734942ccSDave Watson 3488d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval, 3489734942ccSDave Watson min_t(long, TCP_ULP_NAME_MAX - 1, 3490734942ccSDave Watson optlen)); 3491734942ccSDave Watson if (val < 0) 3492734942ccSDave Watson return -EFAULT; 3493734942ccSDave Watson name[val] = 0; 
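/* Illustrative userspace sketch, not part of this file, of the
 * TCP_CONGESTION case above: select a congestion control algorithm by
 * name ("cubic" here is just an example; unprivileged callers are
 * limited to the algorithms the system permits).
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", strlen("cubic"));
 */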
3494734942ccSDave Watson 3495cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 3496734942ccSDave Watson err = tcp_set_ulp(sk, name); 3497cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 3498734942ccSDave Watson return err; 3499734942ccSDave Watson } 35001fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: { 35010f1ce023SJason Baron __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; 35020f1ce023SJason Baron __u8 *backup_key = NULL; 35031fba70e5SYuchung Cheng 35040f1ce023SJason Baron /* Allow a backup key as well to facilitate key rotation 35050f1ce023SJason Baron * First key is the active one. 35060f1ce023SJason Baron */ 35070f1ce023SJason Baron if (optlen != TCP_FASTOPEN_KEY_LENGTH && 35080f1ce023SJason Baron optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) 35091fba70e5SYuchung Cheng return -EINVAL; 35101fba70e5SYuchung Cheng 3511d38d2b00SChristoph Hellwig if (copy_from_sockptr(key, optval, optlen)) 35121fba70e5SYuchung Cheng return -EFAULT; 35131fba70e5SYuchung Cheng 35140f1ce023SJason Baron if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) 35150f1ce023SJason Baron backup_key = key + TCP_FASTOPEN_KEY_LENGTH; 35160f1ce023SJason Baron 3517438ac880SArd Biesheuvel return tcp_fastopen_reset_cipher(net, sk, key, backup_key); 35181fba70e5SYuchung Cheng } 3519e56fb50fSWilliam Allen Simpson default: 3520e56fb50fSWilliam Allen Simpson /* fallthru */ 3521e56fb50fSWilliam Allen Simpson break; 3522ccbd6a5aSJoe Perches } 35235f8ef48dSStephen Hemminger 35241da177e4SLinus Torvalds if (optlen < sizeof(int)) 35251da177e4SLinus Torvalds return -EINVAL; 35261da177e4SLinus Torvalds 3527d38d2b00SChristoph Hellwig if (copy_from_sockptr(&val, optval, sizeof(val))) 35281da177e4SLinus Torvalds return -EFAULT; 35291da177e4SLinus Torvalds 3530d44fd4a7SEric Dumazet /* Handle options that can be set without locking the socket. */ 3531d44fd4a7SEric Dumazet switch (optname) { 3532d44fd4a7SEric Dumazet case TCP_SYNCNT: 3533d44fd4a7SEric Dumazet return tcp_sock_set_syncnt(sk, val); 3534d58f2e15SEric Dumazet case TCP_USER_TIMEOUT: 3535d58f2e15SEric Dumazet return tcp_sock_set_user_timeout(sk, val); 35366fd70a6bSEric Dumazet case TCP_KEEPINTVL: 35376fd70a6bSEric Dumazet return tcp_sock_set_keepintvl(sk, val); 353884485080SEric Dumazet case TCP_KEEPCNT: 353984485080SEric Dumazet return tcp_sock_set_keepcnt(sk, val); 3540a81722ddSEric Dumazet case TCP_LINGER2: 3541a81722ddSEric Dumazet if (val < 0) 3542a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, -1); 3543a81722ddSEric Dumazet else if (val > TCP_FIN_TIMEOUT_MAX / HZ) 3544a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX); 3545a81722ddSEric Dumazet else 3546a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, val * HZ); 3547a81722ddSEric Dumazet return 0; 35486e97ba55SEric Dumazet case TCP_DEFER_ACCEPT: 35496e97ba55SEric Dumazet /* Translate value in seconds to number of retransmits */ 35506e97ba55SEric Dumazet WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept, 35516e97ba55SEric Dumazet secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 35526e97ba55SEric Dumazet TCP_RTO_MAX / HZ)); 35536e97ba55SEric Dumazet return 0; 3554d44fd4a7SEric Dumazet } 3555d44fd4a7SEric Dumazet 3556cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 35571da177e4SLinus Torvalds 35581da177e4SLinus Torvalds switch (optname) { 35591da177e4SLinus Torvalds case TCP_MAXSEG: 35601da177e4SLinus Torvalds /* Values greater than interface MTU won't take effect. 
However 35611da177e4SLinus Torvalds * at the point when this call is done we typically don't yet 3562a777f715SRohit Chavan * know which interface is going to be used 3563a777f715SRohit Chavan */ 3564cfc62d87SGao Feng if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { 35651da177e4SLinus Torvalds err = -EINVAL; 35661da177e4SLinus Torvalds break; 35671da177e4SLinus Torvalds } 35681da177e4SLinus Torvalds tp->rx_opt.user_mss = val; 35691da177e4SLinus Torvalds break; 35701da177e4SLinus Torvalds 35711da177e4SLinus Torvalds case TCP_NODELAY: 357212abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, val); 35731da177e4SLinus Torvalds break; 35741da177e4SLinus Torvalds 357536e31b0aSAndreas Petlund case TCP_THIN_LINEAR_TIMEOUTS: 357636e31b0aSAndreas Petlund if (val < 0 || val > 1) 357736e31b0aSAndreas Petlund err = -EINVAL; 357836e31b0aSAndreas Petlund else 357936e31b0aSAndreas Petlund tp->thin_lto = val; 358036e31b0aSAndreas Petlund break; 358136e31b0aSAndreas Petlund 35827e380175SAndreas Petlund case TCP_THIN_DUPACK: 35837e380175SAndreas Petlund if (val < 0 || val > 1) 35847e380175SAndreas Petlund err = -EINVAL; 35857e380175SAndreas Petlund break; 35867e380175SAndreas Petlund 3587ee995283SPavel Emelyanov case TCP_REPAIR: 3588ee995283SPavel Emelyanov if (!tcp_can_repair_sock(sk)) 3589ee995283SPavel Emelyanov err = -EPERM; 359031048d7aSStefan Baranoff else if (val == TCP_REPAIR_ON) { 3591ee995283SPavel Emelyanov tp->repair = 1; 3592ee995283SPavel Emelyanov sk->sk_reuse = SK_FORCE_REUSE; 3593ee995283SPavel Emelyanov tp->repair_queue = TCP_NO_QUEUE; 359431048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF) { 3595ee995283SPavel Emelyanov tp->repair = 0; 3596ee995283SPavel Emelyanov sk->sk_reuse = SK_NO_REUSE; 3597ee995283SPavel Emelyanov tcp_send_window_probe(sk); 359831048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF_NO_WP) { 359931048d7aSStefan Baranoff tp->repair = 0; 360031048d7aSStefan Baranoff sk->sk_reuse = SK_NO_REUSE; 3601ee995283SPavel Emelyanov } else 3602ee995283SPavel Emelyanov err = -EINVAL; 3603ee995283SPavel Emelyanov 3604ee995283SPavel Emelyanov break; 3605ee995283SPavel Emelyanov 3606ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE: 3607ee995283SPavel Emelyanov if (!tp->repair) 3608ee995283SPavel Emelyanov err = -EPERM; 3609bf2acc94SEric Dumazet else if ((unsigned int)val < TCP_QUEUES_NR) 3610ee995283SPavel Emelyanov tp->repair_queue = val; 3611ee995283SPavel Emelyanov else 3612ee995283SPavel Emelyanov err = -EINVAL; 3613ee995283SPavel Emelyanov break; 3614ee995283SPavel Emelyanov 3615ee995283SPavel Emelyanov case TCP_QUEUE_SEQ: 36168811f4a9SEric Dumazet if (sk->sk_state != TCP_CLOSE) { 3617ee995283SPavel Emelyanov err = -EPERM; 36188811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_SEND_QUEUE) { 36198811f4a9SEric Dumazet if (!tcp_rtx_queue_empty(sk)) 36208811f4a9SEric Dumazet err = -EPERM; 36218811f4a9SEric Dumazet else 36220f317464SEric Dumazet WRITE_ONCE(tp->write_seq, val); 36238811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_RECV_QUEUE) { 36248811f4a9SEric Dumazet if (tp->rcv_nxt != tp->copied_seq) { 36258811f4a9SEric Dumazet err = -EPERM; 36268811f4a9SEric Dumazet } else { 3627dba7d9b8SEric Dumazet WRITE_ONCE(tp->rcv_nxt, val); 36286cd6cbf5SEric Dumazet WRITE_ONCE(tp->copied_seq, val); 36296cd6cbf5SEric Dumazet } 36308811f4a9SEric Dumazet } else { 3631ee995283SPavel Emelyanov err = -EINVAL; 36328811f4a9SEric Dumazet } 3633ee995283SPavel Emelyanov break; 3634ee995283SPavel Emelyanov 3635b139ba4eSPavel Emelyanov case TCP_REPAIR_OPTIONS: 
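/* Illustrative sketch, not part of this file, of a checkpoint/restore
 * tool driving the TCP_REPAIR* cases above; this needs CAP_NET_ADMIN,
 * a socket in the closed state, and saved_seq stands in for a sequence
 * number captured at checkpoint time.
 *
 *	int on = TCP_REPAIR_ON, q = TCP_SEND_QUEUE;
 *	uint32_t saved_seq = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
 *	setsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ, &saved_seq, sizeof(saved_seq));
 */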
3636b139ba4eSPavel Emelyanov if (!tp->repair) 3637b139ba4eSPavel Emelyanov err = -EINVAL; 36380c175da7SLu Wei else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) 3639d38d2b00SChristoph Hellwig err = tcp_repair_options_est(sk, optval, optlen); 3640b139ba4eSPavel Emelyanov else 3641b139ba4eSPavel Emelyanov err = -EPERM; 3642b139ba4eSPavel Emelyanov break; 3643b139ba4eSPavel Emelyanov 36441da177e4SLinus Torvalds case TCP_CORK: 3645db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, val); 36461da177e4SLinus Torvalds break; 36471da177e4SLinus Torvalds 36481da177e4SLinus Torvalds case TCP_KEEPIDLE: 3649aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val); 36501da177e4SLinus Torvalds break; 3651cd8ae852SEric Dumazet case TCP_SAVE_SYN: 3652267cf9faSMartin KaFai Lau /* 0: disable, 1: enable, 2: start from ether_header */ 3653267cf9faSMartin KaFai Lau if (val < 0 || val > 2) 3654cd8ae852SEric Dumazet err = -EINVAL; 3655cd8ae852SEric Dumazet else 3656cd8ae852SEric Dumazet tp->save_syn = val; 3657cd8ae852SEric Dumazet break; 3658cd8ae852SEric Dumazet 36591da177e4SLinus Torvalds case TCP_WINDOW_CLAMP: 3660cb811109SPrankur gupta err = tcp_set_window_clamp(sk, val); 36611da177e4SLinus Torvalds break; 36621da177e4SLinus Torvalds 36631da177e4SLinus Torvalds case TCP_QUICKACK: 3664ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val); 36651da177e4SLinus Torvalds break; 36661da177e4SLinus Torvalds 3667faadfabaSDmitry Safonov case TCP_AO_REPAIR: 3668965c00e4SDmitry Safonov if (!tcp_can_repair_sock(sk)) { 3669965c00e4SDmitry Safonov err = -EPERM; 3670965c00e4SDmitry Safonov break; 3671965c00e4SDmitry Safonov } 3672faadfabaSDmitry Safonov err = tcp_ao_set_repair(sk, optval, optlen); 3673faadfabaSDmitry Safonov break; 36744954f17dSDmitry Safonov #ifdef CONFIG_TCP_AO 36754954f17dSDmitry Safonov case TCP_AO_ADD_KEY: 36764954f17dSDmitry Safonov case TCP_AO_DEL_KEY: 36774954f17dSDmitry Safonov case TCP_AO_INFO: { 36784954f17dSDmitry Safonov /* If this is the first TCP-AO setsockopt() on the socket, 3679faadfabaSDmitry Safonov * sk_state has to be LISTEN or CLOSE. Allow TCP_REPAIR 3680faadfabaSDmitry Safonov * in any state. 
36814954f17dSDmitry Safonov */
3682faadfabaSDmitry Safonov if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
3683faadfabaSDmitry Safonov goto ao_parse;
3684faadfabaSDmitry Safonov if (rcu_dereference_protected(tcp_sk(sk)->ao_info,
36854954f17dSDmitry Safonov lockdep_sock_is_held(sk)))
3686faadfabaSDmitry Safonov goto ao_parse;
3687faadfabaSDmitry Safonov if (tp->repair)
3688faadfabaSDmitry Safonov goto ao_parse;
36894954f17dSDmitry Safonov err = -EISCONN;
36904954f17dSDmitry Safonov break;
3691faadfabaSDmitry Safonov ao_parse:
3692faadfabaSDmitry Safonov err = tp->af_specific->ao_parse(sk, optname, optval, optlen);
3693faadfabaSDmitry Safonov break;
36944954f17dSDmitry Safonov }
36954954f17dSDmitry Safonov #endif
3696cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3697cfb6eeb4SYOSHIFUJI Hideaki case TCP_MD5SIG:
36988917a777SIvan Delalande case TCP_MD5SIG_EXT:
3699d38d2b00SChristoph Hellwig err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
3700cfb6eeb4SYOSHIFUJI Hideaki break;
3701cfb6eeb4SYOSHIFUJI Hideaki #endif
37028336886fSJerry Chu case TCP_FASTOPEN:
37038336886fSJerry Chu if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
3704dfea2aa6SChristoph Paasch TCPF_LISTEN))) {
370543713848SHaishuang Yan tcp_fastopen_init_key_once(net);
3706dfea2aa6SChristoph Paasch
37070536fcc0SEric Dumazet fastopen_queue_tune(sk, val);
3708dfea2aa6SChristoph Paasch } else {
37098336886fSJerry Chu err = -EINVAL;
3710dfea2aa6SChristoph Paasch }
37118336886fSJerry Chu break;
371219f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT:
371319f6d3f3SWei Wang if (val > 1 || val < 0) {
371419f6d3f3SWei Wang err = -EINVAL;
37155a542133SKuniyuki Iwashima } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) &
37165a542133SKuniyuki Iwashima TFO_CLIENT_ENABLE) {
371719f6d3f3SWei Wang if (sk->sk_state == TCP_CLOSE)
371819f6d3f3SWei Wang tp->fastopen_connect = val;
371919f6d3f3SWei Wang else
372019f6d3f3SWei Wang err = -EINVAL;
372119f6d3f3SWei Wang } else {
372219f6d3f3SWei Wang err = -EOPNOTSUPP;
372319f6d3f3SWei Wang }
372419f6d3f3SWei Wang break;
372571c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE:
372671c02379SChristoph Paasch if (val > 1 || val < 0)
372771c02379SChristoph Paasch err = -EINVAL;
372871c02379SChristoph Paasch else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
372971c02379SChristoph Paasch err = -EINVAL;
373071c02379SChristoph Paasch else
373171c02379SChristoph Paasch tp->fastopen_no_cookie = val;
373271c02379SChristoph Paasch break;
373393be6ce0SAndrey Vagin case TCP_TIMESTAMP:
3734614e8316SEric Dumazet if (!tp->repair) {
373593be6ce0SAndrey Vagin err = -EPERM;
3736614e8316SEric Dumazet break;
3737614e8316SEric Dumazet }
3738614e8316SEric Dumazet /* val is an opaque field,
3739614e8316SEric Dumazet * and low order bit contains usec_ts enable bit.
3740614e8316SEric Dumazet * It's a best effort, and we do not care if the user makes an error.
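 *
 * Illustrative sketch, not part of this file: since TCP_TIMESTAMP is
 * only writable under TCP_REPAIR, a restore tool typically replays the
 * opaque value it previously read back via getsockopt(TCP_TIMESTAMP),
 * which keeps the usec bit intact (saved_val is a placeholder).
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_TIMESTAMP, &saved_val, sizeof(saved_val));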
3741614e8316SEric Dumazet */ 3742614e8316SEric Dumazet tp->tcp_usec_ts = val & 1; 3743614e8316SEric Dumazet WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts)); 374493be6ce0SAndrey Vagin break; 3745b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: 3746b1ed4c4fSAndrey Vagin err = tcp_repair_set_window(tp, optval, optlen); 3747b1ed4c4fSAndrey Vagin break; 3748c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT: 37491aeb87bcSEric Dumazet WRITE_ONCE(tp->notsent_lowat, val); 3750c9bee3b7SEric Dumazet sk->sk_write_space(sk); 3751c9bee3b7SEric Dumazet break; 3752b75eba76SSoheil Hassas Yeganeh case TCP_INQ: 3753b75eba76SSoheil Hassas Yeganeh if (val > 1 || val < 0) 3754b75eba76SSoheil Hassas Yeganeh err = -EINVAL; 3755b75eba76SSoheil Hassas Yeganeh else 3756b75eba76SSoheil Hassas Yeganeh tp->recvmsg_inq = val; 3757b75eba76SSoheil Hassas Yeganeh break; 3758a842fe14SEric Dumazet case TCP_TX_DELAY: 3759a842fe14SEric Dumazet if (val) 3760a842fe14SEric Dumazet tcp_enable_tx_delay(); 3761348b81b6SEric Dumazet WRITE_ONCE(tp->tcp_tx_delay, val); 3762a842fe14SEric Dumazet break; 37631da177e4SLinus Torvalds default: 37641da177e4SLinus Torvalds err = -ENOPROTOOPT; 37651da177e4SLinus Torvalds break; 37663ff50b79SStephen Hemminger } 37673ff50b79SStephen Hemminger 3768cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 37691da177e4SLinus Torvalds return err; 37701da177e4SLinus Torvalds } 37711da177e4SLinus Torvalds 3772a7b75c5aSChristoph Hellwig int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 3773b7058842SDavid S. Miller unsigned int optlen) 37743fdadf7dSDmitry Mishin { 3775cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 37763fdadf7dSDmitry Mishin 37773fdadf7dSDmitry Mishin if (level != SOL_TCP) 3778f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 3779f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname, 37803fdadf7dSDmitry Mishin optval, optlen); 3781a7b75c5aSChristoph Hellwig return do_tcp_setsockopt(sk, level, optname, optval, optlen); 37823fdadf7dSDmitry Mishin } 37834bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_setsockopt); 37843fdadf7dSDmitry Mishin 3785efd90174SFrancis Yan static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 3786efd90174SFrancis Yan struct tcp_info *info) 3787efd90174SFrancis Yan { 3788efd90174SFrancis Yan u64 stats[__TCP_CHRONO_MAX], total = 0; 3789efd90174SFrancis Yan enum tcp_chrono i; 3790efd90174SFrancis Yan 3791efd90174SFrancis Yan for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { 3792efd90174SFrancis Yan stats[i] = tp->chrono_stat[i - 1]; 3793efd90174SFrancis Yan if (i == tp->chrono_type) 3794628174ccSEric Dumazet stats[i] += tcp_jiffies32 - tp->chrono_start; 3795efd90174SFrancis Yan stats[i] *= USEC_PER_SEC / HZ; 3796efd90174SFrancis Yan total += stats[i]; 3797efd90174SFrancis Yan } 3798efd90174SFrancis Yan 3799efd90174SFrancis Yan info->tcpi_busy_time = total; 3800efd90174SFrancis Yan info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; 3801efd90174SFrancis Yan info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; 3802efd90174SFrancis Yan } 3803efd90174SFrancis Yan 38041da177e4SLinus Torvalds /* Return information about state of tcp endpoint in API format. 
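 *
 * Illustrative userspace sketch, not part of this file: this struct is
 * what getsockopt(TCP_INFO) fills in; the kernel may know more fields
 * than old userspace headers, so pass the length in and out.
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %u us, cwnd %u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);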
*/ 38050df48c26SEric Dumazet void tcp_get_info(struct sock *sk, struct tcp_info *info) 38061da177e4SLinus Torvalds { 380735ac838aSCraig Gallek const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 3808463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 380976a9ebe8SEric Dumazet unsigned long rate; 38100263598cSWei Wang u32 now; 3811ff5d7497SEric Dumazet u64 rate64; 381267db3e4bSEric Dumazet bool slow; 38131da177e4SLinus Torvalds 38141da177e4SLinus Torvalds memset(info, 0, sizeof(*info)); 381535ac838aSCraig Gallek if (sk->sk_type != SOCK_STREAM) 381635ac838aSCraig Gallek return; 38171da177e4SLinus Torvalds 3818986ffdfdSYafang Shao info->tcpi_state = inet_sk_state_load(sk); 381900fd38d9SEric Dumazet 3820ccbf3bfaSEric Dumazet /* Report meaningful fields for all TCP states, including listeners */ 3821ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_pacing_rate); 382276a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL; 3823f522a5fcSEric Dumazet info->tcpi_pacing_rate = rate64; 3824ccbf3bfaSEric Dumazet 3825ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_max_pacing_rate); 382676a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL; 3827f522a5fcSEric Dumazet info->tcpi_max_pacing_rate = rate64; 3828ccbf3bfaSEric Dumazet 3829ccbf3bfaSEric Dumazet info->tcpi_reordering = tp->reordering; 383040570375SEric Dumazet info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); 3831ccbf3bfaSEric Dumazet 3832ccbf3bfaSEric Dumazet if (info->tcpi_state == TCP_LISTEN) { 3833ccbf3bfaSEric Dumazet /* listeners aliased fields : 3834ccbf3bfaSEric Dumazet * tcpi_unacked -> Number of children ready for accept() 3835ccbf3bfaSEric Dumazet * tcpi_sacked -> max backlog 3836ccbf3bfaSEric Dumazet */ 3837288efe86SEric Dumazet info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); 3838099ecf59SEric Dumazet info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); 3839ccbf3bfaSEric Dumazet return; 3840ccbf3bfaSEric Dumazet } 3841b369e7fdSEric Dumazet 3842b369e7fdSEric Dumazet slow = lock_sock_fast(sk); 3843b369e7fdSEric Dumazet 38446687e988SArnaldo Carvalho de Melo info->tcpi_ca_state = icsk->icsk_ca_state; 3845463c84b9SArnaldo Carvalho de Melo info->tcpi_retransmits = icsk->icsk_retransmits; 38466687e988SArnaldo Carvalho de Melo info->tcpi_probes = icsk->icsk_probes_out; 3847463c84b9SArnaldo Carvalho de Melo info->tcpi_backoff = icsk->icsk_backoff; 38481da177e4SLinus Torvalds 38491da177e4SLinus Torvalds if (tp->rx_opt.tstamp_ok) 38501da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 3851e60402d0SIlpo Järvinen if (tcp_is_sack(tp)) 38521da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_SACK; 38531da177e4SLinus Torvalds if (tp->rx_opt.wscale_ok) { 38541da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_WSCALE; 38551da177e4SLinus Torvalds info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 38561da177e4SLinus Torvalds info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 38571da177e4SLinus Torvalds } 38581da177e4SLinus Torvalds 38591da177e4SLinus Torvalds if (tp->ecn_flags & TCP_ECN_OK) 38601da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_ECN; 3861b5c5693bSEric Dumazet if (tp->ecn_flags & TCP_ECN_SEEN) 3862b5c5693bSEric Dumazet info->tcpi_options |= TCPI_OPT_ECN_SEEN; 38636f73601eSYuchung Cheng if (tp->syn_data_acked) 38646f73601eSYuchung Cheng info->tcpi_options |= TCPI_OPT_SYN_DATA; 3865a77a0f5cSEric Dumazet if (tp->tcp_usec_ts) 3866a77a0f5cSEric Dumazet info->tcpi_options |= TCPI_OPT_USEC_TS; 38671da177e4SLinus Torvalds 3868463c84b9SArnaldo Carvalho de Melo 
info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 386995b9a87cSDavid Morley info->tcpi_ato = jiffies_to_usecs(min_t(u32, icsk->icsk_ack.ato, 3870bbf80d71SEric Dumazet tcp_delack_max(sk))); 3871c1b4a7e6SDavid S. Miller info->tcpi_snd_mss = tp->mss_cache; 3872463c84b9SArnaldo Carvalho de Melo info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 38731da177e4SLinus Torvalds 38741da177e4SLinus Torvalds info->tcpi_unacked = tp->packets_out; 38751da177e4SLinus Torvalds info->tcpi_sacked = tp->sacked_out; 3876ccbf3bfaSEric Dumazet 38771da177e4SLinus Torvalds info->tcpi_lost = tp->lost_out; 38781da177e4SLinus Torvalds info->tcpi_retrans = tp->retrans_out; 38791da177e4SLinus Torvalds 3880d635fbe2SEric Dumazet now = tcp_jiffies32; 38811da177e4SLinus Torvalds info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 3882463c84b9SArnaldo Carvalho de Melo info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 38831da177e4SLinus Torvalds info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 38841da177e4SLinus Torvalds 3885d83d8461SArnaldo Carvalho de Melo info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 38861da177e4SLinus Torvalds info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 3887740b0f18SEric Dumazet info->tcpi_rtt = tp->srtt_us >> 3; 3888740b0f18SEric Dumazet info->tcpi_rttvar = tp->mdev_us >> 2; 38891da177e4SLinus Torvalds info->tcpi_snd_ssthresh = tp->snd_ssthresh; 38901da177e4SLinus Torvalds info->tcpi_advmss = tp->advmss; 38911da177e4SLinus Torvalds 3892645f4c6fSEric Dumazet info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; 38931da177e4SLinus Torvalds info->tcpi_rcv_space = tp->rcvq_space.space; 38941da177e4SLinus Torvalds 38951da177e4SLinus Torvalds info->tcpi_total_retrans = tp->total_retrans; 3896977cb0ecSEric Dumazet 3897f522a5fcSEric Dumazet info->tcpi_bytes_acked = tp->bytes_acked; 3898f522a5fcSEric Dumazet info->tcpi_bytes_received = tp->bytes_received; 389967db3e4bSEric Dumazet info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); 3900efd90174SFrancis Yan tcp_get_info_chrono_stats(tp, info); 390167db3e4bSEric Dumazet 39022efd055cSMarcelo Ricardo Leitner info->tcpi_segs_out = tp->segs_out; 39030307a0b7SEric Dumazet 39040307a0b7SEric Dumazet /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ 39050307a0b7SEric Dumazet info->tcpi_segs_in = READ_ONCE(tp->segs_in); 39060307a0b7SEric Dumazet info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); 3907cd9b2660SEric Dumazet 3908cd9b2660SEric Dumazet info->tcpi_min_rtt = tcp_min_rtt(tp); 3909a44d6eacSMartin KaFai Lau info->tcpi_data_segs_out = tp->data_segs_out; 3910eb8329e0SYuchung Cheng 3911eb8329e0SYuchung Cheng info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 
1 : 0; 39120263598cSWei Wang rate64 = tcp_compute_delivery_rate(tp); 39130263598cSWei Wang if (rate64) 3914f522a5fcSEric Dumazet info->tcpi_delivery_rate = rate64; 3915feb5f2ecSYuchung Cheng info->tcpi_delivered = tp->delivered; 3916feb5f2ecSYuchung Cheng info->tcpi_delivered_ce = tp->delivered_ce; 3917ba113c3aSWei Wang info->tcpi_bytes_sent = tp->bytes_sent; 3918fb31c9b9SWei Wang info->tcpi_bytes_retrans = tp->bytes_retrans; 39197e10b655SWei Wang info->tcpi_dsack_dups = tp->dsack_dups; 39207ec65372SWei Wang info->tcpi_reord_seen = tp->reord_seen; 3921f9af2dbbSThomas Higdon info->tcpi_rcv_ooopack = tp->rcv_ooopack; 39228f7baad7SThomas Higdon info->tcpi_snd_wnd = tp->snd_wnd; 392371fc7047SMubashir Adnan Qureshi info->tcpi_rcv_wnd = tp->rcv_wnd; 392471fc7047SMubashir Adnan Qureshi info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash; 392548027478SJason Baron info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; 39263868ab0fSAananth V 39273868ab0fSAananth V info->tcpi_total_rto = tp->total_rto; 39283868ab0fSAananth V info->tcpi_total_rto_recoveries = tp->total_rto_recoveries; 39293868ab0fSAananth V info->tcpi_total_rto_time = tp->total_rto_time; 39302a7c8d29SEric Dumazet if (tp->rto_stamp) 39312a7c8d29SEric Dumazet info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp; 39323868ab0fSAananth V 3933b369e7fdSEric Dumazet unlock_sock_fast(sk, slow); 39341da177e4SLinus Torvalds } 39351da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info); 39361da177e4SLinus Torvalds 3937984988aaSWei Wang static size_t tcp_opt_stats_get_size(void) 3938984988aaSWei Wang { 3939984988aaSWei Wang return 3940984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ 3941984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ 3942984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ 3943984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ 3944984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ 3945984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ 3946984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ 3947984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ 3948984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ 3949984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ 3950984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ 3951984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ 3952984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ 3953984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ 3954984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ 3955984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ 3956984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ 3957ba113c3aSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ 3958fb31c9b9SWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ 39597e10b655SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ 39607ec65372SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ 3961e8bd8fcaSYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ 396232efcc06SAbdul Kabbani nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ 3963e08ab0b3SYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ 
396448040793SYousuk Seung nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */ 3965e7ed11eeSYousuk Seung nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */ 396629c1c446SMubashir Adnan Qureshi nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */ 3967984988aaSWei Wang 0; 3968984988aaSWei Wang } 3969984988aaSWei Wang 3970e7ed11eeSYousuk Seung /* Returns TTL or hop limit of an incoming packet from skb. */ 3971e7ed11eeSYousuk Seung static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb) 3972e7ed11eeSYousuk Seung { 3973e7ed11eeSYousuk Seung if (skb->protocol == htons(ETH_P_IP)) 3974e7ed11eeSYousuk Seung return ip_hdr(skb)->ttl; 3975e7ed11eeSYousuk Seung else if (skb->protocol == htons(ETH_P_IPV6)) 3976e7ed11eeSYousuk Seung return ipv6_hdr(skb)->hop_limit; 3977e7ed11eeSYousuk Seung else 3978e7ed11eeSYousuk Seung return 0; 3979e7ed11eeSYousuk Seung } 3980e7ed11eeSYousuk Seung 398148040793SYousuk Seung struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, 3982e7ed11eeSYousuk Seung const struct sk_buff *orig_skb, 3983e7ed11eeSYousuk Seung const struct sk_buff *ack_skb) 39841c885808SFrancis Yan { 39851c885808SFrancis Yan const struct tcp_sock *tp = tcp_sk(sk); 39861c885808SFrancis Yan struct sk_buff *stats; 39871c885808SFrancis Yan struct tcp_info info; 398876a9ebe8SEric Dumazet unsigned long rate; 3989bb7c19f9SWei Wang u64 rate64; 39901c885808SFrancis Yan 3991984988aaSWei Wang stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); 39921c885808SFrancis Yan if (!stats) 39931c885808SFrancis Yan return NULL; 39941c885808SFrancis Yan 39951c885808SFrancis Yan tcp_get_info_chrono_stats(tp, &info); 39961c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_BUSY, 39971c885808SFrancis Yan info.tcpi_busy_time, TCP_NLA_PAD); 39981c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, 39991c885808SFrancis Yan info.tcpi_rwnd_limited, TCP_NLA_PAD); 40001c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, 40011c885808SFrancis Yan info.tcpi_sndbuf_limited, TCP_NLA_PAD); 40027e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, 40037e98102fSYuchung Cheng tp->data_segs_out, TCP_NLA_PAD); 40047e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, 40057e98102fSYuchung Cheng tp->total_retrans, TCP_NLA_PAD); 4006bb7c19f9SWei Wang 4007bb7c19f9SWei Wang rate = READ_ONCE(sk->sk_pacing_rate); 400876a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 4009bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); 4010bb7c19f9SWei Wang 4011bb7c19f9SWei Wang rate64 = tcp_compute_delivery_rate(tp); 4012bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); 4013bb7c19f9SWei Wang 401440570375SEric Dumazet nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); 4015bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); 4016bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); 4017bb7c19f9SWei Wang 4018bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); 4019bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); 40207156d194SYousuk Seung nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); 4021feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); 4022feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); 402387ecc95dSPriyaranjan Jha 402487ecc95dSPriyaranjan Jha nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); 4025be631892SPriyaranjan Jha nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); 4026feb5f2ecSYuchung Cheng 4027ba113c3aSWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, 4028ba113c3aSWei Wang TCP_NLA_PAD); 4029fb31c9b9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, 4030fb31c9b9SWei Wang TCP_NLA_PAD); 40317e10b655SWei Wang nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); 40327ec65372SWei Wang nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); 4033e8bd8fcaSYousuk Seung nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); 403432efcc06SAbdul Kabbani nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); 4035e08ab0b3SYousuk Seung nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, 4036e08ab0b3SYousuk Seung max_t(int, 0, tp->write_seq - tp->snd_nxt)); 403748040793SYousuk Seung nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, 403848040793SYousuk Seung TCP_NLA_PAD); 4039e7ed11eeSYousuk Seung if (ack_skb) 4040e7ed11eeSYousuk Seung nla_put_u8(stats, TCP_NLA_TTL, 4041e7ed11eeSYousuk Seung tcp_skb_ttl_or_hop_limit(ack_skb)); 4042ba113c3aSWei Wang 404329c1c446SMubashir Adnan Qureshi nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash); 40441c885808SFrancis Yan return stats; 40451c885808SFrancis Yan } 40461c885808SFrancis Yan 4047273b7f0fSMartin KaFai Lau int do_tcp_getsockopt(struct sock *sk, int level, 404834704ef0SMartin KaFai Lau int optname, sockptr_t optval, sockptr_t optlen) 40491da177e4SLinus Torvalds { 4050295f7324SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 40511da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 40526fa25166SNikolay Borisov struct net *net = sock_net(sk); 40531da177e4SLinus Torvalds int val, len; 40541da177e4SLinus Torvalds 405534704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 40561da177e4SLinus Torvalds return -EFAULT; 40571da177e4SLinus Torvalds 40581da177e4SLinus Torvalds if (len < 0) 40591da177e4SLinus Torvalds return -EINVAL; 40601da177e4SLinus Torvalds 4061716edc97SGavrilov Ilia len = min_t(unsigned int, len, sizeof(int)); 4062716edc97SGavrilov Ilia 40631da177e4SLinus Torvalds switch (optname) { 40641da177e4SLinus Torvalds case TCP_MAXSEG: 4065c1b4a7e6SDavid S. 
Miller val = tp->mss_cache; 406634dfde4aSCambda Zhu if (tp->rx_opt.user_mss && 406734dfde4aSCambda Zhu ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 40681da177e4SLinus Torvalds val = tp->rx_opt.user_mss; 40695e6a3ce6SPavel Emelyanov if (tp->repair) 40705e6a3ce6SPavel Emelyanov val = tp->rx_opt.mss_clamp; 40711da177e4SLinus Torvalds break; 40721da177e4SLinus Torvalds case TCP_NODELAY: 40731da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_OFF); 40741da177e4SLinus Torvalds break; 40751da177e4SLinus Torvalds case TCP_CORK: 40761da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_CORK); 40771da177e4SLinus Torvalds break; 40781da177e4SLinus Torvalds case TCP_KEEPIDLE: 4079df19a626SEric Dumazet val = keepalive_time_when(tp) / HZ; 40801da177e4SLinus Torvalds break; 40811da177e4SLinus Torvalds case TCP_KEEPINTVL: 4082df19a626SEric Dumazet val = keepalive_intvl_when(tp) / HZ; 40831da177e4SLinus Torvalds break; 40841da177e4SLinus Torvalds case TCP_KEEPCNT: 4085df19a626SEric Dumazet val = keepalive_probes(tp); 40861da177e4SLinus Torvalds break; 40871da177e4SLinus Torvalds case TCP_SYNCNT: 40883a037f0fSEric Dumazet val = READ_ONCE(icsk->icsk_syn_retries) ? : 408920a3b1c0SKuniyuki Iwashima READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); 40901da177e4SLinus Torvalds break; 40911da177e4SLinus Torvalds case TCP_LINGER2: 40929df5335cSEric Dumazet val = READ_ONCE(tp->linger2); 40931da177e4SLinus Torvalds if (val >= 0) 409439e24435SKuniyuki Iwashima val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; 40951da177e4SLinus Torvalds break; 40961da177e4SLinus Torvalds case TCP_DEFER_ACCEPT: 4097ae488c74SEric Dumazet val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept); 4098ae488c74SEric Dumazet val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ, 4099ae488c74SEric Dumazet TCP_RTO_MAX / HZ); 41001da177e4SLinus Torvalds break; 41011da177e4SLinus Torvalds case TCP_WINDOW_CLAMP: 4102f410cbeaSEric Dumazet val = READ_ONCE(tp->window_clamp); 41031da177e4SLinus Torvalds break; 41041da177e4SLinus Torvalds case TCP_INFO: { 41051da177e4SLinus Torvalds struct tcp_info info; 41061da177e4SLinus Torvalds 410734704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 41081da177e4SLinus Torvalds return -EFAULT; 41091da177e4SLinus Torvalds 41101da177e4SLinus Torvalds tcp_get_info(sk, &info); 41111da177e4SLinus Torvalds 41121da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(info)); 411334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 41141da177e4SLinus Torvalds return -EFAULT; 411534704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len)) 41161da177e4SLinus Torvalds return -EFAULT; 41171da177e4SLinus Torvalds return 0; 41181da177e4SLinus Torvalds } 41196e9250f5SEric Dumazet case TCP_CC_INFO: { 41206e9250f5SEric Dumazet const struct tcp_congestion_ops *ca_ops; 41216e9250f5SEric Dumazet union tcp_cc_info info; 41226e9250f5SEric Dumazet size_t sz = 0; 41236e9250f5SEric Dumazet int attr; 41246e9250f5SEric Dumazet 412534704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 41266e9250f5SEric Dumazet return -EFAULT; 41276e9250f5SEric Dumazet 41286e9250f5SEric Dumazet ca_ops = icsk->icsk_ca_ops; 41296e9250f5SEric Dumazet if (ca_ops && ca_ops->get_info) 41306e9250f5SEric Dumazet sz = ca_ops->get_info(sk, ~0U, &attr, &info); 41316e9250f5SEric Dumazet 41326e9250f5SEric Dumazet len = min_t(unsigned int, len, sz); 413334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 41346e9250f5SEric Dumazet return -EFAULT; 
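/* Illustrative userspace sketch, not part of this file: the scalar
 * options earlier in this switch (TCP_MAXSEG, TCP_KEEPIDLE, ...) read
 * back as plain integers, e.g. the effective MSS:
 *
 *	int mss;
 *	socklen_t len = sizeof(mss);
 *
 *	getsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, &len);
 */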
413534704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len)) 41366e9250f5SEric Dumazet return -EFAULT; 41376e9250f5SEric Dumazet return 0; 41386e9250f5SEric Dumazet } 41391da177e4SLinus Torvalds case TCP_QUICKACK: 414031954cd8SWei Wang val = !inet_csk_in_pingpong_mode(sk); 41411da177e4SLinus Torvalds break; 41425f8ef48dSStephen Hemminger 41435f8ef48dSStephen Hemminger case TCP_CONGESTION: 414434704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 41455f8ef48dSStephen Hemminger return -EFAULT; 41465f8ef48dSStephen Hemminger len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 414734704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 41485f8ef48dSStephen Hemminger return -EFAULT; 414934704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) 41505f8ef48dSStephen Hemminger return -EFAULT; 41515f8ef48dSStephen Hemminger return 0; 4152e56fb50fSWilliam Allen Simpson 4153734942ccSDave Watson case TCP_ULP: 415434704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 4155734942ccSDave Watson return -EFAULT; 4156734942ccSDave Watson len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); 4157d97af30fSDave Watson if (!icsk->icsk_ulp_ops) { 415834704ef0SMartin KaFai Lau len = 0; 415934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4160d97af30fSDave Watson return -EFAULT; 4161d97af30fSDave Watson return 0; 4162d97af30fSDave Watson } 416334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4164734942ccSDave Watson return -EFAULT; 416534704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) 4166734942ccSDave Watson return -EFAULT; 4167734942ccSDave Watson return 0; 4168734942ccSDave Watson 41691fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: { 4170f19008e6SJason Baron u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; 4171f19008e6SJason Baron unsigned int key_len; 41721fba70e5SYuchung Cheng 417334704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 41741fba70e5SYuchung Cheng return -EFAULT; 41751fba70e5SYuchung Cheng 4176f19008e6SJason Baron key_len = tcp_fastopen_get_cipher(net, icsk, key) * 41770f1ce023SJason Baron TCP_FASTOPEN_KEY_LENGTH; 41780f1ce023SJason Baron len = min_t(unsigned int, len, key_len); 417934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 41801fba70e5SYuchung Cheng return -EFAULT; 418134704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, key, len)) 41821fba70e5SYuchung Cheng return -EFAULT; 41831fba70e5SYuchung Cheng return 0; 41841fba70e5SYuchung Cheng } 41853c0fef0bSJosh Hunt case TCP_THIN_LINEAR_TIMEOUTS: 41863c0fef0bSJosh Hunt val = tp->thin_lto; 41873c0fef0bSJosh Hunt break; 41884a7f6009SYuchung Cheng 41893c0fef0bSJosh Hunt case TCP_THIN_DUPACK: 41904a7f6009SYuchung Cheng val = 0; 41913c0fef0bSJosh Hunt break; 4192dca43c75SJerry Chu 4193ee995283SPavel Emelyanov case TCP_REPAIR: 4194ee995283SPavel Emelyanov val = tp->repair; 4195ee995283SPavel Emelyanov break; 4196ee995283SPavel Emelyanov 4197ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE: 4198ee995283SPavel Emelyanov if (tp->repair) 4199ee995283SPavel Emelyanov val = tp->repair_queue; 4200ee995283SPavel Emelyanov else 4201ee995283SPavel Emelyanov return -EINVAL; 4202ee995283SPavel Emelyanov break; 4203ee995283SPavel Emelyanov 4204b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: { 4205b1ed4c4fSAndrey Vagin struct tcp_repair_window opt; 4206b1ed4c4fSAndrey Vagin 420734704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, 
sizeof(int))) 4208b1ed4c4fSAndrey Vagin return -EFAULT; 4209b1ed4c4fSAndrey Vagin 4210b1ed4c4fSAndrey Vagin if (len != sizeof(opt)) 4211b1ed4c4fSAndrey Vagin return -EINVAL; 4212b1ed4c4fSAndrey Vagin 4213b1ed4c4fSAndrey Vagin if (!tp->repair) 4214b1ed4c4fSAndrey Vagin return -EPERM; 4215b1ed4c4fSAndrey Vagin 4216b1ed4c4fSAndrey Vagin opt.snd_wl1 = tp->snd_wl1; 4217b1ed4c4fSAndrey Vagin opt.snd_wnd = tp->snd_wnd; 4218b1ed4c4fSAndrey Vagin opt.max_window = tp->max_window; 4219b1ed4c4fSAndrey Vagin opt.rcv_wnd = tp->rcv_wnd; 4220b1ed4c4fSAndrey Vagin opt.rcv_wup = tp->rcv_wup; 4221b1ed4c4fSAndrey Vagin 422234704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &opt, len)) 4223b1ed4c4fSAndrey Vagin return -EFAULT; 4224b1ed4c4fSAndrey Vagin return 0; 4225b1ed4c4fSAndrey Vagin } 4226ee995283SPavel Emelyanov case TCP_QUEUE_SEQ: 4227ee995283SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE) 4228ee995283SPavel Emelyanov val = tp->write_seq; 4229ee995283SPavel Emelyanov else if (tp->repair_queue == TCP_RECV_QUEUE) 4230ee995283SPavel Emelyanov val = tp->rcv_nxt; 4231ee995283SPavel Emelyanov else 4232ee995283SPavel Emelyanov return -EINVAL; 4233ee995283SPavel Emelyanov break; 4234ee995283SPavel Emelyanov 4235dca43c75SJerry Chu case TCP_USER_TIMEOUT: 423626023e91SEric Dumazet val = READ_ONCE(icsk->icsk_user_timeout); 4237dca43c75SJerry Chu break; 42381536e285SKenjiro Nakayama 42391536e285SKenjiro Nakayama case TCP_FASTOPEN: 424070f360ddSEric Dumazet val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen); 42411536e285SKenjiro Nakayama break; 42421536e285SKenjiro Nakayama 424319f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT: 424419f6d3f3SWei Wang val = tp->fastopen_connect; 424519f6d3f3SWei Wang break; 424619f6d3f3SWei Wang 424771c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE: 424871c02379SChristoph Paasch val = tp->fastopen_no_cookie; 424971c02379SChristoph Paasch break; 425071c02379SChristoph Paasch 4251a842fe14SEric Dumazet case TCP_TX_DELAY: 4252348b81b6SEric Dumazet val = READ_ONCE(tp->tcp_tx_delay); 4253a842fe14SEric Dumazet break; 4254a842fe14SEric Dumazet 425593be6ce0SAndrey Vagin case TCP_TIMESTAMP: 4256614e8316SEric Dumazet val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset); 4257614e8316SEric Dumazet if (tp->tcp_usec_ts) 4258614e8316SEric Dumazet val |= 1; 4259614e8316SEric Dumazet else 4260614e8316SEric Dumazet val &= ~1; 426193be6ce0SAndrey Vagin break; 4262c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT: 42631aeb87bcSEric Dumazet val = READ_ONCE(tp->notsent_lowat); 4264c9bee3b7SEric Dumazet break; 4265b75eba76SSoheil Hassas Yeganeh case TCP_INQ: 4266b75eba76SSoheil Hassas Yeganeh val = tp->recvmsg_inq; 4267b75eba76SSoheil Hassas Yeganeh break; 4268cd8ae852SEric Dumazet case TCP_SAVE_SYN: 4269cd8ae852SEric Dumazet val = tp->save_syn; 4270cd8ae852SEric Dumazet break; 4271cd8ae852SEric Dumazet case TCP_SAVED_SYN: { 427234704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 4273cd8ae852SEric Dumazet return -EFAULT; 4274cd8ae852SEric Dumazet 4275d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk); 4276cd8ae852SEric Dumazet if (tp->saved_syn) { 427770a217f1SMartin KaFai Lau if (len < tcp_saved_syn_len(tp->saved_syn)) { 427834704ef0SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn); 427934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4280d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4281aea0929eSEric B Munson return -EFAULT; 4282aea0929eSEric B Munson } 4283d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 
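/* Illustrative userspace sketch, not part of this file, of the
 * TCP_SAVE_SYN/TCP_SAVED_SYN pair handled here: opt in on the listener
 * lfd, then read the stored SYN headers once from the accepted socket
 * (the saved SYN is freed after a successful read).
 *
 *	int one = 1;
 *	char syn[512];
 *	socklen_t len = sizeof(syn);
 *
 *	setsockopt(lfd, IPPROTO_TCP, TCP_SAVE_SYN, &one, sizeof(one));
 *	int cfd = accept(lfd, NULL, NULL);
 *	getsockopt(cfd, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len);
 */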
4284aea0929eSEric B Munson return -EINVAL; 4285aea0929eSEric B Munson } 428670a217f1SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn); 428734704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4288d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4289cd8ae852SEric Dumazet return -EFAULT; 4290cd8ae852SEric Dumazet } 429134704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { 4292d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4293cd8ae852SEric Dumazet return -EFAULT; 4294cd8ae852SEric Dumazet } 4295cd8ae852SEric Dumazet tcp_saved_syn_free(tp); 4296d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4297cd8ae852SEric Dumazet } else { 4298d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4299cd8ae852SEric Dumazet len = 0; 430034704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4301cd8ae852SEric Dumazet return -EFAULT; 4302cd8ae852SEric Dumazet } 4303cd8ae852SEric Dumazet return 0; 4304cd8ae852SEric Dumazet } 430505255b82SEric Dumazet #ifdef CONFIG_MMU 430605255b82SEric Dumazet case TCP_ZEROCOPY_RECEIVE: { 43077eeba170SArjun Roy struct scm_timestamping_internal tss; 4308e0fecb28SArjun Roy struct tcp_zerocopy_receive zc = {}; 430905255b82SEric Dumazet int err; 431005255b82SEric Dumazet 431134704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 431205255b82SEric Dumazet return -EFAULT; 43132107d45fSArjun Roy if (len < 0 || 43142107d45fSArjun Roy len < offsetofend(struct tcp_zerocopy_receive, length)) 431505255b82SEric Dumazet return -EINVAL; 43163c5a2fd0SArjun Roy if (unlikely(len > sizeof(zc))) { 431734704ef0SMartin KaFai Lau err = check_zeroed_sockptr(optval, sizeof(zc), 43183c5a2fd0SArjun Roy len - sizeof(zc)); 43193c5a2fd0SArjun Roy if (err < 1) 43203c5a2fd0SArjun Roy return err == 0 ? 
-EINVAL : err; 4321c8856c05SArjun Roy len = sizeof(zc); 432234704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 43230b7f41f6SArjun Roy return -EFAULT; 43240b7f41f6SArjun Roy } 432534704ef0SMartin KaFai Lau if (copy_from_sockptr(&zc, optval, len)) 432605255b82SEric Dumazet return -EFAULT; 43273c5a2fd0SArjun Roy if (zc.reserved) 43283c5a2fd0SArjun Roy return -EINVAL; 43293c5a2fd0SArjun Roy if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) 43303c5a2fd0SArjun Roy return -EINVAL; 4331d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk); 43327eeba170SArjun Roy err = tcp_zerocopy_receive(sk, &zc, &tss); 43339cacf81fSStanislav Fomichev err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, 43349cacf81fSStanislav Fomichev &zc, &len, err); 4335d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 43367eeba170SArjun Roy if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) 43377eeba170SArjun Roy goto zerocopy_rcv_cmsg; 4338c8856c05SArjun Roy switch (len) { 43397eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_flags): 43407eeba170SArjun Roy goto zerocopy_rcv_cmsg; 43417eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_controllen): 43427eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_control): 43437eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, flags): 43447eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_len): 43457eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_address): 434633946518SArjun Roy case offsetofend(struct tcp_zerocopy_receive, err): 434733946518SArjun Roy goto zerocopy_rcv_sk_err; 4348c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, inq): 4349c8856c05SArjun Roy goto zerocopy_rcv_inq; 4350c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, length): 4351c8856c05SArjun Roy default: 4352c8856c05SArjun Roy goto zerocopy_rcv_out; 4353c8856c05SArjun Roy } 43547eeba170SArjun Roy zerocopy_rcv_cmsg: 43557eeba170SArjun Roy if (zc.msg_flags & TCP_CMSG_TS) 43567eeba170SArjun Roy tcp_zc_finalize_rx_tstamp(sk, &zc, &tss); 43577eeba170SArjun Roy else 43587eeba170SArjun Roy zc.msg_flags = 0; 435933946518SArjun Roy zerocopy_rcv_sk_err: 436033946518SArjun Roy if (!err) 436133946518SArjun Roy zc.err = sock_error(sk); 4362c8856c05SArjun Roy zerocopy_rcv_inq: 4363c8856c05SArjun Roy zc.inq = tcp_inq_hint(sk); 4364c8856c05SArjun Roy zerocopy_rcv_out: 436534704ef0SMartin KaFai Lau if (!err && copy_to_sockptr(optval, &zc, len)) 436605255b82SEric Dumazet err = -EFAULT; 436705255b82SEric Dumazet return err; 436805255b82SEric Dumazet } 436905255b82SEric Dumazet #endif 4370faadfabaSDmitry Safonov case TCP_AO_REPAIR: 4371965c00e4SDmitry Safonov if (!tcp_can_repair_sock(sk)) 4372965c00e4SDmitry Safonov return -EPERM; 4373faadfabaSDmitry Safonov return tcp_ao_get_repair(sk, optval, optlen); 4374ef84703aSDmitry Safonov case TCP_AO_GET_KEYS: 4375ef84703aSDmitry Safonov case TCP_AO_INFO: { 4376ef84703aSDmitry Safonov int err; 4377ef84703aSDmitry Safonov 4378ef84703aSDmitry Safonov sockopt_lock_sock(sk); 4379ef84703aSDmitry Safonov if (optname == TCP_AO_GET_KEYS) 4380ef84703aSDmitry Safonov err = tcp_ao_get_mkts(sk, optval, optlen); 4381ef84703aSDmitry Safonov else 4382ef84703aSDmitry Safonov err = tcp_ao_get_sock_info(sk, optval, optlen); 4383ef84703aSDmitry Safonov sockopt_release_sock(sk); 4384ef84703aSDmitry Safonov 4385ef84703aSDmitry Safonov return err; 4386ef84703aSDmitry Safonov } 4387c084ebd7SMatthieu Baerts (NGI0) case TCP_IS_MPTCP: 
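/* Illustrative userspace sketch, not part of this file, of the
 * TCP_ZEROCOPY_RECEIVE case above: mmap() the socket read-only, then
 * ask the kernel to map received pages into that region (error paths
 * and the copybuf fallback are omitted; the 1MB size is arbitrary).
 *
 *	struct tcp_zerocopy_receive zc = {};
 *	socklen_t zc_len = sizeof(zc);
 *	void *addr = mmap(NULL, 1 << 20, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	zc.address = (__u64)(unsigned long)addr;
 *	zc.length = 1 << 20;
 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len);
 */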
4388c084ebd7SMatthieu Baerts (NGI0) val = 0; 4389c084ebd7SMatthieu Baerts (NGI0) break; 43901da177e4SLinus Torvalds default: 43911da177e4SLinus Torvalds return -ENOPROTOOPT; 43923ff50b79SStephen Hemminger } 43931da177e4SLinus Torvalds 439434704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 43951da177e4SLinus Torvalds return -EFAULT; 439634704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &val, len)) 43971da177e4SLinus Torvalds return -EFAULT; 43981da177e4SLinus Torvalds return 0; 43991da177e4SLinus Torvalds } 44001da177e4SLinus Torvalds 44019cacf81fSStanislav Fomichev bool tcp_bpf_bypass_getsockopt(int level, int optname) 44029cacf81fSStanislav Fomichev { 44039cacf81fSStanislav Fomichev /* TCP do_tcp_getsockopt has optimized getsockopt implementation 44049cacf81fSStanislav Fomichev * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE. 44059cacf81fSStanislav Fomichev */ 44069cacf81fSStanislav Fomichev if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE) 44079cacf81fSStanislav Fomichev return true; 44089cacf81fSStanislav Fomichev 44099cacf81fSStanislav Fomichev return false; 44109cacf81fSStanislav Fomichev } 44119cacf81fSStanislav Fomichev EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt); 44129cacf81fSStanislav Fomichev 44133fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 44143fdadf7dSDmitry Mishin int __user *optlen) 44153fdadf7dSDmitry Mishin { 44163fdadf7dSDmitry Mishin struct inet_connection_sock *icsk = inet_csk(sk); 44173fdadf7dSDmitry Mishin 44183fdadf7dSDmitry Mishin if (level != SOL_TCP) 4419f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 4420f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname, 44213fdadf7dSDmitry Mishin optval, optlen); 442234704ef0SMartin KaFai Lau return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), 442334704ef0SMartin KaFai Lau USER_SOCKPTR(optlen)); 44243fdadf7dSDmitry Mishin } 44254bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_getsockopt); 44263fdadf7dSDmitry Mishin 4427cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 44288c73b263SDmitry Safonov int tcp_md5_sigpool_id = -1; 44298c73b263SDmitry Safonov EXPORT_SYMBOL_GPL(tcp_md5_sigpool_id); 4430cfb6eeb4SYOSHIFUJI Hideaki 44318c73b263SDmitry Safonov int tcp_md5_alloc_sigpool(void) 4432cfb6eeb4SYOSHIFUJI Hideaki { 44338c73b263SDmitry Safonov size_t scratch_size; 44348c73b263SDmitry Safonov int ret; 4435cfb6eeb4SYOSHIFUJI Hideaki 44368c73b263SDmitry Safonov scratch_size = sizeof(union tcp_md5sum_block) + sizeof(struct tcphdr); 44378c73b263SDmitry Safonov ret = tcp_sigpool_alloc_ahash("md5", scratch_size); 44388c73b263SDmitry Safonov if (ret >= 0) { 44398c73b263SDmitry Safonov /* As long as any md5 sigpool was allocated, the return 44408c73b263SDmitry Safonov * id would stay the same. Re-write the id only for the case 44418c73b263SDmitry Safonov * when previously all MD5 keys were deleted and this call 44428c73b263SDmitry Safonov * allocates the first MD5 key, which may return a different 44438c73b263SDmitry Safonov * sigpool id than was used previously. 
444471cea17eSEric Dumazet */
44458c73b263SDmitry Safonov WRITE_ONCE(tcp_md5_sigpool_id, ret); /* WRITE_ONCE() avoids the compiler tearing or caching the store; pairs with READ_ONCE() in the readers below */
444649a72dfbSAdam Langley return 0;
444749a72dfbSAdam Langley }
44488c73b263SDmitry Safonov return ret;
44498c73b263SDmitry Safonov }
445049a72dfbSAdam Langley
44518c73b263SDmitry Safonov void tcp_md5_release_sigpool(void)
44528c73b263SDmitry Safonov {
44538c73b263SDmitry Safonov tcp_sigpool_release(READ_ONCE(tcp_md5_sigpool_id));
44548c73b263SDmitry Safonov }
44558c73b263SDmitry Safonov
44568c73b263SDmitry Safonov void tcp_md5_add_sigpool(void)
44578c73b263SDmitry Safonov {
44588c73b263SDmitry Safonov tcp_sigpool_get(READ_ONCE(tcp_md5_sigpool_id));
44598c73b263SDmitry Safonov }
44608c73b263SDmitry Safonov
44618c73b263SDmitry Safonov int tcp_md5_hash_key(struct tcp_sigpool *hp,
44628c73b263SDmitry Safonov const struct tcp_md5sig_key *key)
446349a72dfbSAdam Langley {
4464e6ced831SEric Dumazet u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
446549a72dfbSAdam Langley struct scatterlist sg;
446649a72dfbSAdam Langley
44676a2febecSEric Dumazet sg_init_one(&sg, key->key, keylen);
44688c73b263SDmitry Safonov ahash_request_set_crypt(hp->req, &sg, NULL, keylen);
4469e6ced831SEric Dumazet
44708c73b263SDmitry Safonov /* We use data_race() because tcp_md5_do_add() might change
44718c73b263SDmitry Safonov * key->key under us.
44728c73b263SDmitry Safonov */
44738c73b263SDmitry Safonov return data_race(crypto_ahash_update(hp->req));
447449a72dfbSAdam Langley }
447549a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_key);
447649a72dfbSAdam Langley
44777bbb765bSDmitry Safonov /* Called with rcu_read_lock() */
4478811efc06SDmitry Safonov static enum skb_drop_reason
44791330b6efSJakub Kicinski tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
44807bbb765bSDmitry Safonov const void *saddr, const void *daddr,
44810a3a8090SDmitry Safonov int family, int l3index, const __u8 *hash_location)
44827bbb765bSDmitry Safonov {
44830a3a8090SDmitry Safonov /* This gets called for each TCP segment that has the TCP-MD5 option.
44847bbb765bSDmitry Safonov * We have 3 drop cases:
44857bbb765bSDmitry Safonov * o No MD5 hash and one expected.
44867bbb765bSDmitry Safonov * o MD5 hash and we're not expecting one.
44877bbb765bSDmitry Safonov * o MD5 hash and it's wrong.
44887bbb765bSDmitry Safonov */
4489e9d9da91SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk);
44900a3a8090SDmitry Safonov struct tcp_md5sig_key *key;
44917bbb765bSDmitry Safonov u8 newhash[16];
44920a3a8090SDmitry Safonov int genhash;
44937bbb765bSDmitry Safonov
44940a3a8090SDmitry Safonov key = tcp_md5_do_lookup(sk, l3index, saddr, family);
44957bbb765bSDmitry Safonov
44960a3a8090SDmitry Safonov if (!key && hash_location) {
44977bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
449896be3dcdSDmitry Safonov trace_tcp_hash_md5_unexpected(sk, skb);
44991330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
45007bbb765bSDmitry Safonov }
45017bbb765bSDmitry Safonov
4502e62d2e11SEric Dumazet /* Check the signature.
4503e62d2e11SEric Dumazet * To support dual stack listeners, we need to handle
4504e62d2e11SEric Dumazet * IPv4-mapped case.
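 *
 * For example, an AF_INET6 listener may accept an IPv4 peer: such a
 * segment arrives here with family == AF_INET while tp->af_specific
 * still points at the IPv6 ops, hence the explicit AF_INET check
 * below.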
4505e62d2e11SEric Dumazet */
4506e62d2e11SEric Dumazet if (family == AF_INET)
45070a3a8090SDmitry Safonov genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
4508e62d2e11SEric Dumazet else
45090a3a8090SDmitry Safonov genhash = tp->af_specific->calc_md5_hash(newhash, key,
45107bbb765bSDmitry Safonov NULL, skb);
45117bbb765bSDmitry Safonov if (genhash || memcmp(hash_location, newhash, 16) != 0) {
45127bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
451396be3dcdSDmitry Safonov trace_tcp_hash_md5_mismatch(sk, skb);
45141330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5FAILURE;
45157bbb765bSDmitry Safonov }
45161330b6efSJakub Kicinski return SKB_NOT_DROPPED_YET;
45177bbb765bSDmitry Safonov }
4518811efc06SDmitry Safonov #else
4519811efc06SDmitry Safonov static inline enum skb_drop_reason
4520811efc06SDmitry Safonov tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
4521811efc06SDmitry Safonov const void *saddr, const void *daddr,
4522811efc06SDmitry Safonov int family, int l3index, const __u8 *hash_location)
4523811efc06SDmitry Safonov {
4524811efc06SDmitry Safonov return SKB_NOT_DROPPED_YET;
4525811efc06SDmitry Safonov }
45267bbb765bSDmitry Safonov
4527cfb6eeb4SYOSHIFUJI Hideaki #endif
4528cfb6eeb4SYOSHIFUJI Hideaki
4529811efc06SDmitry Safonov /* Called with rcu_read_lock() */
4530811efc06SDmitry Safonov enum skb_drop_reason
4531811efc06SDmitry Safonov tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
4532811efc06SDmitry Safonov const struct sk_buff *skb,
4533811efc06SDmitry Safonov const void *saddr, const void *daddr,
4534811efc06SDmitry Safonov int family, int dif, int sdif)
4535811efc06SDmitry Safonov {
4536811efc06SDmitry Safonov const struct tcphdr *th = tcp_hdr(skb);
4537811efc06SDmitry Safonov const struct tcp_ao_hdr *aoh;
4538811efc06SDmitry Safonov const __u8 *md5_location;
4539811efc06SDmitry Safonov int l3index;
4540811efc06SDmitry Safonov
4541811efc06SDmitry Safonov /* Invalid option, or one of the auth options appears twice */
4542811efc06SDmitry Safonov if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
454396be3dcdSDmitry Safonov trace_tcp_hash_bad_header(sk, skb);
4544811efc06SDmitry Safonov return SKB_DROP_REASON_TCP_AUTH_HDR;
4545811efc06SDmitry Safonov }
4546811efc06SDmitry Safonov
4547811efc06SDmitry Safonov if (req) {
4548811efc06SDmitry Safonov if (tcp_rsk_used_ao(req) != !!aoh) {
454996be3dcdSDmitry Safonov u8 keyid, rnext, maclen;
455096be3dcdSDmitry Safonov
455196be3dcdSDmitry Safonov if (aoh) {
455296be3dcdSDmitry Safonov keyid = aoh->keyid;
455396be3dcdSDmitry Safonov rnext = aoh->rnext_keyid;
455496be3dcdSDmitry Safonov maclen = tcp_ao_hdr_maclen(aoh);
455596be3dcdSDmitry Safonov } else {
455696be3dcdSDmitry Safonov keyid = rnext = maclen = 0;
455796be3dcdSDmitry Safonov }
455896be3dcdSDmitry Safonov
4559811efc06SDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
456096be3dcdSDmitry Safonov trace_tcp_ao_handshake_failure(sk, skb, keyid, rnext, maclen);
4561811efc06SDmitry Safonov return SKB_DROP_REASON_TCP_AOFAILURE;
4562811efc06SDmitry Safonov }
4563811efc06SDmitry Safonov }
4564811efc06SDmitry Safonov
4565811efc06SDmitry Safonov /* If sdif is set, the packet ingressed via a device
4566811efc06SDmitry Safonov * in an L3 domain, and dif is set to the l3mdev
4567811efc06SDmitry Safonov */
4568811efc06SDmitry Safonov l3index = sdif ?
dif : 0; 4569811efc06SDmitry Safonov 4570811efc06SDmitry Safonov /* Fast path: unsigned segments */ 4571811efc06SDmitry Safonov if (likely(!md5_location && !aoh)) { 4572811efc06SDmitry Safonov /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid 4573811efc06SDmitry Safonov * for the remote peer. On TCP-AO established connection 4574811efc06SDmitry Safonov * the last key is impossible to remove, so there's 4575811efc06SDmitry Safonov * always at least one current_key. 4576811efc06SDmitry Safonov */ 4577811efc06SDmitry Safonov if (tcp_ao_required(sk, saddr, family, l3index, true)) { 457896be3dcdSDmitry Safonov trace_tcp_hash_ao_required(sk, skb); 4579811efc06SDmitry Safonov return SKB_DROP_REASON_TCP_AONOTFOUND; 4580811efc06SDmitry Safonov } 4581811efc06SDmitry Safonov if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) { 4582811efc06SDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 458396be3dcdSDmitry Safonov trace_tcp_hash_md5_required(sk, skb); 4584811efc06SDmitry Safonov return SKB_DROP_REASON_TCP_MD5NOTFOUND; 4585811efc06SDmitry Safonov } 4586811efc06SDmitry Safonov return SKB_NOT_DROPPED_YET; 4587811efc06SDmitry Safonov } 4588811efc06SDmitry Safonov 4589811efc06SDmitry Safonov if (aoh) 4590811efc06SDmitry Safonov return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh); 4591811efc06SDmitry Safonov 4592811efc06SDmitry Safonov return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family, 4593811efc06SDmitry Safonov l3index, md5_location); 4594811efc06SDmitry Safonov } 4595811efc06SDmitry Safonov EXPORT_SYMBOL_GPL(tcp_inbound_hash); 4596811efc06SDmitry Safonov 45974ac02babSAndi Kleen void tcp_done(struct sock *sk) 45984ac02babSAndi Kleen { 4599d983ea6fSEric Dumazet struct request_sock *req; 46008336886fSJerry Chu 4601cab209e5SEric Dumazet /* We might be called with a new socket, after 4602cab209e5SEric Dumazet * inet_csk_prepare_forced_close() has been called 4603cab209e5SEric Dumazet * so we can not use lockdep_sock_is_held(sk) 4604cab209e5SEric Dumazet */ 4605cab209e5SEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); 46064ac02babSAndi Kleen 46074ac02babSAndi Kleen if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 4608c10d9310SEric Dumazet TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 46094ac02babSAndi Kleen 46104ac02babSAndi Kleen tcp_set_state(sk, TCP_CLOSE); 46114ac02babSAndi Kleen tcp_clear_xmit_timers(sk); 461200db4124SIan Morris if (req) 46138336886fSJerry Chu reqsk_fastopen_remove(sk, req, false); 46144ac02babSAndi Kleen 4615e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 46164ac02babSAndi Kleen 46174ac02babSAndi Kleen if (!sock_flag(sk, SOCK_DEAD)) 46184ac02babSAndi Kleen sk->sk_state_change(sk); 46194ac02babSAndi Kleen else 46204ac02babSAndi Kleen inet_csk_destroy_sock(sk); 46214ac02babSAndi Kleen } 46224ac02babSAndi Kleen EXPORT_SYMBOL_GPL(tcp_done); 46234ac02babSAndi Kleen 4624c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err) 4625c1e64e29SLorenzo Colitti { 4626af9784d0SEric Dumazet int state = inet_sk_state_load(sk); 4627af9784d0SEric Dumazet 4628af9784d0SEric Dumazet if (state == TCP_NEW_SYN_RECV) { 462907f6f4a3SEric Dumazet struct request_sock *req = inet_reqsk(sk); 463007f6f4a3SEric Dumazet 463107f6f4a3SEric Dumazet local_bh_disable(); 4632acc2cf4eSLorenzo Colitti inet_csk_reqsk_queue_drop(req->rsk_listener, req); 463307f6f4a3SEric Dumazet local_bh_enable(); 463407f6f4a3SEric Dumazet return 0; 463507f6f4a3SEric Dumazet } 4636af9784d0SEric Dumazet if 
(state == TCP_TIME_WAIT) { 4637af9784d0SEric Dumazet struct inet_timewait_sock *tw = inet_twsk(sk); 4638af9784d0SEric Dumazet 4639af9784d0SEric Dumazet refcount_inc(&tw->tw_refcnt); 4640af9784d0SEric Dumazet local_bh_disable(); 4641af9784d0SEric Dumazet inet_twsk_deschedule_put(tw); 4642af9784d0SEric Dumazet local_bh_enable(); 4643af9784d0SEric Dumazet return 0; 4644c1e64e29SLorenzo Colitti } 4645c1e64e29SLorenzo Colitti 46464ddbcb88SAditi Ghag /* BPF context ensures sock locking. */ 46474ddbcb88SAditi Ghag if (!has_current_bpf_ctx()) 4648c1e64e29SLorenzo Colitti /* Don't race with userspace socket closes such as tcp_close. */ 4649c1e64e29SLorenzo Colitti lock_sock(sk); 4650c1e64e29SLorenzo Colitti 4651bac76cf8SXueming Feng /* Avoid closing the same socket twice. */ 4652bac76cf8SXueming Feng if (sk->sk_state == TCP_CLOSE) { 4653bac76cf8SXueming Feng if (!has_current_bpf_ctx()) 4654bac76cf8SXueming Feng release_sock(sk); 4655bac76cf8SXueming Feng return -ENOENT; 4656bac76cf8SXueming Feng } 4657bac76cf8SXueming Feng 46582010b93eSLorenzo Colitti if (sk->sk_state == TCP_LISTEN) { 46592010b93eSLorenzo Colitti tcp_set_state(sk, TCP_CLOSE); 46602010b93eSLorenzo Colitti inet_csk_listen_stop(sk); 46612010b93eSLorenzo Colitti } 46622010b93eSLorenzo Colitti 4663c1e64e29SLorenzo Colitti /* Don't race with BH socket closes such as inet_csk_listen_stop. */ 4664c1e64e29SLorenzo Colitti local_bh_disable(); 4665c1e64e29SLorenzo Colitti bh_lock_sock(sk); 4666c1e64e29SLorenzo Colitti 4667c1e64e29SLorenzo Colitti if (tcp_need_reset(sk->sk_state)) 46685691276bSJason Xing tcp_send_active_reset(sk, GFP_ATOMIC, 4669edefba66SJason Xing SK_RST_REASON_TCP_STATE); 46705ce4645cSEric Dumazet tcp_done_with_error(sk, err); 4671c1e64e29SLorenzo Colitti 4672c1e64e29SLorenzo Colitti bh_unlock_sock(sk); 4673c1e64e29SLorenzo Colitti local_bh_enable(); 46744ddbcb88SAditi Ghag if (!has_current_bpf_ctx()) 4675c1e64e29SLorenzo Colitti release_sock(sk); 4676c1e64e29SLorenzo Colitti return 0; 4677c1e64e29SLorenzo Colitti } 4678c1e64e29SLorenzo Colitti EXPORT_SYMBOL_GPL(tcp_abort); 4679c1e64e29SLorenzo Colitti 46805f8ef48dSStephen Hemminger extern struct tcp_congestion_ops tcp_reno; 46811da177e4SLinus Torvalds 46821da177e4SLinus Torvalds static __initdata unsigned long thash_entries; 46831da177e4SLinus Torvalds static int __init set_thash_entries(char *str) 46841da177e4SLinus Torvalds { 4685413c27d8SEldad Zack ssize_t ret; 4686413c27d8SEldad Zack 46871da177e4SLinus Torvalds if (!str) 46881da177e4SLinus Torvalds return 0; 4689413c27d8SEldad Zack 4690413c27d8SEldad Zack ret = kstrtoul(str, 0, &thash_entries); 4691413c27d8SEldad Zack if (ret) 4692413c27d8SEldad Zack return 0; 4693413c27d8SEldad Zack 46941da177e4SLinus Torvalds return 1; 46951da177e4SLinus Torvalds } 46961da177e4SLinus Torvalds __setup("thash_entries=", set_thash_entries); 46971da177e4SLinus Torvalds 469847d7a88cSFabian Frederick static void __init tcp_init_mem(void) 46994acb4190SGlauber Costa { 4700b66e91ccSEric Dumazet unsigned long limit = nr_free_buffer_pages() / 16; 4701b66e91ccSEric Dumazet 47024acb4190SGlauber Costa limit = max(limit, 128UL); 4703b66e91ccSEric Dumazet sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */ 4704b66e91ccSEric Dumazet sysctl_tcp_mem[1] = limit; /* 6.25 % */ 4705b66e91ccSEric Dumazet sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */ 47064acb4190SGlauber Costa } 47074acb4190SGlauber Costa 4708d5fed5adSCoco Li static void __init tcp_struct_check(void) 4709d5fed5adSCoco Li { 4710d5fed5adSCoco Li /* TX read-mostly hotpath cache lines */ 
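	/* These build-time checks pin the hand-tuned cacheline layout of
	 * struct tcp_sock. Roughly, the groups are declared in
	 * include/linux/tcp.h like this (sketch, field list abbreviated):
	 *
	 *	__cacheline_group_begin(tcp_sock_read_tx);
	 *	u32	max_window;
	 *	...
	 *	__cacheline_group_end(tcp_sock_read_tx);
	 *
	 * CACHELINE_ASSERT_GROUP_MEMBER() breaks the build if a field
	 * drifts out of its intended group, and CACHELINE_ASSERT_GROUP_SIZE()
	 * if a group outgrows its byte budget.
	 */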
4711d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, max_window); 4712d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, rcv_ssthresh); 4713d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering); 4714d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat); 4715d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs); 4716d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, lost_skb_hint); 4717d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint); 4718d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 40); 4719d5fed5adSCoco Li 4720d5fed5adSCoco Li /* TXRX read-mostly hotpath cache lines */ 4721d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset); 4722d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_wnd); 4723d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, mss_cache); 4724d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_cwnd); 4725d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, prr_out); 4726d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out); 4727d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out); 4728119ff048SEric Dumazet CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio); 4729119ff048SEric Dumazet CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 32); 4730d5fed5adSCoco Li 4731d5fed5adSCoco Li /* RX read-mostly hotpath cache lines */ 4732d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq); 4733d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rcv_tstamp); 4734d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1); 4735d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq); 4736d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us); 4737d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, retrans_out); 4738d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, advmss); 4739d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, urg_data); 4740d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, lost); 4741d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min); 4742d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue); 4743d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh); 4744d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69); 4745d5fed5adSCoco Li 4746d5fed5adSCoco Li /* TX read-write hotpath cache lines */ 4747d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out); 4748d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, data_segs_out); 4749d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, bytes_sent); 4750d5fed5adSCoco Li 
CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, snd_sml); 4751d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_start); 4752d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_stat); 4753d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, write_seq); 4754d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, pushed_seq); 4755d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime); 4756d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us); 4757d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns); 4758d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq); 4759d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue); 4760d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack); 4761d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags); 4762d2c3a7ebSEric Dumazet CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 89); 4763d5fed5adSCoco Li 4764d5fed5adSCoco Li /* TXRX read-write hotpath cache lines */ 4765d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags); 4766d2c3a7ebSEric Dumazet CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_clock_cache); 4767d2c3a7ebSEric Dumazet CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_mstamp); 4768d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt); 4769d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt); 4770d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una); 4771d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, window_clamp); 4772d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, srtt_us); 4773d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, packets_out); 4774d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_up); 4775d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered); 4776d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered_ce); 4777d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited); 4778d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd); 4779d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt); 47809b9fd458SEric Dumazet 47819b9fd458SEric Dumazet /* 32bit arches with 8byte alignment on u64 fields might need padding 47829b9fd458SEric Dumazet * before tcp_clock_cache. 
47839b9fd458SEric Dumazet */ 47849b9fd458SEric Dumazet CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92 + 4); 4785d5fed5adSCoco Li 4786d5fed5adSCoco Li /* RX read-write hotpath cache lines */ 4787d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received); 4788d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, segs_in); 4789d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, data_segs_in); 4790d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_wup); 4791d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, max_packets_out); 4792d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, cwnd_usage_seq); 4793d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_delivered); 4794d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_interval_us); 4795d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_last_tsecr); 4796d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, first_tx_mstamp); 4797d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_mstamp); 4798d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_acked); 4799d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_est); 4800d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcvq_space); 4801d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_rx, 99); 4802d5fed5adSCoco Li } 4803d5fed5adSCoco Li 48041da177e4SLinus Torvalds void __init tcp_init(void) 48051da177e4SLinus Torvalds { 4806b49960a0SEric Dumazet int max_rshare, max_wshare, cnt; 4807b2d3ea4aSEric Dumazet unsigned long limit; 4808074b8517SDimitri Sivanich unsigned int i; 48091da177e4SLinus Torvalds 48103b4929f6SEric Dumazet BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); 4811b2d3ea4aSEric Dumazet BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > 4812c593642cSPankaj Bharadiya sizeof_field(struct sk_buff, cb)); 48131da177e4SLinus Torvalds 4814d5fed5adSCoco Li tcp_struct_check(); 4815d5fed5adSCoco Li 4816908c7f19STejun Heo percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); 481719757cebSEric Dumazet 481819757cebSEric Dumazet timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE); 481919757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 482019757cebSEric Dumazet 482127da6d37SMartin KaFai Lau inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash", 482227da6d37SMartin KaFai Lau thash_entries, 21, /* one slot per 2 MB*/ 482327da6d37SMartin KaFai Lau 0, 64 * 1024); 48246e04e021SArnaldo Carvalho de Melo tcp_hashinfo.bind_bucket_cachep = 48256e04e021SArnaldo Carvalho de Melo kmem_cache_create("tcp_bind_bucket", 48266e04e021SArnaldo Carvalho de Melo sizeof(struct inet_bind_bucket), 0, 4827990c74e3SVasily Averin SLAB_HWCACHE_ALIGN | SLAB_PANIC | 4828990c74e3SVasily Averin SLAB_ACCOUNT, 4829990c74e3SVasily Averin NULL); 483028044fc1SJoanne Koong tcp_hashinfo.bind2_bucket_cachep = 483128044fc1SJoanne Koong kmem_cache_create("tcp_bind2_bucket", 483228044fc1SJoanne Koong sizeof(struct inet_bind2_bucket), 0, 483328044fc1SJoanne Koong SLAB_HWCACHE_ALIGN | SLAB_PANIC | 483428044fc1SJoanne Koong SLAB_ACCOUNT, 483528044fc1SJoanne 
Koong NULL); 48361da177e4SLinus Torvalds 48371da177e4SLinus Torvalds /* Size and allocate the main established and bind bucket 48381da177e4SLinus Torvalds * hash tables. 48391da177e4SLinus Torvalds * 48401da177e4SLinus Torvalds * The methodology is similar to that of the buffer cache. 48411da177e4SLinus Torvalds */ 48426e04e021SArnaldo Carvalho de Melo tcp_hashinfo.ehash = 48431da177e4SLinus Torvalds alloc_large_system_hash("TCP established", 48440f7ff927SArnaldo Carvalho de Melo sizeof(struct inet_ehash_bucket), 48451da177e4SLinus Torvalds thash_entries, 4846fd90b29dSEric Dumazet 17, /* one slot per 128 KB of memory */ 48479e950efaSJohn Heffner 0, 48481da177e4SLinus Torvalds NULL, 4849f373b53bSEric Dumazet &tcp_hashinfo.ehash_mask, 485031fe62b9STim Bird 0, 48510ccfe618SJean Delvare thash_entries ? 0 : 512 * 1024); 485205dbc7b5SEric Dumazet for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) 48533ab5aee7SEric Dumazet INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); 485405dbc7b5SEric Dumazet 4855230140cfSEric Dumazet if (inet_ehash_locks_alloc(&tcp_hashinfo)) 4856230140cfSEric Dumazet panic("TCP: failed to alloc ehash_locks"); 48576e04e021SArnaldo Carvalho de Melo tcp_hashinfo.bhash = 4858593d1ebeSJoanne Koong alloc_large_system_hash("TCP bind", 485928044fc1SJoanne Koong 2 * sizeof(struct inet_bind_hashbucket), 4860f373b53bSEric Dumazet tcp_hashinfo.ehash_mask + 1, 4861fd90b29dSEric Dumazet 17, /* one slot per 128 KB of memory */ 48629e950efaSJohn Heffner 0, 48636e04e021SArnaldo Carvalho de Melo &tcp_hashinfo.bhash_size, 48641da177e4SLinus Torvalds NULL, 486531fe62b9STim Bird 0, 48661da177e4SLinus Torvalds 64 * 1024); 4867074b8517SDimitri Sivanich tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; 486828044fc1SJoanne Koong tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size; 48696e04e021SArnaldo Carvalho de Melo for (i = 0; i < tcp_hashinfo.bhash_size; i++) { 48706e04e021SArnaldo Carvalho de Melo spin_lock_init(&tcp_hashinfo.bhash[i].lock); 48716e04e021SArnaldo Carvalho de Melo INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); 487228044fc1SJoanne Koong spin_lock_init(&tcp_hashinfo.bhash2[i].lock); 487328044fc1SJoanne Koong INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain); 48741da177e4SLinus Torvalds } 48751da177e4SLinus Torvalds 4876d1e5e640SKuniyuki Iwashima tcp_hashinfo.pernet = false; 4877c5ed63d6SEric Dumazet 4878c5ed63d6SEric Dumazet cnt = tcp_hashinfo.ehash_mask + 1; 4879c5ed63d6SEric Dumazet sysctl_tcp_max_orphans = cnt / 2; 48801da177e4SLinus Torvalds 4881a4fe34bfSEric W. 
Biederman tcp_init_mem(); 4882c43b874dSJason Wang /* Set per-socket limits to no more than 1/128 the pressure threshold */ 48835fb84b14SEric Dumazet limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); 4884b49960a0SEric Dumazet max_wshare = min(4UL*1024*1024, limit); 4885b49960a0SEric Dumazet max_rshare = min(6UL*1024*1024, limit); 48867b4f4b5eSJohn Heffner 4887100fdd1fSEric Dumazet init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE; 4888356d1833SEric Dumazet init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024; 4889356d1833SEric Dumazet init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare); 48907b4f4b5eSJohn Heffner 4891100fdd1fSEric Dumazet init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE; 4892a337531bSYuchung Cheng init_net.ipv4.sysctl_tcp_rmem[1] = 131072; 4893a337531bSYuchung Cheng init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare); 48941da177e4SLinus Torvalds 4895afd46503SJoe Perches pr_info("Hash tables configured (established %u bind %u)\n", 4896f373b53bSEric Dumazet tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); 4897317a76f9SStephen Hemminger 48981946e672SHaishuang Yan tcp_v4_init(); 489951c5d0c4SDavid S. Miller tcp_metrics_init(); 490055d8694fSFlorian Westphal BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0); 490146d3ceabSEric Dumazet tcp_tasklet_init(); 4902f870fa0bSMat Martineau mptcp_init(); 49031da177e4SLinus Torvalds } 4904
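/* A condensed, hypothetical userspace counterpart of the
 * TCP_ZEROCOPY_RECEIVE getsockopt path implemented earlier in this file;
 * tools/testing/selftests/net/tcp_mmap.c is the complete in-tree user.
 * 'addr' and 'chunk' are illustrative, error handling elided:
 *
 *	struct tcp_zerocopy_receive zc = {
 *		.address = (__u64)(unsigned long)addr,	// region mmap()ed on fd
 *		.length  = chunk,
 *	};
 *	socklen_t zc_len = sizeof(zc);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
 *		       &zc, &zc_len) == 0) {
 *		// zc.length bytes are now readable at addr; bytes hinted by
 *		// zc.recv_skip_hint must be consumed with recv() instead.
 *	}
 */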