// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken;
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
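
/* For orientation, the common paths through the states above (per RFC 793):
 *
 *	active close:	ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE
 *	passive close:	ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE
 *
 * CLOSING is only entered on a simultaneous close, i.e. when both ends
 * send a FIN before either has seen the other's FIN.
 */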

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;

	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
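
/* Worked example for the two helpers above (illustrative values, not taken
 * from this file): with timeout = 1 and rto_max = 120 (same time unit for
 * all arguments), the timeout doubles after each retransmit, so
 * retrans_to_secs(3, 1, 120) returns 1 + 2 + 4 = 7, and
 * secs_to_retrans(7, 1, 120) maps back to 3.  The two helpers are inverses
 * of each other, up to the clamping at rto_max and the 255-retransmit cap.
 */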

static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}
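
/* The helper above reports the latest delivery rate sample in bytes per
 * second: rate_delivered packets of mss_cache bytes each, delivered over
 * rate_interval_us microseconds.  For example (assumed values), 10 packets
 * with an MSS of 1460 bytes delivered in 10000 us yield
 * (10 * 1460 * USEC_PER_SEC) / 10000 = 1460000 bytes/sec.
 */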

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	icsk->icsk_rto_min = TCP_RTO_MIN;
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;
	tp->rate_app_limited = 1;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));

	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}
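
/* The tsflags handled above are the SOF_TIMESTAMPING_* bits requested by
 * userspace, typically via the SO_TIMESTAMPING socket option.  Illustrative
 * userspace sketch (not part of this file):
 *
 *	unsigned int val = SOF_TIMESTAMPING_TX_ACK |
 *			   SOF_TIMESTAMPING_SOFTWARE;
 *	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
 *
 * Completed timestamps are then read back from the socket error queue
 * with recvmsg(fd, ..., MSG_ERRQUEUE).
 */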

static bool tcp_stream_is_readable(struct sock *sk, int target)
{
	if (tcp_epollin_ready(sk, target))
		return true;
	return sk_is_readable(sk);
}

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	u8 shutdown;
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);
		u16 urg_data = READ_ONCE(tp->urg_data);

		if (unlikely(urg_data) &&
		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
		    !sock_flag(sk, SOCK_URGINLINE))
			target++;

		if (tcp_stream_is_readable(sk, target))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(shutdown & SEND_SHUTDOWN)) {
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (__sk_stream_is_writeable(sk, 1))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);

int tcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = READ_ONCE(tp->urg_data) &&
		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) -
			       READ_ONCE(tp->snd_nxt);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	*karg = answ;
	return 0;
}
EXPORT_SYMBOL(tcp_ioctl);
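
/* Userspace view of the above (illustrative): SIOCINQ (aka FIONREAD)
 * reports bytes available to read, SIOCOUTQ bytes written but not yet
 * acknowledged, and SIOCOUTQNSD bytes queued but not yet sent, e.g.:
 *
 *	int unread;
 *	ioctl(fd, SIOCINQ, &unread);
 */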

void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
	       tcp_skb_can_collapse_to(skb);
}

void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}
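
/* Userspace can drive the nonagle/corking logic above explicitly, e.g.
 * (illustrative):
 *
 *	send(fd, hdr, hdr_len, MSG_MORE);	// corked, more data follows
 *	send(fd, body, body_len, 0);		// flushes pending data
 *
 * which lets the stack coalesce both writes into full-sized segments.
 */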

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			sk_wait_data(sk, &timeo, NULL);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
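
/* Typical userspace use of the above (illustrative): move payload from a
 * TCP socket into a pipe without copying through userspace, then on to
 * another fd:
 *
 *	n = splice(sock_fd, NULL, pipe_wr, NULL, 65536, SPLICE_F_MOVE);
 *	splice(pipe_rd, NULL, out_fd, NULL, n, SPLICE_F_MOVE);
 */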

struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}
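
/* Example of the size_goal computation above (assumed values): with an MSS
 * of 1448 bytes and new_size_goal of 65536 (gso_max_size, not clamped by the
 * window), tp->gso_segs becomes 65536 / 1448 = 45 and size_goal is
 * 45 * 1448 = 65160 bytes, so sendmsg() tries to build skbs carrying roughly
 * 64KB of payload that TSO/GSO can segment later.
 */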

/* In some cases, both sendpage() and sendmsg() could have added
 * an skb to the write queue, but failed to add payload on it.
 * We need to remove it to consume less memory, but more
 * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
 * users.
 */
void tcp_remove_empty_skb(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		tcp_unlink_write_queue(skb, sk);
		if (tcp_write_queue_empty(sk))
			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
		tcp_wmem_free_skb(sk, skb);
	}
}

/* skb changing from pure zc to mixed, must charge zc */
static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
{
	if (unlikely(skb_zcopy_pure(skb))) {
		u32 extra = skb->truesize -
			    SKB_TRUESIZE(skb_end_offset(skb));

		if (!sk_wmem_schedule(sk, extra))
			return -ENOMEM;

		sk_mem_charge(sk, extra);
		skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
	}
	return 0;
}


int tcp_wmem_schedule(struct sock *sk, int copy)
{
	int left;

	if (likely(sk_wmem_schedule(sk, copy)))
		return copy;

	/* We could be in trouble if we have nothing queued.
	 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
	 * to guarantee some progress.
	 */
	left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
	if (left > 0)
		sk_forced_mem_schedule(sk, min(left, copy));
	return min(copy, sk->sk_forward_alloc);
}

int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags)
{
	struct bio_vec bvec;
	struct msghdr msg = { .msg_flags = flags | MSG_SPLICE_PAGES, };

	if (!(sk->sk_route_caps & NETIF_F_SG))
		return sock_no_sendpage_locked(sk, page, offset, size, flags);

	tcp_rate_check_app_limited(sk);	/* is sending application-limited? */

	bvec_set_page(&bvec, page, size, offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);

	if (flags & MSG_SENDPAGE_NOTLAST)
		msg.msg_flags |= MSG_MORE;

	return tcp_sendmsg_locked(sk, &msg, size);
}
EXPORT_SYMBOL_GPL(tcp_sendpage_locked);

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	int ret;

	lock_sock(sk);
	ret = tcp_sendpage_locked(sk, page, offset, size, flags);
	release_sock(sk);

	return ret;
}
EXPORT_SYMBOL(tcp_sendpage);

void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}

int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr *uaddr = msg->msg_name;
	int err, flags;

	if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
	      TFO_CLIENT_ENABLE) ||
	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
	     uaddr->sa_family == AF_UNSPEC))
		return -EOPNOTSUPP;
	if (tp->fastopen_req)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(!tp->fastopen_req))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;
	tp->fastopen_req->size = size;
	tp->fastopen_req->uarg = uarg;

	if (inet->defer_connect) {
		err = tcp_connect(sk);
		/* Same failure procedure as in tcp_v4/6_connect */
		if (err) {
			tcp_set_state(sk, TCP_CLOSE);
			inet->inet_dport = 0;
			sk->sk_route_caps = 0;
		}
	}
	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, uaddr,
				    msg->msg_namelen, flags, 1);
	/* fastopen_req could already be freed in __inet_stream_connect
	 * if the connection times out or gets rst
	 */
	if (tp->fastopen_req) {
		*copied = tp->fastopen_req->copied;
		tcp_free_fastopen_req(tp);
		inet->defer_connect = 0;
	}
	return err;
}
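
/* Client-side TCP Fast Open reaches this path either via sendmsg()/sendto()
 * with MSG_FASTOPEN, or via the TCP_FASTOPEN_CONNECT socket option followed
 * by a regular connect()+send().  Illustrative userspace call:
 *
 *	sendto(fd, data, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * which carries the payload in the SYN when a valid TFO cookie is available.
 */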
out; 1108cf60af03SYuchung Cheng else if (err) 1109cf60af03SYuchung Cheng goto out_err; 1110cf60af03SYuchung Cheng } 1111cf60af03SYuchung Cheng 11121da177e4SLinus Torvalds timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 11131da177e4SLinus Torvalds 1114d7722e85SSoheil Hassas Yeganeh tcp_rate_check_app_limited(sk); /* is sending application-limited? */ 1115d7722e85SSoheil Hassas Yeganeh 11168336886fSJerry Chu /* Wait for a connection to finish. One exception is TCP Fast Open 11178336886fSJerry Chu * (passive side) where data is allowed to be sent before a connection 11188336886fSJerry Chu * is fully established. 11198336886fSJerry Chu */ 11208336886fSJerry Chu if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 11218336886fSJerry Chu !tcp_passive_fastopen(sk)) { 1122686a5624SYuvaraja Mariappan err = sk_stream_wait_connect(sk, &timeo); 1123686a5624SYuvaraja Mariappan if (err != 0) 1124cf60af03SYuchung Cheng goto do_error; 11258336886fSJerry Chu } 11261da177e4SLinus Torvalds 1127c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) { 1128c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_RECV_QUEUE) { 1129c0e88ff0SPavel Emelyanov copied = tcp_send_rcvq(sk, msg, size); 11305924f17aSChristoph Paasch goto out_nopush; 1131c0e88ff0SPavel Emelyanov } 1132c0e88ff0SPavel Emelyanov 1133c0e88ff0SPavel Emelyanov err = -EINVAL; 1134c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE) 1135c0e88ff0SPavel Emelyanov goto out_err; 1136c0e88ff0SPavel Emelyanov 1137c0e88ff0SPavel Emelyanov /* 'common' sending to sendq */ 1138c0e88ff0SPavel Emelyanov } 1139c0e88ff0SPavel Emelyanov 1140657a0667SWillem de Bruijn sockcm_init(&sockc, sk); 1141c14ac945SSoheil Hassas Yeganeh if (msg->msg_controllen) { 1142c14ac945SSoheil Hassas Yeganeh err = sock_cmsg_send(sk, msg, &sockc); 1143c14ac945SSoheil Hassas Yeganeh if (unlikely(err)) { 1144c14ac945SSoheil Hassas Yeganeh err = -EINVAL; 1145c14ac945SSoheil Hassas Yeganeh goto out_err; 1146c14ac945SSoheil Hassas Yeganeh } 1147c14ac945SSoheil Hassas Yeganeh } 1148c14ac945SSoheil Hassas Yeganeh 11491da177e4SLinus Torvalds /* This should be in poll */ 11509cd3e072SEric Dumazet sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 11511da177e4SLinus Torvalds 11521da177e4SLinus Torvalds /* Ok commence sending. */ 11531da177e4SLinus Torvalds copied = 0; 11541da177e4SLinus Torvalds 1155d41a69f1SEric Dumazet restart: 1156d41a69f1SEric Dumazet mss_now = tcp_send_mss(sk, &size_goal, flags); 1157d41a69f1SEric Dumazet 11581da177e4SLinus Torvalds err = -EPIPE; 11591da177e4SLinus Torvalds if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 116079d8665bSEric Dumazet goto do_error; 11611da177e4SLinus Torvalds 116201e97e65SAl Viro while (msg_data_left(msg)) { 1163270a1c3dSDavid Howells ssize_t copy = 0; 11641da177e4SLinus Torvalds 1165fe067e8aSDavid S. 
Miller skb = tcp_write_queue_tail(sk); 116665ec6097SEric Dumazet if (skb) 116765ec6097SEric Dumazet copy = size_goal - skb->len; 11681da177e4SLinus Torvalds 1169c134ecb8SMartin KaFai Lau if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { 11703613b3dbSEric Dumazet bool first_skb; 11713613b3dbSEric Dumazet 11721da177e4SLinus Torvalds new_segment: 11731da177e4SLinus Torvalds if (!sk_stream_memory_free(sk)) 1174afb83012SSoheil Hassas Yeganeh goto wait_for_space; 11751da177e4SLinus Torvalds 11761a991488SEric Dumazet if (unlikely(process_backlog >= 16)) { 11771a991488SEric Dumazet process_backlog = 0; 11781a991488SEric Dumazet if (sk_flush_backlog(sk)) 1179d41a69f1SEric Dumazet goto restart; 1180d4011239SEric Dumazet } 118175c119afSEric Dumazet first_skb = tcp_rtx_and_write_queues_empty(sk); 11825882efffSEric Dumazet skb = tcp_stream_alloc_skb(sk, sk->sk_allocation, 11833613b3dbSEric Dumazet first_skb); 11841da177e4SLinus Torvalds if (!skb) 1185afb83012SSoheil Hassas Yeganeh goto wait_for_space; 11861da177e4SLinus Torvalds 11871a991488SEric Dumazet process_backlog++; 11881da177e4SLinus Torvalds 118904d8825cSPaolo Abeni tcp_skb_entail(sk, skb); 1190c1b4a7e6SDavid S. Miller copy = size_goal; 11919d186cacSAndrey Vagin 11929d186cacSAndrey Vagin /* All packets are restored as if they have 1193d3edd06eSEric Dumazet * already been sent. skb_mstamp_ns isn't set to 11949d186cacSAndrey Vagin * avoid wrong rtt estimation. 11959d186cacSAndrey Vagin */ 11969d186cacSAndrey Vagin if (tp->repair) 11979d186cacSAndrey Vagin TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; 11981da177e4SLinus Torvalds } 11991da177e4SLinus Torvalds 12001da177e4SLinus Torvalds /* Try to append data to the end of skb. */ 120101e97e65SAl Viro if (copy > msg_data_left(msg)) 120201e97e65SAl Viro copy = msg_data_left(msg); 12031da177e4SLinus Torvalds 1204270a1c3dSDavid Howells if (zc == 0) { 12055640f768SEric Dumazet bool merge = true; 12061da177e4SLinus Torvalds int i = skb_shinfo(skb)->nr_frags; 12075640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 1208761965eaSEric Dumazet 12095640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 1210afb83012SSoheil Hassas Yeganeh goto wait_for_space; 1211761965eaSEric Dumazet 12125640f768SEric Dumazet if (!skb_can_coalesce(skb, i, pfrag->page, 12135640f768SEric Dumazet pfrag->offset)) { 1214657b991aSKuniyuki Iwashima if (i >= READ_ONCE(sysctl_max_skb_frags)) { 12151da177e4SLinus Torvalds tcp_mark_push(tp, skb); 12161da177e4SLinus Torvalds goto new_segment; 12171da177e4SLinus Torvalds } 12185640f768SEric Dumazet merge = false; 12195640f768SEric Dumazet } 1220ef015786SHerbert Xu 12215640f768SEric Dumazet copy = min_t(int, copy, pfrag->size - pfrag->offset); 1222ef015786SHerbert Xu 1223eb315a7dSPavel Begunkov if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) { 1224849b425cSEric Dumazet if (tcp_downgrade_zcopy_pure(sk, skb)) 1225849b425cSEric Dumazet goto wait_for_space; 1226eb315a7dSPavel Begunkov skb_zcopy_downgrade_managed(skb); 1227eb315a7dSPavel Begunkov } 1228849b425cSEric Dumazet 1229849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy); 1230849b425cSEric Dumazet if (!copy) 1231afb83012SSoheil Hassas Yeganeh goto wait_for_space; 12321da177e4SLinus Torvalds 123357be5bdaSAl Viro err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, 12345640f768SEric Dumazet pfrag->page, 12355640f768SEric Dumazet pfrag->offset, 12365640f768SEric Dumazet copy); 12375640f768SEric Dumazet if (err) 12381da177e4SLinus Torvalds goto do_error; 12391da177e4SLinus Torvalds 12401da177e4SLinus 
Torvalds /* Update the skb. */ 12411da177e4SLinus Torvalds if (merge) { 12429e903e08SEric Dumazet skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 12431da177e4SLinus Torvalds } else { 12445640f768SEric Dumazet skb_fill_page_desc(skb, i, pfrag->page, 12455640f768SEric Dumazet pfrag->offset, copy); 12464e33e346SEric Dumazet page_ref_inc(pfrag->page); 12471da177e4SLinus Torvalds } 12485640f768SEric Dumazet pfrag->offset += copy; 1249270a1c3dSDavid Howells } else if (zc == MSG_ZEROCOPY) { 12509b65b17dSTalal Ahmad /* First append to a fragless skb builds initial 12519b65b17dSTalal Ahmad * pure zerocopy skb 12529b65b17dSTalal Ahmad */ 12539b65b17dSTalal Ahmad if (!skb->len) 12549b65b17dSTalal Ahmad skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY; 12559b65b17dSTalal Ahmad 12569b65b17dSTalal Ahmad if (!skb_zcopy_pure(skb)) { 1257849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy); 1258849b425cSEric Dumazet if (!copy) 1259358ed624STalal Ahmad goto wait_for_space; 12609b65b17dSTalal Ahmad } 1261358ed624STalal Ahmad 1262f214f915SWillem de Bruijn err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); 1263111856c7SWillem de Bruijn if (err == -EMSGSIZE || err == -EEXIST) { 1264111856c7SWillem de Bruijn tcp_mark_push(tp, skb); 1265f214f915SWillem de Bruijn goto new_segment; 1266111856c7SWillem de Bruijn } 1267f214f915SWillem de Bruijn if (err < 0) 1268f214f915SWillem de Bruijn goto do_error; 1269f214f915SWillem de Bruijn copy = err; 1270270a1c3dSDavid Howells } else if (zc == MSG_SPLICE_PAGES) { 1271270a1c3dSDavid Howells /* Splice in data if we can; copy if we can't. */ 1272270a1c3dSDavid Howells if (tcp_downgrade_zcopy_pure(sk, skb)) 1273270a1c3dSDavid Howells goto wait_for_space; 1274270a1c3dSDavid Howells copy = tcp_wmem_schedule(sk, copy); 1275270a1c3dSDavid Howells if (!copy) 1276270a1c3dSDavid Howells goto wait_for_space; 1277270a1c3dSDavid Howells 1278270a1c3dSDavid Howells err = skb_splice_from_iter(skb, &msg->msg_iter, copy, 1279270a1c3dSDavid Howells sk->sk_allocation); 1280270a1c3dSDavid Howells if (err < 0) { 1281270a1c3dSDavid Howells if (err == -EMSGSIZE) { 1282270a1c3dSDavid Howells tcp_mark_push(tp, skb); 1283270a1c3dSDavid Howells goto new_segment; 1284270a1c3dSDavid Howells } 1285270a1c3dSDavid Howells goto do_error; 1286270a1c3dSDavid Howells } 1287270a1c3dSDavid Howells copy = err; 1288270a1c3dSDavid Howells 1289270a1c3dSDavid Howells if (!(flags & MSG_NO_SHARED_FRAGS)) 1290270a1c3dSDavid Howells skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; 1291270a1c3dSDavid Howells 1292270a1c3dSDavid Howells sk_wmem_queued_add(sk, copy); 1293270a1c3dSDavid Howells sk_mem_charge(sk, copy); 12941da177e4SLinus Torvalds } 12951da177e4SLinus Torvalds 12961da177e4SLinus Torvalds if (!copied) 12974de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 12981da177e4SLinus Torvalds 12990f317464SEric Dumazet WRITE_ONCE(tp->write_seq, tp->write_seq + copy); 13001da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq += copy; 1301cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, 0); 13021da177e4SLinus Torvalds 13031da177e4SLinus Torvalds copied += copy; 130401e97e65SAl Viro if (!msg_data_left(msg)) { 1305c134ecb8SMartin KaFai Lau if (unlikely(flags & MSG_EOR)) 1306c134ecb8SMartin KaFai Lau TCP_SKB_CB(skb)->eor = 1; 13071da177e4SLinus Torvalds goto out; 13084ed2d765SWillem de Bruijn } 13091da177e4SLinus Torvalds 131065ec6097SEric Dumazet if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) 13111da177e4SLinus Torvalds continue; 13121da177e4SLinus Torvalds 13131da177e4SLinus Torvalds 
if (forced_push(tp)) { 13141da177e4SLinus Torvalds tcp_mark_push(tp, skb); 13159e412ba7SIlpo Järvinen __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1316fe067e8aSDavid S. Miller } else if (skb == tcp_send_head(sk)) 13171da177e4SLinus Torvalds tcp_push_one(sk, mss_now); 13181da177e4SLinus Torvalds continue; 13191da177e4SLinus Torvalds 1320afb83012SSoheil Hassas Yeganeh wait_for_space: 13211da177e4SLinus Torvalds set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1322ec342325SAndrew Vagin if (copied) 1323f54b3111SEric Dumazet tcp_push(sk, flags & ~MSG_MORE, mss_now, 1324f54b3111SEric Dumazet TCP_NAGLE_PUSH, size_goal); 13251da177e4SLinus Torvalds 1326686a5624SYuvaraja Mariappan err = sk_stream_wait_memory(sk, &timeo); 1327686a5624SYuvaraja Mariappan if (err != 0) 13281da177e4SLinus Torvalds goto do_error; 13291da177e4SLinus Torvalds 13300c54b85fSIlpo Järvinen mss_now = tcp_send_mss(sk, &size_goal, flags); 13311da177e4SLinus Torvalds } 13321da177e4SLinus Torvalds 13331da177e4SLinus Torvalds out: 1334ad02c4f5SSoheil Hassas Yeganeh if (copied) { 13354e8cc228SEric Dumazet tcp_tx_timestamp(sk, sockc.tsflags); 1336f54b3111SEric Dumazet tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1337ad02c4f5SSoheil Hassas Yeganeh } 13385924f17aSChristoph Paasch out_nopush: 1339a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1340a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf) 13418e044917SJonathan Lemon net_zcopy_put(uarg); 1342cf60af03SYuchung Cheng return copied + copied_syn; 13431da177e4SLinus Torvalds 13441da177e4SLinus Torvalds do_error: 134527728ba8SEric Dumazet tcp_remove_empty_skb(sk); 1346fdfc5c85SEric Dumazet 1347cf60af03SYuchung Cheng if (copied + copied_syn) 13481da177e4SLinus Torvalds goto out; 13491da177e4SLinus Torvalds out_err: 1350a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1351a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf) 13528e044917SJonathan Lemon net_zcopy_put_abort(uarg, true); 13531da177e4SLinus Torvalds err = sk_stream_error(sk, flags, err); 1354ce5ec440SJason Baron /* make sure we wake any epoll edge trigger waiter */ 1355216808c6SEric Dumazet if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { 1356ce5ec440SJason Baron sk->sk_write_space(sk); 1357b0f71bd3SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1358b0f71bd3SFrancis Yan } 13591da177e4SLinus Torvalds return err; 13601da177e4SLinus Torvalds } 1361774c4673SDavid S. 
Miller EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); 1362306b13ebSTom Herbert 1363306b13ebSTom Herbert int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1364306b13ebSTom Herbert { 1365306b13ebSTom Herbert int ret; 1366306b13ebSTom Herbert 1367306b13ebSTom Herbert lock_sock(sk); 1368306b13ebSTom Herbert ret = tcp_sendmsg_locked(sk, msg, size); 1369306b13ebSTom Herbert release_sock(sk); 1370306b13ebSTom Herbert 1371306b13ebSTom Herbert return ret; 1372306b13ebSTom Herbert } 13734bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sendmsg); 13741da177e4SLinus Torvalds 13751d7e4538SDavid Howells void tcp_splice_eof(struct socket *sock) 13761d7e4538SDavid Howells { 13771d7e4538SDavid Howells struct sock *sk = sock->sk; 13781d7e4538SDavid Howells struct tcp_sock *tp = tcp_sk(sk); 13791d7e4538SDavid Howells int mss_now, size_goal; 13801d7e4538SDavid Howells 13811d7e4538SDavid Howells if (!tcp_write_queue_tail(sk)) 13821d7e4538SDavid Howells return; 13831d7e4538SDavid Howells 13841d7e4538SDavid Howells lock_sock(sk); 13851d7e4538SDavid Howells mss_now = tcp_send_mss(sk, &size_goal, 0); 13861d7e4538SDavid Howells tcp_push(sk, 0, mss_now, tp->nonagle, size_goal); 13871d7e4538SDavid Howells release_sock(sk); 13881d7e4538SDavid Howells } 13891d7e4538SDavid Howells EXPORT_SYMBOL_GPL(tcp_splice_eof); 13901d7e4538SDavid Howells 13911da177e4SLinus Torvalds /* 13921da177e4SLinus Torvalds * Handle reading urgent data. BSD has very simple semantics for 13931da177e4SLinus Torvalds * this, no blocking and very strange errors 8) 13941da177e4SLinus Torvalds */ 13951da177e4SLinus Torvalds 1396377f0a08SRami Rosen static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 13971da177e4SLinus Torvalds { 13981da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 13991da177e4SLinus Torvalds 14001da177e4SLinus Torvalds /* No URG data to read. */ 14011da177e4SLinus Torvalds if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 14021da177e4SLinus Torvalds tp->urg_data == TCP_URG_READ) 14031da177e4SLinus Torvalds return -EINVAL; /* Yes this is right ! */ 14041da177e4SLinus Torvalds 14051da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 14061da177e4SLinus Torvalds return -ENOTCONN; 14071da177e4SLinus Torvalds 14081da177e4SLinus Torvalds if (tp->urg_data & TCP_URG_VALID) { 14091da177e4SLinus Torvalds int err = 0; 14101da177e4SLinus Torvalds char c = tp->urg_data; 14111da177e4SLinus Torvalds 14121da177e4SLinus Torvalds if (!(flags & MSG_PEEK)) 14137b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, TCP_URG_READ); 14141da177e4SLinus Torvalds 14151da177e4SLinus Torvalds /* Read urgent data. */ 14161da177e4SLinus Torvalds msg->msg_flags |= MSG_OOB; 14171da177e4SLinus Torvalds 14181da177e4SLinus Torvalds if (len > 0) { 14191da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) 14207eab8d9eSAl Viro err = memcpy_to_msg(msg, &c, 1); 14211da177e4SLinus Torvalds len = 1; 14221da177e4SLinus Torvalds } else 14231da177e4SLinus Torvalds msg->msg_flags |= MSG_TRUNC; 14241da177e4SLinus Torvalds 14251da177e4SLinus Torvalds return err ? -EFAULT : len; 14261da177e4SLinus Torvalds } 14271da177e4SLinus Torvalds 14281da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 14291da177e4SLinus Torvalds return 0; 14301da177e4SLinus Torvalds 14311da177e4SLinus Torvalds /* Fixed the recv(..., MSG_OOB) behaviour. 
BSD docs and 14321da177e4SLinus Torvalds * the available implementations agree in this case: 14331da177e4SLinus Torvalds * this call should never block, independent of the 14341da177e4SLinus Torvalds * blocking state of the socket. 14351da177e4SLinus Torvalds * Mike <pall@rz.uni-karlsruhe.de> 14361da177e4SLinus Torvalds */ 14371da177e4SLinus Torvalds return -EAGAIN; 14381da177e4SLinus Torvalds } 14391da177e4SLinus Torvalds 1440c0e88ff0SPavel Emelyanov static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1441c0e88ff0SPavel Emelyanov { 1442c0e88ff0SPavel Emelyanov struct sk_buff *skb; 1443c0e88ff0SPavel Emelyanov int copied = 0, err = 0; 1444c0e88ff0SPavel Emelyanov 1445c0e88ff0SPavel Emelyanov /* XXX -- need to support SO_PEEK_OFF */ 1446c0e88ff0SPavel Emelyanov 144775c119afSEric Dumazet skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 144875c119afSEric Dumazet err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 144975c119afSEric Dumazet if (err) 145075c119afSEric Dumazet return err; 145175c119afSEric Dumazet copied += skb->len; 145275c119afSEric Dumazet } 145375c119afSEric Dumazet 1454c0e88ff0SPavel Emelyanov skb_queue_walk(&sk->sk_write_queue, skb) { 145551f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1456c0e88ff0SPavel Emelyanov if (err) 1457c0e88ff0SPavel Emelyanov break; 1458c0e88ff0SPavel Emelyanov 1459c0e88ff0SPavel Emelyanov copied += skb->len; 1460c0e88ff0SPavel Emelyanov } 1461c0e88ff0SPavel Emelyanov 1462c0e88ff0SPavel Emelyanov return err ?: copied; 1463c0e88ff0SPavel Emelyanov } 1464c0e88ff0SPavel Emelyanov 14651da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user, 14661da177e4SLinus Torvalds * then send an ACK if necessary. COPIED is the number of bytes 14671da177e4SLinus Torvalds * tcp_recvmsg has given to the user so far, it speeds up the 14681da177e4SLinus Torvalds * calculation of whether or not we must ACK for the sake of 14691da177e4SLinus Torvalds * a window update. 14701da177e4SLinus Torvalds */ 1471e5c6de5fSJohn Fastabend void __tcp_cleanup_rbuf(struct sock *sk, int copied) 14721da177e4SLinus Torvalds { 14731da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1474a2a385d6SEric Dumazet bool time_to_ack = false; 14751da177e4SLinus Torvalds 1476463c84b9SArnaldo Carvalho de Melo if (inet_csk_ack_scheduled(sk)) { 1477463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1478b6b6d653SEric Dumazet 1479b6b6d653SEric Dumazet if (/* Once-per-two-segments ACK was not sent by tcp_input.c */ 1480463c84b9SArnaldo Carvalho de Melo tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 14811da177e4SLinus Torvalds /* 14821da177e4SLinus Torvalds * If this read emptied read buffer, we send ACK, if 14831da177e4SLinus Torvalds * connection is not bidirectional, user drained 14841da177e4SLinus Torvalds * receive buffer and there was a small segment 14851da177e4SLinus Torvalds * in queue. 
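 * Answering at once lets a sender that is blocked on window
 * space resume immediately instead of waiting for the delayed
 * ACK timer to fire.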
14861da177e4SLinus Torvalds */ 14871ef9696cSAlexey Kuznetsov (copied > 0 && 14881ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 14891ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 149031954cd8SWei Wang !inet_csk_in_pingpong_mode(sk))) && 14911ef9696cSAlexey Kuznetsov !atomic_read(&sk->sk_rmem_alloc))) 1492a2a385d6SEric Dumazet time_to_ack = true; 14931da177e4SLinus Torvalds } 14941da177e4SLinus Torvalds 14951da177e4SLinus Torvalds /* We send an ACK if we can now advertise a non-zero window 14961da177e4SLinus Torvalds * which has been raised "significantly". 14971da177e4SLinus Torvalds * 14981da177e4SLinus Torvalds * Even if window raised up to infinity, do not send window open ACK 14991da177e4SLinus Torvalds * in states, where we will not receive more. It is useless. 15001da177e4SLinus Torvalds */ 15011da177e4SLinus Torvalds if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 15021da177e4SLinus Torvalds __u32 rcv_window_now = tcp_receive_window(tp); 15031da177e4SLinus Torvalds 15041da177e4SLinus Torvalds /* Optimize, __tcp_select_window() is not cheap. */ 15051da177e4SLinus Torvalds if (2*rcv_window_now <= tp->window_clamp) { 15061da177e4SLinus Torvalds __u32 new_window = __tcp_select_window(sk); 15071da177e4SLinus Torvalds 15081da177e4SLinus Torvalds /* Send ACK now, if this read freed lots of space 15091da177e4SLinus Torvalds * in our buffer. Certainly, new_window is new window. 15101da177e4SLinus Torvalds * We can advertise it now, if it is not less than current one. 15111da177e4SLinus Torvalds * "Lots" means "at least twice" here. 15121da177e4SLinus Torvalds */ 15131da177e4SLinus Torvalds if (new_window && new_window >= 2 * rcv_window_now) 1514a2a385d6SEric Dumazet time_to_ack = true; 15151da177e4SLinus Torvalds } 15161da177e4SLinus Torvalds } 15171da177e4SLinus Torvalds if (time_to_ack) 15181da177e4SLinus Torvalds tcp_send_ack(sk); 15191da177e4SLinus Torvalds } 15201da177e4SLinus Torvalds 1521c457985aSCong Wang void tcp_cleanup_rbuf(struct sock *sk, int copied) 1522c457985aSCong Wang { 1523c457985aSCong Wang struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1524c457985aSCong Wang struct tcp_sock *tp = tcp_sk(sk); 1525c457985aSCong Wang 1526c457985aSCong Wang WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1527c457985aSCong Wang "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1528c457985aSCong Wang tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1529c457985aSCong Wang __tcp_cleanup_rbuf(sk, copied); 1530c457985aSCong Wang } 1531c457985aSCong Wang 15323df684c1SEric Dumazet static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) 15333df684c1SEric Dumazet { 1534f35f8219SEric Dumazet __skb_unlink(skb, &sk->sk_receive_queue); 15353df684c1SEric Dumazet if (likely(skb->destructor == sock_rfree)) { 15363df684c1SEric Dumazet sock_rfree(skb); 15373df684c1SEric Dumazet skb->destructor = NULL; 15383df684c1SEric Dumazet skb->sk = NULL; 153968822bdfSEric Dumazet return skb_attempt_defer_free(skb); 1540f35f8219SEric Dumazet } 1541f35f8219SEric Dumazet __kfree_skb(skb); 15423df684c1SEric Dumazet } 15433df684c1SEric Dumazet 15443f92a64eSJakub Kicinski struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 15451da177e4SLinus Torvalds { 15461da177e4SLinus Torvalds struct sk_buff *skb; 15471da177e4SLinus Torvalds u32 offset; 15481da177e4SLinus Torvalds 1549f26845b4SEric Dumazet while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 15501da177e4SLinus Torvalds offset = seq - 
TCP_SKB_CB(skb)->seq; 15519d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 15529d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__); 15531da177e4SLinus Torvalds offset--; 15549d691539SEric Dumazet } 1555e11ecddfSEric Dumazet if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { 15561da177e4SLinus Torvalds *off = offset; 15571da177e4SLinus Torvalds return skb; 15581da177e4SLinus Torvalds } 1559f26845b4SEric Dumazet /* This looks weird, but this can happen if TCP collapsing 1560f26845b4SEric Dumazet * split a fat GRO packet, while we released socket lock 1561f26845b4SEric Dumazet * in skb_splice_bits() 1562f26845b4SEric Dumazet */ 15633df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 15641da177e4SLinus Torvalds } 15651da177e4SLinus Torvalds return NULL; 15661da177e4SLinus Torvalds } 15673f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_recv_skb); 15681da177e4SLinus Torvalds 15691da177e4SLinus Torvalds /* 15701da177e4SLinus Torvalds * This routine provides an alternative to tcp_recvmsg() for routines 15711da177e4SLinus Torvalds * that would like to handle copying from skbuffs directly in 'sendfile' 15721da177e4SLinus Torvalds * fashion. 15731da177e4SLinus Torvalds * Note: 15741da177e4SLinus Torvalds * - It is assumed that the socket was locked by the caller. 15751da177e4SLinus Torvalds * - The routine does not block. 15761da177e4SLinus Torvalds * - At present, there is no support for reading OOB data 15771da177e4SLinus Torvalds * or for 'peeking' the socket using this routine 15781da177e4SLinus Torvalds * (although both would be easy to implement). 15791da177e4SLinus Torvalds */ 15801da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 15811da177e4SLinus Torvalds sk_read_actor_t recv_actor) 15821da177e4SLinus Torvalds { 15831da177e4SLinus Torvalds struct sk_buff *skb; 15841da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 15851da177e4SLinus Torvalds u32 seq = tp->copied_seq; 15861da177e4SLinus Torvalds u32 offset; 15871da177e4SLinus Torvalds int copied = 0; 15881da177e4SLinus Torvalds 15891da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) 15901da177e4SLinus Torvalds return -ENOTCONN; 15911da177e4SLinus Torvalds while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 15921da177e4SLinus Torvalds if (offset < skb->len) { 1593374e7b59SOctavian Purdila int used; 1594374e7b59SOctavian Purdila size_t len; 15951da177e4SLinus Torvalds 15961da177e4SLinus Torvalds len = skb->len - offset; 15971da177e4SLinus Torvalds /* Stop reading if we hit a patch of urgent data */ 1598b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) { 15991da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - seq; 16001da177e4SLinus Torvalds if (urg_offset < len) 16011da177e4SLinus Torvalds len = urg_offset; 16021da177e4SLinus Torvalds if (!len) 16031da177e4SLinus Torvalds break; 16041da177e4SLinus Torvalds } 16051da177e4SLinus Torvalds used = recv_actor(desc, skb, offset, len); 1606ff905b1eSEric Dumazet if (used <= 0) { 1607ddb61a57SJens Axboe if (!copied) 1608ddb61a57SJens Axboe copied = used; 1609ddb61a57SJens Axboe break; 1610e3d5ea2cSEric Dumazet } 1611e3d5ea2cSEric Dumazet if (WARN_ON_ONCE(used > len)) 1612e3d5ea2cSEric Dumazet used = len; 16131da177e4SLinus Torvalds seq += used; 16141da177e4SLinus Torvalds copied += used; 16151da177e4SLinus Torvalds offset += used; 1616e3d5ea2cSEric Dumazet 161702275a2eSWilly Tarreau /* If recv_actor drops the lock (e.g.
TCP splice 1618293ad604SOctavian Purdila * receive) the skb pointer might be invalid when 1619293ad604SOctavian Purdila * getting here: tcp_collapse might have deleted it 1620293ad604SOctavian Purdila * while aggregating skbs from the socket queue. 1621293ad604SOctavian Purdila */ 1622293ad604SOctavian Purdila skb = tcp_recv_skb(sk, seq - 1, &offset); 162302275a2eSWilly Tarreau if (!skb) 16241da177e4SLinus Torvalds break; 162502275a2eSWilly Tarreau /* TCP coalescing might have appended data to the skb. 162602275a2eSWilly Tarreau * Try to splice more frags 162702275a2eSWilly Tarreau */ 162802275a2eSWilly Tarreau if (offset + 1 != skb->len) 162902275a2eSWilly Tarreau continue; 16301da177e4SLinus Torvalds } 1631e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 16323df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 16331da177e4SLinus Torvalds ++seq; 16341da177e4SLinus Torvalds break; 16351da177e4SLinus Torvalds } 16363df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 16371da177e4SLinus Torvalds if (!desc->count) 16381da177e4SLinus Torvalds break; 16397db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 16401da177e4SLinus Torvalds } 16417db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 16421da177e4SLinus Torvalds 16431da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 16441da177e4SLinus Torvalds 16451da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */ 1646f26845b4SEric Dumazet if (copied > 0) { 1647f26845b4SEric Dumazet tcp_recv_skb(sk, seq, &offset); 16480e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied); 1649f26845b4SEric Dumazet } 16501da177e4SLinus Torvalds return copied; 16511da177e4SLinus Torvalds } 16524bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_read_sock); 16531da177e4SLinus Torvalds 1654965b57b4SCong Wang int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 165504919bedSCong Wang { 165604919bedSCong Wang struct tcp_sock *tp = tcp_sk(sk); 165704919bedSCong Wang u32 seq = tp->copied_seq; 165804919bedSCong Wang struct sk_buff *skb; 165904919bedSCong Wang int copied = 0; 166004919bedSCong Wang u32 offset; 166104919bedSCong Wang 166204919bedSCong Wang if (sk->sk_state == TCP_LISTEN) 166304919bedSCong Wang return -ENOTCONN; 166404919bedSCong Wang 1665db4192a7SCong Wang while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1666db4192a7SCong Wang u8 tcp_flags; 1667db4192a7SCong Wang int used; 166804919bedSCong Wang 166904919bedSCong Wang __skb_unlink(skb, &sk->sk_receive_queue); 167096628951SPeilin Ye WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); 1671db4192a7SCong Wang tcp_flags = TCP_SKB_CB(skb)->tcp_flags; 1672db4192a7SCong Wang used = recv_actor(sk, skb); 1673db4192a7SCong Wang if (used < 0) { 1674db4192a7SCong Wang if (!copied) 1675db4192a7SCong Wang copied = used; 1676db4192a7SCong Wang break; 1677db4192a7SCong Wang } 1678db4192a7SCong Wang seq += used; 1679db4192a7SCong Wang copied += used; 1680db4192a7SCong Wang 1681db4192a7SCong Wang if (tcp_flags & TCPHDR_FIN) { 1682db4192a7SCong Wang ++seq; 1683db4192a7SCong Wang break; 1684db4192a7SCong Wang } 1685db4192a7SCong Wang } 168604919bedSCong Wang return copied; 168704919bedSCong Wang } 168804919bedSCong Wang EXPORT_SYMBOL(tcp_read_skb); 168904919bedSCong Wang 16903f92a64eSJakub Kicinski void tcp_read_done(struct sock *sk, size_t len) 16913f92a64eSJakub Kicinski { 16923f92a64eSJakub Kicinski struct tcp_sock *tp = tcp_sk(sk); 16933f92a64eSJakub Kicinski u32 seq = tp->copied_seq; 16943f92a64eSJakub Kicinski struct sk_buff *skb; 16953f92a64eSJakub Kicinski size_t left; 
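	/* offset: how far into the skb returned by tcp_recv_skb()
	 * the copied_seq sequence number points.
	 */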
16963f92a64eSJakub Kicinski u32 offset; 16973f92a64eSJakub Kicinski 16983f92a64eSJakub Kicinski if (sk->sk_state == TCP_LISTEN) 16993f92a64eSJakub Kicinski return; 17003f92a64eSJakub Kicinski 17013f92a64eSJakub Kicinski left = len; 17023f92a64eSJakub Kicinski while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 17033f92a64eSJakub Kicinski int used; 17043f92a64eSJakub Kicinski 17053f92a64eSJakub Kicinski used = min_t(size_t, skb->len - offset, left); 17063f92a64eSJakub Kicinski seq += used; 17073f92a64eSJakub Kicinski left -= used; 17083f92a64eSJakub Kicinski 17093f92a64eSJakub Kicinski if (skb->len > offset + used) 17103f92a64eSJakub Kicinski break; 17113f92a64eSJakub Kicinski 17123f92a64eSJakub Kicinski if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 17133f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb); 17143f92a64eSJakub Kicinski ++seq; 17153f92a64eSJakub Kicinski break; 17163f92a64eSJakub Kicinski } 17173f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb); 17183f92a64eSJakub Kicinski } 17193f92a64eSJakub Kicinski WRITE_ONCE(tp->copied_seq, seq); 17203f92a64eSJakub Kicinski 17213f92a64eSJakub Kicinski tcp_rcv_space_adjust(sk); 17223f92a64eSJakub Kicinski 17233f92a64eSJakub Kicinski /* Clean up data we have read: This will do ACK frames. */ 17243f92a64eSJakub Kicinski if (left != len) 17253f92a64eSJakub Kicinski tcp_cleanup_rbuf(sk, len - left); 17263f92a64eSJakub Kicinski } 17273f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_read_done); 17283f92a64eSJakub Kicinski 172932035585STom Herbert int tcp_peek_len(struct socket *sock) 173032035585STom Herbert { 173132035585STom Herbert return tcp_inq(sock->sk); 173232035585STom Herbert } 173332035585STom Herbert EXPORT_SYMBOL(tcp_peek_len); 173432035585STom Herbert 1735d1361840SEric Dumazet /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ 1736d1361840SEric Dumazet int tcp_set_rcvlowat(struct sock *sk, int val) 1737d1361840SEric Dumazet { 1738867f816bSSoheil Hassas Yeganeh int cap; 1739867f816bSSoheil Hassas Yeganeh 1740867f816bSSoheil Hassas Yeganeh if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1741867f816bSSoheil Hassas Yeganeh cap = sk->sk_rcvbuf >> 1; 1742867f816bSSoheil Hassas Yeganeh else 174302739545SKuniyuki Iwashima cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; 1744867f816bSSoheil Hassas Yeganeh val = min(val, cap); 1745eac66402SEric Dumazet WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); 174603f45c88SEric Dumazet 174703f45c88SEric Dumazet /* Check if we need to signal EPOLLIN right now */ 174803f45c88SEric Dumazet tcp_data_ready(sk); 174903f45c88SEric Dumazet 1750d1361840SEric Dumazet if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1751d1361840SEric Dumazet return 0; 1752d1361840SEric Dumazet 1753d1361840SEric Dumazet val <<= 1; 1754d1361840SEric Dumazet if (val > sk->sk_rcvbuf) { 1755ebb3b78dSEric Dumazet WRITE_ONCE(sk->sk_rcvbuf, val); 1756d1361840SEric Dumazet tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val); 1757d1361840SEric Dumazet } 1758d1361840SEric Dumazet return 0; 1759d1361840SEric Dumazet } 1760d1361840SEric Dumazet EXPORT_SYMBOL(tcp_set_rcvlowat); 1761d1361840SEric Dumazet 1762892bfd3dSFlorian Westphal void tcp_update_recv_tstamps(struct sk_buff *skb, 17637eeba170SArjun Roy struct scm_timestamping_internal *tss) 17647eeba170SArjun Roy { 17657eeba170SArjun Roy if (skb->tstamp) 17667eeba170SArjun Roy tss->ts[0] = ktime_to_timespec64(skb->tstamp); 17677eeba170SArjun Roy else 17687eeba170SArjun Roy tss->ts[0] = (struct timespec64) {0}; 17697eeba170SArjun Roy 17707eeba170SArjun Roy if (skb_hwtstamps(skb)->hwtstamp) 17717eeba170SArjun Roy tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp); 17727eeba170SArjun Roy else 17737eeba170SArjun Roy tss->ts[2] = (struct timespec64) {0}; 17747eeba170SArjun Roy } 17757eeba170SArjun Roy 177605255b82SEric Dumazet #ifdef CONFIG_MMU 1777*7a7f0946SArjun Roy const struct vm_operations_struct tcp_vm_ops = { 177805255b82SEric Dumazet }; 177905255b82SEric Dumazet 178093ab6cc6SEric Dumazet int tcp_mmap(struct file *file, struct socket *sock, 178193ab6cc6SEric Dumazet struct vm_area_struct *vma) 178293ab6cc6SEric Dumazet { 178305255b82SEric Dumazet if (vma->vm_flags & (VM_WRITE | VM_EXEC)) 178405255b82SEric Dumazet return -EPERM; 17851c71222eSSuren Baghdasaryan vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC); 178605255b82SEric Dumazet 17873e4e28c5SMichel Lespinasse /* Instruct vm_insert_page() to not mmap_read_lock(mm) */ 17881c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_MIXEDMAP); 178905255b82SEric Dumazet 179005255b82SEric Dumazet vma->vm_ops = &tcp_vm_ops; 179105255b82SEric Dumazet return 0; 179205255b82SEric Dumazet } 179305255b82SEric Dumazet EXPORT_SYMBOL(tcp_mmap); 179405255b82SEric Dumazet 17957fba5309SArjun Roy static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, 17967fba5309SArjun Roy u32 *offset_frag) 17977fba5309SArjun Roy { 17987fba5309SArjun Roy skb_frag_t *frag; 17997fba5309SArjun Roy 180070701b83SArjun Roy if (unlikely(offset_skb >= skb->len)) 180170701b83SArjun Roy return NULL; 180270701b83SArjun Roy 18037fba5309SArjun Roy offset_skb -= skb_headlen(skb); 18047fba5309SArjun Roy if ((int)offset_skb < 0 || skb_has_frag_list(skb)) 18057fba5309SArjun Roy return NULL; 18067fba5309SArjun Roy 18077fba5309SArjun Roy frag = skb_shinfo(skb)->frags; 18087fba5309SArjun Roy while (offset_skb) { 18097fba5309SArjun Roy if (skb_frag_size(frag) > offset_skb) { 18107fba5309SArjun Roy *offset_frag = offset_skb; 18117fba5309SArjun Roy return frag; 18127fba5309SArjun Roy } 18137fba5309SArjun Roy offset_skb -= skb_frag_size(frag); 18147fba5309SArjun Roy ++frag; 18157fba5309SArjun Roy } 18167fba5309SArjun Roy *offset_frag = 0; 18177fba5309SArjun Roy return frag; 18187fba5309SArjun Roy } 18197fba5309SArjun Roy 182098917cf0SArjun Roy static bool can_map_frag(const skb_frag_t *frag) 182198917cf0SArjun Roy { 182298917cf0SArjun Roy return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag); 
182398917cf0SArjun Roy } 182498917cf0SArjun Roy 182598917cf0SArjun Roy static int find_next_mappable_frag(const skb_frag_t *frag, 182698917cf0SArjun Roy int remaining_in_skb) 182798917cf0SArjun Roy { 182898917cf0SArjun Roy int offset = 0; 182998917cf0SArjun Roy 183098917cf0SArjun Roy if (likely(can_map_frag(frag))) 183198917cf0SArjun Roy return 0; 183298917cf0SArjun Roy 183398917cf0SArjun Roy while (offset < remaining_in_skb && !can_map_frag(frag)) { 183498917cf0SArjun Roy offset += skb_frag_size(frag); 183598917cf0SArjun Roy ++frag; 183698917cf0SArjun Roy } 183798917cf0SArjun Roy return offset; 183898917cf0SArjun Roy } 183998917cf0SArjun Roy 18400c3936d3SArjun Roy static void tcp_zerocopy_set_hint_for_skb(struct sock *sk, 18410c3936d3SArjun Roy struct tcp_zerocopy_receive *zc, 18420c3936d3SArjun Roy struct sk_buff *skb, u32 offset) 18430c3936d3SArjun Roy { 18440c3936d3SArjun Roy u32 frag_offset, partial_frag_remainder = 0; 18450c3936d3SArjun Roy int mappable_offset; 18460c3936d3SArjun Roy skb_frag_t *frag; 18470c3936d3SArjun Roy 18480c3936d3SArjun Roy /* worst case: skip to next skb. try to improve on this case below */ 18490c3936d3SArjun Roy zc->recv_skip_hint = skb->len - offset; 18500c3936d3SArjun Roy 18510c3936d3SArjun Roy /* Find the frag containing this offset (and how far into that frag) */ 18520c3936d3SArjun Roy frag = skb_advance_to_frag(skb, offset, &frag_offset); 18530c3936d3SArjun Roy if (!frag) 18540c3936d3SArjun Roy return; 18550c3936d3SArjun Roy 18560c3936d3SArjun Roy if (frag_offset) { 18570c3936d3SArjun Roy struct skb_shared_info *info = skb_shinfo(skb); 18580c3936d3SArjun Roy 18590c3936d3SArjun Roy /* We read part of the last frag, must recvmsg() rest of skb. */ 18600c3936d3SArjun Roy if (frag == &info->frags[info->nr_frags - 1]) 18610c3936d3SArjun Roy return; 18620c3936d3SArjun Roy 18630c3936d3SArjun Roy /* Else, we must at least read the remainder in this frag. */ 18640c3936d3SArjun Roy partial_frag_remainder = skb_frag_size(frag) - frag_offset; 18650c3936d3SArjun Roy zc->recv_skip_hint -= partial_frag_remainder; 18660c3936d3SArjun Roy ++frag; 18670c3936d3SArjun Roy } 18680c3936d3SArjun Roy 18690c3936d3SArjun Roy /* partial_frag_remainder: If part way through a frag, must read rest. 18700c3936d3SArjun Roy * mappable_offset: Bytes till next mappable frag, *not* counting bytes 18710c3936d3SArjun Roy * in partial_frag_remainder. 
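 * E.g. 1K still to read in the current frag plus two unmappable
 * 4K frags before the next mappable one gives a hint of 1K + 8K = 9K.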
18720c3936d3SArjun Roy */ 18730c3936d3SArjun Roy mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint); 18740c3936d3SArjun Roy zc->recv_skip_hint = mappable_offset + partial_frag_remainder; 18750c3936d3SArjun Roy } 18760c3936d3SArjun Roy 1877f21a3c48SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 1878ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss, 1879f21a3c48SArjun Roy int *cmsg_flags); 1880f21a3c48SArjun Roy static int receive_fallback_to_copy(struct sock *sk, 18817eeba170SArjun Roy struct tcp_zerocopy_receive *zc, int inq, 18827eeba170SArjun Roy struct scm_timestamping_internal *tss) 1883f21a3c48SArjun Roy { 1884f21a3c48SArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address; 1885f21a3c48SArjun Roy struct msghdr msg = {}; 1886f21a3c48SArjun Roy struct iovec iov; 18877eeba170SArjun Roy int err; 1888f21a3c48SArjun Roy 1889f21a3c48SArjun Roy zc->length = 0; 1890f21a3c48SArjun Roy zc->recv_skip_hint = 0; 1891f21a3c48SArjun Roy 1892f21a3c48SArjun Roy if (copy_address != zc->copybuf_address) 1893f21a3c48SArjun Roy return -EINVAL; 1894f21a3c48SArjun Roy 1895de4eda9dSAl Viro err = import_single_range(ITER_DEST, (void __user *)copy_address, 1896f21a3c48SArjun Roy inq, &iov, &msg.msg_iter); 1897f21a3c48SArjun Roy if (err) 1898f21a3c48SArjun Roy return err; 1899f21a3c48SArjun Roy 1900ec095263SOliver Hartkopp err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT, 19017eeba170SArjun Roy tss, &zc->msg_flags); 1902f21a3c48SArjun Roy if (err < 0) 1903f21a3c48SArjun Roy return err; 1904f21a3c48SArjun Roy 1905f21a3c48SArjun Roy zc->copybuf_len = err; 19060c3936d3SArjun Roy if (likely(zc->copybuf_len)) { 19070c3936d3SArjun Roy struct sk_buff *skb; 19080c3936d3SArjun Roy u32 offset; 19090c3936d3SArjun Roy 19100c3936d3SArjun Roy skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset); 19110c3936d3SArjun Roy if (skb) 19120c3936d3SArjun Roy tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset); 19130c3936d3SArjun Roy } 1914f21a3c48SArjun Roy return 0; 1915f21a3c48SArjun Roy } 1916f21a3c48SArjun Roy 191718fb76edSArjun Roy static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc, 191818fb76edSArjun Roy struct sk_buff *skb, u32 copylen, 191918fb76edSArjun Roy u32 *offset, u32 *seq) 192018fb76edSArjun Roy { 192118fb76edSArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address; 192218fb76edSArjun Roy struct msghdr msg = {}; 192318fb76edSArjun Roy struct iovec iov; 192418fb76edSArjun Roy int err; 192518fb76edSArjun Roy 192618fb76edSArjun Roy if (copy_address != zc->copybuf_address) 192718fb76edSArjun Roy return -EINVAL; 192818fb76edSArjun Roy 1929de4eda9dSAl Viro err = import_single_range(ITER_DEST, (void __user *)copy_address, 193018fb76edSArjun Roy copylen, &iov, &msg.msg_iter); 193118fb76edSArjun Roy if (err) 193218fb76edSArjun Roy return err; 193318fb76edSArjun Roy err = skb_copy_datagram_msg(skb, *offset, &msg, copylen); 193418fb76edSArjun Roy if (err) 193518fb76edSArjun Roy return err; 193618fb76edSArjun Roy zc->recv_skip_hint -= copylen; 193718fb76edSArjun Roy *offset += copylen; 193818fb76edSArjun Roy *seq += copylen; 193918fb76edSArjun Roy return (__s32)copylen; 194018fb76edSArjun Roy } 194118fb76edSArjun Roy 19427eeba170SArjun Roy static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc, 194318fb76edSArjun Roy struct sock *sk, 194418fb76edSArjun Roy struct sk_buff *skb, 194518fb76edSArjun Roy u32 *seq, 19467eeba170SArjun Roy s32 copybuf_len, 19477eeba170SArjun Roy struct 
scm_timestamping_internal *tss) 194818fb76edSArjun Roy { 194918fb76edSArjun Roy u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint); 195018fb76edSArjun Roy 195118fb76edSArjun Roy if (!copylen) 195218fb76edSArjun Roy return 0; 195318fb76edSArjun Roy /* skb is null if inq < PAGE_SIZE. */ 19547eeba170SArjun Roy if (skb) { 195518fb76edSArjun Roy offset = *seq - TCP_SKB_CB(skb)->seq; 19567eeba170SArjun Roy } else { 195718fb76edSArjun Roy skb = tcp_recv_skb(sk, *seq, &offset); 19587eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) { 19597eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss); 19607eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS; 19617eeba170SArjun Roy } 19627eeba170SArjun Roy } 196318fb76edSArjun Roy 196418fb76edSArjun Roy zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset, 196518fb76edSArjun Roy seq); 196618fb76edSArjun Roy return zc->copybuf_len < 0 ? 0 : copylen; 196718fb76edSArjun Roy } 196818fb76edSArjun Roy 196994ab9eb9SArjun Roy static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma, 197094ab9eb9SArjun Roy struct page **pending_pages, 197194ab9eb9SArjun Roy unsigned long pages_remaining, 197294ab9eb9SArjun Roy unsigned long *address, 197394ab9eb9SArjun Roy u32 *length, 197494ab9eb9SArjun Roy u32 *seq, 197594ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc, 197694ab9eb9SArjun Roy u32 total_bytes_to_map, 197794ab9eb9SArjun Roy int err) 197894ab9eb9SArjun Roy { 197994ab9eb9SArjun Roy /* At least one page did not map. Try zapping if we skipped earlier. */ 198094ab9eb9SArjun Roy if (err == -EBUSY && 198194ab9eb9SArjun Roy zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { 198294ab9eb9SArjun Roy u32 maybe_zap_len; 198394ab9eb9SArjun Roy 198494ab9eb9SArjun Roy maybe_zap_len = total_bytes_to_map - /* All bytes to map */ 198594ab9eb9SArjun Roy *length + /* Mapped or pending */ 198694ab9eb9SArjun Roy (pages_remaining * PAGE_SIZE); /* Failed map. */ 1987e9adcfecSMike Kravetz zap_page_range_single(vma, *address, maybe_zap_len, NULL); 198894ab9eb9SArjun Roy err = 0; 198994ab9eb9SArjun Roy } 199094ab9eb9SArjun Roy 199194ab9eb9SArjun Roy if (!err) { 199294ab9eb9SArjun Roy unsigned long leftover_pages = pages_remaining; 199394ab9eb9SArjun Roy int bytes_mapped; 199494ab9eb9SArjun Roy 1995e9adcfecSMike Kravetz /* We called zap_page_range_single, try to reinsert. */ 199694ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, 199794ab9eb9SArjun Roy pending_pages, 199894ab9eb9SArjun Roy &pages_remaining); 199994ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); 200094ab9eb9SArjun Roy *seq += bytes_mapped; 200194ab9eb9SArjun Roy *address += bytes_mapped; 200294ab9eb9SArjun Roy } 200394ab9eb9SArjun Roy if (err) { 200494ab9eb9SArjun Roy /* Either we were unable to zap, OR we zapped, retried an 200594ab9eb9SArjun Roy * insert, and still had an issue. Either way, pages_remaining 200694ab9eb9SArjun Roy * is the number of pages we were unable to map, and we unroll 200794ab9eb9SArjun Roy * some state we speculatively touched before.
200894ab9eb9SArjun Roy */ 200994ab9eb9SArjun Roy const int bytes_not_mapped = PAGE_SIZE * pages_remaining; 201094ab9eb9SArjun Roy 201194ab9eb9SArjun Roy *length -= bytes_not_mapped; 201294ab9eb9SArjun Roy zc->recv_skip_hint += bytes_not_mapped; 201394ab9eb9SArjun Roy } 201494ab9eb9SArjun Roy return err; 201594ab9eb9SArjun Roy } 201694ab9eb9SArjun Roy 20173763a24cSArjun Roy static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, 20183763a24cSArjun Roy struct page **pages, 201994ab9eb9SArjun Roy unsigned int pages_to_map, 202094ab9eb9SArjun Roy unsigned long *address, 202194ab9eb9SArjun Roy u32 *length, 20223763a24cSArjun Roy u32 *seq, 202394ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc, 202494ab9eb9SArjun Roy u32 total_bytes_to_map) 20253763a24cSArjun Roy { 20263763a24cSArjun Roy unsigned long pages_remaining = pages_to_map; 202794ab9eb9SArjun Roy unsigned int pages_mapped; 202894ab9eb9SArjun Roy unsigned int bytes_mapped; 202994ab9eb9SArjun Roy int err; 20303763a24cSArjun Roy 203194ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, pages, &pages_remaining); 203294ab9eb9SArjun Roy pages_mapped = pages_to_map - (unsigned int)pages_remaining; 203394ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * pages_mapped; 20343763a24cSArjun Roy /* Even if vm_insert_pages fails, it may have partially succeeded in 20353763a24cSArjun Roy * mapping (some but not all of the pages). 20363763a24cSArjun Roy */ 20373763a24cSArjun Roy *seq += bytes_mapped; 203894ab9eb9SArjun Roy *address += bytes_mapped; 203994ab9eb9SArjun Roy 204094ab9eb9SArjun Roy if (likely(!err)) 204194ab9eb9SArjun Roy return 0; 204294ab9eb9SArjun Roy 204394ab9eb9SArjun Roy /* Error: maybe zap and retry + rollback state for failed inserts. */ 204494ab9eb9SArjun Roy return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped, 204594ab9eb9SArjun Roy pages_remaining, address, length, seq, zc, total_bytes_to_map, 204694ab9eb9SArjun Roy err); 20473763a24cSArjun Roy } 20483763a24cSArjun Roy 20493c5a2fd0SArjun Roy #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS) 20507eeba170SArjun Roy static void tcp_zc_finalize_rx_tstamp(struct sock *sk, 20517eeba170SArjun Roy struct tcp_zerocopy_receive *zc, 20527eeba170SArjun Roy struct scm_timestamping_internal *tss) 20537eeba170SArjun Roy { 20547eeba170SArjun Roy unsigned long msg_control_addr; 20557eeba170SArjun Roy struct msghdr cmsg_dummy; 20567eeba170SArjun Roy 20577eeba170SArjun Roy msg_control_addr = (unsigned long)zc->msg_control; 2058c39ef213SKevin Brodsky cmsg_dummy.msg_control_user = (void __user *)msg_control_addr; 20597eeba170SArjun Roy cmsg_dummy.msg_controllen = 20607eeba170SArjun Roy (__kernel_size_t)zc->msg_controllen; 20617eeba170SArjun Roy cmsg_dummy.msg_flags = in_compat_syscall() 20627eeba170SArjun Roy ? 
MSG_CMSG_COMPAT : 0; 2063a6f8ee58SArjun Roy cmsg_dummy.msg_control_is_user = true; 20647eeba170SArjun Roy zc->msg_flags = 0; 20657eeba170SArjun Roy if (zc->msg_control == msg_control_addr && 20667eeba170SArjun Roy zc->msg_controllen == cmsg_dummy.msg_controllen) { 20677eeba170SArjun Roy tcp_recv_timestamp(&cmsg_dummy, sk, tss); 20687eeba170SArjun Roy zc->msg_control = (__u64) 2069c39ef213SKevin Brodsky ((uintptr_t)cmsg_dummy.msg_control_user); 20707eeba170SArjun Roy zc->msg_controllen = 20717eeba170SArjun Roy (__u64)cmsg_dummy.msg_controllen; 20727eeba170SArjun Roy zc->msg_flags = (__u32)cmsg_dummy.msg_flags; 20737eeba170SArjun Roy } 20747eeba170SArjun Roy } 20757eeba170SArjun Roy 2076*7a7f0946SArjun Roy static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm, 2077*7a7f0946SArjun Roy unsigned long address, 2078*7a7f0946SArjun Roy bool *mmap_locked) 2079*7a7f0946SArjun Roy { 2080*7a7f0946SArjun Roy struct vm_area_struct *vma = NULL; 2081*7a7f0946SArjun Roy 2082*7a7f0946SArjun Roy #ifdef CONFIG_PER_VMA_LOCK 2083*7a7f0946SArjun Roy vma = lock_vma_under_rcu(mm, address); 2084*7a7f0946SArjun Roy #endif 2085*7a7f0946SArjun Roy if (vma) { 2086*7a7f0946SArjun Roy if (!vma_is_tcp(vma)) { 2087*7a7f0946SArjun Roy vma_end_read(vma); 2088*7a7f0946SArjun Roy return NULL; 2089*7a7f0946SArjun Roy } 2090*7a7f0946SArjun Roy *mmap_locked = false; 2091*7a7f0946SArjun Roy return vma; 2092*7a7f0946SArjun Roy } 2093*7a7f0946SArjun Roy 2094*7a7f0946SArjun Roy mmap_read_lock(mm); 2095*7a7f0946SArjun Roy vma = vma_lookup(mm, address); 2096*7a7f0946SArjun Roy if (!vma || !vma_is_tcp(vma)) { 2097*7a7f0946SArjun Roy mmap_read_unlock(mm); 2098*7a7f0946SArjun Roy return NULL; 2099*7a7f0946SArjun Roy } 2100*7a7f0946SArjun Roy *mmap_locked = true; 2101*7a7f0946SArjun Roy return vma; 2102*7a7f0946SArjun Roy } 2103*7a7f0946SArjun Roy 210494ab9eb9SArjun Roy #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 210505255b82SEric Dumazet static int tcp_zerocopy_receive(struct sock *sk, 21067eeba170SArjun Roy struct tcp_zerocopy_receive *zc, 21077eeba170SArjun Roy struct scm_timestamping_internal *tss) 210805255b82SEric Dumazet { 210994ab9eb9SArjun Roy u32 length = 0, offset, vma_len, avail_len, copylen = 0; 211005255b82SEric Dumazet unsigned long address = (unsigned long)zc->address; 211194ab9eb9SArjun Roy struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE]; 211218fb76edSArjun Roy s32 copybuf_len = zc->copybuf_len; 211318fb76edSArjun Roy struct tcp_sock *tp = tcp_sk(sk); 211405255b82SEric Dumazet const skb_frag_t *frags = NULL; 211594ab9eb9SArjun Roy unsigned int pages_to_map = 0; 211605255b82SEric Dumazet struct vm_area_struct *vma; 211705255b82SEric Dumazet struct sk_buff *skb = NULL; 211818fb76edSArjun Roy u32 seq = tp->copied_seq; 211994ab9eb9SArjun Roy u32 total_bytes_to_map; 212018fb76edSArjun Roy int inq = tcp_inq(sk); 2121*7a7f0946SArjun Roy bool mmap_locked; 212293ab6cc6SEric Dumazet int ret; 212393ab6cc6SEric Dumazet 212418fb76edSArjun Roy zc->copybuf_len = 0; 21257eeba170SArjun Roy zc->msg_flags = 0; 212618fb76edSArjun Roy 212705255b82SEric Dumazet if (address & (PAGE_SIZE - 1) || address != zc->address) 212893ab6cc6SEric Dumazet return -EINVAL; 212993ab6cc6SEric Dumazet 213093ab6cc6SEric Dumazet if (sk->sk_state == TCP_LISTEN) 213105255b82SEric Dumazet return -ENOTCONN; 213293ab6cc6SEric Dumazet 213393ab6cc6SEric Dumazet sock_rps_record_flow(sk); 213493ab6cc6SEric Dumazet 2135f21a3c48SArjun Roy if (inq && inq <= copybuf_len) 21367eeba170SArjun Roy return receive_fallback_to_copy(sk, zc, inq, tss); 2137f21a3c48SArjun Roy 
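	/* Mapping pays off only in whole pages: when less than one
	 * page is queued nothing is mapped; the residue is reported
	 * through recv_skip_hint and the caller recvmsg()s it.
	 *
	 * Minimal user-space sketch of this interface (illustrative
	 * only, not built with this file; "fd", "map" and "map_len"
	 * are made-up names and error handling is elided).  The
	 * mmap() of the socket is served by tcp_mmap() above:
	 *
	 *	char *map = mmap(NULL, map_len, PROT_READ, MAP_SHARED,
	 *			 fd, 0);
	 *	struct tcp_zerocopy_receive zc = {
	 *		.address = (__u64)(unsigned long)map,
	 *		.length  = map_len,
	 *	};
	 *	socklen_t zc_len = sizeof(zc);
	 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
	 *		   &zc, &zc_len);
	 *
	 * On return, zc.length bytes are mapped at "map" and the next
	 * zc.recv_skip_hint bytes must be read with recvmsg().
	 */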
2138936ced41SArjun Roy if (inq < PAGE_SIZE) { 2139936ced41SArjun Roy zc->length = 0; 2140936ced41SArjun Roy zc->recv_skip_hint = inq; 2141936ced41SArjun Roy if (!inq && sock_flag(sk, SOCK_DONE)) 2142936ced41SArjun Roy return -EIO; 2143936ced41SArjun Roy return 0; 2144936ced41SArjun Roy } 2145936ced41SArjun Roy 2146*7a7f0946SArjun Roy vma = find_tcp_vma(current->mm, address, &mmap_locked); 2147*7a7f0946SArjun Roy if (!vma) 2148e776af60SEric Dumazet return -EINVAL; 2149*7a7f0946SArjun Roy 215018fb76edSArjun Roy vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); 215118fb76edSArjun Roy avail_len = min_t(u32, vma_len, inq); 215294ab9eb9SArjun Roy total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); 215394ab9eb9SArjun Roy if (total_bytes_to_map) { 215494ab9eb9SArjun Roy if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) 2155e9adcfecSMike Kravetz zap_page_range_single(vma, address, total_bytes_to_map, 2156e9adcfecSMike Kravetz NULL); 215794ab9eb9SArjun Roy zc->length = total_bytes_to_map; 215805255b82SEric Dumazet zc->recv_skip_hint = 0; 21598f2b0293SSoheil Hassas Yeganeh } else { 216018fb76edSArjun Roy zc->length = avail_len; 216118fb76edSArjun Roy zc->recv_skip_hint = avail_len; 21628f2b0293SSoheil Hassas Yeganeh } 216305255b82SEric Dumazet ret = 0; 216405255b82SEric Dumazet while (length + PAGE_SIZE <= zc->length) { 216598917cf0SArjun Roy int mappable_offset; 216694ab9eb9SArjun Roy struct page *page; 216798917cf0SArjun Roy 216805255b82SEric Dumazet if (zc->recv_skip_hint < PAGE_SIZE) { 21697fba5309SArjun Roy u32 offset_frag; 21707fba5309SArjun Roy 217105255b82SEric Dumazet if (skb) { 21720e627190SArjun Roy if (zc->recv_skip_hint > 0) 21730e627190SArjun Roy break; 217405255b82SEric Dumazet skb = skb->next; 217505255b82SEric Dumazet offset = seq - TCP_SKB_CB(skb)->seq; 217605255b82SEric Dumazet } else { 217793ab6cc6SEric Dumazet skb = tcp_recv_skb(sk, seq, &offset); 217805255b82SEric Dumazet } 21797eeba170SArjun Roy 21807eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) { 21817eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss); 21827eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS; 21837eeba170SArjun Roy } 218405255b82SEric Dumazet zc->recv_skip_hint = skb->len - offset; 21857fba5309SArjun Roy frags = skb_advance_to_frag(skb, offset, &offset_frag); 21867fba5309SArjun Roy if (!frags || offset_frag) 218705255b82SEric Dumazet break; 218805255b82SEric Dumazet } 2189789762ceSSoheil Hassas Yeganeh 219098917cf0SArjun Roy mappable_offset = find_next_mappable_frag(frags, 219198917cf0SArjun Roy zc->recv_skip_hint); 219298917cf0SArjun Roy if (mappable_offset) { 219398917cf0SArjun Roy zc->recv_skip_hint = mappable_offset; 219405255b82SEric Dumazet break; 2195789762ceSSoheil Hassas Yeganeh } 219694ab9eb9SArjun Roy page = skb_frag_page(frags); 219794ab9eb9SArjun Roy prefetchw(page); 219894ab9eb9SArjun Roy pages[pages_to_map++] = page; 219905255b82SEric Dumazet length += PAGE_SIZE; 220005255b82SEric Dumazet zc->recv_skip_hint -= PAGE_SIZE; 220105255b82SEric Dumazet frags++; 220294ab9eb9SArjun Roy if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE || 220394ab9eb9SArjun Roy zc->recv_skip_hint < PAGE_SIZE) { 220494ab9eb9SArjun Roy /* Either full batch, or we're about to go to next skb 220594ab9eb9SArjun Roy * (and we cannot unroll failed ops across skbs). 
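 * The rollback in tcp_zerocopy_vm_insert_batch_error() rewinds
 * length and recv_skip_hint, which stays consistent only while
 * all pages in a batch come from the current skb's frags.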
220694ab9eb9SArjun Roy */ 220794ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, 220894ab9eb9SArjun Roy pages_to_map, 220994ab9eb9SArjun Roy &address, &length, 221094ab9eb9SArjun Roy &seq, zc, 221194ab9eb9SArjun Roy total_bytes_to_map); 22123763a24cSArjun Roy if (ret) 22133763a24cSArjun Roy goto out; 221494ab9eb9SArjun Roy pages_to_map = 0; 22153763a24cSArjun Roy } 22163763a24cSArjun Roy } 221794ab9eb9SArjun Roy if (pages_to_map) { 221894ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map, 221994ab9eb9SArjun Roy &address, &length, &seq, 222094ab9eb9SArjun Roy zc, total_bytes_to_map); 222193ab6cc6SEric Dumazet } 222205255b82SEric Dumazet out: 2223*7a7f0946SArjun Roy if (mmap_locked) 2224d8ed45c5SMichel Lespinasse mmap_read_unlock(current->mm); 2225*7a7f0946SArjun Roy else 2226*7a7f0946SArjun Roy vma_end_read(vma); 222718fb76edSArjun Roy /* Try to copy straggler data. */ 222818fb76edSArjun Roy if (!ret) 22297eeba170SArjun Roy copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); 223018fb76edSArjun Roy 223118fb76edSArjun Roy if (length + copylen) { 22327db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 223393ab6cc6SEric Dumazet tcp_rcv_space_adjust(sk); 223493ab6cc6SEric Dumazet 223593ab6cc6SEric Dumazet /* Clean up data we have read: This will do ACK frames. */ 223693ab6cc6SEric Dumazet tcp_recv_skb(sk, seq, &offset); 223718fb76edSArjun Roy tcp_cleanup_rbuf(sk, length + copylen); 223893ab6cc6SEric Dumazet ret = 0; 223905255b82SEric Dumazet if (length == zc->length) 224005255b82SEric Dumazet zc->recv_skip_hint = 0; 224105255b82SEric Dumazet } else { 224205255b82SEric Dumazet if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) 224305255b82SEric Dumazet ret = -EIO; 224405255b82SEric Dumazet } 224505255b82SEric Dumazet zc->length = length; 224693ab6cc6SEric Dumazet return ret; 224793ab6cc6SEric Dumazet } 224805255b82SEric Dumazet #endif 224993ab6cc6SEric Dumazet 225098aaa913SMike Maloney /* Similar to __sock_recv_timestamp, but does not require an skb */ 2251892bfd3dSFlorian Westphal void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, 22529718475eSDeepa Dinamani struct scm_timestamping_internal *tss) 225398aaa913SMike Maloney { 2254887feae3SDeepa Dinamani int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); 225598aaa913SMike Maloney bool has_timestamping = false; 225698aaa913SMike Maloney 225798aaa913SMike Maloney if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { 225898aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMP)) { 225998aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { 2260887feae3SDeepa Dinamani if (new_tstamp) { 2261df1b4ba9SArnd Bergmann struct __kernel_timespec kts = { 2262df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2263df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec, 2264df1b4ba9SArnd Bergmann }; 2265887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW, 2266887feae3SDeepa Dinamani sizeof(kts), &kts); 2267887feae3SDeepa Dinamani } else { 2268df1b4ba9SArnd Bergmann struct __kernel_old_timespec ts_old = { 2269df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2270df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec, 2271df1b4ba9SArnd Bergmann }; 22727f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD, 22739718475eSDeepa Dinamani sizeof(ts_old), &ts_old); 2274887feae3SDeepa Dinamani } 227598aaa913SMike Maloney } else { 2276887feae3SDeepa Dinamani if (new_tstamp) { 2277df1b4ba9SArnd Bergmann struct __kernel_sock_timeval stv = { 2278df1b4ba9SArnd Bergmann 
.tv_sec = tss->ts[0].tv_sec, 2279df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000, 2280df1b4ba9SArnd Bergmann }; 2281887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, 2282887feae3SDeepa Dinamani sizeof(stv), &stv); 2283887feae3SDeepa Dinamani } else { 2284df1b4ba9SArnd Bergmann struct __kernel_old_timeval tv = { 2285df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2286df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000, 2287df1b4ba9SArnd Bergmann }; 22887f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, 228998aaa913SMike Maloney sizeof(tv), &tv); 229098aaa913SMike Maloney } 229198aaa913SMike Maloney } 2292887feae3SDeepa Dinamani } 229398aaa913SMike Maloney 229498aaa913SMike Maloney if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) 229598aaa913SMike Maloney has_timestamping = true; 229698aaa913SMike Maloney else 22979718475eSDeepa Dinamani tss->ts[0] = (struct timespec64) {0}; 229898aaa913SMike Maloney } 229998aaa913SMike Maloney 230098aaa913SMike Maloney if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { 230198aaa913SMike Maloney if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) 230298aaa913SMike Maloney has_timestamping = true; 230398aaa913SMike Maloney else 23049718475eSDeepa Dinamani tss->ts[2] = (struct timespec64) {0}; 230598aaa913SMike Maloney } 230698aaa913SMike Maloney 230798aaa913SMike Maloney if (has_timestamping) { 23089718475eSDeepa Dinamani tss->ts[1] = (struct timespec64) {0}; 23099718475eSDeepa Dinamani if (sock_flag(sk, SOCK_TSTAMP_NEW)) 23109718475eSDeepa Dinamani put_cmsg_scm_timestamping64(msg, tss); 23119718475eSDeepa Dinamani else 23129718475eSDeepa Dinamani put_cmsg_scm_timestamping(msg, tss); 231398aaa913SMike Maloney } 231498aaa913SMike Maloney } 231598aaa913SMike Maloney 2316b75eba76SSoheil Hassas Yeganeh static int tcp_inq_hint(struct sock *sk) 2317b75eba76SSoheil Hassas Yeganeh { 2318b75eba76SSoheil Hassas Yeganeh const struct tcp_sock *tp = tcp_sk(sk); 2319b75eba76SSoheil Hassas Yeganeh u32 copied_seq = READ_ONCE(tp->copied_seq); 2320b75eba76SSoheil Hassas Yeganeh u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); 2321b75eba76SSoheil Hassas Yeganeh int inq; 2322b75eba76SSoheil Hassas Yeganeh 2323b75eba76SSoheil Hassas Yeganeh inq = rcv_nxt - copied_seq; 2324b75eba76SSoheil Hassas Yeganeh if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { 2325b75eba76SSoheil Hassas Yeganeh lock_sock(sk); 2326b75eba76SSoheil Hassas Yeganeh inq = tp->rcv_nxt - tp->copied_seq; 2327b75eba76SSoheil Hassas Yeganeh release_sock(sk); 2328b75eba76SSoheil Hassas Yeganeh } 23296466e715SSoheil Hassas Yeganeh /* After receiving a FIN, tell the user-space to continue reading 23306466e715SSoheil Hassas Yeganeh * by returning a non-zero inq. 23316466e715SSoheil Hassas Yeganeh */ 23326466e715SSoheil Hassas Yeganeh if (inq == 0 && sock_flag(sk, SOCK_DONE)) 23336466e715SSoheil Hassas Yeganeh inq = 1; 2334b75eba76SSoheil Hassas Yeganeh return inq; 2335b75eba76SSoheil Hassas Yeganeh } 2336b75eba76SSoheil Hassas Yeganeh 23371da177e4SLinus Torvalds /* 23381da177e4SLinus Torvalds * This routine copies from a sock struct into the user buffer. 23391da177e4SLinus Torvalds * 23401da177e4SLinus Torvalds * Technical note: in 2.3 we work on _locked_ socket, so that 23411da177e4SLinus Torvalds * tricks with *seq access order and skb->users are not required. 23421da177e4SLinus Torvalds * Probably, code can be easily improved even more. 
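 *
 * [Editor's note, not upstream text] A hedged sketch of how user space
 * typically drives this path together with tcp_inq_hint() above. TCP_INQ
 * and TCP_CM_INQ are the uapi constants used by the put_cmsg() call in
 * tcp_recvmsg() below; everything else is plain POSIX (includes omitted):
 *
 *	int one = 1, inq = -1;
 *	char buf[4096], cbuf[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cm;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_TCP && cm->cmsg_type == TCP_CM_INQ)
 *			memcpy(&inq, CMSG_DATA(cm), sizeof(inq));
 *
 * A non-zero inq after a FIN tells the reader to keep reading; see the
 * comment in tcp_inq_hint() above.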
23431da177e4SLinus Torvalds */ 23441da177e4SLinus Torvalds 23452cd81161SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 2346ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss, 23472cd81161SArjun Roy int *cmsg_flags) 23481da177e4SLinus Torvalds { 23491da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 23501da177e4SLinus Torvalds int copied = 0; 23511da177e4SLinus Torvalds u32 peek_seq; 23521da177e4SLinus Torvalds u32 *seq; 23531da177e4SLinus Torvalds unsigned long used; 23542cd81161SArjun Roy int err; 23551da177e4SLinus Torvalds int target; /* Read at least this many bytes */ 23561da177e4SLinus Torvalds long timeo; 2357dfbafc99SSabrina Dubroca struct sk_buff *skb, *last; 235877527313SIlpo Järvinen u32 urg_hole = 0; 23591da177e4SLinus Torvalds 23601da177e4SLinus Torvalds err = -ENOTCONN; 23611da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) 23621da177e4SLinus Torvalds goto out; 23631da177e4SLinus Torvalds 2364f94fd25cSJens Axboe if (tp->recvmsg_inq) { 2365925bba24SArjun Roy *cmsg_flags = TCP_CMSG_INQ; 2366f94fd25cSJens Axboe msg->msg_get_inq = 1; 2367f94fd25cSJens Axboe } 2368ec095263SOliver Hartkopp timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 23691da177e4SLinus Torvalds 23701da177e4SLinus Torvalds /* Urgent data needs to be handled specially. */ 23711da177e4SLinus Torvalds if (flags & MSG_OOB) 23721da177e4SLinus Torvalds goto recv_urg; 23731da177e4SLinus Torvalds 2374c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) { 2375c0e88ff0SPavel Emelyanov err = -EPERM; 2376c0e88ff0SPavel Emelyanov if (!(flags & MSG_PEEK)) 2377c0e88ff0SPavel Emelyanov goto out; 2378c0e88ff0SPavel Emelyanov 2379c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE) 2380c0e88ff0SPavel Emelyanov goto recv_sndq; 2381c0e88ff0SPavel Emelyanov 2382c0e88ff0SPavel Emelyanov err = -EINVAL; 2383c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE) 2384c0e88ff0SPavel Emelyanov goto out; 2385c0e88ff0SPavel Emelyanov 2386c0e88ff0SPavel Emelyanov /* 'common' recv queue MSG_PEEK-ing */ 2387c0e88ff0SPavel Emelyanov } 2388c0e88ff0SPavel Emelyanov 23891da177e4SLinus Torvalds seq = &tp->copied_seq; 23901da177e4SLinus Torvalds if (flags & MSG_PEEK) { 23911da177e4SLinus Torvalds peek_seq = tp->copied_seq; 23921da177e4SLinus Torvalds seq = &peek_seq; 23931da177e4SLinus Torvalds } 23941da177e4SLinus Torvalds 23951da177e4SLinus Torvalds target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 23961da177e4SLinus Torvalds 23971da177e4SLinus Torvalds do { 23981da177e4SLinus Torvalds u32 offset; 23991da177e4SLinus Torvalds 24001da177e4SLinus Torvalds /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 2401b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { 24021da177e4SLinus Torvalds if (copied) 24031da177e4SLinus Torvalds break; 24041da177e4SLinus Torvalds if (signal_pending(current)) { 24051da177e4SLinus Torvalds copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 24061da177e4SLinus Torvalds break; 24071da177e4SLinus Torvalds } 24081da177e4SLinus Torvalds } 24091da177e4SLinus Torvalds 24101da177e4SLinus Torvalds /* Next get a buffer. */ 24111da177e4SLinus Torvalds 2412dfbafc99SSabrina Dubroca last = skb_peek_tail(&sk->sk_receive_queue); 241391521944SDavid S. Miller skb_queue_walk(&sk->sk_receive_queue, skb) { 2414dfbafc99SSabrina Dubroca last = skb; 24151da177e4SLinus Torvalds /* Now that we have two receive queues this 24161da177e4SLinus Torvalds * shouldn't happen. 
24171da177e4SLinus Torvalds */ 2418d792c100SIlpo Järvinen if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 2419e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", 24202af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 2421d792c100SIlpo Järvinen flags)) 24221da177e4SLinus Torvalds break; 2423d792c100SIlpo Järvinen 24241da177e4SLinus Torvalds offset = *seq - TCP_SKB_CB(skb)->seq; 24259d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 24269d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__); 24271da177e4SLinus Torvalds offset--; 24289d691539SEric Dumazet } 24291da177e4SLinus Torvalds if (offset < skb->len) 24301da177e4SLinus Torvalds goto found_ok_skb; 2431e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 24321da177e4SLinus Torvalds goto found_fin_ok; 24332af6fd8bSJoe Perches WARN(!(flags & MSG_PEEK), 2434e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", 24352af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 243691521944SDavid S. Miller } 24371da177e4SLinus Torvalds 24381da177e4SLinus Torvalds /* Well, if we have backlog, try to process it now yet. */ 24391da177e4SLinus Torvalds 24409ed498c6SEric Dumazet if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) 24411da177e4SLinus Torvalds break; 24421da177e4SLinus Torvalds 24431da177e4SLinus Torvalds if (copied) { 24448bd172b7SEric Dumazet if (!timeo || 24458bd172b7SEric Dumazet sk->sk_err || 24461da177e4SLinus Torvalds sk->sk_state == TCP_CLOSE || 24471da177e4SLinus Torvalds (sk->sk_shutdown & RCV_SHUTDOWN) || 2448518a09efSDavid S. Miller signal_pending(current)) 24491da177e4SLinus Torvalds break; 24501da177e4SLinus Torvalds } else { 24511da177e4SLinus Torvalds if (sock_flag(sk, SOCK_DONE)) 24521da177e4SLinus Torvalds break; 24531da177e4SLinus Torvalds 24541da177e4SLinus Torvalds if (sk->sk_err) { 24551da177e4SLinus Torvalds copied = sock_error(sk); 24561da177e4SLinus Torvalds break; 24571da177e4SLinus Torvalds } 24581da177e4SLinus Torvalds 24591da177e4SLinus Torvalds if (sk->sk_shutdown & RCV_SHUTDOWN) 24601da177e4SLinus Torvalds break; 24611da177e4SLinus Torvalds 24621da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE) { 24631da177e4SLinus Torvalds /* This occurs when user tries to read 24641da177e4SLinus Torvalds * from never connected socket. 24651da177e4SLinus Torvalds */ 24661da177e4SLinus Torvalds copied = -ENOTCONN; 24671da177e4SLinus Torvalds break; 24681da177e4SLinus Torvalds } 24691da177e4SLinus Torvalds 24701da177e4SLinus Torvalds if (!timeo) { 24711da177e4SLinus Torvalds copied = -EAGAIN; 24721da177e4SLinus Torvalds break; 24731da177e4SLinus Torvalds } 24741da177e4SLinus Torvalds 24751da177e4SLinus Torvalds if (signal_pending(current)) { 24761da177e4SLinus Torvalds copied = sock_intr_errno(timeo); 24771da177e4SLinus Torvalds break; 24781da177e4SLinus Torvalds } 24791da177e4SLinus Torvalds } 24801da177e4SLinus Torvalds 24811da177e4SLinus Torvalds if (copied >= target) { 24821da177e4SLinus Torvalds /* Do not sleep, just process backlog. 
*/ 248393afcfd1SEric Dumazet __sk_flush_backlog(sk); 2484dfbafc99SSabrina Dubroca } else { 248529fbc26eSEric Dumazet tcp_cleanup_rbuf(sk, copied); 2486dfbafc99SSabrina Dubroca sk_wait_data(sk, &timeo, last); 2487dfbafc99SSabrina Dubroca } 24881da177e4SLinus Torvalds 248977527313SIlpo Järvinen if ((flags & MSG_PEEK) && 249077527313SIlpo Järvinen (peek_seq - copied - urg_hole != tp->copied_seq)) { 2491e87cc472SJoe Perches net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 2492e87cc472SJoe Perches current->comm, 2493e87cc472SJoe Perches task_pid_nr(current)); 24941da177e4SLinus Torvalds peek_seq = tp->copied_seq; 24951da177e4SLinus Torvalds } 24961da177e4SLinus Torvalds continue; 24971da177e4SLinus Torvalds 24981da177e4SLinus Torvalds found_ok_skb: 24991da177e4SLinus Torvalds /* Ok so how much can we use? */ 25001da177e4SLinus Torvalds used = skb->len - offset; 25011da177e4SLinus Torvalds if (len < used) 25021da177e4SLinus Torvalds used = len; 25031da177e4SLinus Torvalds 25041da177e4SLinus Torvalds /* Do we have urgent data here? */ 2505b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) { 25061da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - *seq; 25071da177e4SLinus Torvalds if (urg_offset < used) { 25081da177e4SLinus Torvalds if (!urg_offset) { 25091da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_URGINLINE)) { 25107db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1); 251177527313SIlpo Järvinen urg_hole++; 25121da177e4SLinus Torvalds offset++; 25131da177e4SLinus Torvalds used--; 25141da177e4SLinus Torvalds if (!used) 25151da177e4SLinus Torvalds goto skip_copy; 25161da177e4SLinus Torvalds } 25171da177e4SLinus Torvalds } else 25181da177e4SLinus Torvalds used = urg_offset; 25191da177e4SLinus Torvalds } 25201da177e4SLinus Torvalds } 25211da177e4SLinus Torvalds 25221da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) { 252351f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, offset, msg, used); 25241da177e4SLinus Torvalds if (err) { 25251da177e4SLinus Torvalds /* Exception. Bailout! */ 25261da177e4SLinus Torvalds if (!copied) 25271da177e4SLinus Torvalds copied = -EFAULT; 25281da177e4SLinus Torvalds break; 25291da177e4SLinus Torvalds } 25301da177e4SLinus Torvalds } 25311da177e4SLinus Torvalds 25327db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + used); 25331da177e4SLinus Torvalds copied += used; 25341da177e4SLinus Torvalds len -= used; 25351da177e4SLinus Torvalds 25361da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 25371da177e4SLinus Torvalds 25381da177e4SLinus Torvalds skip_copy: 2539b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { 25407b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0); 254131770e34SFlorian Westphal tcp_fast_path_check(sk); 254231770e34SFlorian Westphal } 25431da177e4SLinus Torvalds 254498aaa913SMike Maloney if (TCP_SKB_CB(skb)->has_rxtstamp) { 25452cd81161SArjun Roy tcp_update_recv_tstamps(skb, tss); 2546925bba24SArjun Roy *cmsg_flags |= TCP_CMSG_TS; 254798aaa913SMike Maloney } 2548cc4de047SKelly Littlepage 2549cc4de047SKelly Littlepage if (used + offset < skb->len) 2550cc4de047SKelly Littlepage continue; 2551cc4de047SKelly Littlepage 2552e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 25531da177e4SLinus Torvalds goto found_fin_ok; 25547bced397SDan Williams if (!(flags & MSG_PEEK)) 25553df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 25561da177e4SLinus Torvalds continue; 25571da177e4SLinus Torvalds 25581da177e4SLinus Torvalds found_fin_ok: 25591da177e4SLinus Torvalds /* Process the FIN. 
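	 * [Editor's note] The FIN itself occupies one unit of sequence
	 * space, which is why the sequence cursor advances by one below
	 * even though no payload byte is consumed.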
*/ 25607db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1); 25617bced397SDan Williams if (!(flags & MSG_PEEK)) 25623df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 25631da177e4SLinus Torvalds break; 25641da177e4SLinus Torvalds } while (len > 0); 25651da177e4SLinus Torvalds 25661da177e4SLinus Torvalds /* According to UNIX98, msg_name/msg_namelen are ignored 25671da177e4SLinus Torvalds * on connected socket. I was just happy when found this 8) --ANK 25681da177e4SLinus Torvalds */ 25691da177e4SLinus Torvalds 25701da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */ 25710e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied); 25721da177e4SLinus Torvalds return copied; 25731da177e4SLinus Torvalds 25741da177e4SLinus Torvalds out: 25751da177e4SLinus Torvalds return err; 25761da177e4SLinus Torvalds 25771da177e4SLinus Torvalds recv_urg: 2578377f0a08SRami Rosen err = tcp_recv_urg(sk, msg, len, flags); 25791da177e4SLinus Torvalds goto out; 2580c0e88ff0SPavel Emelyanov 2581c0e88ff0SPavel Emelyanov recv_sndq: 2582c0e88ff0SPavel Emelyanov err = tcp_peek_sndq(sk, msg, len); 2583c0e88ff0SPavel Emelyanov goto out; 25841da177e4SLinus Torvalds } 25852cd81161SArjun Roy 2586ec095263SOliver Hartkopp int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, 2587ec095263SOliver Hartkopp int *addr_len) 25882cd81161SArjun Roy { 2589f94fd25cSJens Axboe int cmsg_flags = 0, ret; 25902cd81161SArjun Roy struct scm_timestamping_internal tss; 25912cd81161SArjun Roy 25922cd81161SArjun Roy if (unlikely(flags & MSG_ERRQUEUE)) 25932cd81161SArjun Roy return inet_recv_error(sk, msg, len, addr_len); 25942cd81161SArjun Roy 25952cd81161SArjun Roy if (sk_can_busy_loop(sk) && 25962cd81161SArjun Roy skb_queue_empty_lockless(&sk->sk_receive_queue) && 25972cd81161SArjun Roy sk->sk_state == TCP_ESTABLISHED) 2598ec095263SOliver Hartkopp sk_busy_loop(sk, flags & MSG_DONTWAIT); 25992cd81161SArjun Roy 26002cd81161SArjun Roy lock_sock(sk); 2601ec095263SOliver Hartkopp ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags); 26022cd81161SArjun Roy release_sock(sk); 26032cd81161SArjun Roy 2604f94fd25cSJens Axboe if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) { 2605925bba24SArjun Roy if (cmsg_flags & TCP_CMSG_TS) 26062cd81161SArjun Roy tcp_recv_timestamp(msg, sk, &tss); 2607f94fd25cSJens Axboe if (msg->msg_get_inq) { 2608f94fd25cSJens Axboe msg->msg_inq = tcp_inq_hint(sk); 2609f94fd25cSJens Axboe if (cmsg_flags & TCP_CMSG_INQ) 2610f94fd25cSJens Axboe put_cmsg(msg, SOL_TCP, TCP_CM_INQ, 2611f94fd25cSJens Axboe sizeof(msg->msg_inq), &msg->msg_inq); 26122cd81161SArjun Roy } 26132cd81161SArjun Roy } 26142cd81161SArjun Roy return ret; 26152cd81161SArjun Roy } 26164bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_recvmsg); 26171da177e4SLinus Torvalds 2618490d5046SIlpo Järvinen void tcp_set_state(struct sock *sk, int state) 2619490d5046SIlpo Järvinen { 2620490d5046SIlpo Järvinen int oldstate = sk->sk_state; 2621490d5046SIlpo Järvinen 2622d4487491SLawrence Brakmo /* We defined a new enum for TCP states that are exported in BPF 2623d4487491SLawrence Brakmo * so as not force the internal TCP states to be frozen. The 2624d4487491SLawrence Brakmo * following checks will detect if an internal state value ever 2625d4487491SLawrence Brakmo * differs from the BPF value. If this ever happens, then we will 2626d4487491SLawrence Brakmo * need to remap the internal value to the BPF value before calling 2627d4487491SLawrence Brakmo * tcp_call_bpf_2arg. 
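 *
 * [Editor's note, illustrative only] A sockops BPF program can observe
 * these transitions once it sets BPF_SOCK_OPS_STATE_CB_FLAG; per the
 * tcp_call_bpf_2arg() call below, args[0] carries the old state and
 * args[1] the new one. Hedged sketch, assuming libbpf's bpf_helpers.h:
 *
 *	SEC("sockops")
 *	int watch_states(struct bpf_sock_ops *skops)
 *	{
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_TCP_CONNECT_CB:
 *			bpf_sock_ops_cb_flags_set(skops,
 *						  BPF_SOCK_OPS_STATE_CB_FLAG);
 *			break;
 *		case BPF_SOCK_OPS_STATE_CB:
 *			bpf_printk("tcp state %d -> %d",
 *				   skops->args[0], skops->args[1]);
 *			break;
 *		}
 *		return 1;
 *	}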
2628d4487491SLawrence Brakmo */ 2629d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED); 2630d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT); 2631d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV); 2632d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1); 2633d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2); 2634d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT); 2635d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE); 2636d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT); 2637d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK); 2638d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN); 2639d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING); 2640d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV); 2641d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES); 2642d4487491SLawrence Brakmo 264397a19cafSYonghong Song /* bpf uapi header bpf.h defines an anonymous enum with values 264497a19cafSYonghong Song * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux 264597a19cafSYonghong Song * is able to emit this enum in DWARF due to the above BUILD_BUG_ON. 264697a19cafSYonghong Song * But clang built vmlinux does not have this enum in DWARF 264797a19cafSYonghong Song * since clang removes the above code before generating IR/debuginfo. 264897a19cafSYonghong Song * Let us explicitly emit the type debuginfo to ensure the 264997a19cafSYonghong Song * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF 265097a19cafSYonghong Song * regardless of which compiler is used. 265197a19cafSYonghong Song */ 265297a19cafSYonghong Song BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED); 265397a19cafSYonghong Song 2654d4487491SLawrence Brakmo if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) 2655d4487491SLawrence Brakmo tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); 2656e8fce239SSong Liu 2657490d5046SIlpo Järvinen switch (state) { 2658490d5046SIlpo Järvinen case TCP_ESTABLISHED: 2659490d5046SIlpo Järvinen if (oldstate != TCP_ESTABLISHED) 266081cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2661490d5046SIlpo Järvinen break; 2662490d5046SIlpo Järvinen 2663490d5046SIlpo Järvinen case TCP_CLOSE: 2664490d5046SIlpo Järvinen if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 266581cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 2666490d5046SIlpo Järvinen 2667490d5046SIlpo Järvinen sk->sk_prot->unhash(sk); 2668490d5046SIlpo Järvinen if (inet_csk(sk)->icsk_bind_hash && 2669490d5046SIlpo Järvinen !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 2670ab1e0a13SArnaldo Carvalho de Melo inet_put_port(sk); 2671a8eceea8SJoe Perches fallthrough; 2672490d5046SIlpo Järvinen default: 2673490d5046SIlpo Järvinen if (oldstate == TCP_ESTABLISHED) 267474688e48SPavel Emelyanov TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2675490d5046SIlpo Järvinen } 2676490d5046SIlpo Järvinen 2677490d5046SIlpo Järvinen /* Change state AFTER socket is unhashed to avoid closed 2678490d5046SIlpo Järvinen * socket sitting in hash tables. 
2679490d5046SIlpo Järvinen */ 2680563e0bb0SYafang Shao inet_sk_state_store(sk, state); 2681490d5046SIlpo Järvinen } 2682490d5046SIlpo Järvinen EXPORT_SYMBOL_GPL(tcp_set_state); 2683490d5046SIlpo Järvinen 26841da177e4SLinus Torvalds /* 26851da177e4SLinus Torvalds * State processing on a close. This implements the state shift for 26861da177e4SLinus Torvalds * sending our FIN frame. Note that we only send a FIN for some 26871da177e4SLinus Torvalds * states. A shutdown() may have already sent the FIN, or we may be 26881da177e4SLinus Torvalds * closed. 26891da177e4SLinus Torvalds */ 26901da177e4SLinus Torvalds 26919b5b5cffSArjan van de Ven static const unsigned char new_state[16] = { 26921da177e4SLinus Torvalds /* current state: new state: action: */ 26930980c1e3SEric Dumazet [0 /* (Invalid) */] = TCP_CLOSE, 26940980c1e3SEric Dumazet [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 26950980c1e3SEric Dumazet [TCP_SYN_SENT] = TCP_CLOSE, 26960980c1e3SEric Dumazet [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 26970980c1e3SEric Dumazet [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 26980980c1e3SEric Dumazet [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 26990980c1e3SEric Dumazet [TCP_TIME_WAIT] = TCP_CLOSE, 27000980c1e3SEric Dumazet [TCP_CLOSE] = TCP_CLOSE, 27010980c1e3SEric Dumazet [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 27020980c1e3SEric Dumazet [TCP_LAST_ACK] = TCP_LAST_ACK, 27030980c1e3SEric Dumazet [TCP_LISTEN] = TCP_CLOSE, 27040980c1e3SEric Dumazet [TCP_CLOSING] = TCP_CLOSING, 27050980c1e3SEric Dumazet [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ 27061da177e4SLinus Torvalds }; 27071da177e4SLinus Torvalds 27081da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk) 27091da177e4SLinus Torvalds { 27101da177e4SLinus Torvalds int next = (int)new_state[sk->sk_state]; 27111da177e4SLinus Torvalds int ns = next & TCP_STATE_MASK; 27121da177e4SLinus Torvalds 27131da177e4SLinus Torvalds tcp_set_state(sk, ns); 27141da177e4SLinus Torvalds 27151da177e4SLinus Torvalds return next & TCP_ACTION_FIN; 27161da177e4SLinus Torvalds } 27171da177e4SLinus Torvalds 27181da177e4SLinus Torvalds /* 27191da177e4SLinus Torvalds * Shutdown the sending side of a connection. Much like close except 27201f29b058SSatoru SATOH * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 27211da177e4SLinus Torvalds */ 27221da177e4SLinus Torvalds 27231da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how) 27241da177e4SLinus Torvalds { 27251da177e4SLinus Torvalds /* We need to grab some memory, and put together a FIN, 27261da177e4SLinus Torvalds * and then put it into the queue to be sent. 27271da177e4SLinus Torvalds * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 27281da177e4SLinus Torvalds */ 27291da177e4SLinus Torvalds if (!(how & SEND_SHUTDOWN)) 27301da177e4SLinus Torvalds return; 27311da177e4SLinus Torvalds 27321da177e4SLinus Torvalds /* If we've already sent a FIN, or it's a closed state, skip this. */ 27331da177e4SLinus Torvalds if ((1 << sk->sk_state) & 27341da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_SYN_SENT | 27351da177e4SLinus Torvalds TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { 27361da177e4SLinus Torvalds /* Clear out any half completed packets. FIN if needed. 
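		 *
		 * [Editor's note] This is what backs the classic
		 * user-space half-close idiom:
		 *
		 *	shutdown(fd, SHUT_WR);	// send FIN, peer sees EOF
		 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
		 *		consume(buf, n);	// receiving still works
		 *	close(fd);
		 *
		 * (consume() is a placeholder, not a real API.)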
*/ 27371da177e4SLinus Torvalds if (tcp_close_state(sk)) 27381da177e4SLinus Torvalds tcp_send_fin(sk); 27391da177e4SLinus Torvalds } 27401da177e4SLinus Torvalds } 27414bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_shutdown); 27421da177e4SLinus Torvalds 274319757cebSEric Dumazet int tcp_orphan_count_sum(void) 274419757cebSEric Dumazet { 274519757cebSEric Dumazet int i, total = 0; 274619757cebSEric Dumazet 274719757cebSEric Dumazet for_each_possible_cpu(i) 274819757cebSEric Dumazet total += per_cpu(tcp_orphan_count, i); 274919757cebSEric Dumazet 275019757cebSEric Dumazet return max(total, 0); 275119757cebSEric Dumazet } 275219757cebSEric Dumazet 275319757cebSEric Dumazet static int tcp_orphan_cache; 275419757cebSEric Dumazet static struct timer_list tcp_orphan_timer; 275519757cebSEric Dumazet #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) 275619757cebSEric Dumazet 275719757cebSEric Dumazet static void tcp_orphan_update(struct timer_list *unused) 275819757cebSEric Dumazet { 275919757cebSEric Dumazet WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); 276019757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 276119757cebSEric Dumazet } 276219757cebSEric Dumazet 276319757cebSEric Dumazet static bool tcp_too_many_orphans(int shift) 276419757cebSEric Dumazet { 276547e6ab24SKuniyuki Iwashima return READ_ONCE(tcp_orphan_cache) << shift > 276647e6ab24SKuniyuki Iwashima READ_ONCE(sysctl_tcp_max_orphans); 276719757cebSEric Dumazet } 276819757cebSEric Dumazet 2769efcdbf24SArun Sharma bool tcp_check_oom(struct sock *sk, int shift) 2770efcdbf24SArun Sharma { 2771efcdbf24SArun Sharma bool too_many_orphans, out_of_socket_memory; 2772efcdbf24SArun Sharma 277319757cebSEric Dumazet too_many_orphans = tcp_too_many_orphans(shift); 2774efcdbf24SArun Sharma out_of_socket_memory = tcp_out_of_memory(sk); 2775efcdbf24SArun Sharma 2776e87cc472SJoe Perches if (too_many_orphans) 2777e87cc472SJoe Perches net_info_ratelimited("too many orphaned sockets\n"); 2778e87cc472SJoe Perches if (out_of_socket_memory) 2779e87cc472SJoe Perches net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); 2780efcdbf24SArun Sharma return too_many_orphans || out_of_socket_memory; 2781efcdbf24SArun Sharma } 2782efcdbf24SArun Sharma 278377c3c956SPaolo Abeni void __tcp_close(struct sock *sk, long timeout) 27841da177e4SLinus Torvalds { 27851da177e4SLinus Torvalds struct sk_buff *skb; 27861da177e4SLinus Torvalds int data_was_unread = 0; 278775c2d907SHerbert Xu int state; 27881da177e4SLinus Torvalds 2789e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 27901da177e4SLinus Torvalds 27911da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) { 27921da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 27931da177e4SLinus Torvalds 27941da177e4SLinus Torvalds /* Special case. */ 27950a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk); 27961da177e4SLinus Torvalds 27971da177e4SLinus Torvalds goto adjudge_to_death; 27981da177e4SLinus Torvalds } 27991da177e4SLinus Torvalds 28001da177e4SLinus Torvalds /* We need to flush the recv. buffs. We do this only on the 28011da177e4SLinus Torvalds * descriptor close, not protocol-sourced closes, because the 28021da177e4SLinus Torvalds * reader process may not have drained the data yet! 
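	 *
	 * [Editor's note] The consequence, spelled out by the RFC 2525
	 * comment further down: closing with unread data makes us send a
	 * reset instead of a FIN, so the peer's next read()/write() will
	 * typically fail with ECONNRESET/EPIPE rather than see a clean EOF.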
28031da177e4SLinus Torvalds */ 28041da177e4SLinus Torvalds while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 2805e11ecddfSEric Dumazet u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; 2806e11ecddfSEric Dumazet 2807e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2808e11ecddfSEric Dumazet len--; 28091da177e4SLinus Torvalds data_was_unread += len; 28101da177e4SLinus Torvalds __kfree_skb(skb); 28111da177e4SLinus Torvalds } 28121da177e4SLinus Torvalds 2813565b7b2dSKonstantin Khorenko /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ 2814565b7b2dSKonstantin Khorenko if (sk->sk_state == TCP_CLOSE) 2815565b7b2dSKonstantin Khorenko goto adjudge_to_death; 2816565b7b2dSKonstantin Khorenko 281765bb723cSGerrit Renker /* As outlined in RFC 2525, section 2.17, we send a RST here because 281865bb723cSGerrit Renker * data was lost. To witness the awful effects of the old behavior of 281965bb723cSGerrit Renker * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 282065bb723cSGerrit Renker * GET in an FTP client, suspend the process, wait for the client to 282165bb723cSGerrit Renker * advertise a zero window, then kill -9 the FTP client, wheee... 282265bb723cSGerrit Renker * Note: timeout is always zero in such a case. 28231da177e4SLinus Torvalds */ 2824ee995283SPavel Emelyanov if (unlikely(tcp_sk(sk)->repair)) { 2825ee995283SPavel Emelyanov sk->sk_prot->disconnect(sk, 0); 2826ee995283SPavel Emelyanov } else if (data_was_unread) { 28271da177e4SLinus Torvalds /* Unread data was tossed, zap the connection. */ 28286aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 28291da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 2830aa133076SWu Fengguang tcp_send_active_reset(sk, sk->sk_allocation); 28311da177e4SLinus Torvalds } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 28321da177e4SLinus Torvalds /* Check zero linger _after_ checking for unread data. */ 28331da177e4SLinus Torvalds sk->sk_prot->disconnect(sk, 0); 28346aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 28351da177e4SLinus Torvalds } else if (tcp_close_state(sk)) { 28361da177e4SLinus Torvalds /* We FIN if the application ate all the data before 28371da177e4SLinus Torvalds * zapping the connection. 28381da177e4SLinus Torvalds */ 28391da177e4SLinus Torvalds 28401da177e4SLinus Torvalds /* RED-PEN. Formally speaking, we have broken TCP state 28411da177e4SLinus Torvalds * machine. State transitions: 28421da177e4SLinus Torvalds * 28431da177e4SLinus Torvalds * TCP_ESTABLISHED -> TCP_FIN_WAIT1 28441da177e4SLinus Torvalds * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) 28451da177e4SLinus Torvalds * TCP_CLOSE_WAIT -> TCP_LAST_ACK 28461da177e4SLinus Torvalds * 28471da177e4SLinus Torvalds * are legal only when FIN has been sent (i.e. in window), 28481da177e4SLinus Torvalds * rather than queued out of window. Purists blame. 28491da177e4SLinus Torvalds * 28501da177e4SLinus Torvalds * F.e. "RFC state" is ESTABLISHED, 28511da177e4SLinus Torvalds * if Linux state is FIN-WAIT-1, but FIN is still not sent. 
28521da177e4SLinus Torvalds * 28531da177e4SLinus Torvalds * The visible declinations are that sometimes 28541da177e4SLinus Torvalds * we enter time-wait state, when it is not required really 28551da177e4SLinus Torvalds * (harmless), do not send active resets, when they are 28561da177e4SLinus Torvalds * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 28571da177e4SLinus Torvalds * they look as CLOSING or LAST_ACK for Linux) 28581da177e4SLinus Torvalds * Probably, I missed some more holelets. 28591da177e4SLinus Torvalds * --ANK 28608336886fSJerry Chu * XXX (TFO) - To start off we don't support SYN+ACK+FIN 28618336886fSJerry Chu * in a single packet! (May consider it later but will 28628336886fSJerry Chu * probably need API support or TCP_CORK SYN-ACK until 28638336886fSJerry Chu * data is written and socket is closed.) 28641da177e4SLinus Torvalds */ 28651da177e4SLinus Torvalds tcp_send_fin(sk); 28661da177e4SLinus Torvalds } 28671da177e4SLinus Torvalds 28681da177e4SLinus Torvalds sk_stream_wait_close(sk, timeout); 28691da177e4SLinus Torvalds 28701da177e4SLinus Torvalds adjudge_to_death: 287175c2d907SHerbert Xu state = sk->sk_state; 287275c2d907SHerbert Xu sock_hold(sk); 287375c2d907SHerbert Xu sock_orphan(sk); 287475c2d907SHerbert Xu 28751da177e4SLinus Torvalds local_bh_disable(); 28761da177e4SLinus Torvalds bh_lock_sock(sk); 28778873c064SEric Dumazet /* remove backlog if any, without releasing ownership. */ 28788873c064SEric Dumazet __release_sock(sk); 28791da177e4SLinus Torvalds 288019757cebSEric Dumazet this_cpu_inc(tcp_orphan_count); 2881eb4dea58SHerbert Xu 288275c2d907SHerbert Xu /* Have we already been destroyed by a softirq or backlog? */ 288375c2d907SHerbert Xu if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 288475c2d907SHerbert Xu goto out; 28851da177e4SLinus Torvalds 28861da177e4SLinus Torvalds /* This is a (useful) BSD violating of the RFC. There is a 28871da177e4SLinus Torvalds * problem with TCP as specified in that the other end could 28881da177e4SLinus Torvalds * keep a socket open forever with no application left this end. 2889b10bd54cSJesper Juhl * We use a 1 minute timeout (about the same as BSD) then kill 28901da177e4SLinus Torvalds * our end. If they send after that then tough - BUT: long enough 28911da177e4SLinus Torvalds * that we won't make the old 4*rto = almost no time - whoops 28921da177e4SLinus Torvalds * reset mistake. 28931da177e4SLinus Torvalds * 28941da177e4SLinus Torvalds * Nope, it was not mistake. It is really desired behaviour 28951da177e4SLinus Torvalds * f.e. on http servers, when such sockets are useless, but 28961da177e4SLinus Torvalds * consume significant resources. Let's do it with special 28971da177e4SLinus Torvalds * linger2 option. --ANK 28981da177e4SLinus Torvalds */ 28991da177e4SLinus Torvalds 29001da177e4SLinus Torvalds if (sk->sk_state == TCP_FIN_WAIT2) { 29011da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 29021da177e4SLinus Torvalds if (tp->linger2 < 0) { 29031da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 29041da177e4SLinus Torvalds tcp_send_active_reset(sk, GFP_ATOMIC); 290502a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), 2906de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONLINGER); 29071da177e4SLinus Torvalds } else { 2908463c84b9SArnaldo Carvalho de Melo const int tmo = tcp_fin_time(sk); 29091da177e4SLinus Torvalds 29101da177e4SLinus Torvalds if (tmo > TCP_TIMEWAIT_LEN) { 291152499afeSDavid S. Miller inet_csk_reset_keepalive_timer(sk, 291252499afeSDavid S. 
Miller tmo - TCP_TIMEWAIT_LEN); 29131da177e4SLinus Torvalds } else { 29141da177e4SLinus Torvalds tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 29151da177e4SLinus Torvalds goto out; 29161da177e4SLinus Torvalds } 29171da177e4SLinus Torvalds } 29181da177e4SLinus Torvalds } 29191da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 2920efcdbf24SArun Sharma if (tcp_check_oom(sk, 0)) { 29211da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 29221da177e4SLinus Torvalds tcp_send_active_reset(sk, GFP_ATOMIC); 292302a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), 2924de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONMEMORY); 29254ee806d5SDan Streetman } else if (!check_net(sock_net(sk))) { 29264ee806d5SDan Streetman /* Not possible to send reset; just close */ 29274ee806d5SDan Streetman tcp_set_state(sk, TCP_CLOSE); 29281da177e4SLinus Torvalds } 29291da177e4SLinus Torvalds } 29301da177e4SLinus Torvalds 29318336886fSJerry Chu if (sk->sk_state == TCP_CLOSE) { 2932d983ea6fSEric Dumazet struct request_sock *req; 2933d983ea6fSEric Dumazet 2934d983ea6fSEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 2935d983ea6fSEric Dumazet lockdep_sock_is_held(sk)); 29368336886fSJerry Chu /* We could get here with a non-NULL req if the socket is 29378336886fSJerry Chu * aborted (e.g., closed with unread data) before 3WHS 29388336886fSJerry Chu * finishes. 29398336886fSJerry Chu */ 294000db4124SIan Morris if (req) 29418336886fSJerry Chu reqsk_fastopen_remove(sk, req, false); 29420a5578cfSArnaldo Carvalho de Melo inet_csk_destroy_sock(sk); 29438336886fSJerry Chu } 29441da177e4SLinus Torvalds /* Otherwise, socket is reprieved until protocol close. */ 29451da177e4SLinus Torvalds 29461da177e4SLinus Torvalds out: 29471da177e4SLinus Torvalds bh_unlock_sock(sk); 29481da177e4SLinus Torvalds local_bh_enable(); 294977c3c956SPaolo Abeni } 295077c3c956SPaolo Abeni 295177c3c956SPaolo Abeni void tcp_close(struct sock *sk, long timeout) 295277c3c956SPaolo Abeni { 295377c3c956SPaolo Abeni lock_sock(sk); 295477c3c956SPaolo Abeni __tcp_close(sk, timeout); 29558873c064SEric Dumazet release_sock(sk); 29561da177e4SLinus Torvalds sock_put(sk); 29571da177e4SLinus Torvalds } 29584bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_close); 29591da177e4SLinus Torvalds 29601da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */ 29611da177e4SLinus Torvalds 2962a2a385d6SEric Dumazet static inline bool tcp_need_reset(int state) 29631da177e4SLinus Torvalds { 29641da177e4SLinus Torvalds return (1 << state) & 29651da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 2966a7150e38SEric Dumazet TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 29671da177e4SLinus Torvalds } 29681da177e4SLinus Torvalds 296975c119afSEric Dumazet static void tcp_rtx_queue_purge(struct sock *sk) 297075c119afSEric Dumazet { 297175c119afSEric Dumazet struct rb_node *p = rb_first(&sk->tcp_rtx_queue); 297275c119afSEric Dumazet 29732bec445fSEric Dumazet tcp_sk(sk)->highest_sack = NULL; 297475c119afSEric Dumazet while (p) { 297575c119afSEric Dumazet struct sk_buff *skb = rb_to_skb(p); 297675c119afSEric Dumazet 297775c119afSEric Dumazet p = rb_next(p); 297875c119afSEric Dumazet /* Since we are deleting whole queue, no need to 297975c119afSEric Dumazet * list_del(&skb->tcp_tsorted_anchor) 298075c119afSEric Dumazet */ 298175c119afSEric Dumazet tcp_rtx_queue_unlink(skb, sk); 298203271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 298375c119afSEric Dumazet } 298475c119afSEric Dumazet } 298575c119afSEric Dumazet 2986ac3f09baSEric Dumazet void 
tcp_write_queue_purge(struct sock *sk) 2987ac3f09baSEric Dumazet { 2988ac3f09baSEric Dumazet struct sk_buff *skb; 2989ac3f09baSEric Dumazet 2990ac3f09baSEric Dumazet tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 2991ac3f09baSEric Dumazet while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 2992ac3f09baSEric Dumazet tcp_skb_tsorted_anchor_cleanup(skb); 299303271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 2994ac3f09baSEric Dumazet } 299575c119afSEric Dumazet tcp_rtx_queue_purge(sk); 2996ac3f09baSEric Dumazet INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 2997ac3f09baSEric Dumazet tcp_clear_all_retrans_hints(tcp_sk(sk)); 2998bffd168cSSoheil Hassas Yeganeh tcp_sk(sk)->packets_out = 0; 299904c03114SEric Dumazet inet_csk(sk)->icsk_backoff = 0; 3000ac3f09baSEric Dumazet } 3001ac3f09baSEric Dumazet 30021da177e4SLinus Torvalds int tcp_disconnect(struct sock *sk, int flags) 30031da177e4SLinus Torvalds { 30041da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk); 3005463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 30061da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 30071da177e4SLinus Torvalds int old_state = sk->sk_state; 30080f317464SEric Dumazet u32 seq; 30091da177e4SLinus Torvalds 30104faeee0cSEric Dumazet /* Deny disconnect if other threads are blocked in sk_wait_event() 30114faeee0cSEric Dumazet * or inet_wait_for_connect(). 30124faeee0cSEric Dumazet */ 30134faeee0cSEric Dumazet if (sk->sk_wait_pending) 30144faeee0cSEric Dumazet return -EBUSY; 30154faeee0cSEric Dumazet 30161da177e4SLinus Torvalds if (old_state != TCP_CLOSE) 30171da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 30181da177e4SLinus Torvalds 30191da177e4SLinus Torvalds /* ABORT function of RFC793 */ 30201da177e4SLinus Torvalds if (old_state == TCP_LISTEN) { 30210a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk); 3022ee995283SPavel Emelyanov } else if (unlikely(tp->repair)) { 3023e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNABORTED); 30241da177e4SLinus Torvalds } else if (tcp_need_reset(old_state) || 30251da177e4SLinus Torvalds (tp->snd_nxt != tp->write_seq && 30261da177e4SLinus Torvalds (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 3027caa20d9aSStephen Hemminger /* The last check adjusts for discrepancy of Linux wrt. RFC 30281da177e4SLinus Torvalds * states 30291da177e4SLinus Torvalds */ 30301da177e4SLinus Torvalds tcp_send_active_reset(sk, gfp_any()); 3031e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET); 3032a7150e38SEric Dumazet } else if (old_state == TCP_SYN_SENT) 3033e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET); 30341da177e4SLinus Torvalds 30351da177e4SLinus Torvalds tcp_clear_xmit_timers(sk); 30361da177e4SLinus Torvalds __skb_queue_purge(&sk->sk_receive_queue); 30377db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 30387b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0); 3039fe067e8aSDavid S. 
Miller tcp_write_queue_purge(sk); 3040cf1ef3f0SWei Wang tcp_fastopen_active_disable_ofo_check(sk); 30419f5afeaeSYaogong Wang skb_rbtree_purge(&tp->out_of_order_queue); 30421da177e4SLinus Torvalds 3043c720c7e8SEric Dumazet inet->inet_dport = 0; 30441da177e4SLinus Torvalds 3045e0833d1fSKuniyuki Iwashima inet_bhash2_reset_saddr(sk); 30461da177e4SLinus Torvalds 3047e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, 0); 30481da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE); 3049740b0f18SEric Dumazet tp->srtt_us = 0; 3050b9e2e689SEric Dumazet tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 30513f6c65d6SWei Wang tp->rcv_rtt_last_tsecr = 0; 30520f317464SEric Dumazet 30530f317464SEric Dumazet seq = tp->write_seq + tp->max_window + 2; 30540f317464SEric Dumazet if (!seq) 30550f317464SEric Dumazet seq = 1; 30560f317464SEric Dumazet WRITE_ONCE(tp->write_seq, seq); 30570f317464SEric Dumazet 3058463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 30596687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 30609d9b1ee0SEnke Chen icsk->icsk_probes_tstamp = 0; 30616a408147SEric Dumazet icsk->icsk_rto = TCP_TIMEOUT_INIT; 3062ca584ba0SMartin KaFai Lau icsk->icsk_rto_min = TCP_RTO_MIN; 30632b8ee4f0SMartin KaFai Lau icsk->icsk_delack_max = TCP_DELACK_MAX; 30640b6a05c1SIlpo Järvinen tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 306540570375SEric Dumazet tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 30661da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 3067f4ce91ceSNeal Cardwell tp->is_cwnd_limited = 0; 3068f4ce91ceSNeal Cardwell tp->max_packets_out = 0; 30691fdf475aSEric Dumazet tp->window_clamp = 0; 30702fbdd562SEric Dumazet tp->delivered = 0; 3071e21db6f6SYuchung Cheng tp->delivered_ce = 0; 3072ce69e563SChristoph Paasch if (icsk->icsk_ca_ops->release) 3073ce69e563SChristoph Paasch icsk->icsk_ca_ops->release(sk); 3074ce69e563SChristoph Paasch memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 30758919a9b3SNeal Cardwell icsk->icsk_ca_initialized = 0; 30766687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 3077d4761754SYousuk Seung tp->is_sack_reneg = 0; 30781da177e4SLinus Torvalds tcp_clear_retrans(tp); 3079c13c48c0SEric Dumazet tp->total_retrans = 0; 3080463c84b9SArnaldo Carvalho de Melo inet_csk_delack_init(sk); 3081499350a5SWei Wang /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 3082499350a5SWei Wang * issue in __tcp_select_window() 3083499350a5SWei Wang */ 3084499350a5SWei Wang icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; 3085b40b4f79SSrinivas Aji memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 30861da177e4SLinus Torvalds __sk_dst_reset(sk); 30878f905c0eSEric Dumazet dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); 308817c3060bSEric Dumazet tcp_saved_syn_free(tp); 30895d9f4262SEric Dumazet tp->compressed_ack = 0; 3090784f8344SEric Dumazet tp->segs_in = 0; 3091784f8344SEric Dumazet tp->segs_out = 0; 3092ba113c3aSWei Wang tp->bytes_sent = 0; 3093e858faf5SChristoph Paasch tp->bytes_acked = 0; 3094e858faf5SChristoph Paasch tp->bytes_received = 0; 3095fb31c9b9SWei Wang tp->bytes_retrans = 0; 3096db7ffee6SEric Dumazet tp->data_segs_in = 0; 3097db7ffee6SEric Dumazet tp->data_segs_out = 0; 30987788174eSYuchung Cheng tp->duplicate_sack[0].start_seq = 0; 30997788174eSYuchung Cheng tp->duplicate_sack[0].end_seq = 0; 31007e10b655SWei Wang tp->dsack_dups = 0; 31017ec65372SWei Wang tp->reord_seen = 0; 31025c701549SEric Dumazet tp->retrans_out = 0; 31035c701549SEric Dumazet tp->sacked_out = 0; 31045c701549SEric Dumazet tp->tlp_high_seq = 0; 31055c701549SEric Dumazet 
tp->last_oow_ack_time = 0; 310629c1c446SMubashir Adnan Qureshi tp->plb_rehash = 0; 31076cda8b74SEric Dumazet /* There's a bubble in the pipe until at least the first ACK. */ 31086cda8b74SEric Dumazet tp->app_limited = ~0U; 3109300b655dSDavid Morley tp->rate_app_limited = 1; 3110792c4354SEric Dumazet tp->rack.mstamp = 0; 3111792c4354SEric Dumazet tp->rack.advanced = 0; 3112792c4354SEric Dumazet tp->rack.reo_wnd_steps = 1; 3113792c4354SEric Dumazet tp->rack.last_delivered = 0; 3114792c4354SEric Dumazet tp->rack.reo_wnd_persist = 0; 3115792c4354SEric Dumazet tp->rack.dsack_seen = 0; 31166bcdc40dSEric Dumazet tp->syn_data_acked = 0; 31176bcdc40dSEric Dumazet tp->rx_opt.saw_tstamp = 0; 31186bcdc40dSEric Dumazet tp->rx_opt.dsack = 0; 31196bcdc40dSEric Dumazet tp->rx_opt.num_sacks = 0; 3120f9af2dbbSThomas Higdon tp->rcv_ooopack = 0; 31216cda8b74SEric Dumazet 31221da177e4SLinus Torvalds 31237db92362SWei Wang /* Clean up fastopen related fields */ 31247db92362SWei Wang tcp_free_fastopen_req(tp); 31257db92362SWei Wang inet->defer_connect = 0; 312648027478SJason Baron tp->fastopen_client_fail = 0; 31277db92362SWei Wang 3128c720c7e8SEric Dumazet WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 31291da177e4SLinus Torvalds 31309b42d55aSLi RongQing if (sk->sk_frag.page) { 31319b42d55aSLi RongQing put_page(sk->sk_frag.page); 31329b42d55aSLi RongQing sk->sk_frag.page = NULL; 31339b42d55aSLi RongQing sk->sk_frag.offset = 0; 31349b42d55aSLi RongQing } 3135e3ae2365SAlexander Aring sk_error_report(sk); 3136a01512b1SYueHaibing return 0; 31371da177e4SLinus Torvalds } 31384bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_disconnect); 31391da177e4SLinus Torvalds 3140a2a385d6SEric Dumazet static inline bool tcp_can_repair_sock(const struct sock *sk) 3141ee995283SPavel Emelyanov { 3142cb388e7eSMartin KaFai Lau return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 3143319b0534SAndrey Vagin (sk->sk_state != TCP_LISTEN); 3144ee995283SPavel Emelyanov } 3145ee995283SPavel Emelyanov 3146d38d2b00SChristoph Hellwig static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) 3147b1ed4c4fSAndrey Vagin { 3148b1ed4c4fSAndrey Vagin struct tcp_repair_window opt; 3149b1ed4c4fSAndrey Vagin 3150b1ed4c4fSAndrey Vagin if (!tp->repair) 3151b1ed4c4fSAndrey Vagin return -EPERM; 3152b1ed4c4fSAndrey Vagin 3153b1ed4c4fSAndrey Vagin if (len != sizeof(opt)) 3154b1ed4c4fSAndrey Vagin return -EINVAL; 3155b1ed4c4fSAndrey Vagin 3156d38d2b00SChristoph Hellwig if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) 3157b1ed4c4fSAndrey Vagin return -EFAULT; 3158b1ed4c4fSAndrey Vagin 3159b1ed4c4fSAndrey Vagin if (opt.max_window < opt.snd_wnd) 3160b1ed4c4fSAndrey Vagin return -EINVAL; 3161b1ed4c4fSAndrey Vagin 3162b1ed4c4fSAndrey Vagin if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) 3163b1ed4c4fSAndrey Vagin return -EINVAL; 3164b1ed4c4fSAndrey Vagin 3165b1ed4c4fSAndrey Vagin if (after(opt.rcv_wup, tp->rcv_nxt)) 3166b1ed4c4fSAndrey Vagin return -EINVAL; 3167b1ed4c4fSAndrey Vagin 3168b1ed4c4fSAndrey Vagin tp->snd_wl1 = opt.snd_wl1; 3169b1ed4c4fSAndrey Vagin tp->snd_wnd = opt.snd_wnd; 3170b1ed4c4fSAndrey Vagin tp->max_window = opt.max_window; 3171b1ed4c4fSAndrey Vagin 3172b1ed4c4fSAndrey Vagin tp->rcv_wnd = opt.rcv_wnd; 3173b1ed4c4fSAndrey Vagin tp->rcv_wup = opt.rcv_wup; 3174b1ed4c4fSAndrey Vagin 3175b1ed4c4fSAndrey Vagin return 0; 3176b1ed4c4fSAndrey Vagin } 3177b1ed4c4fSAndrey Vagin 3178d38d2b00SChristoph Hellwig static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, 3179d38d2b00SChristoph Hellwig unsigned int len) 
3180b139ba4eSPavel Emelyanov { 318115e56515SDouglas Caetano dos Santos struct tcp_sock *tp = tcp_sk(sk); 3182de248a75SPavel Emelyanov struct tcp_repair_opt opt; 3183d3c48151SChristoph Hellwig size_t offset = 0; 3184b139ba4eSPavel Emelyanov 3185de248a75SPavel Emelyanov while (len >= sizeof(opt)) { 3186d3c48151SChristoph Hellwig if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) 3187b139ba4eSPavel Emelyanov return -EFAULT; 3188b139ba4eSPavel Emelyanov 3189d3c48151SChristoph Hellwig offset += sizeof(opt); 3190de248a75SPavel Emelyanov len -= sizeof(opt); 3191b139ba4eSPavel Emelyanov 3192de248a75SPavel Emelyanov switch (opt.opt_code) { 3193de248a75SPavel Emelyanov case TCPOPT_MSS: 3194de248a75SPavel Emelyanov tp->rx_opt.mss_clamp = opt.opt_val; 319515e56515SDouglas Caetano dos Santos tcp_mtup_init(sk); 3196b139ba4eSPavel Emelyanov break; 3197de248a75SPavel Emelyanov case TCPOPT_WINDOW: 3198bc26ccd8SAndrey Vagin { 3199bc26ccd8SAndrey Vagin u16 snd_wscale = opt.opt_val & 0xFFFF; 3200bc26ccd8SAndrey Vagin u16 rcv_wscale = opt.opt_val >> 16; 3201bc26ccd8SAndrey Vagin 3202589c49cbSGao Feng if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) 3203b139ba4eSPavel Emelyanov return -EFBIG; 3204b139ba4eSPavel Emelyanov 3205bc26ccd8SAndrey Vagin tp->rx_opt.snd_wscale = snd_wscale; 3206bc26ccd8SAndrey Vagin tp->rx_opt.rcv_wscale = rcv_wscale; 3207bc26ccd8SAndrey Vagin tp->rx_opt.wscale_ok = 1; 3208bc26ccd8SAndrey Vagin } 3209b139ba4eSPavel Emelyanov break; 3210b139ba4eSPavel Emelyanov case TCPOPT_SACK_PERM: 3211de248a75SPavel Emelyanov if (opt.opt_val != 0) 3212de248a75SPavel Emelyanov return -EINVAL; 3213de248a75SPavel Emelyanov 3214b139ba4eSPavel Emelyanov tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 3215b139ba4eSPavel Emelyanov break; 3216b139ba4eSPavel Emelyanov case TCPOPT_TIMESTAMP: 3217de248a75SPavel Emelyanov if (opt.opt_val != 0) 3218de248a75SPavel Emelyanov return -EINVAL; 3219de248a75SPavel Emelyanov 3220b139ba4eSPavel Emelyanov tp->rx_opt.tstamp_ok = 1; 3221b139ba4eSPavel Emelyanov break; 3222b139ba4eSPavel Emelyanov } 3223b139ba4eSPavel Emelyanov } 3224b139ba4eSPavel Emelyanov 3225b139ba4eSPavel Emelyanov return 0; 3226b139ba4eSPavel Emelyanov } 3227b139ba4eSPavel Emelyanov 3228a842fe14SEric Dumazet DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 3229a842fe14SEric Dumazet EXPORT_SYMBOL(tcp_tx_delay_enabled); 3230a842fe14SEric Dumazet 3231a842fe14SEric Dumazet static void tcp_enable_tx_delay(void) 3232a842fe14SEric Dumazet { 3233a842fe14SEric Dumazet if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { 3234a842fe14SEric Dumazet static int __tcp_tx_delay_enabled = 0; 3235a842fe14SEric Dumazet 3236a842fe14SEric Dumazet if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { 3237a842fe14SEric Dumazet static_branch_enable(&tcp_tx_delay_enabled); 3238a842fe14SEric Dumazet pr_info("TCP_TX_DELAY enabled\n"); 3239a842fe14SEric Dumazet } 3240a842fe14SEric Dumazet } 3241a842fe14SEric Dumazet } 3242a842fe14SEric Dumazet 3243db10538aSChristoph Hellwig /* When set indicates to always queue non-full frames. Later the user clears 3244db10538aSChristoph Hellwig * this option and we transmit any pending partial frames in the queue. This is 3245db10538aSChristoph Hellwig * meant to be used alongside sendfile() to get properly filled frames when the 3246db10538aSChristoph Hellwig * user (for example) must write out headers with a write() call first and then 3247db10538aSChristoph Hellwig * use sendfile to send out the data parts. 
3248db10538aSChristoph Hellwig * 3249db10538aSChristoph Hellwig * TCP_CORK can be set together with TCP_NODELAY and it is stronger than 3250db10538aSChristoph Hellwig * TCP_NODELAY. 3251db10538aSChristoph Hellwig */ 32526fadaa56SMaxim Galaganov void __tcp_sock_set_cork(struct sock *sk, bool on) 3253db10538aSChristoph Hellwig { 3254db10538aSChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk); 3255db10538aSChristoph Hellwig 3256db10538aSChristoph Hellwig if (on) { 3257db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_CORK; 3258db10538aSChristoph Hellwig } else { 3259db10538aSChristoph Hellwig tp->nonagle &= ~TCP_NAGLE_CORK; 3260db10538aSChristoph Hellwig if (tp->nonagle & TCP_NAGLE_OFF) 3261db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_PUSH; 3262db10538aSChristoph Hellwig tcp_push_pending_frames(sk); 3263db10538aSChristoph Hellwig } 3264db10538aSChristoph Hellwig } 3265db10538aSChristoph Hellwig 3266db10538aSChristoph Hellwig void tcp_sock_set_cork(struct sock *sk, bool on) 3267db10538aSChristoph Hellwig { 3268db10538aSChristoph Hellwig lock_sock(sk); 3269db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, on); 3270db10538aSChristoph Hellwig release_sock(sk); 3271db10538aSChristoph Hellwig } 3272db10538aSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_cork); 3273db10538aSChristoph Hellwig 327412abc5eeSChristoph Hellwig /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is 327512abc5eeSChristoph Hellwig * remembered, but it is not activated until cork is cleared. 327612abc5eeSChristoph Hellwig * 327712abc5eeSChristoph Hellwig * However, when TCP_NODELAY is set we make an explicit push, which overrides 327812abc5eeSChristoph Hellwig * even TCP_CORK for currently queued segments. 327912abc5eeSChristoph Hellwig */ 32806fadaa56SMaxim Galaganov void __tcp_sock_set_nodelay(struct sock *sk, bool on) 328112abc5eeSChristoph Hellwig { 328212abc5eeSChristoph Hellwig if (on) { 328312abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 328412abc5eeSChristoph Hellwig tcp_push_pending_frames(sk); 328512abc5eeSChristoph Hellwig } else { 328612abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; 328712abc5eeSChristoph Hellwig } 328812abc5eeSChristoph Hellwig } 328912abc5eeSChristoph Hellwig 329012abc5eeSChristoph Hellwig void tcp_sock_set_nodelay(struct sock *sk) 329112abc5eeSChristoph Hellwig { 329212abc5eeSChristoph Hellwig lock_sock(sk); 329312abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, true); 329412abc5eeSChristoph Hellwig release_sock(sk); 329512abc5eeSChristoph Hellwig } 329612abc5eeSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_nodelay); 329712abc5eeSChristoph Hellwig 3298ddd061b8SChristoph Hellwig static void __tcp_sock_set_quickack(struct sock *sk, int val) 3299ddd061b8SChristoph Hellwig { 3300ddd061b8SChristoph Hellwig if (!val) { 3301ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk); 3302ddd061b8SChristoph Hellwig return; 3303ddd061b8SChristoph Hellwig } 3304ddd061b8SChristoph Hellwig 3305ddd061b8SChristoph Hellwig inet_csk_exit_pingpong_mode(sk); 3306ddd061b8SChristoph Hellwig if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 3307ddd061b8SChristoph Hellwig inet_csk_ack_scheduled(sk)) { 3308ddd061b8SChristoph Hellwig inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; 3309ddd061b8SChristoph Hellwig tcp_cleanup_rbuf(sk, 1); 3310ddd061b8SChristoph Hellwig if (!(val & 1)) 3311ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk); 3312ddd061b8SChristoph Hellwig } 3313ddd061b8SChristoph Hellwig } 
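/* [Editor's addition, illustrative only -- kept under "#if 0" so the
 * translation unit is unchanged] A user-space counterpart of the
 * TCP_CORK semantics documented above: cork, write the headers,
 * sendfile() the body, then uncork to flush the final partial frame.
 * send_response() and its fd parameters are hypothetical names.
 */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_response(int sock, int file, off_t len)
{
	static const char hdr[] =
		"HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n\r\n";
	int on = 1, off = 0;

	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	write(sock, hdr, sizeof(hdr) - 1);	/* partial frame stays queued */
	sendfile(sock, file, NULL, len);	/* body coalesces with headers */
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));	/* flush */
}
#endif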
3314ddd061b8SChristoph Hellwig 3315ddd061b8SChristoph Hellwig void tcp_sock_set_quickack(struct sock *sk, int val) 3316ddd061b8SChristoph Hellwig { 3317ddd061b8SChristoph Hellwig lock_sock(sk); 3318ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val); 3319ddd061b8SChristoph Hellwig release_sock(sk); 3320ddd061b8SChristoph Hellwig } 3321ddd061b8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_quickack); 3322ddd061b8SChristoph Hellwig 3323557eadfcSChristoph Hellwig int tcp_sock_set_syncnt(struct sock *sk, int val) 3324557eadfcSChristoph Hellwig { 3325557eadfcSChristoph Hellwig if (val < 1 || val > MAX_TCP_SYNCNT) 3326557eadfcSChristoph Hellwig return -EINVAL; 3327557eadfcSChristoph Hellwig 3328557eadfcSChristoph Hellwig lock_sock(sk); 3329557eadfcSChristoph Hellwig inet_csk(sk)->icsk_syn_retries = val; 3330557eadfcSChristoph Hellwig release_sock(sk); 3331557eadfcSChristoph Hellwig return 0; 3332557eadfcSChristoph Hellwig } 3333557eadfcSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_syncnt); 3334557eadfcSChristoph Hellwig 3335c488aeadSChristoph Hellwig void tcp_sock_set_user_timeout(struct sock *sk, u32 val) 3336c488aeadSChristoph Hellwig { 3337c488aeadSChristoph Hellwig lock_sock(sk); 3338c488aeadSChristoph Hellwig inet_csk(sk)->icsk_user_timeout = val; 3339c488aeadSChristoph Hellwig release_sock(sk); 3340c488aeadSChristoph Hellwig } 3341c488aeadSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_user_timeout); 3342c488aeadSChristoph Hellwig 3343aad4a0a9SDmitry Yakunin int tcp_sock_set_keepidle_locked(struct sock *sk, int val) 334471c48eb8SChristoph Hellwig { 334571c48eb8SChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk); 334671c48eb8SChristoph Hellwig 334771c48eb8SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPIDLE) 334871c48eb8SChristoph Hellwig return -EINVAL; 334971c48eb8SChristoph Hellwig 335071c48eb8SChristoph Hellwig tp->keepalive_time = val * HZ; 335171c48eb8SChristoph Hellwig if (sock_flag(sk, SOCK_KEEPOPEN) && 335271c48eb8SChristoph Hellwig !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 335371c48eb8SChristoph Hellwig u32 elapsed = keepalive_time_elapsed(tp); 335471c48eb8SChristoph Hellwig 335571c48eb8SChristoph Hellwig if (tp->keepalive_time > elapsed) 335671c48eb8SChristoph Hellwig elapsed = tp->keepalive_time - elapsed; 335771c48eb8SChristoph Hellwig else 335871c48eb8SChristoph Hellwig elapsed = 0; 335971c48eb8SChristoph Hellwig inet_csk_reset_keepalive_timer(sk, elapsed); 336071c48eb8SChristoph Hellwig } 336171c48eb8SChristoph Hellwig 336271c48eb8SChristoph Hellwig return 0; 336371c48eb8SChristoph Hellwig } 336471c48eb8SChristoph Hellwig 336571c48eb8SChristoph Hellwig int tcp_sock_set_keepidle(struct sock *sk, int val) 336671c48eb8SChristoph Hellwig { 336771c48eb8SChristoph Hellwig int err; 336871c48eb8SChristoph Hellwig 336971c48eb8SChristoph Hellwig lock_sock(sk); 3370aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val); 337171c48eb8SChristoph Hellwig release_sock(sk); 337271c48eb8SChristoph Hellwig return err; 337371c48eb8SChristoph Hellwig } 337471c48eb8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepidle); 337571c48eb8SChristoph Hellwig 3376d41ecaacSChristoph Hellwig int tcp_sock_set_keepintvl(struct sock *sk, int val) 3377d41ecaacSChristoph Hellwig { 3378d41ecaacSChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPINTVL) 3379d41ecaacSChristoph Hellwig return -EINVAL; 3380d41ecaacSChristoph Hellwig 3381d41ecaacSChristoph Hellwig lock_sock(sk); 3382d41ecaacSChristoph Hellwig tcp_sk(sk)->keepalive_intvl = val * HZ; 3383d41ecaacSChristoph Hellwig 
release_sock(sk); 3384d41ecaacSChristoph Hellwig return 0; 3385d41ecaacSChristoph Hellwig } 3386d41ecaacSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepintvl); 3387d41ecaacSChristoph Hellwig 3388480aeb96SChristoph Hellwig int tcp_sock_set_keepcnt(struct sock *sk, int val) 3389480aeb96SChristoph Hellwig { 3390480aeb96SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPCNT) 3391480aeb96SChristoph Hellwig return -EINVAL; 3392480aeb96SChristoph Hellwig 3393480aeb96SChristoph Hellwig lock_sock(sk); 3394480aeb96SChristoph Hellwig tcp_sk(sk)->keepalive_probes = val; 3395480aeb96SChristoph Hellwig release_sock(sk); 3396480aeb96SChristoph Hellwig return 0; 3397480aeb96SChristoph Hellwig } 3398480aeb96SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepcnt); 3399480aeb96SChristoph Hellwig 3400cb811109SPrankur gupta int tcp_set_window_clamp(struct sock *sk, int val) 3401cb811109SPrankur gupta { 3402cb811109SPrankur gupta struct tcp_sock *tp = tcp_sk(sk); 3403cb811109SPrankur gupta 3404cb811109SPrankur gupta if (!val) { 3405cb811109SPrankur gupta if (sk->sk_state != TCP_CLOSE) 3406cb811109SPrankur gupta return -EINVAL; 3407cb811109SPrankur gupta tp->window_clamp = 0; 3408cb811109SPrankur gupta } else { 3409cb811109SPrankur gupta tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 3410cb811109SPrankur gupta SOCK_MIN_RCVBUF / 2 : val; 34113aa7857fSNeil Spring tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); 3412cb811109SPrankur gupta } 3413cb811109SPrankur gupta return 0; 3414cb811109SPrankur gupta } 3415cb811109SPrankur gupta 34161da177e4SLinus Torvalds /* 34171da177e4SLinus Torvalds * Socket option code for TCP. 34181da177e4SLinus Torvalds */ 34190c751f70SMartin KaFai Lau int do_tcp_setsockopt(struct sock *sk, int level, int optname, 3420d38d2b00SChristoph Hellwig sockptr_t optval, unsigned int optlen) 34211da177e4SLinus Torvalds { 34221da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 3423463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 34241e579caaSNikolay Borisov struct net *net = sock_net(sk); 34251da177e4SLinus Torvalds int val; 34261da177e4SLinus Torvalds int err = 0; 34271da177e4SLinus Torvalds 3428e56fb50fSWilliam Allen Simpson /* These are data/string values, all the others are ints */ 3429e56fb50fSWilliam Allen Simpson switch (optname) { 3430e56fb50fSWilliam Allen Simpson case TCP_CONGESTION: { 34315f8ef48dSStephen Hemminger char name[TCP_CA_NAME_MAX]; 34325f8ef48dSStephen Hemminger 34335f8ef48dSStephen Hemminger if (optlen < 1) 34345f8ef48dSStephen Hemminger return -EINVAL; 34355f8ef48dSStephen Hemminger 3436d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval, 34374fdb78d3SAndrew Morton min_t(long, TCP_CA_NAME_MAX-1, optlen)); 34385f8ef48dSStephen Hemminger if (val < 0) 34395f8ef48dSStephen Hemminger return -EFAULT; 34405f8ef48dSStephen Hemminger name[val] = 0; 34415f8ef48dSStephen Hemminger 3442cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 344384e5a0f2SMartin KaFai Lau err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), 3444cb388e7eSMartin KaFai Lau sockopt_ns_capable(sock_net(sk)->user_ns, 34458d650cdeSEric Dumazet CAP_NET_ADMIN)); 3446cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 34475f8ef48dSStephen Hemminger return err; 34485f8ef48dSStephen Hemminger } 3449734942ccSDave Watson case TCP_ULP: { 3450734942ccSDave Watson char name[TCP_ULP_NAME_MAX]; 3451734942ccSDave Watson 3452734942ccSDave Watson if (optlen < 1) 3453734942ccSDave Watson return -EINVAL; 3454734942ccSDave Watson 3455d38d2b00SChristoph Hellwig 
val = strncpy_from_sockptr(name, optval, 3456734942ccSDave Watson min_t(long, TCP_ULP_NAME_MAX - 1, 3457734942ccSDave Watson optlen)); 3458734942ccSDave Watson if (val < 0) 3459734942ccSDave Watson return -EFAULT; 3460734942ccSDave Watson name[val] = 0; 3461734942ccSDave Watson 3462cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 3463734942ccSDave Watson err = tcp_set_ulp(sk, name); 3464cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 3465734942ccSDave Watson return err; 3466734942ccSDave Watson } 34671fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: { 34680f1ce023SJason Baron __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; 34690f1ce023SJason Baron __u8 *backup_key = NULL; 34701fba70e5SYuchung Cheng 34710f1ce023SJason Baron /* Allow a backup key as well to facilitate key rotation 34720f1ce023SJason Baron * First key is the active one. 34730f1ce023SJason Baron */ 34740f1ce023SJason Baron if (optlen != TCP_FASTOPEN_KEY_LENGTH && 34750f1ce023SJason Baron optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) 34761fba70e5SYuchung Cheng return -EINVAL; 34771fba70e5SYuchung Cheng 3478d38d2b00SChristoph Hellwig if (copy_from_sockptr(key, optval, optlen)) 34791fba70e5SYuchung Cheng return -EFAULT; 34801fba70e5SYuchung Cheng 34810f1ce023SJason Baron if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) 34820f1ce023SJason Baron backup_key = key + TCP_FASTOPEN_KEY_LENGTH; 34830f1ce023SJason Baron 3484438ac880SArd Biesheuvel return tcp_fastopen_reset_cipher(net, sk, key, backup_key); 34851fba70e5SYuchung Cheng } 3486e56fb50fSWilliam Allen Simpson default: 3487e56fb50fSWilliam Allen Simpson /* fallthru */ 3488e56fb50fSWilliam Allen Simpson break; 3489ccbd6a5aSJoe Perches } 34905f8ef48dSStephen Hemminger 34911da177e4SLinus Torvalds if (optlen < sizeof(int)) 34921da177e4SLinus Torvalds return -EINVAL; 34931da177e4SLinus Torvalds 3494d38d2b00SChristoph Hellwig if (copy_from_sockptr(&val, optval, sizeof(val))) 34951da177e4SLinus Torvalds return -EFAULT; 34961da177e4SLinus Torvalds 3497cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 34981da177e4SLinus Torvalds 34991da177e4SLinus Torvalds switch (optname) { 35001da177e4SLinus Torvalds case TCP_MAXSEG: 35011da177e4SLinus Torvalds /* Values greater than interface MTU won't take effect. 
However 35021da177e4SLinus Torvalds * at the point when this call is done we typically don't yet 3503a777f715SRohit Chavan * know which interface is going to be used 3504a777f715SRohit Chavan */ 3505cfc62d87SGao Feng if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { 35061da177e4SLinus Torvalds err = -EINVAL; 35071da177e4SLinus Torvalds break; 35081da177e4SLinus Torvalds } 35091da177e4SLinus Torvalds tp->rx_opt.user_mss = val; 35101da177e4SLinus Torvalds break; 35111da177e4SLinus Torvalds 35121da177e4SLinus Torvalds case TCP_NODELAY: 351312abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, val); 35141da177e4SLinus Torvalds break; 35151da177e4SLinus Torvalds 351636e31b0aSAndreas Petlund case TCP_THIN_LINEAR_TIMEOUTS: 351736e31b0aSAndreas Petlund if (val < 0 || val > 1) 351836e31b0aSAndreas Petlund err = -EINVAL; 351936e31b0aSAndreas Petlund else 352036e31b0aSAndreas Petlund tp->thin_lto = val; 352136e31b0aSAndreas Petlund break; 352236e31b0aSAndreas Petlund 35237e380175SAndreas Petlund case TCP_THIN_DUPACK: 35247e380175SAndreas Petlund if (val < 0 || val > 1) 35257e380175SAndreas Petlund err = -EINVAL; 35267e380175SAndreas Petlund break; 35277e380175SAndreas Petlund 3528ee995283SPavel Emelyanov case TCP_REPAIR: 3529ee995283SPavel Emelyanov if (!tcp_can_repair_sock(sk)) 3530ee995283SPavel Emelyanov err = -EPERM; 353131048d7aSStefan Baranoff else if (val == TCP_REPAIR_ON) { 3532ee995283SPavel Emelyanov tp->repair = 1; 3533ee995283SPavel Emelyanov sk->sk_reuse = SK_FORCE_REUSE; 3534ee995283SPavel Emelyanov tp->repair_queue = TCP_NO_QUEUE; 353531048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF) { 3536ee995283SPavel Emelyanov tp->repair = 0; 3537ee995283SPavel Emelyanov sk->sk_reuse = SK_NO_REUSE; 3538ee995283SPavel Emelyanov tcp_send_window_probe(sk); 353931048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF_NO_WP) { 354031048d7aSStefan Baranoff tp->repair = 0; 354131048d7aSStefan Baranoff sk->sk_reuse = SK_NO_REUSE; 3542ee995283SPavel Emelyanov } else 3543ee995283SPavel Emelyanov err = -EINVAL; 3544ee995283SPavel Emelyanov 3545ee995283SPavel Emelyanov break; 3546ee995283SPavel Emelyanov 3547ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE: 3548ee995283SPavel Emelyanov if (!tp->repair) 3549ee995283SPavel Emelyanov err = -EPERM; 3550bf2acc94SEric Dumazet else if ((unsigned int)val < TCP_QUEUES_NR) 3551ee995283SPavel Emelyanov tp->repair_queue = val; 3552ee995283SPavel Emelyanov else 3553ee995283SPavel Emelyanov err = -EINVAL; 3554ee995283SPavel Emelyanov break; 3555ee995283SPavel Emelyanov 3556ee995283SPavel Emelyanov case TCP_QUEUE_SEQ: 35578811f4a9SEric Dumazet if (sk->sk_state != TCP_CLOSE) { 3558ee995283SPavel Emelyanov err = -EPERM; 35598811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_SEND_QUEUE) { 35608811f4a9SEric Dumazet if (!tcp_rtx_queue_empty(sk)) 35618811f4a9SEric Dumazet err = -EPERM; 35628811f4a9SEric Dumazet else 35630f317464SEric Dumazet WRITE_ONCE(tp->write_seq, val); 35648811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_RECV_QUEUE) { 35658811f4a9SEric Dumazet if (tp->rcv_nxt != tp->copied_seq) { 35668811f4a9SEric Dumazet err = -EPERM; 35678811f4a9SEric Dumazet } else { 3568dba7d9b8SEric Dumazet WRITE_ONCE(tp->rcv_nxt, val); 35696cd6cbf5SEric Dumazet WRITE_ONCE(tp->copied_seq, val); 35706cd6cbf5SEric Dumazet } 35718811f4a9SEric Dumazet } else { 3572ee995283SPavel Emelyanov err = -EINVAL; 35738811f4a9SEric Dumazet } 3574ee995283SPavel Emelyanov break; 3575ee995283SPavel Emelyanov 3576b139ba4eSPavel Emelyanov case TCP_REPAIR_OPTIONS: 
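/* Illustrative sketch, not kernel code: the repair-mode cases above
 * and the option handling below are driven by checkpoint/restore
 * tools such as CRIU, roughly as follows (the tcp_can_repair_sock()
 * check above requires CAP_NET_ADMIN):
 *
 *	int val = TCP_REPAIR_ON;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &val, sizeof(val));
 *	... dump or restore state via TCP_REPAIR_QUEUE, TCP_QUEUE_SEQ,
 *	    TCP_REPAIR_WINDOW and TCP_REPAIR_OPTIONS while frozen ...
 *	val = TCP_REPAIR_OFF;
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &val, sizeof(val));
 */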
3577b139ba4eSPavel Emelyanov if (!tp->repair) 3578b139ba4eSPavel Emelyanov err = -EINVAL; 35790c175da7SLu Wei else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) 3580d38d2b00SChristoph Hellwig err = tcp_repair_options_est(sk, optval, optlen); 3581b139ba4eSPavel Emelyanov else 3582b139ba4eSPavel Emelyanov err = -EPERM; 3583b139ba4eSPavel Emelyanov break; 3584b139ba4eSPavel Emelyanov 35851da177e4SLinus Torvalds case TCP_CORK: 3586db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, val); 35871da177e4SLinus Torvalds break; 35881da177e4SLinus Torvalds 35891da177e4SLinus Torvalds case TCP_KEEPIDLE: 3590aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val); 35911da177e4SLinus Torvalds break; 35921da177e4SLinus Torvalds case TCP_KEEPINTVL: 35931da177e4SLinus Torvalds if (val < 1 || val > MAX_TCP_KEEPINTVL) 35941da177e4SLinus Torvalds err = -EINVAL; 35951da177e4SLinus Torvalds else 35961da177e4SLinus Torvalds tp->keepalive_intvl = val * HZ; 35971da177e4SLinus Torvalds break; 35981da177e4SLinus Torvalds case TCP_KEEPCNT: 35991da177e4SLinus Torvalds if (val < 1 || val > MAX_TCP_KEEPCNT) 36001da177e4SLinus Torvalds err = -EINVAL; 36011da177e4SLinus Torvalds else 36021da177e4SLinus Torvalds tp->keepalive_probes = val; 36031da177e4SLinus Torvalds break; 36041da177e4SLinus Torvalds case TCP_SYNCNT: 36051da177e4SLinus Torvalds if (val < 1 || val > MAX_TCP_SYNCNT) 36061da177e4SLinus Torvalds err = -EINVAL; 36071da177e4SLinus Torvalds else 3608463c84b9SArnaldo Carvalho de Melo icsk->icsk_syn_retries = val; 36091da177e4SLinus Torvalds break; 36101da177e4SLinus Torvalds 3611cd8ae852SEric Dumazet case TCP_SAVE_SYN: 3612267cf9faSMartin KaFai Lau /* 0: disable, 1: enable, 2: start from ether_header */ 3613267cf9faSMartin KaFai Lau if (val < 0 || val > 2) 3614cd8ae852SEric Dumazet err = -EINVAL; 3615cd8ae852SEric Dumazet else 3616cd8ae852SEric Dumazet tp->save_syn = val; 3617cd8ae852SEric Dumazet break; 3618cd8ae852SEric Dumazet 36191da177e4SLinus Torvalds case TCP_LINGER2: 36201da177e4SLinus Torvalds if (val < 0) 36211da177e4SLinus Torvalds tp->linger2 = -1; 3622f0628c52SCambda Zhu else if (val > TCP_FIN_TIMEOUT_MAX / HZ) 3623f0628c52SCambda Zhu tp->linger2 = TCP_FIN_TIMEOUT_MAX; 36241da177e4SLinus Torvalds else 36251da177e4SLinus Torvalds tp->linger2 = val * HZ; 36261da177e4SLinus Torvalds break; 36271da177e4SLinus Torvalds 36281da177e4SLinus Torvalds case TCP_DEFER_ACCEPT: 3629b103cf34SJulian Anastasov /* Translate value in seconds to number of retransmits */ 3630b103cf34SJulian Anastasov icsk->icsk_accept_queue.rskq_defer_accept = 3631b103cf34SJulian Anastasov secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 3632b103cf34SJulian Anastasov TCP_RTO_MAX / HZ); 36331da177e4SLinus Torvalds break; 36341da177e4SLinus Torvalds 36351da177e4SLinus Torvalds case TCP_WINDOW_CLAMP: 3636cb811109SPrankur gupta err = tcp_set_window_clamp(sk, val); 36371da177e4SLinus Torvalds break; 36381da177e4SLinus Torvalds 36391da177e4SLinus Torvalds case TCP_QUICKACK: 3640ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val); 36411da177e4SLinus Torvalds break; 36421da177e4SLinus Torvalds 3643cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 3644cfb6eeb4SYOSHIFUJI Hideaki case TCP_MD5SIG: 36458917a777SIvan Delalande case TCP_MD5SIG_EXT: 3646d38d2b00SChristoph Hellwig err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 3647cfb6eeb4SYOSHIFUJI Hideaki break; 3648cfb6eeb4SYOSHIFUJI Hideaki #endif 3649dca43c75SJerry Chu case TCP_USER_TIMEOUT: 3650b248230cSYuchung Cheng /* Cap the max time in ms TCP will retry 
or probe the window 3651dca43c75SJerry Chu * before giving up and aborting (ETIMEDOUT) a connection. 3652dca43c75SJerry Chu */ 365342493570SHangbin Liu if (val < 0) 365442493570SHangbin Liu err = -EINVAL; 365542493570SHangbin Liu else 36569bcc66e1SJon Maxwell icsk->icsk_user_timeout = val; 3657dca43c75SJerry Chu break; 36588336886fSJerry Chu 36598336886fSJerry Chu case TCP_FASTOPEN: 36608336886fSJerry Chu if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 3661dfea2aa6SChristoph Paasch TCPF_LISTEN))) { 366243713848SHaishuang Yan tcp_fastopen_init_key_once(net); 3663dfea2aa6SChristoph Paasch 36640536fcc0SEric Dumazet fastopen_queue_tune(sk, val); 3665dfea2aa6SChristoph Paasch } else { 36668336886fSJerry Chu err = -EINVAL; 3667dfea2aa6SChristoph Paasch } 36688336886fSJerry Chu break; 366919f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT: 367019f6d3f3SWei Wang if (val > 1 || val < 0) { 367119f6d3f3SWei Wang err = -EINVAL; 36725a542133SKuniyuki Iwashima } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & 36735a542133SKuniyuki Iwashima TFO_CLIENT_ENABLE) { 367419f6d3f3SWei Wang if (sk->sk_state == TCP_CLOSE) 367519f6d3f3SWei Wang tp->fastopen_connect = val; 367619f6d3f3SWei Wang else 367719f6d3f3SWei Wang err = -EINVAL; 367819f6d3f3SWei Wang } else { 367919f6d3f3SWei Wang err = -EOPNOTSUPP; 368019f6d3f3SWei Wang } 368119f6d3f3SWei Wang break; 368271c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE: 368371c02379SChristoph Paasch if (val > 1 || val < 0) 368471c02379SChristoph Paasch err = -EINVAL; 368571c02379SChristoph Paasch else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 368671c02379SChristoph Paasch err = -EINVAL; 368771c02379SChristoph Paasch else 368871c02379SChristoph Paasch tp->fastopen_no_cookie = val; 368971c02379SChristoph Paasch break; 369093be6ce0SAndrey Vagin case TCP_TIMESTAMP: 369193be6ce0SAndrey Vagin if (!tp->repair) 369293be6ce0SAndrey Vagin err = -EPERM; 369393be6ce0SAndrey Vagin else 36949a568de4SEric Dumazet tp->tsoffset = val - tcp_time_stamp_raw(); 369593be6ce0SAndrey Vagin break; 3696b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: 3697b1ed4c4fSAndrey Vagin err = tcp_repair_set_window(tp, optval, optlen); 3698b1ed4c4fSAndrey Vagin break; 3699c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT: 3700c9bee3b7SEric Dumazet tp->notsent_lowat = val; 3701c9bee3b7SEric Dumazet sk->sk_write_space(sk); 3702c9bee3b7SEric Dumazet break; 3703b75eba76SSoheil Hassas Yeganeh case TCP_INQ: 3704b75eba76SSoheil Hassas Yeganeh if (val > 1 || val < 0) 3705b75eba76SSoheil Hassas Yeganeh err = -EINVAL; 3706b75eba76SSoheil Hassas Yeganeh else 3707b75eba76SSoheil Hassas Yeganeh tp->recvmsg_inq = val; 3708b75eba76SSoheil Hassas Yeganeh break; 3709a842fe14SEric Dumazet case TCP_TX_DELAY: 3710a842fe14SEric Dumazet if (val) 3711a842fe14SEric Dumazet tcp_enable_tx_delay(); 3712a842fe14SEric Dumazet tp->tcp_tx_delay = val; 3713a842fe14SEric Dumazet break; 37141da177e4SLinus Torvalds default: 37151da177e4SLinus Torvalds err = -ENOPROTOOPT; 37161da177e4SLinus Torvalds break; 37173ff50b79SStephen Hemminger } 37183ff50b79SStephen Hemminger 3719cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 37201da177e4SLinus Torvalds return err; 37211da177e4SLinus Torvalds } 37221da177e4SLinus Torvalds 3723a7b75c5aSChristoph Hellwig int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 3724b7058842SDavid S. 
Miller unsigned int optlen) 37253fdadf7dSDmitry Mishin { 3726cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 37273fdadf7dSDmitry Mishin 37283fdadf7dSDmitry Mishin if (level != SOL_TCP) 3729f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 3730f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname, 37313fdadf7dSDmitry Mishin optval, optlen); 3732a7b75c5aSChristoph Hellwig return do_tcp_setsockopt(sk, level, optname, optval, optlen); 37333fdadf7dSDmitry Mishin } 37344bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_setsockopt); 37353fdadf7dSDmitry Mishin 3736efd90174SFrancis Yan static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 3737efd90174SFrancis Yan struct tcp_info *info) 3738efd90174SFrancis Yan { 3739efd90174SFrancis Yan u64 stats[__TCP_CHRONO_MAX], total = 0; 3740efd90174SFrancis Yan enum tcp_chrono i; 3741efd90174SFrancis Yan 3742efd90174SFrancis Yan for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { 3743efd90174SFrancis Yan stats[i] = tp->chrono_stat[i - 1]; 3744efd90174SFrancis Yan if (i == tp->chrono_type) 3745628174ccSEric Dumazet stats[i] += tcp_jiffies32 - tp->chrono_start; 3746efd90174SFrancis Yan stats[i] *= USEC_PER_SEC / HZ; 3747efd90174SFrancis Yan total += stats[i]; 3748efd90174SFrancis Yan } 3749efd90174SFrancis Yan 3750efd90174SFrancis Yan info->tcpi_busy_time = total; 3751efd90174SFrancis Yan info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; 3752efd90174SFrancis Yan info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; 3753efd90174SFrancis Yan } 3754efd90174SFrancis Yan 37551da177e4SLinus Torvalds /* Return information about state of tcp endpoint in API format. */ 37560df48c26SEric Dumazet void tcp_get_info(struct sock *sk, struct tcp_info *info) 37571da177e4SLinus Torvalds { 375835ac838aSCraig Gallek const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 3759463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 376076a9ebe8SEric Dumazet unsigned long rate; 37610263598cSWei Wang u32 now; 3762ff5d7497SEric Dumazet u64 rate64; 376367db3e4bSEric Dumazet bool slow; 37641da177e4SLinus Torvalds 37651da177e4SLinus Torvalds memset(info, 0, sizeof(*info)); 376635ac838aSCraig Gallek if (sk->sk_type != SOCK_STREAM) 376735ac838aSCraig Gallek return; 37681da177e4SLinus Torvalds 3769986ffdfdSYafang Shao info->tcpi_state = inet_sk_state_load(sk); 377000fd38d9SEric Dumazet 3771ccbf3bfaSEric Dumazet /* Report meaningful fields for all TCP states, including listeners */ 3772ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_pacing_rate); 377376a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL; 3774f522a5fcSEric Dumazet info->tcpi_pacing_rate = rate64; 3775ccbf3bfaSEric Dumazet 3776ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_max_pacing_rate); 377776a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 3778f522a5fcSEric Dumazet info->tcpi_max_pacing_rate = rate64; 3779ccbf3bfaSEric Dumazet 3780ccbf3bfaSEric Dumazet info->tcpi_reordering = tp->reordering; 378140570375SEric Dumazet info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); 3782ccbf3bfaSEric Dumazet 3783ccbf3bfaSEric Dumazet if (info->tcpi_state == TCP_LISTEN) { 3784ccbf3bfaSEric Dumazet /* listeners aliased fields : 3785ccbf3bfaSEric Dumazet * tcpi_unacked -> Number of children ready for accept() 3786ccbf3bfaSEric Dumazet * tcpi_sacked -> max backlog 3787ccbf3bfaSEric Dumazet */ 3788288efe86SEric Dumazet info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); 3789099ecf59SEric Dumazet info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); 3790ccbf3bfaSEric Dumazet return; 3791ccbf3bfaSEric Dumazet } 3792b369e7fdSEric Dumazet 3793b369e7fdSEric Dumazet slow = lock_sock_fast(sk); 3794b369e7fdSEric Dumazet 37956687e988SArnaldo Carvalho de Melo info->tcpi_ca_state = icsk->icsk_ca_state; 3796463c84b9SArnaldo Carvalho de Melo info->tcpi_retransmits = icsk->icsk_retransmits; 37976687e988SArnaldo Carvalho de Melo info->tcpi_probes = icsk->icsk_probes_out; 3798463c84b9SArnaldo Carvalho de Melo info->tcpi_backoff = icsk->icsk_backoff; 37991da177e4SLinus Torvalds 38001da177e4SLinus Torvalds if (tp->rx_opt.tstamp_ok) 38011da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 3802e60402d0SIlpo Järvinen if (tcp_is_sack(tp)) 38031da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_SACK; 38041da177e4SLinus Torvalds if (tp->rx_opt.wscale_ok) { 38051da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_WSCALE; 38061da177e4SLinus Torvalds info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 38071da177e4SLinus Torvalds info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 38081da177e4SLinus Torvalds } 38091da177e4SLinus Torvalds 38101da177e4SLinus Torvalds if (tp->ecn_flags & TCP_ECN_OK) 38111da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_ECN; 3812b5c5693bSEric Dumazet if (tp->ecn_flags & TCP_ECN_SEEN) 3813b5c5693bSEric Dumazet info->tcpi_options |= TCPI_OPT_ECN_SEEN; 38146f73601eSYuchung Cheng if (tp->syn_data_acked) 38156f73601eSYuchung Cheng info->tcpi_options |= TCPI_OPT_SYN_DATA; 38161da177e4SLinus Torvalds 3817463c84b9SArnaldo Carvalho de Melo info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 3818463c84b9SArnaldo Carvalho de Melo info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 3819c1b4a7e6SDavid S. 
Miller info->tcpi_snd_mss = tp->mss_cache; 3820463c84b9SArnaldo Carvalho de Melo info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 38211da177e4SLinus Torvalds 38221da177e4SLinus Torvalds info->tcpi_unacked = tp->packets_out; 38231da177e4SLinus Torvalds info->tcpi_sacked = tp->sacked_out; 3824ccbf3bfaSEric Dumazet 38251da177e4SLinus Torvalds info->tcpi_lost = tp->lost_out; 38261da177e4SLinus Torvalds info->tcpi_retrans = tp->retrans_out; 38271da177e4SLinus Torvalds 3828d635fbe2SEric Dumazet now = tcp_jiffies32; 38291da177e4SLinus Torvalds info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 3830463c84b9SArnaldo Carvalho de Melo info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 38311da177e4SLinus Torvalds info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 38321da177e4SLinus Torvalds 3833d83d8461SArnaldo Carvalho de Melo info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 38341da177e4SLinus Torvalds info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 3835740b0f18SEric Dumazet info->tcpi_rtt = tp->srtt_us >> 3; 3836740b0f18SEric Dumazet info->tcpi_rttvar = tp->mdev_us >> 2; 38371da177e4SLinus Torvalds info->tcpi_snd_ssthresh = tp->snd_ssthresh; 38381da177e4SLinus Torvalds info->tcpi_advmss = tp->advmss; 38391da177e4SLinus Torvalds 3840645f4c6fSEric Dumazet info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; 38411da177e4SLinus Torvalds info->tcpi_rcv_space = tp->rcvq_space.space; 38421da177e4SLinus Torvalds 38431da177e4SLinus Torvalds info->tcpi_total_retrans = tp->total_retrans; 3844977cb0ecSEric Dumazet 3845f522a5fcSEric Dumazet info->tcpi_bytes_acked = tp->bytes_acked; 3846f522a5fcSEric Dumazet info->tcpi_bytes_received = tp->bytes_received; 384767db3e4bSEric Dumazet info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); 3848efd90174SFrancis Yan tcp_get_info_chrono_stats(tp, info); 384967db3e4bSEric Dumazet 38502efd055cSMarcelo Ricardo Leitner info->tcpi_segs_out = tp->segs_out; 38510307a0b7SEric Dumazet 38520307a0b7SEric Dumazet /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ 38530307a0b7SEric Dumazet info->tcpi_segs_in = READ_ONCE(tp->segs_in); 38540307a0b7SEric Dumazet info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); 3855cd9b2660SEric Dumazet 3856cd9b2660SEric Dumazet info->tcpi_min_rtt = tcp_min_rtt(tp); 3857a44d6eacSMartin KaFai Lau info->tcpi_data_segs_out = tp->data_segs_out; 3858eb8329e0SYuchung Cheng 3859eb8329e0SYuchung Cheng info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 
1 : 0; 38600263598cSWei Wang rate64 = tcp_compute_delivery_rate(tp); 38610263598cSWei Wang if (rate64) 3862f522a5fcSEric Dumazet info->tcpi_delivery_rate = rate64; 3863feb5f2ecSYuchung Cheng info->tcpi_delivered = tp->delivered; 3864feb5f2ecSYuchung Cheng info->tcpi_delivered_ce = tp->delivered_ce; 3865ba113c3aSWei Wang info->tcpi_bytes_sent = tp->bytes_sent; 3866fb31c9b9SWei Wang info->tcpi_bytes_retrans = tp->bytes_retrans; 38677e10b655SWei Wang info->tcpi_dsack_dups = tp->dsack_dups; 38687ec65372SWei Wang info->tcpi_reord_seen = tp->reord_seen; 3869f9af2dbbSThomas Higdon info->tcpi_rcv_ooopack = tp->rcv_ooopack; 38708f7baad7SThomas Higdon info->tcpi_snd_wnd = tp->snd_wnd; 387171fc7047SMubashir Adnan Qureshi info->tcpi_rcv_wnd = tp->rcv_wnd; 387271fc7047SMubashir Adnan Qureshi info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash; 387348027478SJason Baron info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; 3874b369e7fdSEric Dumazet unlock_sock_fast(sk, slow); 38751da177e4SLinus Torvalds } 38761da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info); 38771da177e4SLinus Torvalds 3878984988aaSWei Wang static size_t tcp_opt_stats_get_size(void) 3879984988aaSWei Wang { 3880984988aaSWei Wang return 3881984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ 3882984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ 3883984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ 3884984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ 3885984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ 3886984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ 3887984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ 3888984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ 3889984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ 3890984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ 3891984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ 3892984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ 3893984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ 3894984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ 3895984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ 3896984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ 3897984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ 3898ba113c3aSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ 3899fb31c9b9SWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ 39007e10b655SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ 39017ec65372SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ 3902e8bd8fcaSYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ 390332efcc06SAbdul Kabbani nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ 3904e08ab0b3SYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ 390548040793SYousuk Seung nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */ 3906e7ed11eeSYousuk Seung nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */ 390729c1c446SMubashir Adnan Qureshi nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */ 3908984988aaSWei Wang 0; 3909984988aaSWei Wang } 3910984988aaSWei Wang 3911e7ed11eeSYousuk Seung /* Returns TTL or hop limit of an incoming packet from skb. 
*/ 3912e7ed11eeSYousuk Seung static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb) 3913e7ed11eeSYousuk Seung { 3914e7ed11eeSYousuk Seung if (skb->protocol == htons(ETH_P_IP)) 3915e7ed11eeSYousuk Seung return ip_hdr(skb)->ttl; 3916e7ed11eeSYousuk Seung else if (skb->protocol == htons(ETH_P_IPV6)) 3917e7ed11eeSYousuk Seung return ipv6_hdr(skb)->hop_limit; 3918e7ed11eeSYousuk Seung else 3919e7ed11eeSYousuk Seung return 0; 3920e7ed11eeSYousuk Seung } 3921e7ed11eeSYousuk Seung 392248040793SYousuk Seung struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, 3923e7ed11eeSYousuk Seung const struct sk_buff *orig_skb, 3924e7ed11eeSYousuk Seung const struct sk_buff *ack_skb) 39251c885808SFrancis Yan { 39261c885808SFrancis Yan const struct tcp_sock *tp = tcp_sk(sk); 39271c885808SFrancis Yan struct sk_buff *stats; 39281c885808SFrancis Yan struct tcp_info info; 392976a9ebe8SEric Dumazet unsigned long rate; 3930bb7c19f9SWei Wang u64 rate64; 39311c885808SFrancis Yan 3932984988aaSWei Wang stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); 39331c885808SFrancis Yan if (!stats) 39341c885808SFrancis Yan return NULL; 39351c885808SFrancis Yan 39361c885808SFrancis Yan tcp_get_info_chrono_stats(tp, &info); 39371c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_BUSY, 39381c885808SFrancis Yan info.tcpi_busy_time, TCP_NLA_PAD); 39391c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, 39401c885808SFrancis Yan info.tcpi_rwnd_limited, TCP_NLA_PAD); 39411c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, 39421c885808SFrancis Yan info.tcpi_sndbuf_limited, TCP_NLA_PAD); 39437e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, 39447e98102fSYuchung Cheng tp->data_segs_out, TCP_NLA_PAD); 39457e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, 39467e98102fSYuchung Cheng tp->total_retrans, TCP_NLA_PAD); 3947bb7c19f9SWei Wang 3948bb7c19f9SWei Wang rate = READ_ONCE(sk->sk_pacing_rate); 394976a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 3950bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); 3951bb7c19f9SWei Wang 3952bb7c19f9SWei Wang rate64 = tcp_compute_delivery_rate(tp); 3953bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); 3954bb7c19f9SWei Wang 395540570375SEric Dumazet nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); 3956bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); 3957bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); 3958bb7c19f9SWei Wang 3959bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); 3960bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); 39617156d194SYousuk Seung nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); 3962feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); 3963feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); 396487ecc95dSPriyaranjan Jha 396587ecc95dSPriyaranjan Jha nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); 3966be631892SPriyaranjan Jha nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); 3967feb5f2ecSYuchung Cheng 3968ba113c3aSWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, 3969ba113c3aSWei Wang TCP_NLA_PAD); 3970fb31c9b9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, 3971fb31c9b9SWei Wang TCP_NLA_PAD); 39727e10b655SWei Wang nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); 39737ec65372SWei Wang nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); 3974e8bd8fcaSYousuk Seung nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); 397532efcc06SAbdul Kabbani nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); 3976e08ab0b3SYousuk Seung nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, 3977e08ab0b3SYousuk Seung max_t(int, 0, tp->write_seq - tp->snd_nxt)); 397848040793SYousuk Seung nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, 397948040793SYousuk Seung TCP_NLA_PAD); 3980e7ed11eeSYousuk Seung if (ack_skb) 3981e7ed11eeSYousuk Seung nla_put_u8(stats, TCP_NLA_TTL, 3982e7ed11eeSYousuk Seung tcp_skb_ttl_or_hop_limit(ack_skb)); 3983ba113c3aSWei Wang 398429c1c446SMubashir Adnan Qureshi nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash); 39851c885808SFrancis Yan return stats; 39861c885808SFrancis Yan } 39871c885808SFrancis Yan 3988273b7f0fSMartin KaFai Lau int do_tcp_getsockopt(struct sock *sk, int level, 398934704ef0SMartin KaFai Lau int optname, sockptr_t optval, sockptr_t optlen) 39901da177e4SLinus Torvalds { 3991295f7324SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 39921da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 39936fa25166SNikolay Borisov struct net *net = sock_net(sk); 39941da177e4SLinus Torvalds int val, len; 39951da177e4SLinus Torvalds 399634704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 39971da177e4SLinus Torvalds return -EFAULT; 39981da177e4SLinus Torvalds 39991da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(int)); 40001da177e4SLinus Torvalds 40011da177e4SLinus Torvalds if (len < 0) 40021da177e4SLinus Torvalds return -EINVAL; 40031da177e4SLinus Torvalds 40041da177e4SLinus Torvalds switch (optname) { 40051da177e4SLinus Torvalds case TCP_MAXSEG: 4006c1b4a7e6SDavid S. 
Miller val = tp->mss_cache; 400734dfde4aSCambda Zhu if (tp->rx_opt.user_mss && 400834dfde4aSCambda Zhu ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 40091da177e4SLinus Torvalds val = tp->rx_opt.user_mss; 40105e6a3ce6SPavel Emelyanov if (tp->repair) 40115e6a3ce6SPavel Emelyanov val = tp->rx_opt.mss_clamp; 40121da177e4SLinus Torvalds break; 40131da177e4SLinus Torvalds case TCP_NODELAY: 40141da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_OFF); 40151da177e4SLinus Torvalds break; 40161da177e4SLinus Torvalds case TCP_CORK: 40171da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_CORK); 40181da177e4SLinus Torvalds break; 40191da177e4SLinus Torvalds case TCP_KEEPIDLE: 4020df19a626SEric Dumazet val = keepalive_time_when(tp) / HZ; 40211da177e4SLinus Torvalds break; 40221da177e4SLinus Torvalds case TCP_KEEPINTVL: 4023df19a626SEric Dumazet val = keepalive_intvl_when(tp) / HZ; 40241da177e4SLinus Torvalds break; 40251da177e4SLinus Torvalds case TCP_KEEPCNT: 4026df19a626SEric Dumazet val = keepalive_probes(tp); 40271da177e4SLinus Torvalds break; 40281da177e4SLinus Torvalds case TCP_SYNCNT: 402920a3b1c0SKuniyuki Iwashima val = icsk->icsk_syn_retries ? : 403020a3b1c0SKuniyuki Iwashima READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); 40311da177e4SLinus Torvalds break; 40321da177e4SLinus Torvalds case TCP_LINGER2: 40331da177e4SLinus Torvalds val = tp->linger2; 40341da177e4SLinus Torvalds if (val >= 0) 403539e24435SKuniyuki Iwashima val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; 40361da177e4SLinus Torvalds break; 40371da177e4SLinus Torvalds case TCP_DEFER_ACCEPT: 4038b103cf34SJulian Anastasov val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, 4039b103cf34SJulian Anastasov TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); 40401da177e4SLinus Torvalds break; 40411da177e4SLinus Torvalds case TCP_WINDOW_CLAMP: 40421da177e4SLinus Torvalds val = tp->window_clamp; 40431da177e4SLinus Torvalds break; 40441da177e4SLinus Torvalds case TCP_INFO: { 40451da177e4SLinus Torvalds struct tcp_info info; 40461da177e4SLinus Torvalds 404734704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 40481da177e4SLinus Torvalds return -EFAULT; 40491da177e4SLinus Torvalds 40501da177e4SLinus Torvalds tcp_get_info(sk, &info); 40511da177e4SLinus Torvalds 40521da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(info)); 405334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 40541da177e4SLinus Torvalds return -EFAULT; 405534704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len)) 40561da177e4SLinus Torvalds return -EFAULT; 40571da177e4SLinus Torvalds return 0; 40581da177e4SLinus Torvalds } 40596e9250f5SEric Dumazet case TCP_CC_INFO: { 40606e9250f5SEric Dumazet const struct tcp_congestion_ops *ca_ops; 40616e9250f5SEric Dumazet union tcp_cc_info info; 40626e9250f5SEric Dumazet size_t sz = 0; 40636e9250f5SEric Dumazet int attr; 40646e9250f5SEric Dumazet 406534704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 40666e9250f5SEric Dumazet return -EFAULT; 40676e9250f5SEric Dumazet 40686e9250f5SEric Dumazet ca_ops = icsk->icsk_ca_ops; 40696e9250f5SEric Dumazet if (ca_ops && ca_ops->get_info) 40706e9250f5SEric Dumazet sz = ca_ops->get_info(sk, ~0U, &attr, &info); 40716e9250f5SEric Dumazet 40726e9250f5SEric Dumazet len = min_t(unsigned int, len, sz); 407334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 40746e9250f5SEric Dumazet return -EFAULT; 407534704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len)) 
40766e9250f5SEric Dumazet return -EFAULT; 40776e9250f5SEric Dumazet return 0; 40786e9250f5SEric Dumazet } 40791da177e4SLinus Torvalds case TCP_QUICKACK: 408031954cd8SWei Wang val = !inet_csk_in_pingpong_mode(sk); 40811da177e4SLinus Torvalds break; 40825f8ef48dSStephen Hemminger 40835f8ef48dSStephen Hemminger case TCP_CONGESTION: 408434704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 40855f8ef48dSStephen Hemminger return -EFAULT; 40865f8ef48dSStephen Hemminger len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 408734704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 40885f8ef48dSStephen Hemminger return -EFAULT; 408934704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) 40905f8ef48dSStephen Hemminger return -EFAULT; 40915f8ef48dSStephen Hemminger return 0; 4092e56fb50fSWilliam Allen Simpson 4093734942ccSDave Watson case TCP_ULP: 409434704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 4095734942ccSDave Watson return -EFAULT; 4096734942ccSDave Watson len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); 4097d97af30fSDave Watson if (!icsk->icsk_ulp_ops) { 409834704ef0SMartin KaFai Lau len = 0; 409934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4100d97af30fSDave Watson return -EFAULT; 4101d97af30fSDave Watson return 0; 4102d97af30fSDave Watson } 410334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4104734942ccSDave Watson return -EFAULT; 410534704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) 4106734942ccSDave Watson return -EFAULT; 4107734942ccSDave Watson return 0; 4108734942ccSDave Watson 41091fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: { 4110f19008e6SJason Baron u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; 4111f19008e6SJason Baron unsigned int key_len; 41121fba70e5SYuchung Cheng 411334704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 41141fba70e5SYuchung Cheng return -EFAULT; 41151fba70e5SYuchung Cheng 4116f19008e6SJason Baron key_len = tcp_fastopen_get_cipher(net, icsk, key) * 41170f1ce023SJason Baron TCP_FASTOPEN_KEY_LENGTH; 41180f1ce023SJason Baron len = min_t(unsigned int, len, key_len); 411934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 41201fba70e5SYuchung Cheng return -EFAULT; 412134704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, key, len)) 41221fba70e5SYuchung Cheng return -EFAULT; 41231fba70e5SYuchung Cheng return 0; 41241fba70e5SYuchung Cheng } 41253c0fef0bSJosh Hunt case TCP_THIN_LINEAR_TIMEOUTS: 41263c0fef0bSJosh Hunt val = tp->thin_lto; 41273c0fef0bSJosh Hunt break; 41284a7f6009SYuchung Cheng 41293c0fef0bSJosh Hunt case TCP_THIN_DUPACK: 41304a7f6009SYuchung Cheng val = 0; 41313c0fef0bSJosh Hunt break; 4132dca43c75SJerry Chu 4133ee995283SPavel Emelyanov case TCP_REPAIR: 4134ee995283SPavel Emelyanov val = tp->repair; 4135ee995283SPavel Emelyanov break; 4136ee995283SPavel Emelyanov 4137ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE: 4138ee995283SPavel Emelyanov if (tp->repair) 4139ee995283SPavel Emelyanov val = tp->repair_queue; 4140ee995283SPavel Emelyanov else 4141ee995283SPavel Emelyanov return -EINVAL; 4142ee995283SPavel Emelyanov break; 4143ee995283SPavel Emelyanov 4144b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: { 4145b1ed4c4fSAndrey Vagin struct tcp_repair_window opt; 4146b1ed4c4fSAndrey Vagin 414734704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 4148b1ed4c4fSAndrey Vagin return -EFAULT; 4149b1ed4c4fSAndrey 
Vagin 4150b1ed4c4fSAndrey Vagin if (len != sizeof(opt)) 4151b1ed4c4fSAndrey Vagin return -EINVAL; 4152b1ed4c4fSAndrey Vagin 4153b1ed4c4fSAndrey Vagin if (!tp->repair) 4154b1ed4c4fSAndrey Vagin return -EPERM; 4155b1ed4c4fSAndrey Vagin 4156b1ed4c4fSAndrey Vagin opt.snd_wl1 = tp->snd_wl1; 4157b1ed4c4fSAndrey Vagin opt.snd_wnd = tp->snd_wnd; 4158b1ed4c4fSAndrey Vagin opt.max_window = tp->max_window; 4159b1ed4c4fSAndrey Vagin opt.rcv_wnd = tp->rcv_wnd; 4160b1ed4c4fSAndrey Vagin opt.rcv_wup = tp->rcv_wup; 4161b1ed4c4fSAndrey Vagin 416234704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &opt, len)) 4163b1ed4c4fSAndrey Vagin return -EFAULT; 4164b1ed4c4fSAndrey Vagin return 0; 4165b1ed4c4fSAndrey Vagin } 4166ee995283SPavel Emelyanov case TCP_QUEUE_SEQ: 4167ee995283SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE) 4168ee995283SPavel Emelyanov val = tp->write_seq; 4169ee995283SPavel Emelyanov else if (tp->repair_queue == TCP_RECV_QUEUE) 4170ee995283SPavel Emelyanov val = tp->rcv_nxt; 4171ee995283SPavel Emelyanov else 4172ee995283SPavel Emelyanov return -EINVAL; 4173ee995283SPavel Emelyanov break; 4174ee995283SPavel Emelyanov 4175dca43c75SJerry Chu case TCP_USER_TIMEOUT: 41769bcc66e1SJon Maxwell val = icsk->icsk_user_timeout; 4177dca43c75SJerry Chu break; 41781536e285SKenjiro Nakayama 41791536e285SKenjiro Nakayama case TCP_FASTOPEN: 41800536fcc0SEric Dumazet val = icsk->icsk_accept_queue.fastopenq.max_qlen; 41811536e285SKenjiro Nakayama break; 41821536e285SKenjiro Nakayama 418319f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT: 418419f6d3f3SWei Wang val = tp->fastopen_connect; 418519f6d3f3SWei Wang break; 418619f6d3f3SWei Wang 418771c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE: 418871c02379SChristoph Paasch val = tp->fastopen_no_cookie; 418971c02379SChristoph Paasch break; 419071c02379SChristoph Paasch 4191a842fe14SEric Dumazet case TCP_TX_DELAY: 4192a842fe14SEric Dumazet val = tp->tcp_tx_delay; 4193a842fe14SEric Dumazet break; 4194a842fe14SEric Dumazet 419593be6ce0SAndrey Vagin case TCP_TIMESTAMP: 41969a568de4SEric Dumazet val = tcp_time_stamp_raw() + tp->tsoffset; 419793be6ce0SAndrey Vagin break; 4198c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT: 4199c9bee3b7SEric Dumazet val = tp->notsent_lowat; 4200c9bee3b7SEric Dumazet break; 4201b75eba76SSoheil Hassas Yeganeh case TCP_INQ: 4202b75eba76SSoheil Hassas Yeganeh val = tp->recvmsg_inq; 4203b75eba76SSoheil Hassas Yeganeh break; 4204cd8ae852SEric Dumazet case TCP_SAVE_SYN: 4205cd8ae852SEric Dumazet val = tp->save_syn; 4206cd8ae852SEric Dumazet break; 4207cd8ae852SEric Dumazet case TCP_SAVED_SYN: { 420834704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 4209cd8ae852SEric Dumazet return -EFAULT; 4210cd8ae852SEric Dumazet 4211d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk); 4212cd8ae852SEric Dumazet if (tp->saved_syn) { 421370a217f1SMartin KaFai Lau if (len < tcp_saved_syn_len(tp->saved_syn)) { 421434704ef0SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn); 421534704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4216d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4217aea0929eSEric B Munson return -EFAULT; 4218aea0929eSEric B Munson } 4219d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4220aea0929eSEric B Munson return -EINVAL; 4221aea0929eSEric B Munson } 422270a217f1SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn); 422334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4224d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4225cd8ae852SEric Dumazet 
return -EFAULT; 4226cd8ae852SEric Dumazet } 422734704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { 4228d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4229cd8ae852SEric Dumazet return -EFAULT; 4230cd8ae852SEric Dumazet } 4231cd8ae852SEric Dumazet tcp_saved_syn_free(tp); 4232d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4233cd8ae852SEric Dumazet } else { 4234d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4235cd8ae852SEric Dumazet len = 0; 423634704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4237cd8ae852SEric Dumazet return -EFAULT; 4238cd8ae852SEric Dumazet } 4239cd8ae852SEric Dumazet return 0; 4240cd8ae852SEric Dumazet } 424105255b82SEric Dumazet #ifdef CONFIG_MMU 424205255b82SEric Dumazet case TCP_ZEROCOPY_RECEIVE: { 42437eeba170SArjun Roy struct scm_timestamping_internal tss; 4244e0fecb28SArjun Roy struct tcp_zerocopy_receive zc = {}; 424505255b82SEric Dumazet int err; 424605255b82SEric Dumazet 424734704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 424805255b82SEric Dumazet return -EFAULT; 42492107d45fSArjun Roy if (len < 0 || 42502107d45fSArjun Roy len < offsetofend(struct tcp_zerocopy_receive, length)) 425105255b82SEric Dumazet return -EINVAL; 42523c5a2fd0SArjun Roy if (unlikely(len > sizeof(zc))) { 425334704ef0SMartin KaFai Lau err = check_zeroed_sockptr(optval, sizeof(zc), 42543c5a2fd0SArjun Roy len - sizeof(zc)); 42553c5a2fd0SArjun Roy if (err < 1) 42563c5a2fd0SArjun Roy return err == 0 ? -EINVAL : err; 4257c8856c05SArjun Roy len = sizeof(zc); 425834704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 42590b7f41f6SArjun Roy return -EFAULT; 42600b7f41f6SArjun Roy } 426134704ef0SMartin KaFai Lau if (copy_from_sockptr(&zc, optval, len)) 426205255b82SEric Dumazet return -EFAULT; 42633c5a2fd0SArjun Roy if (zc.reserved) 42643c5a2fd0SArjun Roy return -EINVAL; 42653c5a2fd0SArjun Roy if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) 42663c5a2fd0SArjun Roy return -EINVAL; 4267d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk); 42687eeba170SArjun Roy err = tcp_zerocopy_receive(sk, &zc, &tss); 42699cacf81fSStanislav Fomichev err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, 42709cacf81fSStanislav Fomichev &zc, &len, err); 4271d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 42727eeba170SArjun Roy if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) 42737eeba170SArjun Roy goto zerocopy_rcv_cmsg; 4274c8856c05SArjun Roy switch (len) { 42757eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_flags): 42767eeba170SArjun Roy goto zerocopy_rcv_cmsg; 42777eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_controllen): 42787eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_control): 42797eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, flags): 42807eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_len): 42817eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_address): 428233946518SArjun Roy case offsetofend(struct tcp_zerocopy_receive, err): 428333946518SArjun Roy goto zerocopy_rcv_sk_err; 4284c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, inq): 4285c8856c05SArjun Roy goto zerocopy_rcv_inq; 4286c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, length): 4287c8856c05SArjun Roy default: 4288c8856c05SArjun Roy goto zerocopy_rcv_out; 4289c8856c05SArjun Roy } 42907eeba170SArjun Roy zerocopy_rcv_cmsg: 42917eeba170SArjun Roy if 
(zc.msg_flags & TCP_CMSG_TS) 42927eeba170SArjun Roy tcp_zc_finalize_rx_tstamp(sk, &zc, &tss); 42937eeba170SArjun Roy else 42947eeba170SArjun Roy zc.msg_flags = 0; 429533946518SArjun Roy zerocopy_rcv_sk_err: 429633946518SArjun Roy if (!err) 429733946518SArjun Roy zc.err = sock_error(sk); 4298c8856c05SArjun Roy zerocopy_rcv_inq: 4299c8856c05SArjun Roy zc.inq = tcp_inq_hint(sk); 4300c8856c05SArjun Roy zerocopy_rcv_out: 430134704ef0SMartin KaFai Lau if (!err && copy_to_sockptr(optval, &zc, len)) 430205255b82SEric Dumazet err = -EFAULT; 430305255b82SEric Dumazet return err; 430405255b82SEric Dumazet } 430505255b82SEric Dumazet #endif 43061da177e4SLinus Torvalds default: 43071da177e4SLinus Torvalds return -ENOPROTOOPT; 43083ff50b79SStephen Hemminger } 43091da177e4SLinus Torvalds 431034704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 43111da177e4SLinus Torvalds return -EFAULT; 431234704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &val, len)) 43131da177e4SLinus Torvalds return -EFAULT; 43141da177e4SLinus Torvalds return 0; 43151da177e4SLinus Torvalds } 43161da177e4SLinus Torvalds 43179cacf81fSStanislav Fomichev bool tcp_bpf_bypass_getsockopt(int level, int optname) 43189cacf81fSStanislav Fomichev { 43199cacf81fSStanislav Fomichev /* TCP do_tcp_getsockopt has optimized getsockopt implementation 43209cacf81fSStanislav Fomichev * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE. 43219cacf81fSStanislav Fomichev */ 43229cacf81fSStanislav Fomichev if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE) 43239cacf81fSStanislav Fomichev return true; 43249cacf81fSStanislav Fomichev 43259cacf81fSStanislav Fomichev return false; 43269cacf81fSStanislav Fomichev } 43279cacf81fSStanislav Fomichev EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt); 43289cacf81fSStanislav Fomichev 43293fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 43303fdadf7dSDmitry Mishin int __user *optlen) 43313fdadf7dSDmitry Mishin { 43323fdadf7dSDmitry Mishin struct inet_connection_sock *icsk = inet_csk(sk); 43333fdadf7dSDmitry Mishin 43343fdadf7dSDmitry Mishin if (level != SOL_TCP) 4335f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 4336f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname, 43373fdadf7dSDmitry Mishin optval, optlen); 433834704ef0SMartin KaFai Lau return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), 433934704ef0SMartin KaFai Lau USER_SOCKPTR(optlen)); 43403fdadf7dSDmitry Mishin } 43414bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_getsockopt); 43423fdadf7dSDmitry Mishin 4343cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 4344349ce993SEric Dumazet static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); 434571cea17eSEric Dumazet static DEFINE_MUTEX(tcp_md5sig_mutex); 4346349ce993SEric Dumazet static bool tcp_md5sig_pool_populated = false; 4347cfb6eeb4SYOSHIFUJI Hideaki 434871cea17eSEric Dumazet static void __tcp_alloc_md5sig_pool(void) 4349cfb6eeb4SYOSHIFUJI Hideaki { 4350cf80e0e4SHerbert Xu struct crypto_ahash *hash; 4351cfb6eeb4SYOSHIFUJI Hideaki int cpu; 4352cfb6eeb4SYOSHIFUJI Hideaki 4353cf80e0e4SHerbert Xu hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); 43541eea84b7SInsu Yun if (IS_ERR(hash)) 4355349ce993SEric Dumazet return; 4356cf80e0e4SHerbert Xu 4357cf80e0e4SHerbert Xu for_each_possible_cpu(cpu) { 435819689e38SEric Dumazet void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch; 4359cf80e0e4SHerbert Xu struct ahash_request 
*req; 4360cf80e0e4SHerbert Xu 436119689e38SEric Dumazet if (!scratch) { 436219689e38SEric Dumazet scratch = kmalloc_node(sizeof(union tcp_md5sum_block) + 436319689e38SEric Dumazet sizeof(struct tcphdr), 436419689e38SEric Dumazet GFP_KERNEL, 436519689e38SEric Dumazet cpu_to_node(cpu)); 436619689e38SEric Dumazet if (!scratch) 436719689e38SEric Dumazet return; 436819689e38SEric Dumazet per_cpu(tcp_md5sig_pool, cpu).scratch = scratch; 436919689e38SEric Dumazet } 4370cf80e0e4SHerbert Xu if (per_cpu(tcp_md5sig_pool, cpu).md5_req) 4371cf80e0e4SHerbert Xu continue; 4372cf80e0e4SHerbert Xu 4373cf80e0e4SHerbert Xu req = ahash_request_alloc(hash, GFP_KERNEL); 4374cf80e0e4SHerbert Xu if (!req) 4375cf80e0e4SHerbert Xu return; 4376cf80e0e4SHerbert Xu 4377cf80e0e4SHerbert Xu ahash_request_set_callback(req, 0, NULL, NULL); 4378cf80e0e4SHerbert Xu 4379cf80e0e4SHerbert Xu per_cpu(tcp_md5sig_pool, cpu).md5_req = req; 4380349ce993SEric Dumazet } 4381349ce993SEric Dumazet /* before setting tcp_md5sig_pool_populated, we must commit all writes 4382349ce993SEric Dumazet * to memory. See smp_rmb() in tcp_get_md5sig_pool() 438371cea17eSEric Dumazet */ 438471cea17eSEric Dumazet smp_wmb(); 4385aacd467cSEric Dumazet /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool() 4386aacd467cSEric Dumazet * and tcp_get_md5sig_pool(). 4387aacd467cSEric Dumazet */ 4388aacd467cSEric Dumazet WRITE_ONCE(tcp_md5sig_pool_populated, true); 4389cfb6eeb4SYOSHIFUJI Hideaki } 4390cfb6eeb4SYOSHIFUJI Hideaki 439171cea17eSEric Dumazet bool tcp_alloc_md5sig_pool(void) 4392cfb6eeb4SYOSHIFUJI Hideaki { 4393aacd467cSEric Dumazet /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ 4394aacd467cSEric Dumazet if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) { 439571cea17eSEric Dumazet mutex_lock(&tcp_md5sig_mutex); 4396cfb6eeb4SYOSHIFUJI Hideaki 4397459837b5SDmitry Safonov if (!tcp_md5sig_pool_populated) 439871cea17eSEric Dumazet __tcp_alloc_md5sig_pool(); 4399cfb6eeb4SYOSHIFUJI Hideaki 440071cea17eSEric Dumazet mutex_unlock(&tcp_md5sig_mutex); 4401cfb6eeb4SYOSHIFUJI Hideaki } 4402aacd467cSEric Dumazet /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ 4403aacd467cSEric Dumazet return READ_ONCE(tcp_md5sig_pool_populated); 4404cfb6eeb4SYOSHIFUJI Hideaki } 4405cfb6eeb4SYOSHIFUJI Hideaki EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 4406cfb6eeb4SYOSHIFUJI Hideaki 440735790c04SEric Dumazet 440835790c04SEric Dumazet /** 440935790c04SEric Dumazet * tcp_get_md5sig_pool - get md5sig_pool for this user 441035790c04SEric Dumazet * 441135790c04SEric Dumazet * We use percpu structure, so if we succeed, we exit with preemption 441235790c04SEric Dumazet * and BH disabled, to make sure another thread or softirq handling 441335790c04SEric Dumazet * wont try to get same context. 
441435790c04SEric Dumazet */ 441535790c04SEric Dumazet struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 4416cfb6eeb4SYOSHIFUJI Hideaki { 441735790c04SEric Dumazet local_bh_disable(); 441835790c04SEric Dumazet 4419aacd467cSEric Dumazet /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ 4420aacd467cSEric Dumazet if (READ_ONCE(tcp_md5sig_pool_populated)) { 4421349ce993SEric Dumazet /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ 4422349ce993SEric Dumazet smp_rmb(); 4423349ce993SEric Dumazet return this_cpu_ptr(&tcp_md5sig_pool); 4424349ce993SEric Dumazet } 442535790c04SEric Dumazet local_bh_enable(); 442635790c04SEric Dumazet return NULL; 4427cfb6eeb4SYOSHIFUJI Hideaki } 442835790c04SEric Dumazet EXPORT_SYMBOL(tcp_get_md5sig_pool); 4429cfb6eeb4SYOSHIFUJI Hideaki 443049a72dfbSAdam Langley int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, 4431cf533ea5SEric Dumazet const struct sk_buff *skb, unsigned int header_len) 443249a72dfbSAdam Langley { 443349a72dfbSAdam Langley struct scatterlist sg; 443449a72dfbSAdam Langley const struct tcphdr *tp = tcp_hdr(skb); 4435cf80e0e4SHerbert Xu struct ahash_request *req = hp->md5_req; 443695c96174SEric Dumazet unsigned int i; 443795c96174SEric Dumazet const unsigned int head_data_len = skb_headlen(skb) > header_len ? 443849a72dfbSAdam Langley skb_headlen(skb) - header_len : 0; 443949a72dfbSAdam Langley const struct skb_shared_info *shi = skb_shinfo(skb); 4440d7fd1b57SEric Dumazet struct sk_buff *frag_iter; 444149a72dfbSAdam Langley 444249a72dfbSAdam Langley sg_init_table(&sg, 1); 444349a72dfbSAdam Langley 444449a72dfbSAdam Langley sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); 4445cf80e0e4SHerbert Xu ahash_request_set_crypt(req, &sg, NULL, head_data_len); 4446cf80e0e4SHerbert Xu if (crypto_ahash_update(req)) 444749a72dfbSAdam Langley return 1; 444849a72dfbSAdam Langley 444949a72dfbSAdam Langley for (i = 0; i < shi->nr_frags; ++i) { 4450d8e18a51SMatthew Wilcox (Oracle) const skb_frag_t *f = &shi->frags[i]; 4451b54c9d5bSJonathan Lemon unsigned int offset = skb_frag_off(f); 445254d27fcbSEric Dumazet struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); 445354d27fcbSEric Dumazet 445454d27fcbSEric Dumazet sg_set_page(&sg, page, skb_frag_size(f), 445554d27fcbSEric Dumazet offset_in_page(offset)); 4456cf80e0e4SHerbert Xu ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f)); 4457cf80e0e4SHerbert Xu if (crypto_ahash_update(req)) 445849a72dfbSAdam Langley return 1; 445949a72dfbSAdam Langley } 446049a72dfbSAdam Langley 4461d7fd1b57SEric Dumazet skb_walk_frags(skb, frag_iter) 4462d7fd1b57SEric Dumazet if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) 4463d7fd1b57SEric Dumazet return 1; 4464d7fd1b57SEric Dumazet 446549a72dfbSAdam Langley return 0; 446649a72dfbSAdam Langley } 446749a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_skb_data); 446849a72dfbSAdam Langley 4469cf533ea5SEric Dumazet int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) 447049a72dfbSAdam Langley { 4471e6ced831SEric Dumazet u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ 447249a72dfbSAdam Langley struct scatterlist sg; 447349a72dfbSAdam Langley 44746a2febecSEric Dumazet sg_init_one(&sg, key->key, keylen); 44756a2febecSEric Dumazet ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen); 4476e6ced831SEric Dumazet 4477e6ced831SEric Dumazet /* We use data_race() because tcp_md5_do_add() might change key->key under us */ 4478e6ced831SEric Dumazet return 
/* Called with rcu_read_lock() */
enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
		     int family, int dif, int sdif)
{
	/*
	 * This gets called for each TCP segment that arrives,
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct tcphdr *th = tcp_hdr(skb);
	const struct tcp_sock *tp = tcp_sk(sk);
	int genhash, l3index;
	u8 newhash[16];

	/* If sdif is set, the packet ingressed via a device in an L3
	 * domain, and dif is set to the l3mdev.
	 */
	l3index = sdif ? dif : 0;

	hash_expected = tcp_md5_do_lookup(sk, l3index, saddr, family);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return SKB_NOT_DROPPED_YET;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return SKB_DROP_REASON_TCP_MD5NOTFOUND;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
	}

	/* Check the signature.
	 * To support dual stack listeners, we need to handle
	 * the IPv4-mapped case.
	 */
	if (family == AF_INET)
		genhash = tcp_v4_md5_hash_skb(newhash,
					      hash_expected,
					      NULL, skb);
	else
		genhash = tp->af_specific->calc_md5_hash(newhash,
							 hash_expected,
							 NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		if (family == AF_INET) {
			net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
					     saddr, ntohs(th->source),
					     daddr, ntohs(th->dest),
					     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "", l3index);
		} else {
			net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
					     genhash ? "failed" : "mismatch",
					     saddr, ntohs(th->source),
					     daddr, ntohs(th->dest), l3index);
		}
		return SKB_DROP_REASON_TCP_MD5FAILURE;
	}
	return SKB_NOT_DROPPED_YET;
}
EXPORT_SYMBOL(tcp_inbound_md5_hash);

#endif
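
/*
 * Userspace side, for illustration: the keys this function checks are
 * installed per peer with the TCP_MD5SIG socket option (RFC 2385). A
 * minimal sketch; "fd" is a TCP socket and "peer" holds the remote
 * address, both assumed from the surrounding program:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/tcp.h>
 *
 *	struct tcp_md5sig md5 = {};
 *
 *	memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5)) < 0)
 *		perror("TCP_MD5SIG");
 *
 * Both endpoints must install the same key before connect()/listen();
 * otherwise segments are dropped with the reasons returned above.
 */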
void tcp_done(struct sock *sk)
{
	struct request_sock *req;

	/* We might be called with a new socket, after
	 * inet_csk_prepare_forced_close() has been called,
	 * so we cannot use lockdep_sock_is_held(sk).
	 */
	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);

	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);
	if (req)
		reqsk_fastopen_remove(sk, req, false);

	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);

int tcp_abort(struct sock *sk, int err)
{
	int state = inet_sk_state_load(sk);

	if (state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);

		local_bh_disable();
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		local_bh_enable();
		return 0;
	}
	if (state == TCP_TIME_WAIT) {
		struct inet_timewait_sock *tw = inet_twsk(sk);

		refcount_inc(&tw->tw_refcnt);
		local_bh_disable();
		inet_twsk_deschedule_put(tw);
		local_bh_enable();
		return 0;
	}

	/* BPF context ensures sock locking. */
	if (!has_current_bpf_ctx())
		/* Don't race with userspace socket closes such as tcp_close. */
		lock_sock(sk);

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);
		inet_csk_listen_stop(sk);
	}

	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
	local_bh_disable();
	bh_lock_sock(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		WRITE_ONCE(sk->sk_err, err);
		/* This barrier is coupled with smp_rmb() in tcp_poll() */
		smp_wmb();
		sk_error_report(sk);
		if (tcp_need_reset(sk->sk_state))
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
	}

	bh_unlock_sock(sk);
	local_bh_enable();
	tcp_write_queue_purge(sk);
	if (!has_current_bpf_ctx())
		release_sock(sk);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_abort);
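
/*
 * Note (illustrative): tcp_abort() is the backend for SOCK_DESTROY
 * (inet_diag) and bpf_sock_destroy(). From userspace it is typically
 * reached through ss(8), e.g.:
 *
 *	# forcibly close established connections to a given peer
 *	ss -K dst 192.0.2.1
 *
 * which aborts every matching socket with ECONNABORTED.
 */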
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &thash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("thash_entries=", set_thash_entries);

static void __init tcp_init_mem(void)
{
	unsigned long limit = nr_free_buffer_pages() / 16;

	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
}
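
/*
 * Worked numbers (assuming nr_free_buffer_pages() is roughly total RAM):
 * on a 16 GiB machine with 4 KiB pages there are ~4,194,304 pages, so
 * limit = 262,144 pages and tcp_init_mem() yields
 *
 *	sysctl_tcp_mem[0] = 196,608 pages (~768 MiB) - below this: no pressure
 *	sysctl_tcp_mem[1] = 262,144 pages (~1 GiB)   - memory pressure starts
 *	sysctl_tcp_mem[2] = 393,216 pages (~1.5 GiB) - hard limit
 *
 * i.e. roughly the 4.68 %, 6.25 % and 9.37 % of memory noted in the
 * comments above. The values are in pages, as exposed through
 * /proc/sys/net/ipv4/tcp_mem.
 */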
void __init tcp_init(void)
{
	int max_rshare, max_wshare, cnt;
	unsigned long limit;
	unsigned int i;

	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);

	timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);

	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
			    thash_entries, 21,  /* one slot per 2 MB */
			    0, 64 * 1024);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				  SLAB_ACCOUNT,
				  NULL);
	tcp_hashinfo.bind2_bucket_cachep =
		kmem_cache_create("tcp_bind2_bucket",
				  sizeof(struct inet_bind2_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				  SLAB_ACCOUNT,
				  NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					17, /* one slot per 128 KB of memory */
					0,
					NULL,
					&tcp_hashinfo.ehash_mask,
					0,
					thash_entries ? 0 : 512 * 1024);
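
	/*
	 * Sizing note (illustrative arithmetic): with thash_entries unset,
	 * the scale of 17 requests one ehash slot per 128 KB of low memory,
	 * capped at 512K entries; e.g. 16 GiB yields 2^34 / 2^17 = 131072
	 * slots (rounded to a power of two so ehash_mask works). Booting
	 * with e.g. thash_entries=1048576 overrides the heuristic and
	 * removes the cap.
	 */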
	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					2 * sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_mask + 1,
					17, /* one slot per 128 KB of memory */
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					0,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
	tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
		spin_lock_init(&tcp_hashinfo.bhash2[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain);
	}

	tcp_hashinfo.pernet = false;

	cnt = tcp_hashinfo.ehash_mask + 1;
	sysctl_tcp_max_orphans = cnt / 2;

	tcp_init_mem();
	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
	max_wshare = min(4UL*1024*1024, limit);
	max_rshare = min(6UL*1024*1024, limit);

	init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE;
	init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
	init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);

	init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE;
	init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
	init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);

	pr_info("Hash tables configured (established %u bind %u)\n",
		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);

	tcp_v4_init();
	tcp_metrics_init();
	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
	tcp_tasklet_init();
	mptcp_init();
}
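
/*
 * Resulting defaults, for illustration: on a machine with enough memory
 * that the 1/128 clamp above does not bite, the familiar sysctl values
 * come out of this function (min, default, max in bytes; assumes
 * PAGE_SIZE == 4096):
 *
 *	$ sysctl net.ipv4.tcp_wmem net.ipv4.tcp_rmem
 *	net.ipv4.tcp_wmem = 4096  16384   4194304
 *	net.ipv4.tcp_rmem = 4096  131072  6291456
 */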