// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly	:	ack < copied bug fix.
 *		Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;

	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

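	/* Illustrative note (not part of the original source): the value
	 * computed below is the most recent delivery rate in bytes per
	 * second, i.e. rate_delivered * mss_cache * USEC_PER_SEC /
	 * rate_interval_us. For example, with 10 delivered segments, an
	 * MSS of 1448 bytes and a 10000 us sample interval this is
	 * 10 * 1448 * 1000000 / 10000 = 1448000 bytes/sec (~1.45 MB/s).
	 */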
	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	icsk->icsk_rto_min = TCP_RTO_MIN;
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));

	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}

static bool tcp_stream_is_readable(struct sock *sk, int target)
{
	if (tcp_epollin_ready(sk, target))
		return true;
	return sk_is_readable(sk);
}

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);
		u16 urg_data = READ_ONCE(tp->urg_data);

		if (unlikely(urg_data) &&
		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
		    !sock_flag(sk, SOCK_URGINLINE))
			target++;

		if (tcp_stream_is_readable(sk, target))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (__sk_stream_is_writeable(sk, 1))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = READ_ONCE(tp->urg_data) &&
		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) -
			       READ_ONCE(tp->snd_nxt);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);

void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
	       tcp_skb_can_collapse_to(skb);
}

void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			sk_wait_data(sk, &timeo, NULL);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);

struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
				     bool force_schedule)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(size + MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

/* In some cases, both sendpage() and sendmsg() could have added
 * an skb to the write queue, but failed adding payload on it.
 * We need to remove it to consume less memory, but more
 * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
 * users.
 */
void tcp_remove_empty_skb(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
		tcp_unlink_write_queue(skb, sk);
		if (tcp_write_queue_empty(sk))
			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
		tcp_wmem_free_skb(sk, skb);
	}
}

/* skb changing from pure zc to mixed, must charge zc */
static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
{
	if (unlikely(skb_zcopy_pure(skb))) {
		u32 extra = skb->truesize -
			    SKB_TRUESIZE(skb_end_offset(skb));

		if (!sk_wmem_schedule(sk, extra))
			return -ENOMEM;

		sk_mem_charge(sk, extra);
		skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
	}
	return 0;
}

static int tcp_wmem_schedule(struct sock *sk, int copy)
{
	int left;

	if (likely(sk_wmem_schedule(sk, copy)))
		return copy;

	/* We could be in trouble if we have nothing queued.
	 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
	 * to guarantee some progress.
	 */
	left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
	if (left > 0)
		sk_forced_mem_schedule(sk, min(left, copy));
	return min(copy, sk->sk_forward_alloc);
}

static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
				      struct page *page, int offset, size_t *size)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	bool can_coalesce;
	int copy, i;

	if (!skb || (copy = size_goal - skb->len) <= 0 ||
	    !tcp_skb_can_collapse_to(skb)) {
new_segment:
		if (!sk_stream_memory_free(sk))
			return NULL;

		skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation,
					   tcp_rtx_and_write_queues_empty(sk));
		if (!skb)
			return NULL;

#ifdef CONFIG_TLS_DEVICE
		skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
#endif
		tcp_skb_entail(sk, skb);
		copy = size_goal;
	}

	if (copy > *size)
		copy = *size;

	i = skb_shinfo(skb)->nr_frags;
	can_coalesce = skb_can_coalesce(skb, i, page, offset);
	if (!can_coalesce && i >= sysctl_max_skb_frags) {
		tcp_mark_push(tp, skb);
		goto new_segment;
	}
	if (tcp_downgrade_zcopy_pure(sk, skb))
		return NULL;

	copy = tcp_wmem_schedule(sk, copy);
	if (!copy)
		return NULL;

	if (can_coalesce) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
	} else {
		get_page(page);
		skb_fill_page_desc(skb, i, page, offset, copy);
	}

	if (!(flags & MSG_NO_SHARED_FRAGS))
		skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;

	skb->len += copy;
	skb->data_len += copy;
	skb->truesize += copy;
	sk_wmem_queued_add(sk, copy);
	sk_mem_charge(sk, copy);
	WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
	TCP_SKB_CB(skb)->end_seq += copy;
	tcp_skb_pcount_set(skb, 0);

	*size = copy;
	return skb;
}

ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
	    WARN_ONCE(!sendpage_ok(page),
		      "page must not be a Slab one and have page_count > 0"))
		return -EINVAL;

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
	 */
	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
	    !tcp_passive_fastopen(sk)) {
		err = sk_stream_wait_connect(sk, &timeo);
		if (err != 0)
			goto out_err;
	}

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (size > 0) {
		struct sk_buff *skb;
		size_t copy = size;

		skb = tcp_build_frag(sk, size_goal, flags, page, offset, &copy);
		if (!skb)
			goto wait_for_space;

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		offset += copy;
		size -= copy;
		if (!size)
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
Miller } else if (skb == tcp_send_head(sk)) 10951da177e4SLinus Torvalds tcp_push_one(sk, mss_now); 10961da177e4SLinus Torvalds continue; 10971da177e4SLinus Torvalds 1098afb83012SSoheil Hassas Yeganeh wait_for_space: 10991da177e4SLinus Torvalds set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1100f54b3111SEric Dumazet tcp_push(sk, flags & ~MSG_MORE, mss_now, 1101f54b3111SEric Dumazet TCP_NAGLE_PUSH, size_goal); 11021da177e4SLinus Torvalds 1103686a5624SYuvaraja Mariappan err = sk_stream_wait_memory(sk, &timeo); 1104686a5624SYuvaraja Mariappan if (err != 0) 11051da177e4SLinus Torvalds goto do_error; 11061da177e4SLinus Torvalds 11070c54b85fSIlpo Järvinen mss_now = tcp_send_mss(sk, &size_goal, flags); 11081da177e4SLinus Torvalds } 11091da177e4SLinus Torvalds 11101da177e4SLinus Torvalds out: 1111ad02c4f5SSoheil Hassas Yeganeh if (copied) { 11124e8cc228SEric Dumazet tcp_tx_timestamp(sk, sk->sk_tsflags); 1113ad02c4f5SSoheil Hassas Yeganeh if (!(flags & MSG_SENDPAGE_NOTLAST)) 1114f54b3111SEric Dumazet tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1115ad02c4f5SSoheil Hassas Yeganeh } 11161da177e4SLinus Torvalds return copied; 11171da177e4SLinus Torvalds 11181da177e4SLinus Torvalds do_error: 111927728ba8SEric Dumazet tcp_remove_empty_skb(sk); 11201da177e4SLinus Torvalds if (copied) 11211da177e4SLinus Torvalds goto out; 11221da177e4SLinus Torvalds out_err: 1123ce5ec440SJason Baron /* make sure we wake any epoll edge trigger waiter */ 1124216808c6SEric Dumazet if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { 1125ce5ec440SJason Baron sk->sk_write_space(sk); 1126b0f71bd3SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1127b0f71bd3SFrancis Yan } 11281da177e4SLinus Torvalds return sk_stream_error(sk, flags, err); 11291da177e4SLinus Torvalds } 1130e3b5616aSDave Watson EXPORT_SYMBOL_GPL(do_tcp_sendpages); 11311da177e4SLinus Torvalds 1132306b13ebSTom Herbert int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, 11331da177e4SLinus Torvalds size_t size, int flags) 11341da177e4SLinus Torvalds { 1135dead7cdbSEric Dumazet if (!(sk->sk_route_caps & NETIF_F_SG)) 1136bd9dfc54SEric Dumazet return sock_no_sendpage_locked(sk, page, offset, size, flags); 11371da177e4SLinus Torvalds 1138d7722e85SSoheil Hassas Yeganeh tcp_rate_check_app_limited(sk); /* is sending application-limited? */ 1139d7722e85SSoheil Hassas Yeganeh 1140306b13ebSTom Herbert return do_tcp_sendpages(sk, page, offset, size, flags); 1141306b13ebSTom Herbert } 1142774c4673SDavid S. 
Miller EXPORT_SYMBOL_GPL(tcp_sendpage_locked); 1143306b13ebSTom Herbert 1144306b13ebSTom Herbert int tcp_sendpage(struct sock *sk, struct page *page, int offset, 1145306b13ebSTom Herbert size_t size, int flags) 1146306b13ebSTom Herbert { 1147306b13ebSTom Herbert int ret; 1148306b13ebSTom Herbert 1149306b13ebSTom Herbert lock_sock(sk); 1150306b13ebSTom Herbert ret = tcp_sendpage_locked(sk, page, offset, size, flags); 11511da177e4SLinus Torvalds release_sock(sk); 1152306b13ebSTom Herbert 1153306b13ebSTom Herbert return ret; 11541da177e4SLinus Torvalds } 11554bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sendpage); 11561da177e4SLinus Torvalds 1157cf60af03SYuchung Cheng void tcp_free_fastopen_req(struct tcp_sock *tp) 1158cf60af03SYuchung Cheng { 115900db4124SIan Morris if (tp->fastopen_req) { 1160cf60af03SYuchung Cheng kfree(tp->fastopen_req); 1161cf60af03SYuchung Cheng tp->fastopen_req = NULL; 1162cf60af03SYuchung Cheng } 1163cf60af03SYuchung Cheng } 1164cf60af03SYuchung Cheng 1165f5ddcbbbSEric Dumazet static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, 1166f859a448SWillem de Bruijn int *copied, size_t size, 1167f859a448SWillem de Bruijn struct ubuf_info *uarg) 1168cf60af03SYuchung Cheng { 1169cf60af03SYuchung Cheng struct tcp_sock *tp = tcp_sk(sk); 117019f6d3f3SWei Wang struct inet_sock *inet = inet_sk(sk); 1171ba615f67SWei Wang struct sockaddr *uaddr = msg->msg_name; 1172cf60af03SYuchung Cheng int err, flags; 1173cf60af03SYuchung Cheng 11745a542133SKuniyuki Iwashima if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & 11755a542133SKuniyuki Iwashima TFO_CLIENT_ENABLE) || 1176ba615f67SWei Wang (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) && 1177ba615f67SWei Wang uaddr->sa_family == AF_UNSPEC)) 1178cf60af03SYuchung Cheng return -EOPNOTSUPP; 117900db4124SIan Morris if (tp->fastopen_req) 1180cf60af03SYuchung Cheng return -EALREADY; /* Another Fast Open is in progress */ 1181cf60af03SYuchung Cheng 1182cf60af03SYuchung Cheng tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request), 1183cf60af03SYuchung Cheng sk->sk_allocation); 118451456b29SIan Morris if (unlikely(!tp->fastopen_req)) 1185cf60af03SYuchung Cheng return -ENOBUFS; 1186cf60af03SYuchung Cheng tp->fastopen_req->data = msg; 1187f5ddcbbbSEric Dumazet tp->fastopen_req->size = size; 1188f859a448SWillem de Bruijn tp->fastopen_req->uarg = uarg; 1189cf60af03SYuchung Cheng 119019f6d3f3SWei Wang if (inet->defer_connect) { 119119f6d3f3SWei Wang err = tcp_connect(sk); 119219f6d3f3SWei Wang /* Same failure procedure as in tcp_v4/6_connect */ 119319f6d3f3SWei Wang if (err) { 119419f6d3f3SWei Wang tcp_set_state(sk, TCP_CLOSE); 119519f6d3f3SWei Wang inet->inet_dport = 0; 119619f6d3f3SWei Wang sk->sk_route_caps = 0; 119719f6d3f3SWei Wang } 119819f6d3f3SWei Wang } 1199cf60af03SYuchung Cheng flags = (msg->msg_flags & MSG_DONTWAIT) ? 
O_NONBLOCK : 0; 1200ba615f67SWei Wang err = __inet_stream_connect(sk->sk_socket, uaddr, 12013979ad7eSWilly Tarreau msg->msg_namelen, flags, 1); 12027db92362SWei Wang /* fastopen_req could already be freed in __inet_stream_connect 12037db92362SWei Wang * if the connection times out or gets rst 12047db92362SWei Wang */ 12057db92362SWei Wang if (tp->fastopen_req) { 1206f5ddcbbbSEric Dumazet *copied = tp->fastopen_req->copied; 1207cf60af03SYuchung Cheng tcp_free_fastopen_req(tp); 12087db92362SWei Wang inet->defer_connect = 0; 12097db92362SWei Wang } 1210cf60af03SYuchung Cheng return err; 1211cf60af03SYuchung Cheng } 1212cf60af03SYuchung Cheng 1213306b13ebSTom Herbert int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) 12141da177e4SLinus Torvalds { 12151da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1216f214f915SWillem de Bruijn struct ubuf_info *uarg = NULL; 12171da177e4SLinus Torvalds struct sk_buff *skb; 1218c14ac945SSoheil Hassas Yeganeh struct sockcm_cookie sockc; 121957be5bdaSAl Viro int flags, err, copied = 0; 122057be5bdaSAl Viro int mss_now = 0, size_goal, copied_syn = 0; 12211a991488SEric Dumazet int process_backlog = 0; 122274d4a8f8SEric Dumazet bool zc = false; 12231da177e4SLinus Torvalds long timeo; 12241da177e4SLinus Torvalds 12251da177e4SLinus Torvalds flags = msg->msg_flags; 1226f214f915SWillem de Bruijn 1227eb315a7dSPavel Begunkov if ((flags & MSG_ZEROCOPY) && size) { 122875c119afSEric Dumazet skb = tcp_write_queue_tail(sk); 1229eb315a7dSPavel Begunkov 1230eb315a7dSPavel Begunkov if (msg->msg_ubuf) { 1231eb315a7dSPavel Begunkov uarg = msg->msg_ubuf; 1232eb315a7dSPavel Begunkov net_zcopy_get(uarg); 1233eb315a7dSPavel Begunkov zc = sk->sk_route_caps & NETIF_F_SG; 1234eb315a7dSPavel Begunkov } else if (sock_flag(sk, SOCK_ZEROCOPY)) { 12358c793822SJonathan Lemon uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb)); 1236f214f915SWillem de Bruijn if (!uarg) { 1237f214f915SWillem de Bruijn err = -ENOBUFS; 1238f214f915SWillem de Bruijn goto out_err; 1239f214f915SWillem de Bruijn } 1240dead7cdbSEric Dumazet zc = sk->sk_route_caps & NETIF_F_SG; 124102583adeSWillem de Bruijn if (!zc) 1242f214f915SWillem de Bruijn uarg->zerocopy = 0; 1243f214f915SWillem de Bruijn } 1244eb315a7dSPavel Begunkov } 1245f214f915SWillem de Bruijn 124616ae6aa1SYuchung Cheng if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) && 124716ae6aa1SYuchung Cheng !tp->repair) { 1248f859a448SWillem de Bruijn err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg); 1249cf60af03SYuchung Cheng if (err == -EINPROGRESS && copied_syn > 0) 1250cf60af03SYuchung Cheng goto out; 1251cf60af03SYuchung Cheng else if (err) 1252cf60af03SYuchung Cheng goto out_err; 1253cf60af03SYuchung Cheng } 1254cf60af03SYuchung Cheng 12551da177e4SLinus Torvalds timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); 12561da177e4SLinus Torvalds 1257d7722e85SSoheil Hassas Yeganeh tcp_rate_check_app_limited(sk); /* is sending application-limited? */ 1258d7722e85SSoheil Hassas Yeganeh 12598336886fSJerry Chu /* Wait for a connection to finish. One exception is TCP Fast Open 12608336886fSJerry Chu * (passive side) where data is allowed to be sent before a connection 12618336886fSJerry Chu * is fully established. 
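 * The active-side Fast Open path (MSG_FASTOPEN or defer_connect) has
 * already been handled above by tcp_sendmsg_fastopen().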
12628336886fSJerry Chu */ 12638336886fSJerry Chu if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 12648336886fSJerry Chu !tcp_passive_fastopen(sk)) { 1265686a5624SYuvaraja Mariappan err = sk_stream_wait_connect(sk, &timeo); 1266686a5624SYuvaraja Mariappan if (err != 0) 1267cf60af03SYuchung Cheng goto do_error; 12688336886fSJerry Chu } 12691da177e4SLinus Torvalds 1270c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) { 1271c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_RECV_QUEUE) { 1272c0e88ff0SPavel Emelyanov copied = tcp_send_rcvq(sk, msg, size); 12735924f17aSChristoph Paasch goto out_nopush; 1274c0e88ff0SPavel Emelyanov } 1275c0e88ff0SPavel Emelyanov 1276c0e88ff0SPavel Emelyanov err = -EINVAL; 1277c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE) 1278c0e88ff0SPavel Emelyanov goto out_err; 1279c0e88ff0SPavel Emelyanov 1280c0e88ff0SPavel Emelyanov /* 'common' sending to sendq */ 1281c0e88ff0SPavel Emelyanov } 1282c0e88ff0SPavel Emelyanov 1283657a0667SWillem de Bruijn sockcm_init(&sockc, sk); 1284c14ac945SSoheil Hassas Yeganeh if (msg->msg_controllen) { 1285c14ac945SSoheil Hassas Yeganeh err = sock_cmsg_send(sk, msg, &sockc); 1286c14ac945SSoheil Hassas Yeganeh if (unlikely(err)) { 1287c14ac945SSoheil Hassas Yeganeh err = -EINVAL; 1288c14ac945SSoheil Hassas Yeganeh goto out_err; 1289c14ac945SSoheil Hassas Yeganeh } 1290c14ac945SSoheil Hassas Yeganeh } 1291c14ac945SSoheil Hassas Yeganeh 12921da177e4SLinus Torvalds /* This should be in poll */ 12939cd3e072SEric Dumazet sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 12941da177e4SLinus Torvalds 12951da177e4SLinus Torvalds /* Ok commence sending. */ 12961da177e4SLinus Torvalds copied = 0; 12971da177e4SLinus Torvalds 1298d41a69f1SEric Dumazet restart: 1299d41a69f1SEric Dumazet mss_now = tcp_send_mss(sk, &size_goal, flags); 1300d41a69f1SEric Dumazet 13011da177e4SLinus Torvalds err = -EPIPE; 13021da177e4SLinus Torvalds if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 130379d8665bSEric Dumazet goto do_error; 13041da177e4SLinus Torvalds 130501e97e65SAl Viro while (msg_data_left(msg)) { 13066828b92bSHerbert Xu int copy = 0; 13071da177e4SLinus Torvalds 1308fe067e8aSDavid S. Miller skb = tcp_write_queue_tail(sk); 130965ec6097SEric Dumazet if (skb) 131065ec6097SEric Dumazet copy = size_goal - skb->len; 13111da177e4SLinus Torvalds 1312c134ecb8SMartin KaFai Lau if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { 13133613b3dbSEric Dumazet bool first_skb; 13143613b3dbSEric Dumazet 13151da177e4SLinus Torvalds new_segment: 13161da177e4SLinus Torvalds if (!sk_stream_memory_free(sk)) 1317afb83012SSoheil Hassas Yeganeh goto wait_for_space; 13181da177e4SLinus Torvalds 13191a991488SEric Dumazet if (unlikely(process_backlog >= 16)) { 13201a991488SEric Dumazet process_backlog = 0; 13211a991488SEric Dumazet if (sk_flush_backlog(sk)) 1322d41a69f1SEric Dumazet goto restart; 1323d4011239SEric Dumazet } 132475c119afSEric Dumazet first_skb = tcp_rtx_and_write_queues_empty(sk); 1325f8dd3b8dSEric Dumazet skb = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, 13263613b3dbSEric Dumazet first_skb); 13271da177e4SLinus Torvalds if (!skb) 1328afb83012SSoheil Hassas Yeganeh goto wait_for_space; 13291da177e4SLinus Torvalds 13301a991488SEric Dumazet process_backlog++; 13311da177e4SLinus Torvalds 133204d8825cSPaolo Abeni tcp_skb_entail(sk, skb); 1333c1b4a7e6SDavid S. Miller copy = size_goal; 13349d186cacSAndrey Vagin 13359d186cacSAndrey Vagin /* All packets are restored as if they have 1336d3edd06eSEric Dumazet * already been sent. 
skb_mstamp_ns isn't set to 13379d186cacSAndrey Vagin * avoid wrong rtt estimation. 13389d186cacSAndrey Vagin */ 13399d186cacSAndrey Vagin if (tp->repair) 13409d186cacSAndrey Vagin TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; 13411da177e4SLinus Torvalds } 13421da177e4SLinus Torvalds 13431da177e4SLinus Torvalds /* Try to append data to the end of skb. */ 134401e97e65SAl Viro if (copy > msg_data_left(msg)) 134501e97e65SAl Viro copy = msg_data_left(msg); 13461da177e4SLinus Torvalds 13473ded97bcSEric Dumazet if (!zc) { 13485640f768SEric Dumazet bool merge = true; 13491da177e4SLinus Torvalds int i = skb_shinfo(skb)->nr_frags; 13505640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 1351761965eaSEric Dumazet 13525640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 1353afb83012SSoheil Hassas Yeganeh goto wait_for_space; 1354761965eaSEric Dumazet 13555640f768SEric Dumazet if (!skb_can_coalesce(skb, i, pfrag->page, 13565640f768SEric Dumazet pfrag->offset)) { 135774d4a8f8SEric Dumazet if (i >= sysctl_max_skb_frags) { 13581da177e4SLinus Torvalds tcp_mark_push(tp, skb); 13591da177e4SLinus Torvalds goto new_segment; 13601da177e4SLinus Torvalds } 13615640f768SEric Dumazet merge = false; 13625640f768SEric Dumazet } 1363ef015786SHerbert Xu 13645640f768SEric Dumazet copy = min_t(int, copy, pfrag->size - pfrag->offset); 1365ef015786SHerbert Xu 1366eb315a7dSPavel Begunkov if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) { 1367849b425cSEric Dumazet if (tcp_downgrade_zcopy_pure(sk, skb)) 1368849b425cSEric Dumazet goto wait_for_space; 1369eb315a7dSPavel Begunkov skb_zcopy_downgrade_managed(skb); 1370eb315a7dSPavel Begunkov } 1371849b425cSEric Dumazet 1372849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy); 1373849b425cSEric Dumazet if (!copy) 1374afb83012SSoheil Hassas Yeganeh goto wait_for_space; 13751da177e4SLinus Torvalds 137657be5bdaSAl Viro err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, 13775640f768SEric Dumazet pfrag->page, 13785640f768SEric Dumazet pfrag->offset, 13795640f768SEric Dumazet copy); 13805640f768SEric Dumazet if (err) 13811da177e4SLinus Torvalds goto do_error; 13821da177e4SLinus Torvalds 13831da177e4SLinus Torvalds /* Update the skb. 
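 * Either grow the last page fragment in place (merge) or attach the
 * freshly filled page_frag page as a new fragment.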
*/ 13841da177e4SLinus Torvalds if (merge) { 13859e903e08SEric Dumazet skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 13861da177e4SLinus Torvalds } else { 13875640f768SEric Dumazet skb_fill_page_desc(skb, i, pfrag->page, 13885640f768SEric Dumazet pfrag->offset, copy); 13894e33e346SEric Dumazet page_ref_inc(pfrag->page); 13901da177e4SLinus Torvalds } 13915640f768SEric Dumazet pfrag->offset += copy; 1392f214f915SWillem de Bruijn } else { 13939b65b17dSTalal Ahmad /* First append to a fragless skb builds initial 13949b65b17dSTalal Ahmad * pure zerocopy skb 13959b65b17dSTalal Ahmad */ 13969b65b17dSTalal Ahmad if (!skb->len) 13979b65b17dSTalal Ahmad skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY; 13989b65b17dSTalal Ahmad 13999b65b17dSTalal Ahmad if (!skb_zcopy_pure(skb)) { 1400849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy); 1401849b425cSEric Dumazet if (!copy) 1402358ed624STalal Ahmad goto wait_for_space; 14039b65b17dSTalal Ahmad } 1404358ed624STalal Ahmad 1405f214f915SWillem de Bruijn err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); 1406111856c7SWillem de Bruijn if (err == -EMSGSIZE || err == -EEXIST) { 1407111856c7SWillem de Bruijn tcp_mark_push(tp, skb); 1408f214f915SWillem de Bruijn goto new_segment; 1409111856c7SWillem de Bruijn } 1410f214f915SWillem de Bruijn if (err < 0) 1411f214f915SWillem de Bruijn goto do_error; 1412f214f915SWillem de Bruijn copy = err; 14131da177e4SLinus Torvalds } 14141da177e4SLinus Torvalds 14151da177e4SLinus Torvalds if (!copied) 14164de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 14171da177e4SLinus Torvalds 14180f317464SEric Dumazet WRITE_ONCE(tp->write_seq, tp->write_seq + copy); 14191da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq += copy; 1420cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, 0); 14211da177e4SLinus Torvalds 14221da177e4SLinus Torvalds copied += copy; 142301e97e65SAl Viro if (!msg_data_left(msg)) { 1424c134ecb8SMartin KaFai Lau if (unlikely(flags & MSG_EOR)) 1425c134ecb8SMartin KaFai Lau TCP_SKB_CB(skb)->eor = 1; 14261da177e4SLinus Torvalds goto out; 14274ed2d765SWillem de Bruijn } 14281da177e4SLinus Torvalds 142965ec6097SEric Dumazet if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) 14301da177e4SLinus Torvalds continue; 14311da177e4SLinus Torvalds 14321da177e4SLinus Torvalds if (forced_push(tp)) { 14331da177e4SLinus Torvalds tcp_mark_push(tp, skb); 14349e412ba7SIlpo Järvinen __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1435fe067e8aSDavid S. 
Miller } else if (skb == tcp_send_head(sk)) 14361da177e4SLinus Torvalds tcp_push_one(sk, mss_now); 14371da177e4SLinus Torvalds continue; 14381da177e4SLinus Torvalds 1439afb83012SSoheil Hassas Yeganeh wait_for_space: 14401da177e4SLinus Torvalds set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1441ec342325SAndrew Vagin if (copied) 1442f54b3111SEric Dumazet tcp_push(sk, flags & ~MSG_MORE, mss_now, 1443f54b3111SEric Dumazet TCP_NAGLE_PUSH, size_goal); 14441da177e4SLinus Torvalds 1445686a5624SYuvaraja Mariappan err = sk_stream_wait_memory(sk, &timeo); 1446686a5624SYuvaraja Mariappan if (err != 0) 14471da177e4SLinus Torvalds goto do_error; 14481da177e4SLinus Torvalds 14490c54b85fSIlpo Järvinen mss_now = tcp_send_mss(sk, &size_goal, flags); 14501da177e4SLinus Torvalds } 14511da177e4SLinus Torvalds 14521da177e4SLinus Torvalds out: 1453ad02c4f5SSoheil Hassas Yeganeh if (copied) { 14544e8cc228SEric Dumazet tcp_tx_timestamp(sk, sockc.tsflags); 1455f54b3111SEric Dumazet tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1456ad02c4f5SSoheil Hassas Yeganeh } 14575924f17aSChristoph Paasch out_nopush: 14588e044917SJonathan Lemon net_zcopy_put(uarg); 1459cf60af03SYuchung Cheng return copied + copied_syn; 14601da177e4SLinus Torvalds 14611da177e4SLinus Torvalds do_error: 146227728ba8SEric Dumazet tcp_remove_empty_skb(sk); 1463fdfc5c85SEric Dumazet 1464cf60af03SYuchung Cheng if (copied + copied_syn) 14651da177e4SLinus Torvalds goto out; 14661da177e4SLinus Torvalds out_err: 14678e044917SJonathan Lemon net_zcopy_put_abort(uarg, true); 14681da177e4SLinus Torvalds err = sk_stream_error(sk, flags, err); 1469ce5ec440SJason Baron /* make sure we wake any epoll edge trigger waiter */ 1470216808c6SEric Dumazet if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { 1471ce5ec440SJason Baron sk->sk_write_space(sk); 1472b0f71bd3SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1473b0f71bd3SFrancis Yan } 14741da177e4SLinus Torvalds return err; 14751da177e4SLinus Torvalds } 1476774c4673SDavid S. Miller EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); 1477306b13ebSTom Herbert 1478306b13ebSTom Herbert int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1479306b13ebSTom Herbert { 1480306b13ebSTom Herbert int ret; 1481306b13ebSTom Herbert 1482306b13ebSTom Herbert lock_sock(sk); 1483306b13ebSTom Herbert ret = tcp_sendmsg_locked(sk, msg, size); 1484306b13ebSTom Herbert release_sock(sk); 1485306b13ebSTom Herbert 1486306b13ebSTom Herbert return ret; 1487306b13ebSTom Herbert } 14884bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sendmsg); 14891da177e4SLinus Torvalds 14901da177e4SLinus Torvalds /* 14911da177e4SLinus Torvalds * Handle reading urgent data. BSD has very simple semantics for 14921da177e4SLinus Torvalds * this, no blocking and very strange errors 8) 14931da177e4SLinus Torvalds */ 14941da177e4SLinus Torvalds 1495377f0a08SRami Rosen static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 14961da177e4SLinus Torvalds { 14971da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 14981da177e4SLinus Torvalds 14991da177e4SLinus Torvalds /* No URG data to read. */ 15001da177e4SLinus Torvalds if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 15011da177e4SLinus Torvalds tp->urg_data == TCP_URG_READ) 15021da177e4SLinus Torvalds return -EINVAL; /* Yes this is right ! 
*/ 15031da177e4SLinus Torvalds 15041da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 15051da177e4SLinus Torvalds return -ENOTCONN; 15061da177e4SLinus Torvalds 15071da177e4SLinus Torvalds if (tp->urg_data & TCP_URG_VALID) { 15081da177e4SLinus Torvalds int err = 0; 15091da177e4SLinus Torvalds char c = tp->urg_data; 15101da177e4SLinus Torvalds 15111da177e4SLinus Torvalds if (!(flags & MSG_PEEK)) 15127b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, TCP_URG_READ); 15131da177e4SLinus Torvalds 15141da177e4SLinus Torvalds /* Read urgent data. */ 15151da177e4SLinus Torvalds msg->msg_flags |= MSG_OOB; 15161da177e4SLinus Torvalds 15171da177e4SLinus Torvalds if (len > 0) { 15181da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) 15197eab8d9eSAl Viro err = memcpy_to_msg(msg, &c, 1); 15201da177e4SLinus Torvalds len = 1; 15211da177e4SLinus Torvalds } else 15221da177e4SLinus Torvalds msg->msg_flags |= MSG_TRUNC; 15231da177e4SLinus Torvalds 15241da177e4SLinus Torvalds return err ? -EFAULT : len; 15251da177e4SLinus Torvalds } 15261da177e4SLinus Torvalds 15271da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 15281da177e4SLinus Torvalds return 0; 15291da177e4SLinus Torvalds 15301da177e4SLinus Torvalds /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and 15311da177e4SLinus Torvalds * the available implementations agree in this case: 15321da177e4SLinus Torvalds * this call should never block, independent of the 15331da177e4SLinus Torvalds * blocking state of the socket. 15341da177e4SLinus Torvalds * Mike <pall@rz.uni-karlsruhe.de> 15351da177e4SLinus Torvalds */ 15361da177e4SLinus Torvalds return -EAGAIN; 15371da177e4SLinus Torvalds } 15381da177e4SLinus Torvalds 1539c0e88ff0SPavel Emelyanov static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1540c0e88ff0SPavel Emelyanov { 1541c0e88ff0SPavel Emelyanov struct sk_buff *skb; 1542c0e88ff0SPavel Emelyanov int copied = 0, err = 0; 1543c0e88ff0SPavel Emelyanov 1544c0e88ff0SPavel Emelyanov /* XXX -- need to support SO_PEEK_OFF */ 1545c0e88ff0SPavel Emelyanov 154675c119afSEric Dumazet skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 154775c119afSEric Dumazet err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 154875c119afSEric Dumazet if (err) 154975c119afSEric Dumazet return err; 155075c119afSEric Dumazet copied += skb->len; 155175c119afSEric Dumazet } 155275c119afSEric Dumazet 1553c0e88ff0SPavel Emelyanov skb_queue_walk(&sk->sk_write_queue, skb) { 155451f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1555c0e88ff0SPavel Emelyanov if (err) 1556c0e88ff0SPavel Emelyanov break; 1557c0e88ff0SPavel Emelyanov 1558c0e88ff0SPavel Emelyanov copied += skb->len; 1559c0e88ff0SPavel Emelyanov } 1560c0e88ff0SPavel Emelyanov 1561c0e88ff0SPavel Emelyanov return err ?: copied; 1562c0e88ff0SPavel Emelyanov } 1563c0e88ff0SPavel Emelyanov 15641da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user, 15651da177e4SLinus Torvalds * then send an ACK if necessary. COPIED is the number of bytes 15661da177e4SLinus Torvalds * tcp_recvmsg has given to the user so far, it speeds up the 15671da177e4SLinus Torvalds * calculation of whether or not we must ACK for the sake of 15681da177e4SLinus Torvalds * a window update. 
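 * Besides window updates, a delayed ACK left pending by the input
 * path may also be sent from here.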
15691da177e4SLinus Torvalds */ 1570c457985aSCong Wang static void __tcp_cleanup_rbuf(struct sock *sk, int copied) 15711da177e4SLinus Torvalds { 15721da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1573a2a385d6SEric Dumazet bool time_to_ack = false; 15741da177e4SLinus Torvalds 1575463c84b9SArnaldo Carvalho de Melo if (inet_csk_ack_scheduled(sk)) { 1576463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1577b6b6d653SEric Dumazet 1578b6b6d653SEric Dumazet if (/* Once-per-two-segments ACK was not sent by tcp_input.c */ 1579463c84b9SArnaldo Carvalho de Melo tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 15801da177e4SLinus Torvalds /* 15811da177e4SLinus Torvalds * If this read emptied read buffer, we send ACK, if 15821da177e4SLinus Torvalds * connection is not bidirectional, user drained 15831da177e4SLinus Torvalds * receive buffer and there was a small segment 15841da177e4SLinus Torvalds * in queue. 15851da177e4SLinus Torvalds */ 15861ef9696cSAlexey Kuznetsov (copied > 0 && 15871ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 15881ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 158931954cd8SWei Wang !inet_csk_in_pingpong_mode(sk))) && 15901ef9696cSAlexey Kuznetsov !atomic_read(&sk->sk_rmem_alloc))) 1591a2a385d6SEric Dumazet time_to_ack = true; 15921da177e4SLinus Torvalds } 15931da177e4SLinus Torvalds 15941da177e4SLinus Torvalds /* We send an ACK if we can now advertise a non-zero window 15951da177e4SLinus Torvalds * which has been raised "significantly". 15961da177e4SLinus Torvalds * 15971da177e4SLinus Torvalds * Even if window raised up to infinity, do not send window open ACK 15981da177e4SLinus Torvalds * in states, where we will not receive more. It is useless. 15991da177e4SLinus Torvalds */ 16001da177e4SLinus Torvalds if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 16011da177e4SLinus Torvalds __u32 rcv_window_now = tcp_receive_window(tp); 16021da177e4SLinus Torvalds 16031da177e4SLinus Torvalds /* Optimize, __tcp_select_window() is not cheap. */ 16041da177e4SLinus Torvalds if (2*rcv_window_now <= tp->window_clamp) { 16051da177e4SLinus Torvalds __u32 new_window = __tcp_select_window(sk); 16061da177e4SLinus Torvalds 16071da177e4SLinus Torvalds /* Send ACK now, if this read freed lots of space 16081da177e4SLinus Torvalds * in our buffer. Certainly, new_window is new window. 16091da177e4SLinus Torvalds * We can advertise it now, if it is not less than current one. 16101da177e4SLinus Torvalds * "Lots" means "at least twice" here. 
16111da177e4SLinus Torvalds */ 16121da177e4SLinus Torvalds if (new_window && new_window >= 2 * rcv_window_now) 1613a2a385d6SEric Dumazet time_to_ack = true; 16141da177e4SLinus Torvalds } 16151da177e4SLinus Torvalds } 16161da177e4SLinus Torvalds if (time_to_ack) 16171da177e4SLinus Torvalds tcp_send_ack(sk); 16181da177e4SLinus Torvalds } 16191da177e4SLinus Torvalds 1620c457985aSCong Wang void tcp_cleanup_rbuf(struct sock *sk, int copied) 1621c457985aSCong Wang { 1622c457985aSCong Wang struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1623c457985aSCong Wang struct tcp_sock *tp = tcp_sk(sk); 1624c457985aSCong Wang 1625c457985aSCong Wang WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1626c457985aSCong Wang "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1627c457985aSCong Wang tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1628c457985aSCong Wang __tcp_cleanup_rbuf(sk, copied); 1629c457985aSCong Wang } 1630c457985aSCong Wang 16313df684c1SEric Dumazet static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) 16323df684c1SEric Dumazet { 1633f35f8219SEric Dumazet __skb_unlink(skb, &sk->sk_receive_queue); 16343df684c1SEric Dumazet if (likely(skb->destructor == sock_rfree)) { 16353df684c1SEric Dumazet sock_rfree(skb); 16363df684c1SEric Dumazet skb->destructor = NULL; 16373df684c1SEric Dumazet skb->sk = NULL; 163868822bdfSEric Dumazet return skb_attempt_defer_free(skb); 1639f35f8219SEric Dumazet } 1640f35f8219SEric Dumazet __kfree_skb(skb); 16413df684c1SEric Dumazet } 16423df684c1SEric Dumazet 16433f92a64eSJakub Kicinski struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 16441da177e4SLinus Torvalds { 16451da177e4SLinus Torvalds struct sk_buff *skb; 16461da177e4SLinus Torvalds u32 offset; 16471da177e4SLinus Torvalds 1648f26845b4SEric Dumazet while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 16491da177e4SLinus Torvalds offset = seq - TCP_SKB_CB(skb)->seq; 16509d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 16519d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__); 16521da177e4SLinus Torvalds offset--; 16539d691539SEric Dumazet } 1654e11ecddfSEric Dumazet if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { 16551da177e4SLinus Torvalds *off = offset; 16561da177e4SLinus Torvalds return skb; 16571da177e4SLinus Torvalds } 1658f26845b4SEric Dumazet /* This looks weird, but this can happen if TCP collapsing 1659f26845b4SEric Dumazet * splitted a fat GRO packet, while we released socket lock 1660f26845b4SEric Dumazet * in skb_splice_bits() 1661f26845b4SEric Dumazet */ 16623df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 16631da177e4SLinus Torvalds } 16641da177e4SLinus Torvalds return NULL; 16651da177e4SLinus Torvalds } 16663f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_recv_skb); 16671da177e4SLinus Torvalds 16681da177e4SLinus Torvalds /* 16691da177e4SLinus Torvalds * This routine provides an alternative to tcp_recvmsg() for routines 16701da177e4SLinus Torvalds * that would like to handle copying from skbuffs directly in 'sendfile' 16711da177e4SLinus Torvalds * fashion. 16721da177e4SLinus Torvalds * Note: 16731da177e4SLinus Torvalds * - It is assumed that the socket was locked by the caller. 16741da177e4SLinus Torvalds * - The routine does not block. 16751da177e4SLinus Torvalds * - At present, there is no support for reading OOB data 16761da177e4SLinus Torvalds * or for 'peeking' the socket using this routine 16771da177e4SLinus Torvalds * (although both would be easy to implement). 
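 *	  - Typical usage, as a sketch only: the names below (my_recv_actor,
 *	    priv, budget, ret) are hypothetical and shown purely for
 *	    illustration; tcp_splice_read() is an in-tree caller that
 *	    follows this pattern.
 *
 *		static int my_recv_actor(read_descriptor_t *desc,
 *					 struct sk_buff *skb,
 *					 unsigned int offset, size_t len)
 *		{
 *			size_t used = min(len, desc->count);
 *
 *			consume 'used' bytes of skb starting at 'offset';
 *			desc->count -= used;
 *			return used;	(return <= 0 to stop the loop)
 *		}
 *
 *		read_descriptor_t rd_desc = { .arg.data = priv, .count = budget };
 *		lock_sock(sk);
 *		ret = tcp_read_sock(sk, &rd_desc, my_recv_actor);
 *		release_sock(sk);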
16781da177e4SLinus Torvalds */ 16791da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 16801da177e4SLinus Torvalds sk_read_actor_t recv_actor) 16811da177e4SLinus Torvalds { 16821da177e4SLinus Torvalds struct sk_buff *skb; 16831da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 16841da177e4SLinus Torvalds u32 seq = tp->copied_seq; 16851da177e4SLinus Torvalds u32 offset; 16861da177e4SLinus Torvalds int copied = 0; 16871da177e4SLinus Torvalds 16881da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) 16891da177e4SLinus Torvalds return -ENOTCONN; 16901da177e4SLinus Torvalds while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 16911da177e4SLinus Torvalds if (offset < skb->len) { 1692374e7b59SOctavian Purdila int used; 1693374e7b59SOctavian Purdila size_t len; 16941da177e4SLinus Torvalds 16951da177e4SLinus Torvalds len = skb->len - offset; 16961da177e4SLinus Torvalds /* Stop reading if we hit a patch of urgent data */ 1697b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) { 16981da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - seq; 16991da177e4SLinus Torvalds if (urg_offset < len) 17001da177e4SLinus Torvalds len = urg_offset; 17011da177e4SLinus Torvalds if (!len) 17021da177e4SLinus Torvalds break; 17031da177e4SLinus Torvalds } 17041da177e4SLinus Torvalds used = recv_actor(desc, skb, offset, len); 1705ff905b1eSEric Dumazet if (used <= 0) { 1706ddb61a57SJens Axboe if (!copied) 1707ddb61a57SJens Axboe copied = used; 1708ddb61a57SJens Axboe break; 1709e3d5ea2cSEric Dumazet } 1710e3d5ea2cSEric Dumazet if (WARN_ON_ONCE(used > len)) 1711e3d5ea2cSEric Dumazet used = len; 17121da177e4SLinus Torvalds seq += used; 17131da177e4SLinus Torvalds copied += used; 17141da177e4SLinus Torvalds offset += used; 1715e3d5ea2cSEric Dumazet 171602275a2eSWilly Tarreau /* If recv_actor drops the lock (e.g. TCP splice 1717293ad604SOctavian Purdila * receive) the skb pointer might be invalid when 1718293ad604SOctavian Purdila * getting here: tcp_collapse might have deleted it 1719293ad604SOctavian Purdila * while aggregating skbs from the socket queue. 1720293ad604SOctavian Purdila */ 1721293ad604SOctavian Purdila skb = tcp_recv_skb(sk, seq - 1, &offset); 172202275a2eSWilly Tarreau if (!skb) 17231da177e4SLinus Torvalds break; 172402275a2eSWilly Tarreau /* TCP coalescing might have appended data to the skb. 172502275a2eSWilly Tarreau * Try to splice more frags 172602275a2eSWilly Tarreau */ 172702275a2eSWilly Tarreau if (offset + 1 != skb->len) 172802275a2eSWilly Tarreau continue; 17291da177e4SLinus Torvalds } 1730e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 17313df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 17321da177e4SLinus Torvalds ++seq; 17331da177e4SLinus Torvalds break; 17341da177e4SLinus Torvalds } 17353df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 17361da177e4SLinus Torvalds if (!desc->count) 17371da177e4SLinus Torvalds break; 17387db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 17391da177e4SLinus Torvalds } 17407db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 17411da177e4SLinus Torvalds 17421da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 17431da177e4SLinus Torvalds 17441da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. 
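 * tcp_cleanup_rbuf() sends one if an ACK is due, e.g. a window update
 * after the read freed receive space.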
*/ 1745f26845b4SEric Dumazet if (copied > 0) { 1746f26845b4SEric Dumazet tcp_recv_skb(sk, seq, &offset); 17470e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied); 1748f26845b4SEric Dumazet } 17491da177e4SLinus Torvalds return copied; 17501da177e4SLinus Torvalds } 17514bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_read_sock); 17521da177e4SLinus Torvalds 1753965b57b4SCong Wang int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 175404919bedSCong Wang { 175504919bedSCong Wang struct tcp_sock *tp = tcp_sk(sk); 175604919bedSCong Wang u32 seq = tp->copied_seq; 175704919bedSCong Wang struct sk_buff *skb; 175804919bedSCong Wang int copied = 0; 175904919bedSCong Wang u32 offset; 176004919bedSCong Wang 176104919bedSCong Wang if (sk->sk_state == TCP_LISTEN) 176204919bedSCong Wang return -ENOTCONN; 176304919bedSCong Wang 1764a8688821SCong Wang skb = tcp_recv_skb(sk, seq, &offset); 1765a8688821SCong Wang if (!skb) 1766a8688821SCong Wang return 0; 176704919bedSCong Wang 176804919bedSCong Wang __skb_unlink(skb, &sk->sk_receive_queue); 1769e9c6e797SCong Wang WARN_ON(!skb_set_owner_sk_safe(skb, sk)); 1770a8688821SCong Wang copied = recv_actor(sk, skb); 1771*2e23acd9SCong Wang if (copied >= 0) { 1772a8688821SCong Wang seq += copied; 1773a8688821SCong Wang if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 177404919bedSCong Wang ++seq; 177504919bedSCong Wang } 1776c457985aSCong Wang consume_skb(skb); 177704919bedSCong Wang WRITE_ONCE(tp->copied_seq, seq); 177804919bedSCong Wang 177904919bedSCong Wang tcp_rcv_space_adjust(sk); 178004919bedSCong Wang 178104919bedSCong Wang /* Clean up data we have read: This will do ACK frames. */ 178204919bedSCong Wang if (copied > 0) 1783c457985aSCong Wang __tcp_cleanup_rbuf(sk, copied); 178404919bedSCong Wang 178504919bedSCong Wang return copied; 178604919bedSCong Wang } 178704919bedSCong Wang EXPORT_SYMBOL(tcp_read_skb); 178804919bedSCong Wang 17893f92a64eSJakub Kicinski void tcp_read_done(struct sock *sk, size_t len) 17903f92a64eSJakub Kicinski { 17913f92a64eSJakub Kicinski struct tcp_sock *tp = tcp_sk(sk); 17923f92a64eSJakub Kicinski u32 seq = tp->copied_seq; 17933f92a64eSJakub Kicinski struct sk_buff *skb; 17943f92a64eSJakub Kicinski size_t left; 17953f92a64eSJakub Kicinski u32 offset; 17963f92a64eSJakub Kicinski 17973f92a64eSJakub Kicinski if (sk->sk_state == TCP_LISTEN) 17983f92a64eSJakub Kicinski return; 17993f92a64eSJakub Kicinski 18003f92a64eSJakub Kicinski left = len; 18013f92a64eSJakub Kicinski while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 18023f92a64eSJakub Kicinski int used; 18033f92a64eSJakub Kicinski 18043f92a64eSJakub Kicinski used = min_t(size_t, skb->len - offset, left); 18053f92a64eSJakub Kicinski seq += used; 18063f92a64eSJakub Kicinski left -= used; 18073f92a64eSJakub Kicinski 18083f92a64eSJakub Kicinski if (skb->len > offset + used) 18093f92a64eSJakub Kicinski break; 18103f92a64eSJakub Kicinski 18113f92a64eSJakub Kicinski if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 18123f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb); 18133f92a64eSJakub Kicinski ++seq; 18143f92a64eSJakub Kicinski break; 18153f92a64eSJakub Kicinski } 18163f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb); 18173f92a64eSJakub Kicinski } 18183f92a64eSJakub Kicinski WRITE_ONCE(tp->copied_seq, seq); 18193f92a64eSJakub Kicinski 18203f92a64eSJakub Kicinski tcp_rcv_space_adjust(sk); 18213f92a64eSJakub Kicinski 18223f92a64eSJakub Kicinski /* Clean up data we have read: This will do ACK frames. 
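 * Only the bytes actually consumed (len - left) are reported to
 * tcp_cleanup_rbuf().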
*/ 18233f92a64eSJakub Kicinski if (left != len) 18243f92a64eSJakub Kicinski tcp_cleanup_rbuf(sk, len - left); 18253f92a64eSJakub Kicinski } 18263f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_read_done); 18273f92a64eSJakub Kicinski 182832035585STom Herbert int tcp_peek_len(struct socket *sock) 182932035585STom Herbert { 183032035585STom Herbert return tcp_inq(sock->sk); 183132035585STom Herbert } 183232035585STom Herbert EXPORT_SYMBOL(tcp_peek_len); 183332035585STom Herbert 1834d1361840SEric Dumazet /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ 1835d1361840SEric Dumazet int tcp_set_rcvlowat(struct sock *sk, int val) 1836d1361840SEric Dumazet { 1837867f816bSSoheil Hassas Yeganeh int cap; 1838867f816bSSoheil Hassas Yeganeh 1839867f816bSSoheil Hassas Yeganeh if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1840867f816bSSoheil Hassas Yeganeh cap = sk->sk_rcvbuf >> 1; 1841867f816bSSoheil Hassas Yeganeh else 184202739545SKuniyuki Iwashima cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; 1843867f816bSSoheil Hassas Yeganeh val = min(val, cap); 1844eac66402SEric Dumazet WRITE_ONCE(sk->sk_rcvlowat, val ? : 1); 184503f45c88SEric Dumazet 184603f45c88SEric Dumazet /* Check if we need to signal EPOLLIN right now */ 184703f45c88SEric Dumazet tcp_data_ready(sk); 184803f45c88SEric Dumazet 1849d1361840SEric Dumazet if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1850d1361840SEric Dumazet return 0; 1851d1361840SEric Dumazet 1852d1361840SEric Dumazet val <<= 1; 1853d1361840SEric Dumazet if (val > sk->sk_rcvbuf) { 1854ebb3b78dSEric Dumazet WRITE_ONCE(sk->sk_rcvbuf, val); 1855d1361840SEric Dumazet tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val); 1856d1361840SEric Dumazet } 1857d1361840SEric Dumazet return 0; 1858d1361840SEric Dumazet } 1859d1361840SEric Dumazet EXPORT_SYMBOL(tcp_set_rcvlowat); 1860d1361840SEric Dumazet 1861892bfd3dSFlorian Westphal void tcp_update_recv_tstamps(struct sk_buff *skb, 18627eeba170SArjun Roy struct scm_timestamping_internal *tss) 18637eeba170SArjun Roy { 18647eeba170SArjun Roy if (skb->tstamp) 18657eeba170SArjun Roy tss->ts[0] = ktime_to_timespec64(skb->tstamp); 18667eeba170SArjun Roy else 18677eeba170SArjun Roy tss->ts[0] = (struct timespec64) {0}; 18687eeba170SArjun Roy 18697eeba170SArjun Roy if (skb_hwtstamps(skb)->hwtstamp) 18707eeba170SArjun Roy tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp); 18717eeba170SArjun Roy else 18727eeba170SArjun Roy tss->ts[2] = (struct timespec64) {0}; 18737eeba170SArjun Roy } 18747eeba170SArjun Roy 187505255b82SEric Dumazet #ifdef CONFIG_MMU 187605255b82SEric Dumazet static const struct vm_operations_struct tcp_vm_ops = { 187705255b82SEric Dumazet }; 187805255b82SEric Dumazet 187993ab6cc6SEric Dumazet int tcp_mmap(struct file *file, struct socket *sock, 188093ab6cc6SEric Dumazet struct vm_area_struct *vma) 188193ab6cc6SEric Dumazet { 188205255b82SEric Dumazet if (vma->vm_flags & (VM_WRITE | VM_EXEC)) 188305255b82SEric Dumazet return -EPERM; 188405255b82SEric Dumazet vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC); 188505255b82SEric Dumazet 18863e4e28c5SMichel Lespinasse /* Instruct vm_insert_page() to not mmap_read_lock(mm) */ 188705255b82SEric Dumazet vma->vm_flags |= VM_MIXEDMAP; 188805255b82SEric Dumazet 188905255b82SEric Dumazet vma->vm_ops = &tcp_vm_ops; 189005255b82SEric Dumazet return 0; 189105255b82SEric Dumazet } 189205255b82SEric Dumazet EXPORT_SYMBOL(tcp_mmap); 189305255b82SEric Dumazet 18947fba5309SArjun Roy static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, 18957fba5309SArjun Roy 
u32 *offset_frag) 18967fba5309SArjun Roy { 18977fba5309SArjun Roy skb_frag_t *frag; 18987fba5309SArjun Roy 189970701b83SArjun Roy if (unlikely(offset_skb >= skb->len)) 190070701b83SArjun Roy return NULL; 190170701b83SArjun Roy 19027fba5309SArjun Roy offset_skb -= skb_headlen(skb); 19037fba5309SArjun Roy if ((int)offset_skb < 0 || skb_has_frag_list(skb)) 19047fba5309SArjun Roy return NULL; 19057fba5309SArjun Roy 19067fba5309SArjun Roy frag = skb_shinfo(skb)->frags; 19077fba5309SArjun Roy while (offset_skb) { 19087fba5309SArjun Roy if (skb_frag_size(frag) > offset_skb) { 19097fba5309SArjun Roy *offset_frag = offset_skb; 19107fba5309SArjun Roy return frag; 19117fba5309SArjun Roy } 19127fba5309SArjun Roy offset_skb -= skb_frag_size(frag); 19137fba5309SArjun Roy ++frag; 19147fba5309SArjun Roy } 19157fba5309SArjun Roy *offset_frag = 0; 19167fba5309SArjun Roy return frag; 19177fba5309SArjun Roy } 19187fba5309SArjun Roy 191998917cf0SArjun Roy static bool can_map_frag(const skb_frag_t *frag) 192098917cf0SArjun Roy { 192198917cf0SArjun Roy return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag); 192298917cf0SArjun Roy } 192398917cf0SArjun Roy 192498917cf0SArjun Roy static int find_next_mappable_frag(const skb_frag_t *frag, 192598917cf0SArjun Roy int remaining_in_skb) 192698917cf0SArjun Roy { 192798917cf0SArjun Roy int offset = 0; 192898917cf0SArjun Roy 192998917cf0SArjun Roy if (likely(can_map_frag(frag))) 193098917cf0SArjun Roy return 0; 193198917cf0SArjun Roy 193298917cf0SArjun Roy while (offset < remaining_in_skb && !can_map_frag(frag)) { 193398917cf0SArjun Roy offset += skb_frag_size(frag); 193498917cf0SArjun Roy ++frag; 193598917cf0SArjun Roy } 193698917cf0SArjun Roy return offset; 193798917cf0SArjun Roy } 193898917cf0SArjun Roy 19390c3936d3SArjun Roy static void tcp_zerocopy_set_hint_for_skb(struct sock *sk, 19400c3936d3SArjun Roy struct tcp_zerocopy_receive *zc, 19410c3936d3SArjun Roy struct sk_buff *skb, u32 offset) 19420c3936d3SArjun Roy { 19430c3936d3SArjun Roy u32 frag_offset, partial_frag_remainder = 0; 19440c3936d3SArjun Roy int mappable_offset; 19450c3936d3SArjun Roy skb_frag_t *frag; 19460c3936d3SArjun Roy 19470c3936d3SArjun Roy /* worst case: skip to next skb. try to improve on this case below */ 19480c3936d3SArjun Roy zc->recv_skip_hint = skb->len - offset; 19490c3936d3SArjun Roy 19500c3936d3SArjun Roy /* Find the frag containing this offset (and how far into that frag) */ 19510c3936d3SArjun Roy frag = skb_advance_to_frag(skb, offset, &frag_offset); 19520c3936d3SArjun Roy if (!frag) 19530c3936d3SArjun Roy return; 19540c3936d3SArjun Roy 19550c3936d3SArjun Roy if (frag_offset) { 19560c3936d3SArjun Roy struct skb_shared_info *info = skb_shinfo(skb); 19570c3936d3SArjun Roy 19580c3936d3SArjun Roy /* We read part of the last frag, must recvmsg() rest of skb. */ 19590c3936d3SArjun Roy if (frag == &info->frags[info->nr_frags - 1]) 19600c3936d3SArjun Roy return; 19610c3936d3SArjun Roy 19620c3936d3SArjun Roy /* Else, we must at least read the remainder in this frag. */ 19630c3936d3SArjun Roy partial_frag_remainder = skb_frag_size(frag) - frag_offset; 19640c3936d3SArjun Roy zc->recv_skip_hint -= partial_frag_remainder; 19650c3936d3SArjun Roy ++frag; 19660c3936d3SArjun Roy } 19670c3936d3SArjun Roy 19680c3936d3SArjun Roy /* partial_frag_remainder: If part way through a frag, must read rest. 19690c3936d3SArjun Roy * mappable_offset: Bytes till next mappable frag, *not* counting bytes 19700c3936d3SArjun Roy * in partial_frag_remainder. 
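 * Their sum is what ends up in zc->recv_skip_hint below.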
19710c3936d3SArjun Roy */ 19720c3936d3SArjun Roy mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint); 19730c3936d3SArjun Roy zc->recv_skip_hint = mappable_offset + partial_frag_remainder; 19740c3936d3SArjun Roy } 19750c3936d3SArjun Roy 1976f21a3c48SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 1977ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss, 1978f21a3c48SArjun Roy int *cmsg_flags); 1979f21a3c48SArjun Roy static int receive_fallback_to_copy(struct sock *sk, 19807eeba170SArjun Roy struct tcp_zerocopy_receive *zc, int inq, 19817eeba170SArjun Roy struct scm_timestamping_internal *tss) 1982f21a3c48SArjun Roy { 1983f21a3c48SArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address; 1984f21a3c48SArjun Roy struct msghdr msg = {}; 1985f21a3c48SArjun Roy struct iovec iov; 19867eeba170SArjun Roy int err; 1987f21a3c48SArjun Roy 1988f21a3c48SArjun Roy zc->length = 0; 1989f21a3c48SArjun Roy zc->recv_skip_hint = 0; 1990f21a3c48SArjun Roy 1991f21a3c48SArjun Roy if (copy_address != zc->copybuf_address) 1992f21a3c48SArjun Roy return -EINVAL; 1993f21a3c48SArjun Roy 1994f21a3c48SArjun Roy err = import_single_range(READ, (void __user *)copy_address, 1995f21a3c48SArjun Roy inq, &iov, &msg.msg_iter); 1996f21a3c48SArjun Roy if (err) 1997f21a3c48SArjun Roy return err; 1998f21a3c48SArjun Roy 1999ec095263SOliver Hartkopp err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT, 20007eeba170SArjun Roy tss, &zc->msg_flags); 2001f21a3c48SArjun Roy if (err < 0) 2002f21a3c48SArjun Roy return err; 2003f21a3c48SArjun Roy 2004f21a3c48SArjun Roy zc->copybuf_len = err; 20050c3936d3SArjun Roy if (likely(zc->copybuf_len)) { 20060c3936d3SArjun Roy struct sk_buff *skb; 20070c3936d3SArjun Roy u32 offset; 20080c3936d3SArjun Roy 20090c3936d3SArjun Roy skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset); 20100c3936d3SArjun Roy if (skb) 20110c3936d3SArjun Roy tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset); 20120c3936d3SArjun Roy } 2013f21a3c48SArjun Roy return 0; 2014f21a3c48SArjun Roy } 2015f21a3c48SArjun Roy 201618fb76edSArjun Roy static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc, 201718fb76edSArjun Roy struct sk_buff *skb, u32 copylen, 201818fb76edSArjun Roy u32 *offset, u32 *seq) 201918fb76edSArjun Roy { 202018fb76edSArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address; 202118fb76edSArjun Roy struct msghdr msg = {}; 202218fb76edSArjun Roy struct iovec iov; 202318fb76edSArjun Roy int err; 202418fb76edSArjun Roy 202518fb76edSArjun Roy if (copy_address != zc->copybuf_address) 202618fb76edSArjun Roy return -EINVAL; 202718fb76edSArjun Roy 202818fb76edSArjun Roy err = import_single_range(READ, (void __user *)copy_address, 202918fb76edSArjun Roy copylen, &iov, &msg.msg_iter); 203018fb76edSArjun Roy if (err) 203118fb76edSArjun Roy return err; 203218fb76edSArjun Roy err = skb_copy_datagram_msg(skb, *offset, &msg, copylen); 203318fb76edSArjun Roy if (err) 203418fb76edSArjun Roy return err; 203518fb76edSArjun Roy zc->recv_skip_hint -= copylen; 203618fb76edSArjun Roy *offset += copylen; 203718fb76edSArjun Roy *seq += copylen; 203818fb76edSArjun Roy return (__s32)copylen; 203918fb76edSArjun Roy } 204018fb76edSArjun Roy 20417eeba170SArjun Roy static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc, 204218fb76edSArjun Roy struct sock *sk, 204318fb76edSArjun Roy struct sk_buff *skb, 204418fb76edSArjun Roy u32 *seq, 20457eeba170SArjun Roy s32 copybuf_len, 20467eeba170SArjun Roy struct 
scm_timestamping_internal *tss) 204718fb76edSArjun Roy { 204818fb76edSArjun Roy u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint); 204918fb76edSArjun Roy 205018fb76edSArjun Roy if (!copylen) 205118fb76edSArjun Roy return 0; 205218fb76edSArjun Roy /* skb is null if inq < PAGE_SIZE. */ 20537eeba170SArjun Roy if (skb) { 205418fb76edSArjun Roy offset = *seq - TCP_SKB_CB(skb)->seq; 20557eeba170SArjun Roy } else { 205618fb76edSArjun Roy skb = tcp_recv_skb(sk, *seq, &offset); 20577eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) { 20587eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss); 20597eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS; 20607eeba170SArjun Roy } 20617eeba170SArjun Roy } 206218fb76edSArjun Roy 206318fb76edSArjun Roy zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset, 206418fb76edSArjun Roy seq); 206518fb76edSArjun Roy return zc->copybuf_len < 0 ? 0 : copylen; 206618fb76edSArjun Roy } 206718fb76edSArjun Roy 206894ab9eb9SArjun Roy static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma, 206994ab9eb9SArjun Roy struct page **pending_pages, 207094ab9eb9SArjun Roy unsigned long pages_remaining, 207194ab9eb9SArjun Roy unsigned long *address, 207294ab9eb9SArjun Roy u32 *length, 207394ab9eb9SArjun Roy u32 *seq, 207494ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc, 207594ab9eb9SArjun Roy u32 total_bytes_to_map, 207694ab9eb9SArjun Roy int err) 207794ab9eb9SArjun Roy { 207894ab9eb9SArjun Roy /* At least one page did not map. Try zapping if we skipped earlier. */ 207994ab9eb9SArjun Roy if (err == -EBUSY && 208094ab9eb9SArjun Roy zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { 208194ab9eb9SArjun Roy u32 maybe_zap_len; 208294ab9eb9SArjun Roy 208394ab9eb9SArjun Roy maybe_zap_len = total_bytes_to_map - /* All bytes to map */ 208494ab9eb9SArjun Roy *length + /* Mapped or pending */ 208594ab9eb9SArjun Roy (pages_remaining * PAGE_SIZE); /* Failed map. */ 208694ab9eb9SArjun Roy zap_page_range(vma, *address, maybe_zap_len); 208794ab9eb9SArjun Roy err = 0; 208894ab9eb9SArjun Roy } 208994ab9eb9SArjun Roy 209094ab9eb9SArjun Roy if (!err) { 209194ab9eb9SArjun Roy unsigned long leftover_pages = pages_remaining; 209294ab9eb9SArjun Roy int bytes_mapped; 209394ab9eb9SArjun Roy 209494ab9eb9SArjun Roy /* We called zap_page_range, try to reinsert. */ 209594ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, 209694ab9eb9SArjun Roy pending_pages, 209794ab9eb9SArjun Roy &pages_remaining); 209894ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); 209994ab9eb9SArjun Roy *seq += bytes_mapped; 210094ab9eb9SArjun Roy *address += bytes_mapped; 210194ab9eb9SArjun Roy } 210294ab9eb9SArjun Roy if (err) { 210394ab9eb9SArjun Roy /* Either we were unable to zap, OR we zapped, retried an 210494ab9eb9SArjun Roy * insert, and still had an issue. Either ways, pages_remaining 210594ab9eb9SArjun Roy * is the number of pages we were unable to map, and we unroll 210694ab9eb9SArjun Roy * some state we speculatively touched before. 
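 * The bytes that were not mapped are added back to recv_skip_hint so
 * user space can fall back to a normal recvmsg() for them.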
210794ab9eb9SArjun Roy */ 210894ab9eb9SArjun Roy const int bytes_not_mapped = PAGE_SIZE * pages_remaining; 210994ab9eb9SArjun Roy 211094ab9eb9SArjun Roy *length -= bytes_not_mapped; 211194ab9eb9SArjun Roy zc->recv_skip_hint += bytes_not_mapped; 211294ab9eb9SArjun Roy } 211394ab9eb9SArjun Roy return err; 211494ab9eb9SArjun Roy } 211594ab9eb9SArjun Roy 21163763a24cSArjun Roy static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, 21173763a24cSArjun Roy struct page **pages, 211894ab9eb9SArjun Roy unsigned int pages_to_map, 211994ab9eb9SArjun Roy unsigned long *address, 212094ab9eb9SArjun Roy u32 *length, 21213763a24cSArjun Roy u32 *seq, 212294ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc, 212394ab9eb9SArjun Roy u32 total_bytes_to_map) 21243763a24cSArjun Roy { 21253763a24cSArjun Roy unsigned long pages_remaining = pages_to_map; 212694ab9eb9SArjun Roy unsigned int pages_mapped; 212794ab9eb9SArjun Roy unsigned int bytes_mapped; 212894ab9eb9SArjun Roy int err; 21293763a24cSArjun Roy 213094ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, pages, &pages_remaining); 213194ab9eb9SArjun Roy pages_mapped = pages_to_map - (unsigned int)pages_remaining; 213294ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * pages_mapped; 21333763a24cSArjun Roy /* Even if vm_insert_pages fails, it may have partially succeeded in 21343763a24cSArjun Roy * mapping (some but not all of the pages). 21353763a24cSArjun Roy */ 21363763a24cSArjun Roy *seq += bytes_mapped; 213794ab9eb9SArjun Roy *address += bytes_mapped; 213894ab9eb9SArjun Roy 213994ab9eb9SArjun Roy if (likely(!err)) 214094ab9eb9SArjun Roy return 0; 214194ab9eb9SArjun Roy 214294ab9eb9SArjun Roy /* Error: maybe zap and retry + rollback state for failed inserts. */ 214394ab9eb9SArjun Roy return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped, 214494ab9eb9SArjun Roy pages_remaining, address, length, seq, zc, total_bytes_to_map, 214594ab9eb9SArjun Roy err); 21463763a24cSArjun Roy } 21473763a24cSArjun Roy 21483c5a2fd0SArjun Roy #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS) 21497eeba170SArjun Roy static void tcp_zc_finalize_rx_tstamp(struct sock *sk, 21507eeba170SArjun Roy struct tcp_zerocopy_receive *zc, 21517eeba170SArjun Roy struct scm_timestamping_internal *tss) 21527eeba170SArjun Roy { 21537eeba170SArjun Roy unsigned long msg_control_addr; 21547eeba170SArjun Roy struct msghdr cmsg_dummy; 21557eeba170SArjun Roy 21567eeba170SArjun Roy msg_control_addr = (unsigned long)zc->msg_control; 21577eeba170SArjun Roy cmsg_dummy.msg_control = (void *)msg_control_addr; 21587eeba170SArjun Roy cmsg_dummy.msg_controllen = 21597eeba170SArjun Roy (__kernel_size_t)zc->msg_controllen; 21607eeba170SArjun Roy cmsg_dummy.msg_flags = in_compat_syscall() 21617eeba170SArjun Roy ? 
MSG_CMSG_COMPAT : 0; 2162a6f8ee58SArjun Roy cmsg_dummy.msg_control_is_user = true; 21637eeba170SArjun Roy zc->msg_flags = 0; 21647eeba170SArjun Roy if (zc->msg_control == msg_control_addr && 21657eeba170SArjun Roy zc->msg_controllen == cmsg_dummy.msg_controllen) { 21667eeba170SArjun Roy tcp_recv_timestamp(&cmsg_dummy, sk, tss); 21677eeba170SArjun Roy zc->msg_control = (__u64) 21687eeba170SArjun Roy ((uintptr_t)cmsg_dummy.msg_control); 21697eeba170SArjun Roy zc->msg_controllen = 21707eeba170SArjun Roy (__u64)cmsg_dummy.msg_controllen; 21717eeba170SArjun Roy zc->msg_flags = (__u32)cmsg_dummy.msg_flags; 21727eeba170SArjun Roy } 21737eeba170SArjun Roy } 21747eeba170SArjun Roy 217594ab9eb9SArjun Roy #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 217605255b82SEric Dumazet static int tcp_zerocopy_receive(struct sock *sk, 21777eeba170SArjun Roy struct tcp_zerocopy_receive *zc, 21787eeba170SArjun Roy struct scm_timestamping_internal *tss) 217905255b82SEric Dumazet { 218094ab9eb9SArjun Roy u32 length = 0, offset, vma_len, avail_len, copylen = 0; 218105255b82SEric Dumazet unsigned long address = (unsigned long)zc->address; 218294ab9eb9SArjun Roy struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE]; 218318fb76edSArjun Roy s32 copybuf_len = zc->copybuf_len; 218418fb76edSArjun Roy struct tcp_sock *tp = tcp_sk(sk); 218505255b82SEric Dumazet const skb_frag_t *frags = NULL; 218694ab9eb9SArjun Roy unsigned int pages_to_map = 0; 218705255b82SEric Dumazet struct vm_area_struct *vma; 218805255b82SEric Dumazet struct sk_buff *skb = NULL; 218918fb76edSArjun Roy u32 seq = tp->copied_seq; 219094ab9eb9SArjun Roy u32 total_bytes_to_map; 219118fb76edSArjun Roy int inq = tcp_inq(sk); 219293ab6cc6SEric Dumazet int ret; 219393ab6cc6SEric Dumazet 219418fb76edSArjun Roy zc->copybuf_len = 0; 21957eeba170SArjun Roy zc->msg_flags = 0; 219618fb76edSArjun Roy 219705255b82SEric Dumazet if (address & (PAGE_SIZE - 1) || address != zc->address) 219893ab6cc6SEric Dumazet return -EINVAL; 219993ab6cc6SEric Dumazet 220093ab6cc6SEric Dumazet if (sk->sk_state == TCP_LISTEN) 220105255b82SEric Dumazet return -ENOTCONN; 220293ab6cc6SEric Dumazet 220393ab6cc6SEric Dumazet sock_rps_record_flow(sk); 220493ab6cc6SEric Dumazet 2205f21a3c48SArjun Roy if (inq && inq <= copybuf_len) 22067eeba170SArjun Roy return receive_fallback_to_copy(sk, zc, inq, tss); 2207f21a3c48SArjun Roy 2208936ced41SArjun Roy if (inq < PAGE_SIZE) { 2209936ced41SArjun Roy zc->length = 0; 2210936ced41SArjun Roy zc->recv_skip_hint = inq; 2211936ced41SArjun Roy if (!inq && sock_flag(sk, SOCK_DONE)) 2212936ced41SArjun Roy return -EIO; 2213936ced41SArjun Roy return 0; 2214936ced41SArjun Roy } 2215936ced41SArjun Roy 2216d8ed45c5SMichel Lespinasse mmap_read_lock(current->mm); 221793ab6cc6SEric Dumazet 221847bdd1dbSLiam Howlett vma = vma_lookup(current->mm, address); 221947bdd1dbSLiam Howlett if (!vma || vma->vm_ops != &tcp_vm_ops) { 2220d8ed45c5SMichel Lespinasse mmap_read_unlock(current->mm); 2221e776af60SEric Dumazet return -EINVAL; 2222e776af60SEric Dumazet } 222318fb76edSArjun Roy vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); 222418fb76edSArjun Roy avail_len = min_t(u32, vma_len, inq); 222594ab9eb9SArjun Roy total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); 222694ab9eb9SArjun Roy if (total_bytes_to_map) { 222794ab9eb9SArjun Roy if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) 222894ab9eb9SArjun Roy zap_page_range(vma, address, total_bytes_to_map); 222994ab9eb9SArjun Roy zc->length = total_bytes_to_map; 223005255b82SEric Dumazet zc->recv_skip_hint = 0; 
22318f2b0293SSoheil Hassas Yeganeh } else { 223218fb76edSArjun Roy zc->length = avail_len; 223318fb76edSArjun Roy zc->recv_skip_hint = avail_len; 22348f2b0293SSoheil Hassas Yeganeh } 223505255b82SEric Dumazet ret = 0; 223605255b82SEric Dumazet while (length + PAGE_SIZE <= zc->length) { 223798917cf0SArjun Roy int mappable_offset; 223894ab9eb9SArjun Roy struct page *page; 223998917cf0SArjun Roy 224005255b82SEric Dumazet if (zc->recv_skip_hint < PAGE_SIZE) { 22417fba5309SArjun Roy u32 offset_frag; 22427fba5309SArjun Roy 224305255b82SEric Dumazet if (skb) { 22440e627190SArjun Roy if (zc->recv_skip_hint > 0) 22450e627190SArjun Roy break; 224605255b82SEric Dumazet skb = skb->next; 224705255b82SEric Dumazet offset = seq - TCP_SKB_CB(skb)->seq; 224805255b82SEric Dumazet } else { 224993ab6cc6SEric Dumazet skb = tcp_recv_skb(sk, seq, &offset); 225005255b82SEric Dumazet } 22517eeba170SArjun Roy 22527eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) { 22537eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss); 22547eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS; 22557eeba170SArjun Roy } 225605255b82SEric Dumazet zc->recv_skip_hint = skb->len - offset; 22577fba5309SArjun Roy frags = skb_advance_to_frag(skb, offset, &offset_frag); 22587fba5309SArjun Roy if (!frags || offset_frag) 225905255b82SEric Dumazet break; 226005255b82SEric Dumazet } 2261789762ceSSoheil Hassas Yeganeh 226298917cf0SArjun Roy mappable_offset = find_next_mappable_frag(frags, 226398917cf0SArjun Roy zc->recv_skip_hint); 226498917cf0SArjun Roy if (mappable_offset) { 226598917cf0SArjun Roy zc->recv_skip_hint = mappable_offset; 226605255b82SEric Dumazet break; 2267789762ceSSoheil Hassas Yeganeh } 226894ab9eb9SArjun Roy page = skb_frag_page(frags); 226994ab9eb9SArjun Roy prefetchw(page); 227094ab9eb9SArjun Roy pages[pages_to_map++] = page; 227105255b82SEric Dumazet length += PAGE_SIZE; 227205255b82SEric Dumazet zc->recv_skip_hint -= PAGE_SIZE; 227305255b82SEric Dumazet frags++; 227494ab9eb9SArjun Roy if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE || 227594ab9eb9SArjun Roy zc->recv_skip_hint < PAGE_SIZE) { 227694ab9eb9SArjun Roy /* Either full batch, or we're about to go to next skb 227794ab9eb9SArjun Roy * (and we cannot unroll failed ops across skbs). 227894ab9eb9SArjun Roy */ 227994ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, 228094ab9eb9SArjun Roy pages_to_map, 228194ab9eb9SArjun Roy &address, &length, 228294ab9eb9SArjun Roy &seq, zc, 228394ab9eb9SArjun Roy total_bytes_to_map); 22843763a24cSArjun Roy if (ret) 22853763a24cSArjun Roy goto out; 228694ab9eb9SArjun Roy pages_to_map = 0; 22873763a24cSArjun Roy } 22883763a24cSArjun Roy } 228994ab9eb9SArjun Roy if (pages_to_map) { 229094ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map, 229194ab9eb9SArjun Roy &address, &length, &seq, 229294ab9eb9SArjun Roy zc, total_bytes_to_map); 229393ab6cc6SEric Dumazet } 229405255b82SEric Dumazet out: 2295d8ed45c5SMichel Lespinasse mmap_read_unlock(current->mm); 229618fb76edSArjun Roy /* Try to copy straggler data. */ 229718fb76edSArjun Roy if (!ret) 22987eeba170SArjun Roy copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); 229918fb76edSArjun Roy 230018fb76edSArjun Roy if (length + copylen) { 23017db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 230293ab6cc6SEric Dumazet tcp_rcv_space_adjust(sk); 230393ab6cc6SEric Dumazet 230493ab6cc6SEric Dumazet /* Clean up data we have read: This will do ACK frames. 
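 * Both the mapped bytes (length) and the copied straggler bytes
 * (copylen) are accounted here.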
*/ 230593ab6cc6SEric Dumazet tcp_recv_skb(sk, seq, &offset); 230618fb76edSArjun Roy tcp_cleanup_rbuf(sk, length + copylen); 230793ab6cc6SEric Dumazet ret = 0; 230805255b82SEric Dumazet if (length == zc->length) 230905255b82SEric Dumazet zc->recv_skip_hint = 0; 231005255b82SEric Dumazet } else { 231105255b82SEric Dumazet if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) 231205255b82SEric Dumazet ret = -EIO; 231305255b82SEric Dumazet } 231405255b82SEric Dumazet zc->length = length; 231593ab6cc6SEric Dumazet return ret; 231693ab6cc6SEric Dumazet } 231705255b82SEric Dumazet #endif 231893ab6cc6SEric Dumazet 231998aaa913SMike Maloney /* Similar to __sock_recv_timestamp, but does not require an skb */ 2320892bfd3dSFlorian Westphal void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, 23219718475eSDeepa Dinamani struct scm_timestamping_internal *tss) 232298aaa913SMike Maloney { 2323887feae3SDeepa Dinamani int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); 232498aaa913SMike Maloney bool has_timestamping = false; 232598aaa913SMike Maloney 232698aaa913SMike Maloney if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { 232798aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMP)) { 232898aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { 2329887feae3SDeepa Dinamani if (new_tstamp) { 2330df1b4ba9SArnd Bergmann struct __kernel_timespec kts = { 2331df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2332df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec, 2333df1b4ba9SArnd Bergmann }; 2334887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW, 2335887feae3SDeepa Dinamani sizeof(kts), &kts); 2336887feae3SDeepa Dinamani } else { 2337df1b4ba9SArnd Bergmann struct __kernel_old_timespec ts_old = { 2338df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2339df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec, 2340df1b4ba9SArnd Bergmann }; 23417f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD, 23429718475eSDeepa Dinamani sizeof(ts_old), &ts_old); 2343887feae3SDeepa Dinamani } 234498aaa913SMike Maloney } else { 2345887feae3SDeepa Dinamani if (new_tstamp) { 2346df1b4ba9SArnd Bergmann struct __kernel_sock_timeval stv = { 2347df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2348df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000, 2349df1b4ba9SArnd Bergmann }; 2350887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, 2351887feae3SDeepa Dinamani sizeof(stv), &stv); 2352887feae3SDeepa Dinamani } else { 2353df1b4ba9SArnd Bergmann struct __kernel_old_timeval tv = { 2354df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2355df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000, 2356df1b4ba9SArnd Bergmann }; 23577f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, 235898aaa913SMike Maloney sizeof(tv), &tv); 235998aaa913SMike Maloney } 236098aaa913SMike Maloney } 2361887feae3SDeepa Dinamani } 236298aaa913SMike Maloney 236398aaa913SMike Maloney if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) 236498aaa913SMike Maloney has_timestamping = true; 236598aaa913SMike Maloney else 23669718475eSDeepa Dinamani tss->ts[0] = (struct timespec64) {0}; 236798aaa913SMike Maloney } 236898aaa913SMike Maloney 236998aaa913SMike Maloney if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { 237098aaa913SMike Maloney if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) 237198aaa913SMike Maloney has_timestamping = true; 237298aaa913SMike Maloney else 23739718475eSDeepa Dinamani tss->ts[2] = (struct timespec64) {0}; 237498aaa913SMike Maloney } 237598aaa913SMike 
Maloney 237698aaa913SMike Maloney if (has_timestamping) { 23779718475eSDeepa Dinamani tss->ts[1] = (struct timespec64) {0}; 23789718475eSDeepa Dinamani if (sock_flag(sk, SOCK_TSTAMP_NEW)) 23799718475eSDeepa Dinamani put_cmsg_scm_timestamping64(msg, tss); 23809718475eSDeepa Dinamani else 23819718475eSDeepa Dinamani put_cmsg_scm_timestamping(msg, tss); 238298aaa913SMike Maloney } 238398aaa913SMike Maloney } 238498aaa913SMike Maloney 2385b75eba76SSoheil Hassas Yeganeh static int tcp_inq_hint(struct sock *sk) 2386b75eba76SSoheil Hassas Yeganeh { 2387b75eba76SSoheil Hassas Yeganeh const struct tcp_sock *tp = tcp_sk(sk); 2388b75eba76SSoheil Hassas Yeganeh u32 copied_seq = READ_ONCE(tp->copied_seq); 2389b75eba76SSoheil Hassas Yeganeh u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); 2390b75eba76SSoheil Hassas Yeganeh int inq; 2391b75eba76SSoheil Hassas Yeganeh 2392b75eba76SSoheil Hassas Yeganeh inq = rcv_nxt - copied_seq; 2393b75eba76SSoheil Hassas Yeganeh if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { 2394b75eba76SSoheil Hassas Yeganeh lock_sock(sk); 2395b75eba76SSoheil Hassas Yeganeh inq = tp->rcv_nxt - tp->copied_seq; 2396b75eba76SSoheil Hassas Yeganeh release_sock(sk); 2397b75eba76SSoheil Hassas Yeganeh } 23986466e715SSoheil Hassas Yeganeh /* After receiving a FIN, tell the user-space to continue reading 23996466e715SSoheil Hassas Yeganeh * by returning a non-zero inq. 24006466e715SSoheil Hassas Yeganeh */ 24016466e715SSoheil Hassas Yeganeh if (inq == 0 && sock_flag(sk, SOCK_DONE)) 24026466e715SSoheil Hassas Yeganeh inq = 1; 2403b75eba76SSoheil Hassas Yeganeh return inq; 2404b75eba76SSoheil Hassas Yeganeh } 2405b75eba76SSoheil Hassas Yeganeh 24061da177e4SLinus Torvalds /* 24071da177e4SLinus Torvalds * This routine copies from a sock struct into the user buffer. 24081da177e4SLinus Torvalds * 24091da177e4SLinus Torvalds * Technical note: in 2.3 we work on _locked_ socket, so that 24101da177e4SLinus Torvalds * tricks with *seq access order and skb->users are not required. 24111da177e4SLinus Torvalds * Probably, code can be easily improved even more. 24121da177e4SLinus Torvalds */ 24131da177e4SLinus Torvalds 24142cd81161SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 2415ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss, 24162cd81161SArjun Roy int *cmsg_flags) 24171da177e4SLinus Torvalds { 24181da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 24191da177e4SLinus Torvalds int copied = 0; 24201da177e4SLinus Torvalds u32 peek_seq; 24211da177e4SLinus Torvalds u32 *seq; 24221da177e4SLinus Torvalds unsigned long used; 24232cd81161SArjun Roy int err; 24241da177e4SLinus Torvalds int target; /* Read at least this many bytes */ 24251da177e4SLinus Torvalds long timeo; 2426dfbafc99SSabrina Dubroca struct sk_buff *skb, *last; 242777527313SIlpo Järvinen u32 urg_hole = 0; 24281da177e4SLinus Torvalds 24291da177e4SLinus Torvalds err = -ENOTCONN; 24301da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) 24311da177e4SLinus Torvalds goto out; 24321da177e4SLinus Torvalds 2433f94fd25cSJens Axboe if (tp->recvmsg_inq) { 2434925bba24SArjun Roy *cmsg_flags = TCP_CMSG_INQ; 2435f94fd25cSJens Axboe msg->msg_get_inq = 1; 2436f94fd25cSJens Axboe } 2437ec095263SOliver Hartkopp timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 24381da177e4SLinus Torvalds 24391da177e4SLinus Torvalds /* Urgent data needs to be handled specially. 
*/ 24401da177e4SLinus Torvalds if (flags & MSG_OOB) 24411da177e4SLinus Torvalds goto recv_urg; 24421da177e4SLinus Torvalds 2443c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) { 2444c0e88ff0SPavel Emelyanov err = -EPERM; 2445c0e88ff0SPavel Emelyanov if (!(flags & MSG_PEEK)) 2446c0e88ff0SPavel Emelyanov goto out; 2447c0e88ff0SPavel Emelyanov 2448c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE) 2449c0e88ff0SPavel Emelyanov goto recv_sndq; 2450c0e88ff0SPavel Emelyanov 2451c0e88ff0SPavel Emelyanov err = -EINVAL; 2452c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE) 2453c0e88ff0SPavel Emelyanov goto out; 2454c0e88ff0SPavel Emelyanov 2455c0e88ff0SPavel Emelyanov /* 'common' recv queue MSG_PEEK-ing */ 2456c0e88ff0SPavel Emelyanov } 2457c0e88ff0SPavel Emelyanov 24581da177e4SLinus Torvalds seq = &tp->copied_seq; 24591da177e4SLinus Torvalds if (flags & MSG_PEEK) { 24601da177e4SLinus Torvalds peek_seq = tp->copied_seq; 24611da177e4SLinus Torvalds seq = &peek_seq; 24621da177e4SLinus Torvalds } 24631da177e4SLinus Torvalds 24641da177e4SLinus Torvalds target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 24651da177e4SLinus Torvalds 24661da177e4SLinus Torvalds do { 24671da177e4SLinus Torvalds u32 offset; 24681da177e4SLinus Torvalds 24691da177e4SLinus Torvalds /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 2470b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { 24711da177e4SLinus Torvalds if (copied) 24721da177e4SLinus Torvalds break; 24731da177e4SLinus Torvalds if (signal_pending(current)) { 24741da177e4SLinus Torvalds copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 24751da177e4SLinus Torvalds break; 24761da177e4SLinus Torvalds } 24771da177e4SLinus Torvalds } 24781da177e4SLinus Torvalds 24791da177e4SLinus Torvalds /* Next get a buffer. */ 24801da177e4SLinus Torvalds 2481dfbafc99SSabrina Dubroca last = skb_peek_tail(&sk->sk_receive_queue); 248291521944SDavid S. Miller skb_queue_walk(&sk->sk_receive_queue, skb) { 2483dfbafc99SSabrina Dubroca last = skb; 24841da177e4SLinus Torvalds /* Now that we have two receive queues this 24851da177e4SLinus Torvalds * shouldn't happen. 24861da177e4SLinus Torvalds */ 2487d792c100SIlpo Järvinen if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 2488e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", 24892af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 2490d792c100SIlpo Järvinen flags)) 24911da177e4SLinus Torvalds break; 2492d792c100SIlpo Järvinen 24931da177e4SLinus Torvalds offset = *seq - TCP_SKB_CB(skb)->seq; 24949d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 24959d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__); 24961da177e4SLinus Torvalds offset--; 24979d691539SEric Dumazet } 24981da177e4SLinus Torvalds if (offset < skb->len) 24991da177e4SLinus Torvalds goto found_ok_skb; 2500e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 25011da177e4SLinus Torvalds goto found_fin_ok; 25022af6fd8bSJoe Perches WARN(!(flags & MSG_PEEK), 2503e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", 25042af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 250591521944SDavid S. Miller } 25061da177e4SLinus Torvalds 25071da177e4SLinus Torvalds /* Well, if we have backlog, try to process it now yet. 
*/ 25081da177e4SLinus Torvalds 25099ed498c6SEric Dumazet if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) 25101da177e4SLinus Torvalds break; 25111da177e4SLinus Torvalds 25121da177e4SLinus Torvalds if (copied) { 25138bd172b7SEric Dumazet if (!timeo || 25148bd172b7SEric Dumazet sk->sk_err || 25151da177e4SLinus Torvalds sk->sk_state == TCP_CLOSE || 25161da177e4SLinus Torvalds (sk->sk_shutdown & RCV_SHUTDOWN) || 2517518a09efSDavid S. Miller signal_pending(current)) 25181da177e4SLinus Torvalds break; 25191da177e4SLinus Torvalds } else { 25201da177e4SLinus Torvalds if (sock_flag(sk, SOCK_DONE)) 25211da177e4SLinus Torvalds break; 25221da177e4SLinus Torvalds 25231da177e4SLinus Torvalds if (sk->sk_err) { 25241da177e4SLinus Torvalds copied = sock_error(sk); 25251da177e4SLinus Torvalds break; 25261da177e4SLinus Torvalds } 25271da177e4SLinus Torvalds 25281da177e4SLinus Torvalds if (sk->sk_shutdown & RCV_SHUTDOWN) 25291da177e4SLinus Torvalds break; 25301da177e4SLinus Torvalds 25311da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE) { 25321da177e4SLinus Torvalds /* This occurs when user tries to read 25331da177e4SLinus Torvalds * from never connected socket. 25341da177e4SLinus Torvalds */ 25351da177e4SLinus Torvalds copied = -ENOTCONN; 25361da177e4SLinus Torvalds break; 25371da177e4SLinus Torvalds } 25381da177e4SLinus Torvalds 25391da177e4SLinus Torvalds if (!timeo) { 25401da177e4SLinus Torvalds copied = -EAGAIN; 25411da177e4SLinus Torvalds break; 25421da177e4SLinus Torvalds } 25431da177e4SLinus Torvalds 25441da177e4SLinus Torvalds if (signal_pending(current)) { 25451da177e4SLinus Torvalds copied = sock_intr_errno(timeo); 25461da177e4SLinus Torvalds break; 25471da177e4SLinus Torvalds } 25481da177e4SLinus Torvalds } 25491da177e4SLinus Torvalds 25501da177e4SLinus Torvalds if (copied >= target) { 25511da177e4SLinus Torvalds /* Do not sleep, just process backlog. */ 255293afcfd1SEric Dumazet __sk_flush_backlog(sk); 2553dfbafc99SSabrina Dubroca } else { 255429fbc26eSEric Dumazet tcp_cleanup_rbuf(sk, copied); 2555dfbafc99SSabrina Dubroca sk_wait_data(sk, &timeo, last); 2556dfbafc99SSabrina Dubroca } 25571da177e4SLinus Torvalds 255877527313SIlpo Järvinen if ((flags & MSG_PEEK) && 255977527313SIlpo Järvinen (peek_seq - copied - urg_hole != tp->copied_seq)) { 2560e87cc472SJoe Perches net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 2561e87cc472SJoe Perches current->comm, 2562e87cc472SJoe Perches task_pid_nr(current)); 25631da177e4SLinus Torvalds peek_seq = tp->copied_seq; 25641da177e4SLinus Torvalds } 25651da177e4SLinus Torvalds continue; 25661da177e4SLinus Torvalds 25671da177e4SLinus Torvalds found_ok_skb: 25681da177e4SLinus Torvalds /* Ok so how much can we use? */ 25691da177e4SLinus Torvalds used = skb->len - offset; 25701da177e4SLinus Torvalds if (len < used) 25711da177e4SLinus Torvalds used = len; 25721da177e4SLinus Torvalds 25731da177e4SLinus Torvalds /* Do we have urgent data here? 
*/ 2574b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) { 25751da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - *seq; 25761da177e4SLinus Torvalds if (urg_offset < used) { 25771da177e4SLinus Torvalds if (!urg_offset) { 25781da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_URGINLINE)) { 25797db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1); 258077527313SIlpo Järvinen urg_hole++; 25811da177e4SLinus Torvalds offset++; 25821da177e4SLinus Torvalds used--; 25831da177e4SLinus Torvalds if (!used) 25841da177e4SLinus Torvalds goto skip_copy; 25851da177e4SLinus Torvalds } 25861da177e4SLinus Torvalds } else 25871da177e4SLinus Torvalds used = urg_offset; 25881da177e4SLinus Torvalds } 25891da177e4SLinus Torvalds } 25901da177e4SLinus Torvalds 25911da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) { 259251f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, offset, msg, used); 25931da177e4SLinus Torvalds if (err) { 25941da177e4SLinus Torvalds /* Exception. Bailout! */ 25951da177e4SLinus Torvalds if (!copied) 25961da177e4SLinus Torvalds copied = -EFAULT; 25971da177e4SLinus Torvalds break; 25981da177e4SLinus Torvalds } 25991da177e4SLinus Torvalds } 26001da177e4SLinus Torvalds 26017db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + used); 26021da177e4SLinus Torvalds copied += used; 26031da177e4SLinus Torvalds len -= used; 26041da177e4SLinus Torvalds 26051da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 26061da177e4SLinus Torvalds 26071da177e4SLinus Torvalds skip_copy: 2608b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { 26097b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0); 261031770e34SFlorian Westphal tcp_fast_path_check(sk); 261131770e34SFlorian Westphal } 26121da177e4SLinus Torvalds 261398aaa913SMike Maloney if (TCP_SKB_CB(skb)->has_rxtstamp) { 26142cd81161SArjun Roy tcp_update_recv_tstamps(skb, tss); 2615925bba24SArjun Roy *cmsg_flags |= TCP_CMSG_TS; 261698aaa913SMike Maloney } 2617cc4de047SKelly Littlepage 2618cc4de047SKelly Littlepage if (used + offset < skb->len) 2619cc4de047SKelly Littlepage continue; 2620cc4de047SKelly Littlepage 2621e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 26221da177e4SLinus Torvalds goto found_fin_ok; 26237bced397SDan Williams if (!(flags & MSG_PEEK)) 26243df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 26251da177e4SLinus Torvalds continue; 26261da177e4SLinus Torvalds 26271da177e4SLinus Torvalds found_fin_ok: 26281da177e4SLinus Torvalds /* Process the FIN. */ 26297db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1); 26307bced397SDan Williams if (!(flags & MSG_PEEK)) 26313df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 26321da177e4SLinus Torvalds break; 26331da177e4SLinus Torvalds } while (len > 0); 26341da177e4SLinus Torvalds 26351da177e4SLinus Torvalds /* According to UNIX98, msg_name/msg_namelen are ignored 26361da177e4SLinus Torvalds * on connected socket. I was just happy when found this 8) --ANK 26371da177e4SLinus Torvalds */ 26381da177e4SLinus Torvalds 26391da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. 
*/ 26400e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied); 26411da177e4SLinus Torvalds return copied; 26421da177e4SLinus Torvalds 26431da177e4SLinus Torvalds out: 26441da177e4SLinus Torvalds return err; 26451da177e4SLinus Torvalds 26461da177e4SLinus Torvalds recv_urg: 2647377f0a08SRami Rosen err = tcp_recv_urg(sk, msg, len, flags); 26481da177e4SLinus Torvalds goto out; 2649c0e88ff0SPavel Emelyanov 2650c0e88ff0SPavel Emelyanov recv_sndq: 2651c0e88ff0SPavel Emelyanov err = tcp_peek_sndq(sk, msg, len); 2652c0e88ff0SPavel Emelyanov goto out; 26531da177e4SLinus Torvalds } 26542cd81161SArjun Roy 2655ec095263SOliver Hartkopp int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, 2656ec095263SOliver Hartkopp int *addr_len) 26572cd81161SArjun Roy { 2658f94fd25cSJens Axboe int cmsg_flags = 0, ret; 26592cd81161SArjun Roy struct scm_timestamping_internal tss; 26602cd81161SArjun Roy 26612cd81161SArjun Roy if (unlikely(flags & MSG_ERRQUEUE)) 26622cd81161SArjun Roy return inet_recv_error(sk, msg, len, addr_len); 26632cd81161SArjun Roy 26642cd81161SArjun Roy if (sk_can_busy_loop(sk) && 26652cd81161SArjun Roy skb_queue_empty_lockless(&sk->sk_receive_queue) && 26662cd81161SArjun Roy sk->sk_state == TCP_ESTABLISHED) 2667ec095263SOliver Hartkopp sk_busy_loop(sk, flags & MSG_DONTWAIT); 26682cd81161SArjun Roy 26692cd81161SArjun Roy lock_sock(sk); 2670ec095263SOliver Hartkopp ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags); 26712cd81161SArjun Roy release_sock(sk); 26722cd81161SArjun Roy 2673f94fd25cSJens Axboe if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) { 2674925bba24SArjun Roy if (cmsg_flags & TCP_CMSG_TS) 26752cd81161SArjun Roy tcp_recv_timestamp(msg, sk, &tss); 2676f94fd25cSJens Axboe if (msg->msg_get_inq) { 2677f94fd25cSJens Axboe msg->msg_inq = tcp_inq_hint(sk); 2678f94fd25cSJens Axboe if (cmsg_flags & TCP_CMSG_INQ) 2679f94fd25cSJens Axboe put_cmsg(msg, SOL_TCP, TCP_CM_INQ, 2680f94fd25cSJens Axboe sizeof(msg->msg_inq), &msg->msg_inq); 26812cd81161SArjun Roy } 26822cd81161SArjun Roy } 26832cd81161SArjun Roy return ret; 26842cd81161SArjun Roy } 26854bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_recvmsg); 26861da177e4SLinus Torvalds 2687490d5046SIlpo Järvinen void tcp_set_state(struct sock *sk, int state) 2688490d5046SIlpo Järvinen { 2689490d5046SIlpo Järvinen int oldstate = sk->sk_state; 2690490d5046SIlpo Järvinen 2691d4487491SLawrence Brakmo /* We defined a new enum for TCP states that are exported in BPF 2692d4487491SLawrence Brakmo * so as not force the internal TCP states to be frozen. The 2693d4487491SLawrence Brakmo * following checks will detect if an internal state value ever 2694d4487491SLawrence Brakmo * differs from the BPF value. If this ever happens, then we will 2695d4487491SLawrence Brakmo * need to remap the internal value to the BPF value before calling 2696d4487491SLawrence Brakmo * tcp_call_bpf_2arg. 
2697d4487491SLawrence Brakmo */ 2698d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED); 2699d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT); 2700d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV); 2701d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1); 2702d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2); 2703d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT); 2704d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE); 2705d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT); 2706d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK); 2707d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN); 2708d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING); 2709d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV); 2710d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES); 2711d4487491SLawrence Brakmo 271297a19cafSYonghong Song /* bpf uapi header bpf.h defines an anonymous enum with values 271397a19cafSYonghong Song * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux 271497a19cafSYonghong Song * is able to emit this enum in DWARF due to the above BUILD_BUG_ON. 271597a19cafSYonghong Song * But clang built vmlinux does not have this enum in DWARF 271697a19cafSYonghong Song * since clang removes the above code before generating IR/debuginfo. 271797a19cafSYonghong Song * Let us explicitly emit the type debuginfo to ensure the 271897a19cafSYonghong Song * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF 271997a19cafSYonghong Song * regardless of which compiler is used. 272097a19cafSYonghong Song */ 272197a19cafSYonghong Song BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED); 272297a19cafSYonghong Song 2723d4487491SLawrence Brakmo if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) 2724d4487491SLawrence Brakmo tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); 2725e8fce239SSong Liu 2726490d5046SIlpo Järvinen switch (state) { 2727490d5046SIlpo Järvinen case TCP_ESTABLISHED: 2728490d5046SIlpo Järvinen if (oldstate != TCP_ESTABLISHED) 272981cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2730490d5046SIlpo Järvinen break; 2731490d5046SIlpo Järvinen 2732490d5046SIlpo Järvinen case TCP_CLOSE: 2733490d5046SIlpo Järvinen if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 273481cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 2735490d5046SIlpo Järvinen 2736490d5046SIlpo Järvinen sk->sk_prot->unhash(sk); 2737490d5046SIlpo Järvinen if (inet_csk(sk)->icsk_bind_hash && 2738490d5046SIlpo Järvinen !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 2739ab1e0a13SArnaldo Carvalho de Melo inet_put_port(sk); 2740a8eceea8SJoe Perches fallthrough; 2741490d5046SIlpo Järvinen default: 2742490d5046SIlpo Järvinen if (oldstate == TCP_ESTABLISHED) 274374688e48SPavel Emelyanov TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2744490d5046SIlpo Järvinen } 2745490d5046SIlpo Järvinen 2746490d5046SIlpo Järvinen /* Change state AFTER socket is unhashed to avoid closed 2747490d5046SIlpo Järvinen * socket sitting in hash tables. 
2748490d5046SIlpo Järvinen */ 2749563e0bb0SYafang Shao inet_sk_state_store(sk, state); 2750490d5046SIlpo Järvinen } 2751490d5046SIlpo Järvinen EXPORT_SYMBOL_GPL(tcp_set_state); 2752490d5046SIlpo Järvinen 27531da177e4SLinus Torvalds /* 27541da177e4SLinus Torvalds * State processing on a close. This implements the state shift for 27551da177e4SLinus Torvalds * sending our FIN frame. Note that we only send a FIN for some 27561da177e4SLinus Torvalds * states. A shutdown() may have already sent the FIN, or we may be 27571da177e4SLinus Torvalds * closed. 27581da177e4SLinus Torvalds */ 27591da177e4SLinus Torvalds 27609b5b5cffSArjan van de Ven static const unsigned char new_state[16] = { 27611da177e4SLinus Torvalds /* current state: new state: action: */ 27620980c1e3SEric Dumazet [0 /* (Invalid) */] = TCP_CLOSE, 27630980c1e3SEric Dumazet [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 27640980c1e3SEric Dumazet [TCP_SYN_SENT] = TCP_CLOSE, 27650980c1e3SEric Dumazet [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 27660980c1e3SEric Dumazet [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 27670980c1e3SEric Dumazet [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 27680980c1e3SEric Dumazet [TCP_TIME_WAIT] = TCP_CLOSE, 27690980c1e3SEric Dumazet [TCP_CLOSE] = TCP_CLOSE, 27700980c1e3SEric Dumazet [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 27710980c1e3SEric Dumazet [TCP_LAST_ACK] = TCP_LAST_ACK, 27720980c1e3SEric Dumazet [TCP_LISTEN] = TCP_CLOSE, 27730980c1e3SEric Dumazet [TCP_CLOSING] = TCP_CLOSING, 27740980c1e3SEric Dumazet [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ 27751da177e4SLinus Torvalds }; 27761da177e4SLinus Torvalds 27771da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk) 27781da177e4SLinus Torvalds { 27791da177e4SLinus Torvalds int next = (int)new_state[sk->sk_state]; 27801da177e4SLinus Torvalds int ns = next & TCP_STATE_MASK; 27811da177e4SLinus Torvalds 27821da177e4SLinus Torvalds tcp_set_state(sk, ns); 27831da177e4SLinus Torvalds 27841da177e4SLinus Torvalds return next & TCP_ACTION_FIN; 27851da177e4SLinus Torvalds } 27861da177e4SLinus Torvalds 27871da177e4SLinus Torvalds /* 27881da177e4SLinus Torvalds * Shutdown the sending side of a connection. Much like close except 27891f29b058SSatoru SATOH * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 27901da177e4SLinus Torvalds */ 27911da177e4SLinus Torvalds 27921da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how) 27931da177e4SLinus Torvalds { 27941da177e4SLinus Torvalds /* We need to grab some memory, and put together a FIN, 27951da177e4SLinus Torvalds * and then put it into the queue to be sent. 27961da177e4SLinus Torvalds * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 27971da177e4SLinus Torvalds */ 27981da177e4SLinus Torvalds if (!(how & SEND_SHUTDOWN)) 27991da177e4SLinus Torvalds return; 28001da177e4SLinus Torvalds 28011da177e4SLinus Torvalds /* If we've already sent a FIN, or it's a closed state, skip this. */ 28021da177e4SLinus Torvalds if ((1 << sk->sk_state) & 28031da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_SYN_SENT | 28041da177e4SLinus Torvalds TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { 28051da177e4SLinus Torvalds /* Clear out any half completed packets. FIN if needed. 
*/ 28061da177e4SLinus Torvalds if (tcp_close_state(sk)) 28071da177e4SLinus Torvalds tcp_send_fin(sk); 28081da177e4SLinus Torvalds } 28091da177e4SLinus Torvalds } 28104bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_shutdown); 28111da177e4SLinus Torvalds 281219757cebSEric Dumazet int tcp_orphan_count_sum(void) 281319757cebSEric Dumazet { 281419757cebSEric Dumazet int i, total = 0; 281519757cebSEric Dumazet 281619757cebSEric Dumazet for_each_possible_cpu(i) 281719757cebSEric Dumazet total += per_cpu(tcp_orphan_count, i); 281819757cebSEric Dumazet 281919757cebSEric Dumazet return max(total, 0); 282019757cebSEric Dumazet } 282119757cebSEric Dumazet 282219757cebSEric Dumazet static int tcp_orphan_cache; 282319757cebSEric Dumazet static struct timer_list tcp_orphan_timer; 282419757cebSEric Dumazet #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) 282519757cebSEric Dumazet 282619757cebSEric Dumazet static void tcp_orphan_update(struct timer_list *unused) 282719757cebSEric Dumazet { 282819757cebSEric Dumazet WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); 282919757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 283019757cebSEric Dumazet } 283119757cebSEric Dumazet 283219757cebSEric Dumazet static bool tcp_too_many_orphans(int shift) 283319757cebSEric Dumazet { 283447e6ab24SKuniyuki Iwashima return READ_ONCE(tcp_orphan_cache) << shift > 283547e6ab24SKuniyuki Iwashima READ_ONCE(sysctl_tcp_max_orphans); 283619757cebSEric Dumazet } 283719757cebSEric Dumazet 2838efcdbf24SArun Sharma bool tcp_check_oom(struct sock *sk, int shift) 2839efcdbf24SArun Sharma { 2840efcdbf24SArun Sharma bool too_many_orphans, out_of_socket_memory; 2841efcdbf24SArun Sharma 284219757cebSEric Dumazet too_many_orphans = tcp_too_many_orphans(shift); 2843efcdbf24SArun Sharma out_of_socket_memory = tcp_out_of_memory(sk); 2844efcdbf24SArun Sharma 2845e87cc472SJoe Perches if (too_many_orphans) 2846e87cc472SJoe Perches net_info_ratelimited("too many orphaned sockets\n"); 2847e87cc472SJoe Perches if (out_of_socket_memory) 2848e87cc472SJoe Perches net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); 2849efcdbf24SArun Sharma return too_many_orphans || out_of_socket_memory; 2850efcdbf24SArun Sharma } 2851efcdbf24SArun Sharma 285277c3c956SPaolo Abeni void __tcp_close(struct sock *sk, long timeout) 28531da177e4SLinus Torvalds { 28541da177e4SLinus Torvalds struct sk_buff *skb; 28551da177e4SLinus Torvalds int data_was_unread = 0; 285675c2d907SHerbert Xu int state; 28571da177e4SLinus Torvalds 28581da177e4SLinus Torvalds sk->sk_shutdown = SHUTDOWN_MASK; 28591da177e4SLinus Torvalds 28601da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) { 28611da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 28621da177e4SLinus Torvalds 28631da177e4SLinus Torvalds /* Special case. */ 28640a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk); 28651da177e4SLinus Torvalds 28661da177e4SLinus Torvalds goto adjudge_to_death; 28671da177e4SLinus Torvalds } 28681da177e4SLinus Torvalds 28691da177e4SLinus Torvalds /* We need to flush the recv. buffs. We do this only on the 28701da177e4SLinus Torvalds * descriptor close, not protocol-sourced closes, because the 28711da177e4SLinus Torvalds * reader process may not have drained the data yet! 
28721da177e4SLinus Torvalds */ 28731da177e4SLinus Torvalds while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 2874e11ecddfSEric Dumazet u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; 2875e11ecddfSEric Dumazet 2876e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2877e11ecddfSEric Dumazet len--; 28781da177e4SLinus Torvalds data_was_unread += len; 28791da177e4SLinus Torvalds __kfree_skb(skb); 28801da177e4SLinus Torvalds } 28811da177e4SLinus Torvalds 2882565b7b2dSKonstantin Khorenko /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ 2883565b7b2dSKonstantin Khorenko if (sk->sk_state == TCP_CLOSE) 2884565b7b2dSKonstantin Khorenko goto adjudge_to_death; 2885565b7b2dSKonstantin Khorenko 288665bb723cSGerrit Renker /* As outlined in RFC 2525, section 2.17, we send a RST here because 288765bb723cSGerrit Renker * data was lost. To witness the awful effects of the old behavior of 288865bb723cSGerrit Renker * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 288965bb723cSGerrit Renker * GET in an FTP client, suspend the process, wait for the client to 289065bb723cSGerrit Renker * advertise a zero window, then kill -9 the FTP client, wheee... 289165bb723cSGerrit Renker * Note: timeout is always zero in such a case. 28921da177e4SLinus Torvalds */ 2893ee995283SPavel Emelyanov if (unlikely(tcp_sk(sk)->repair)) { 2894ee995283SPavel Emelyanov sk->sk_prot->disconnect(sk, 0); 2895ee995283SPavel Emelyanov } else if (data_was_unread) { 28961da177e4SLinus Torvalds /* Unread data was tossed, zap the connection. */ 28976aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 28981da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 2899aa133076SWu Fengguang tcp_send_active_reset(sk, sk->sk_allocation); 29001da177e4SLinus Torvalds } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 29011da177e4SLinus Torvalds /* Check zero linger _after_ checking for unread data. */ 29021da177e4SLinus Torvalds sk->sk_prot->disconnect(sk, 0); 29036aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 29041da177e4SLinus Torvalds } else if (tcp_close_state(sk)) { 29051da177e4SLinus Torvalds /* We FIN if the application ate all the data before 29061da177e4SLinus Torvalds * zapping the connection. 29071da177e4SLinus Torvalds */ 29081da177e4SLinus Torvalds 29091da177e4SLinus Torvalds /* RED-PEN. Formally speaking, we have broken TCP state 29101da177e4SLinus Torvalds * machine. State transitions: 29111da177e4SLinus Torvalds * 29121da177e4SLinus Torvalds * TCP_ESTABLISHED -> TCP_FIN_WAIT1 29131da177e4SLinus Torvalds * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) 29141da177e4SLinus Torvalds * TCP_CLOSE_WAIT -> TCP_LAST_ACK 29151da177e4SLinus Torvalds * 29161da177e4SLinus Torvalds * are legal only when FIN has been sent (i.e. in window), 29171da177e4SLinus Torvalds * rather than queued out of window. Purists blame. 29181da177e4SLinus Torvalds * 29191da177e4SLinus Torvalds * F.e. "RFC state" is ESTABLISHED, 29201da177e4SLinus Torvalds * if Linux state is FIN-WAIT-1, but FIN is still not sent. 
29211da177e4SLinus Torvalds * 29221da177e4SLinus Torvalds * The visible declinations are that sometimes 29231da177e4SLinus Torvalds * we enter time-wait state, when it is not required really 29241da177e4SLinus Torvalds * (harmless), do not send active resets, when they are 29251da177e4SLinus Torvalds * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 29261da177e4SLinus Torvalds * they look as CLOSING or LAST_ACK for Linux) 29271da177e4SLinus Torvalds * Probably, I missed some more holelets. 29281da177e4SLinus Torvalds * --ANK 29298336886fSJerry Chu * XXX (TFO) - To start off we don't support SYN+ACK+FIN 29308336886fSJerry Chu * in a single packet! (May consider it later but will 29318336886fSJerry Chu * probably need API support or TCP_CORK SYN-ACK until 29328336886fSJerry Chu * data is written and socket is closed.) 29331da177e4SLinus Torvalds */ 29341da177e4SLinus Torvalds tcp_send_fin(sk); 29351da177e4SLinus Torvalds } 29361da177e4SLinus Torvalds 29371da177e4SLinus Torvalds sk_stream_wait_close(sk, timeout); 29381da177e4SLinus Torvalds 29391da177e4SLinus Torvalds adjudge_to_death: 294075c2d907SHerbert Xu state = sk->sk_state; 294175c2d907SHerbert Xu sock_hold(sk); 294275c2d907SHerbert Xu sock_orphan(sk); 294375c2d907SHerbert Xu 29441da177e4SLinus Torvalds local_bh_disable(); 29451da177e4SLinus Torvalds bh_lock_sock(sk); 29468873c064SEric Dumazet /* remove backlog if any, without releasing ownership. */ 29478873c064SEric Dumazet __release_sock(sk); 29481da177e4SLinus Torvalds 294919757cebSEric Dumazet this_cpu_inc(tcp_orphan_count); 2950eb4dea58SHerbert Xu 295175c2d907SHerbert Xu /* Have we already been destroyed by a softirq or backlog? */ 295275c2d907SHerbert Xu if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 295375c2d907SHerbert Xu goto out; 29541da177e4SLinus Torvalds 29551da177e4SLinus Torvalds /* This is a (useful) BSD violating of the RFC. There is a 29561da177e4SLinus Torvalds * problem with TCP as specified in that the other end could 29571da177e4SLinus Torvalds * keep a socket open forever with no application left this end. 2958b10bd54cSJesper Juhl * We use a 1 minute timeout (about the same as BSD) then kill 29591da177e4SLinus Torvalds * our end. If they send after that then tough - BUT: long enough 29601da177e4SLinus Torvalds * that we won't make the old 4*rto = almost no time - whoops 29611da177e4SLinus Torvalds * reset mistake. 29621da177e4SLinus Torvalds * 29631da177e4SLinus Torvalds * Nope, it was not mistake. It is really desired behaviour 29641da177e4SLinus Torvalds * f.e. on http servers, when such sockets are useless, but 29651da177e4SLinus Torvalds * consume significant resources. Let's do it with special 29661da177e4SLinus Torvalds * linger2 option. --ANK 29671da177e4SLinus Torvalds */ 29681da177e4SLinus Torvalds 29691da177e4SLinus Torvalds if (sk->sk_state == TCP_FIN_WAIT2) { 29701da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 29711da177e4SLinus Torvalds if (tp->linger2 < 0) { 29721da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 29731da177e4SLinus Torvalds tcp_send_active_reset(sk, GFP_ATOMIC); 297402a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), 2975de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONLINGER); 29761da177e4SLinus Torvalds } else { 2977463c84b9SArnaldo Carvalho de Melo const int tmo = tcp_fin_time(sk); 29781da177e4SLinus Torvalds 29791da177e4SLinus Torvalds if (tmo > TCP_TIMEWAIT_LEN) { 298052499afeSDavid S. Miller inet_csk_reset_keepalive_timer(sk, 298152499afeSDavid S. 
Miller tmo - TCP_TIMEWAIT_LEN); 29821da177e4SLinus Torvalds } else { 29831da177e4SLinus Torvalds tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 29841da177e4SLinus Torvalds goto out; 29851da177e4SLinus Torvalds } 29861da177e4SLinus Torvalds } 29871da177e4SLinus Torvalds } 29881da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 2989efcdbf24SArun Sharma if (tcp_check_oom(sk, 0)) { 29901da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 29911da177e4SLinus Torvalds tcp_send_active_reset(sk, GFP_ATOMIC); 299202a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), 2993de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONMEMORY); 29944ee806d5SDan Streetman } else if (!check_net(sock_net(sk))) { 29954ee806d5SDan Streetman /* Not possible to send reset; just close */ 29964ee806d5SDan Streetman tcp_set_state(sk, TCP_CLOSE); 29971da177e4SLinus Torvalds } 29981da177e4SLinus Torvalds } 29991da177e4SLinus Torvalds 30008336886fSJerry Chu if (sk->sk_state == TCP_CLOSE) { 3001d983ea6fSEric Dumazet struct request_sock *req; 3002d983ea6fSEric Dumazet 3003d983ea6fSEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 3004d983ea6fSEric Dumazet lockdep_sock_is_held(sk)); 30058336886fSJerry Chu /* We could get here with a non-NULL req if the socket is 30068336886fSJerry Chu * aborted (e.g., closed with unread data) before 3WHS 30078336886fSJerry Chu * finishes. 30088336886fSJerry Chu */ 300900db4124SIan Morris if (req) 30108336886fSJerry Chu reqsk_fastopen_remove(sk, req, false); 30110a5578cfSArnaldo Carvalho de Melo inet_csk_destroy_sock(sk); 30128336886fSJerry Chu } 30131da177e4SLinus Torvalds /* Otherwise, socket is reprieved until protocol close. */ 30141da177e4SLinus Torvalds 30151da177e4SLinus Torvalds out: 30161da177e4SLinus Torvalds bh_unlock_sock(sk); 30171da177e4SLinus Torvalds local_bh_enable(); 301877c3c956SPaolo Abeni } 301977c3c956SPaolo Abeni 302077c3c956SPaolo Abeni void tcp_close(struct sock *sk, long timeout) 302177c3c956SPaolo Abeni { 302277c3c956SPaolo Abeni lock_sock(sk); 302377c3c956SPaolo Abeni __tcp_close(sk, timeout); 30248873c064SEric Dumazet release_sock(sk); 30251da177e4SLinus Torvalds sock_put(sk); 30261da177e4SLinus Torvalds } 30274bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_close); 30281da177e4SLinus Torvalds 30291da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */ 30301da177e4SLinus Torvalds 3031a2a385d6SEric Dumazet static inline bool tcp_need_reset(int state) 30321da177e4SLinus Torvalds { 30331da177e4SLinus Torvalds return (1 << state) & 30341da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 3035a7150e38SEric Dumazet TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 30361da177e4SLinus Torvalds } 30371da177e4SLinus Torvalds 303875c119afSEric Dumazet static void tcp_rtx_queue_purge(struct sock *sk) 303975c119afSEric Dumazet { 304075c119afSEric Dumazet struct rb_node *p = rb_first(&sk->tcp_rtx_queue); 304175c119afSEric Dumazet 30422bec445fSEric Dumazet tcp_sk(sk)->highest_sack = NULL; 304375c119afSEric Dumazet while (p) { 304475c119afSEric Dumazet struct sk_buff *skb = rb_to_skb(p); 304575c119afSEric Dumazet 304675c119afSEric Dumazet p = rb_next(p); 304775c119afSEric Dumazet /* Since we are deleting whole queue, no need to 304875c119afSEric Dumazet * list_del(&skb->tcp_tsorted_anchor) 304975c119afSEric Dumazet */ 305075c119afSEric Dumazet tcp_rtx_queue_unlink(skb, sk); 305103271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 305275c119afSEric Dumazet } 305375c119afSEric Dumazet } 305475c119afSEric Dumazet 3055ac3f09baSEric Dumazet void 
tcp_write_queue_purge(struct sock *sk) 3056ac3f09baSEric Dumazet { 3057ac3f09baSEric Dumazet struct sk_buff *skb; 3058ac3f09baSEric Dumazet 3059ac3f09baSEric Dumazet tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 3060ac3f09baSEric Dumazet while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 3061ac3f09baSEric Dumazet tcp_skb_tsorted_anchor_cleanup(skb); 306203271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 3063ac3f09baSEric Dumazet } 306475c119afSEric Dumazet tcp_rtx_queue_purge(sk); 3065ac3f09baSEric Dumazet INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 3066ac3f09baSEric Dumazet tcp_clear_all_retrans_hints(tcp_sk(sk)); 3067bffd168cSSoheil Hassas Yeganeh tcp_sk(sk)->packets_out = 0; 306804c03114SEric Dumazet inet_csk(sk)->icsk_backoff = 0; 3069ac3f09baSEric Dumazet } 3070ac3f09baSEric Dumazet 30711da177e4SLinus Torvalds int tcp_disconnect(struct sock *sk, int flags) 30721da177e4SLinus Torvalds { 30731da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk); 3074463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 30751da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 30761da177e4SLinus Torvalds int old_state = sk->sk_state; 30770f317464SEric Dumazet u32 seq; 30781da177e4SLinus Torvalds 30791da177e4SLinus Torvalds if (old_state != TCP_CLOSE) 30801da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 30811da177e4SLinus Torvalds 30821da177e4SLinus Torvalds /* ABORT function of RFC793 */ 30831da177e4SLinus Torvalds if (old_state == TCP_LISTEN) { 30840a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk); 3085ee995283SPavel Emelyanov } else if (unlikely(tp->repair)) { 3086ee995283SPavel Emelyanov sk->sk_err = ECONNABORTED; 30871da177e4SLinus Torvalds } else if (tcp_need_reset(old_state) || 30881da177e4SLinus Torvalds (tp->snd_nxt != tp->write_seq && 30891da177e4SLinus Torvalds (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 3090caa20d9aSStephen Hemminger /* The last check adjusts for discrepancy of Linux wrt. RFC 30911da177e4SLinus Torvalds * states 30921da177e4SLinus Torvalds */ 30931da177e4SLinus Torvalds tcp_send_active_reset(sk, gfp_any()); 30941da177e4SLinus Torvalds sk->sk_err = ECONNRESET; 3095a7150e38SEric Dumazet } else if (old_state == TCP_SYN_SENT) 3096a7150e38SEric Dumazet sk->sk_err = ECONNRESET; 30971da177e4SLinus Torvalds 30981da177e4SLinus Torvalds tcp_clear_xmit_timers(sk); 30991da177e4SLinus Torvalds __skb_queue_purge(&sk->sk_receive_queue); 31007db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 31017b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0); 3102fe067e8aSDavid S. 
Miller tcp_write_queue_purge(sk); 3103cf1ef3f0SWei Wang tcp_fastopen_active_disable_ofo_check(sk); 31049f5afeaeSYaogong Wang skb_rbtree_purge(&tp->out_of_order_queue); 31051da177e4SLinus Torvalds 3106c720c7e8SEric Dumazet inet->inet_dport = 0; 31071da177e4SLinus Torvalds 31081da177e4SLinus Torvalds if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 31091da177e4SLinus Torvalds inet_reset_saddr(sk); 31101da177e4SLinus Torvalds 31111da177e4SLinus Torvalds sk->sk_shutdown = 0; 31121da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE); 3113740b0f18SEric Dumazet tp->srtt_us = 0; 3114b9e2e689SEric Dumazet tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 31153f6c65d6SWei Wang tp->rcv_rtt_last_tsecr = 0; 31160f317464SEric Dumazet 31170f317464SEric Dumazet seq = tp->write_seq + tp->max_window + 2; 31180f317464SEric Dumazet if (!seq) 31190f317464SEric Dumazet seq = 1; 31200f317464SEric Dumazet WRITE_ONCE(tp->write_seq, seq); 31210f317464SEric Dumazet 3122463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 31236687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 31249d9b1ee0SEnke Chen icsk->icsk_probes_tstamp = 0; 31256a408147SEric Dumazet icsk->icsk_rto = TCP_TIMEOUT_INIT; 3126ca584ba0SMartin KaFai Lau icsk->icsk_rto_min = TCP_RTO_MIN; 31272b8ee4f0SMartin KaFai Lau icsk->icsk_delack_max = TCP_DELACK_MAX; 31280b6a05c1SIlpo Järvinen tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 312940570375SEric Dumazet tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 31301da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 31311fdf475aSEric Dumazet tp->window_clamp = 0; 31322fbdd562SEric Dumazet tp->delivered = 0; 3133e21db6f6SYuchung Cheng tp->delivered_ce = 0; 3134ce69e563SChristoph Paasch if (icsk->icsk_ca_ops->release) 3135ce69e563SChristoph Paasch icsk->icsk_ca_ops->release(sk); 3136ce69e563SChristoph Paasch memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 31378919a9b3SNeal Cardwell icsk->icsk_ca_initialized = 0; 31386687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 3139d4761754SYousuk Seung tp->is_sack_reneg = 0; 31401da177e4SLinus Torvalds tcp_clear_retrans(tp); 3141c13c48c0SEric Dumazet tp->total_retrans = 0; 3142463c84b9SArnaldo Carvalho de Melo inet_csk_delack_init(sk); 3143499350a5SWei Wang /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 3144499350a5SWei Wang * issue in __tcp_select_window() 3145499350a5SWei Wang */ 3146499350a5SWei Wang icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; 3147b40b4f79SSrinivas Aji memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 31481da177e4SLinus Torvalds __sk_dst_reset(sk); 31498f905c0eSEric Dumazet dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); 315017c3060bSEric Dumazet tcp_saved_syn_free(tp); 31515d9f4262SEric Dumazet tp->compressed_ack = 0; 3152784f8344SEric Dumazet tp->segs_in = 0; 3153784f8344SEric Dumazet tp->segs_out = 0; 3154ba113c3aSWei Wang tp->bytes_sent = 0; 3155e858faf5SChristoph Paasch tp->bytes_acked = 0; 3156e858faf5SChristoph Paasch tp->bytes_received = 0; 3157fb31c9b9SWei Wang tp->bytes_retrans = 0; 3158db7ffee6SEric Dumazet tp->data_segs_in = 0; 3159db7ffee6SEric Dumazet tp->data_segs_out = 0; 31607788174eSYuchung Cheng tp->duplicate_sack[0].start_seq = 0; 31617788174eSYuchung Cheng tp->duplicate_sack[0].end_seq = 0; 31627e10b655SWei Wang tp->dsack_dups = 0; 31637ec65372SWei Wang tp->reord_seen = 0; 31645c701549SEric Dumazet tp->retrans_out = 0; 31655c701549SEric Dumazet tp->sacked_out = 0; 31665c701549SEric Dumazet tp->tlp_high_seq = 0; 31675c701549SEric Dumazet tp->last_oow_ack_time = 0; 31686cda8b74SEric Dumazet /* 
There's a bubble in the pipe until at least the first ACK. */ 31696cda8b74SEric Dumazet tp->app_limited = ~0U; 3170792c4354SEric Dumazet tp->rack.mstamp = 0; 3171792c4354SEric Dumazet tp->rack.advanced = 0; 3172792c4354SEric Dumazet tp->rack.reo_wnd_steps = 1; 3173792c4354SEric Dumazet tp->rack.last_delivered = 0; 3174792c4354SEric Dumazet tp->rack.reo_wnd_persist = 0; 3175792c4354SEric Dumazet tp->rack.dsack_seen = 0; 31766bcdc40dSEric Dumazet tp->syn_data_acked = 0; 31776bcdc40dSEric Dumazet tp->rx_opt.saw_tstamp = 0; 31786bcdc40dSEric Dumazet tp->rx_opt.dsack = 0; 31796bcdc40dSEric Dumazet tp->rx_opt.num_sacks = 0; 3180f9af2dbbSThomas Higdon tp->rcv_ooopack = 0; 31816cda8b74SEric Dumazet 31821da177e4SLinus Torvalds 31837db92362SWei Wang /* Clean up fastopen related fields */ 31847db92362SWei Wang tcp_free_fastopen_req(tp); 31857db92362SWei Wang inet->defer_connect = 0; 318648027478SJason Baron tp->fastopen_client_fail = 0; 31877db92362SWei Wang 3188c720c7e8SEric Dumazet WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 31891da177e4SLinus Torvalds 31909b42d55aSLi RongQing if (sk->sk_frag.page) { 31919b42d55aSLi RongQing put_page(sk->sk_frag.page); 31929b42d55aSLi RongQing sk->sk_frag.page = NULL; 31939b42d55aSLi RongQing sk->sk_frag.offset = 0; 31949b42d55aSLi RongQing } 3195e3ae2365SAlexander Aring sk_error_report(sk); 3196a01512b1SYueHaibing return 0; 31971da177e4SLinus Torvalds } 31984bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_disconnect); 31991da177e4SLinus Torvalds 3200a2a385d6SEric Dumazet static inline bool tcp_can_repair_sock(const struct sock *sk) 3201ee995283SPavel Emelyanov { 320252e804c6SEric W. Biederman return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 3203319b0534SAndrey Vagin (sk->sk_state != TCP_LISTEN); 3204ee995283SPavel Emelyanov } 3205ee995283SPavel Emelyanov 3206d38d2b00SChristoph Hellwig static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) 3207b1ed4c4fSAndrey Vagin { 3208b1ed4c4fSAndrey Vagin struct tcp_repair_window opt; 3209b1ed4c4fSAndrey Vagin 3210b1ed4c4fSAndrey Vagin if (!tp->repair) 3211b1ed4c4fSAndrey Vagin return -EPERM; 3212b1ed4c4fSAndrey Vagin 3213b1ed4c4fSAndrey Vagin if (len != sizeof(opt)) 3214b1ed4c4fSAndrey Vagin return -EINVAL; 3215b1ed4c4fSAndrey Vagin 3216d38d2b00SChristoph Hellwig if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) 3217b1ed4c4fSAndrey Vagin return -EFAULT; 3218b1ed4c4fSAndrey Vagin 3219b1ed4c4fSAndrey Vagin if (opt.max_window < opt.snd_wnd) 3220b1ed4c4fSAndrey Vagin return -EINVAL; 3221b1ed4c4fSAndrey Vagin 3222b1ed4c4fSAndrey Vagin if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) 3223b1ed4c4fSAndrey Vagin return -EINVAL; 3224b1ed4c4fSAndrey Vagin 3225b1ed4c4fSAndrey Vagin if (after(opt.rcv_wup, tp->rcv_nxt)) 3226b1ed4c4fSAndrey Vagin return -EINVAL; 3227b1ed4c4fSAndrey Vagin 3228b1ed4c4fSAndrey Vagin tp->snd_wl1 = opt.snd_wl1; 3229b1ed4c4fSAndrey Vagin tp->snd_wnd = opt.snd_wnd; 3230b1ed4c4fSAndrey Vagin tp->max_window = opt.max_window; 3231b1ed4c4fSAndrey Vagin 3232b1ed4c4fSAndrey Vagin tp->rcv_wnd = opt.rcv_wnd; 3233b1ed4c4fSAndrey Vagin tp->rcv_wup = opt.rcv_wup; 3234b1ed4c4fSAndrey Vagin 3235b1ed4c4fSAndrey Vagin return 0; 3236b1ed4c4fSAndrey Vagin } 3237b1ed4c4fSAndrey Vagin 3238d38d2b00SChristoph Hellwig static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, 3239d38d2b00SChristoph Hellwig unsigned int len) 3240b139ba4eSPavel Emelyanov { 324115e56515SDouglas Caetano dos Santos struct tcp_sock *tp = tcp_sk(sk); 3242de248a75SPavel Emelyanov struct tcp_repair_opt opt; 
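	/* The loop below consumes one struct tcp_repair_opt {opt_code, opt_val}
	 * record per iteration from optbuf and applies it to the cached
	 * rx_opt state of a socket being restored in repair mode.
	 */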
3243d3c48151SChristoph Hellwig size_t offset = 0; 3244b139ba4eSPavel Emelyanov 3245de248a75SPavel Emelyanov while (len >= sizeof(opt)) { 3246d3c48151SChristoph Hellwig if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) 3247b139ba4eSPavel Emelyanov return -EFAULT; 3248b139ba4eSPavel Emelyanov 3249d3c48151SChristoph Hellwig offset += sizeof(opt); 3250de248a75SPavel Emelyanov len -= sizeof(opt); 3251b139ba4eSPavel Emelyanov 3252de248a75SPavel Emelyanov switch (opt.opt_code) { 3253de248a75SPavel Emelyanov case TCPOPT_MSS: 3254de248a75SPavel Emelyanov tp->rx_opt.mss_clamp = opt.opt_val; 325515e56515SDouglas Caetano dos Santos tcp_mtup_init(sk); 3256b139ba4eSPavel Emelyanov break; 3257de248a75SPavel Emelyanov case TCPOPT_WINDOW: 3258bc26ccd8SAndrey Vagin { 3259bc26ccd8SAndrey Vagin u16 snd_wscale = opt.opt_val & 0xFFFF; 3260bc26ccd8SAndrey Vagin u16 rcv_wscale = opt.opt_val >> 16; 3261bc26ccd8SAndrey Vagin 3262589c49cbSGao Feng if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) 3263b139ba4eSPavel Emelyanov return -EFBIG; 3264b139ba4eSPavel Emelyanov 3265bc26ccd8SAndrey Vagin tp->rx_opt.snd_wscale = snd_wscale; 3266bc26ccd8SAndrey Vagin tp->rx_opt.rcv_wscale = rcv_wscale; 3267bc26ccd8SAndrey Vagin tp->rx_opt.wscale_ok = 1; 3268bc26ccd8SAndrey Vagin } 3269b139ba4eSPavel Emelyanov break; 3270b139ba4eSPavel Emelyanov case TCPOPT_SACK_PERM: 3271de248a75SPavel Emelyanov if (opt.opt_val != 0) 3272de248a75SPavel Emelyanov return -EINVAL; 3273de248a75SPavel Emelyanov 3274b139ba4eSPavel Emelyanov tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 3275b139ba4eSPavel Emelyanov break; 3276b139ba4eSPavel Emelyanov case TCPOPT_TIMESTAMP: 3277de248a75SPavel Emelyanov if (opt.opt_val != 0) 3278de248a75SPavel Emelyanov return -EINVAL; 3279de248a75SPavel Emelyanov 3280b139ba4eSPavel Emelyanov tp->rx_opt.tstamp_ok = 1; 3281b139ba4eSPavel Emelyanov break; 3282b139ba4eSPavel Emelyanov } 3283b139ba4eSPavel Emelyanov } 3284b139ba4eSPavel Emelyanov 3285b139ba4eSPavel Emelyanov return 0; 3286b139ba4eSPavel Emelyanov } 3287b139ba4eSPavel Emelyanov 3288a842fe14SEric Dumazet DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 3289a842fe14SEric Dumazet EXPORT_SYMBOL(tcp_tx_delay_enabled); 3290a842fe14SEric Dumazet 3291a842fe14SEric Dumazet static void tcp_enable_tx_delay(void) 3292a842fe14SEric Dumazet { 3293a842fe14SEric Dumazet if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { 3294a842fe14SEric Dumazet static int __tcp_tx_delay_enabled = 0; 3295a842fe14SEric Dumazet 3296a842fe14SEric Dumazet if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { 3297a842fe14SEric Dumazet static_branch_enable(&tcp_tx_delay_enabled); 3298a842fe14SEric Dumazet pr_info("TCP_TX_DELAY enabled\n"); 3299a842fe14SEric Dumazet } 3300a842fe14SEric Dumazet } 3301a842fe14SEric Dumazet } 3302a842fe14SEric Dumazet 3303db10538aSChristoph Hellwig /* When set indicates to always queue non-full frames. Later the user clears 3304db10538aSChristoph Hellwig * this option and we transmit any pending partial frames in the queue. This is 3305db10538aSChristoph Hellwig * meant to be used alongside sendfile() to get properly filled frames when the 3306db10538aSChristoph Hellwig * user (for example) must write out headers with a write() call first and then 3307db10538aSChristoph Hellwig * use sendfile to send out the data parts. 3308db10538aSChristoph Hellwig * 3309db10538aSChristoph Hellwig * TCP_CORK can be set together with TCP_NODELAY and it is stronger than 3310db10538aSChristoph Hellwig * TCP_NODELAY. 
3311db10538aSChristoph Hellwig */ 33126fadaa56SMaxim Galaganov void __tcp_sock_set_cork(struct sock *sk, bool on) 3313db10538aSChristoph Hellwig { 3314db10538aSChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk); 3315db10538aSChristoph Hellwig 3316db10538aSChristoph Hellwig if (on) { 3317db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_CORK; 3318db10538aSChristoph Hellwig } else { 3319db10538aSChristoph Hellwig tp->nonagle &= ~TCP_NAGLE_CORK; 3320db10538aSChristoph Hellwig if (tp->nonagle & TCP_NAGLE_OFF) 3321db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_PUSH; 3322db10538aSChristoph Hellwig tcp_push_pending_frames(sk); 3323db10538aSChristoph Hellwig } 3324db10538aSChristoph Hellwig } 3325db10538aSChristoph Hellwig 3326db10538aSChristoph Hellwig void tcp_sock_set_cork(struct sock *sk, bool on) 3327db10538aSChristoph Hellwig { 3328db10538aSChristoph Hellwig lock_sock(sk); 3329db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, on); 3330db10538aSChristoph Hellwig release_sock(sk); 3331db10538aSChristoph Hellwig } 3332db10538aSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_cork); 3333db10538aSChristoph Hellwig 333412abc5eeSChristoph Hellwig /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is 333512abc5eeSChristoph Hellwig * remembered, but it is not activated until cork is cleared. 333612abc5eeSChristoph Hellwig * 333712abc5eeSChristoph Hellwig * However, when TCP_NODELAY is set we make an explicit push, which overrides 333812abc5eeSChristoph Hellwig * even TCP_CORK for currently queued segments. 333912abc5eeSChristoph Hellwig */ 33406fadaa56SMaxim Galaganov void __tcp_sock_set_nodelay(struct sock *sk, bool on) 334112abc5eeSChristoph Hellwig { 334212abc5eeSChristoph Hellwig if (on) { 334312abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 334412abc5eeSChristoph Hellwig tcp_push_pending_frames(sk); 334512abc5eeSChristoph Hellwig } else { 334612abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; 334712abc5eeSChristoph Hellwig } 334812abc5eeSChristoph Hellwig } 334912abc5eeSChristoph Hellwig 335012abc5eeSChristoph Hellwig void tcp_sock_set_nodelay(struct sock *sk) 335112abc5eeSChristoph Hellwig { 335212abc5eeSChristoph Hellwig lock_sock(sk); 335312abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, true); 335412abc5eeSChristoph Hellwig release_sock(sk); 335512abc5eeSChristoph Hellwig } 335612abc5eeSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_nodelay); 335712abc5eeSChristoph Hellwig 3358ddd061b8SChristoph Hellwig static void __tcp_sock_set_quickack(struct sock *sk, int val) 3359ddd061b8SChristoph Hellwig { 3360ddd061b8SChristoph Hellwig if (!val) { 3361ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk); 3362ddd061b8SChristoph Hellwig return; 3363ddd061b8SChristoph Hellwig } 3364ddd061b8SChristoph Hellwig 3365ddd061b8SChristoph Hellwig inet_csk_exit_pingpong_mode(sk); 3366ddd061b8SChristoph Hellwig if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 3367ddd061b8SChristoph Hellwig inet_csk_ack_scheduled(sk)) { 3368ddd061b8SChristoph Hellwig inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; 3369ddd061b8SChristoph Hellwig tcp_cleanup_rbuf(sk, 1); 3370ddd061b8SChristoph Hellwig if (!(val & 1)) 3371ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk); 3372ddd061b8SChristoph Hellwig } 3373ddd061b8SChristoph Hellwig } 3374ddd061b8SChristoph Hellwig 3375ddd061b8SChristoph Hellwig void tcp_sock_set_quickack(struct sock *sk, int val) 3376ddd061b8SChristoph Hellwig { 3377ddd061b8SChristoph Hellwig 
lock_sock(sk); 3378ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val); 3379ddd061b8SChristoph Hellwig release_sock(sk); 3380ddd061b8SChristoph Hellwig } 3381ddd061b8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_quickack); 3382ddd061b8SChristoph Hellwig 3383557eadfcSChristoph Hellwig int tcp_sock_set_syncnt(struct sock *sk, int val) 3384557eadfcSChristoph Hellwig { 3385557eadfcSChristoph Hellwig if (val < 1 || val > MAX_TCP_SYNCNT) 3386557eadfcSChristoph Hellwig return -EINVAL; 3387557eadfcSChristoph Hellwig 3388557eadfcSChristoph Hellwig lock_sock(sk); 3389557eadfcSChristoph Hellwig inet_csk(sk)->icsk_syn_retries = val; 3390557eadfcSChristoph Hellwig release_sock(sk); 3391557eadfcSChristoph Hellwig return 0; 3392557eadfcSChristoph Hellwig } 3393557eadfcSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_syncnt); 3394557eadfcSChristoph Hellwig 3395c488aeadSChristoph Hellwig void tcp_sock_set_user_timeout(struct sock *sk, u32 val) 3396c488aeadSChristoph Hellwig { 3397c488aeadSChristoph Hellwig lock_sock(sk); 3398c488aeadSChristoph Hellwig inet_csk(sk)->icsk_user_timeout = val; 3399c488aeadSChristoph Hellwig release_sock(sk); 3400c488aeadSChristoph Hellwig } 3401c488aeadSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_user_timeout); 3402c488aeadSChristoph Hellwig 3403aad4a0a9SDmitry Yakunin int tcp_sock_set_keepidle_locked(struct sock *sk, int val) 340471c48eb8SChristoph Hellwig { 340571c48eb8SChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk); 340671c48eb8SChristoph Hellwig 340771c48eb8SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPIDLE) 340871c48eb8SChristoph Hellwig return -EINVAL; 340971c48eb8SChristoph Hellwig 341071c48eb8SChristoph Hellwig tp->keepalive_time = val * HZ; 341171c48eb8SChristoph Hellwig if (sock_flag(sk, SOCK_KEEPOPEN) && 341271c48eb8SChristoph Hellwig !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 341371c48eb8SChristoph Hellwig u32 elapsed = keepalive_time_elapsed(tp); 341471c48eb8SChristoph Hellwig 341571c48eb8SChristoph Hellwig if (tp->keepalive_time > elapsed) 341671c48eb8SChristoph Hellwig elapsed = tp->keepalive_time - elapsed; 341771c48eb8SChristoph Hellwig else 341871c48eb8SChristoph Hellwig elapsed = 0; 341971c48eb8SChristoph Hellwig inet_csk_reset_keepalive_timer(sk, elapsed); 342071c48eb8SChristoph Hellwig } 342171c48eb8SChristoph Hellwig 342271c48eb8SChristoph Hellwig return 0; 342371c48eb8SChristoph Hellwig } 342471c48eb8SChristoph Hellwig 342571c48eb8SChristoph Hellwig int tcp_sock_set_keepidle(struct sock *sk, int val) 342671c48eb8SChristoph Hellwig { 342771c48eb8SChristoph Hellwig int err; 342871c48eb8SChristoph Hellwig 342971c48eb8SChristoph Hellwig lock_sock(sk); 3430aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val); 343171c48eb8SChristoph Hellwig release_sock(sk); 343271c48eb8SChristoph Hellwig return err; 343371c48eb8SChristoph Hellwig } 343471c48eb8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepidle); 343571c48eb8SChristoph Hellwig 3436d41ecaacSChristoph Hellwig int tcp_sock_set_keepintvl(struct sock *sk, int val) 3437d41ecaacSChristoph Hellwig { 3438d41ecaacSChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPINTVL) 3439d41ecaacSChristoph Hellwig return -EINVAL; 3440d41ecaacSChristoph Hellwig 3441d41ecaacSChristoph Hellwig lock_sock(sk); 3442d41ecaacSChristoph Hellwig tcp_sk(sk)->keepalive_intvl = val * HZ; 3443d41ecaacSChristoph Hellwig release_sock(sk); 3444d41ecaacSChristoph Hellwig return 0; 3445d41ecaacSChristoph Hellwig } 3446d41ecaacSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepintvl); 3447d41ecaacSChristoph 
Hellwig 3448480aeb96SChristoph Hellwig int tcp_sock_set_keepcnt(struct sock *sk, int val) 3449480aeb96SChristoph Hellwig { 3450480aeb96SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPCNT) 3451480aeb96SChristoph Hellwig return -EINVAL; 3452480aeb96SChristoph Hellwig 3453480aeb96SChristoph Hellwig lock_sock(sk); 3454480aeb96SChristoph Hellwig tcp_sk(sk)->keepalive_probes = val; 3455480aeb96SChristoph Hellwig release_sock(sk); 3456480aeb96SChristoph Hellwig return 0; 3457480aeb96SChristoph Hellwig } 3458480aeb96SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepcnt); 3459480aeb96SChristoph Hellwig 3460cb811109SPrankur gupta int tcp_set_window_clamp(struct sock *sk, int val) 3461cb811109SPrankur gupta { 3462cb811109SPrankur gupta struct tcp_sock *tp = tcp_sk(sk); 3463cb811109SPrankur gupta 3464cb811109SPrankur gupta if (!val) { 3465cb811109SPrankur gupta if (sk->sk_state != TCP_CLOSE) 3466cb811109SPrankur gupta return -EINVAL; 3467cb811109SPrankur gupta tp->window_clamp = 0; 3468cb811109SPrankur gupta } else { 3469cb811109SPrankur gupta tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 3470cb811109SPrankur gupta SOCK_MIN_RCVBUF / 2 : val; 34713aa7857fSNeil Spring tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); 3472cb811109SPrankur gupta } 3473cb811109SPrankur gupta return 0; 3474cb811109SPrankur gupta } 3475cb811109SPrankur gupta 34761da177e4SLinus Torvalds /* 34771da177e4SLinus Torvalds * Socket option code for TCP. 34781da177e4SLinus Torvalds */ 3479d38d2b00SChristoph Hellwig static int do_tcp_setsockopt(struct sock *sk, int level, int optname, 3480d38d2b00SChristoph Hellwig sockptr_t optval, unsigned int optlen) 34811da177e4SLinus Torvalds { 34821da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 3483463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 34841e579caaSNikolay Borisov struct net *net = sock_net(sk); 34851da177e4SLinus Torvalds int val; 34861da177e4SLinus Torvalds int err = 0; 34871da177e4SLinus Torvalds 3488e56fb50fSWilliam Allen Simpson /* These are data/string values, all the others are ints */ 3489e56fb50fSWilliam Allen Simpson switch (optname) { 3490e56fb50fSWilliam Allen Simpson case TCP_CONGESTION: { 34915f8ef48dSStephen Hemminger char name[TCP_CA_NAME_MAX]; 34925f8ef48dSStephen Hemminger 34935f8ef48dSStephen Hemminger if (optlen < 1) 34945f8ef48dSStephen Hemminger return -EINVAL; 34955f8ef48dSStephen Hemminger 3496d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval, 34974fdb78d3SAndrew Morton min_t(long, TCP_CA_NAME_MAX-1, optlen)); 34985f8ef48dSStephen Hemminger if (val < 0) 34995f8ef48dSStephen Hemminger return -EFAULT; 35005f8ef48dSStephen Hemminger name[val] = 0; 35015f8ef48dSStephen Hemminger 35025f8ef48dSStephen Hemminger lock_sock(sk); 350329a94932SNeal Cardwell err = tcp_set_congestion_control(sk, name, true, 35048d650cdeSEric Dumazet ns_capable(sock_net(sk)->user_ns, 35058d650cdeSEric Dumazet CAP_NET_ADMIN)); 35065f8ef48dSStephen Hemminger release_sock(sk); 35075f8ef48dSStephen Hemminger return err; 35085f8ef48dSStephen Hemminger } 3509734942ccSDave Watson case TCP_ULP: { 3510734942ccSDave Watson char name[TCP_ULP_NAME_MAX]; 3511734942ccSDave Watson 3512734942ccSDave Watson if (optlen < 1) 3513734942ccSDave Watson return -EINVAL; 3514734942ccSDave Watson 3515d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval, 3516734942ccSDave Watson min_t(long, TCP_ULP_NAME_MAX - 1, 3517734942ccSDave Watson optlen)); 3518734942ccSDave Watson if (val < 0) 3519734942ccSDave Watson return -EFAULT; 
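/* Illustrative usage sketch, not part of the original file: the
 * string-valued options handled here come straight from setsockopt(2).
 * The option values below are examples, not something this code mandates:
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", strlen("cubic"));
 *	setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
 *
 * strncpy_from_sockptr() above copies at most TCP_ULP_NAME_MAX - 1 bytes,
 * and the next statement NUL-terminates the result before tcp_set_ulp()
 * looks the ULP module up by name.
 */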
3520734942ccSDave Watson name[val] = 0; 3521734942ccSDave Watson 3522734942ccSDave Watson lock_sock(sk); 3523734942ccSDave Watson err = tcp_set_ulp(sk, name); 3524734942ccSDave Watson release_sock(sk); 3525734942ccSDave Watson return err; 3526734942ccSDave Watson } 35271fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: { 35280f1ce023SJason Baron __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; 35290f1ce023SJason Baron __u8 *backup_key = NULL; 35301fba70e5SYuchung Cheng 35310f1ce023SJason Baron /* Allow a backup key as well to facilitate key rotation 35320f1ce023SJason Baron * First key is the active one. 35330f1ce023SJason Baron */ 35340f1ce023SJason Baron if (optlen != TCP_FASTOPEN_KEY_LENGTH && 35350f1ce023SJason Baron optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) 35361fba70e5SYuchung Cheng return -EINVAL; 35371fba70e5SYuchung Cheng 3538d38d2b00SChristoph Hellwig if (copy_from_sockptr(key, optval, optlen)) 35391fba70e5SYuchung Cheng return -EFAULT; 35401fba70e5SYuchung Cheng 35410f1ce023SJason Baron if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) 35420f1ce023SJason Baron backup_key = key + TCP_FASTOPEN_KEY_LENGTH; 35430f1ce023SJason Baron 3544438ac880SArd Biesheuvel return tcp_fastopen_reset_cipher(net, sk, key, backup_key); 35451fba70e5SYuchung Cheng } 3546e56fb50fSWilliam Allen Simpson default: 3547e56fb50fSWilliam Allen Simpson /* fallthru */ 3548e56fb50fSWilliam Allen Simpson break; 3549ccbd6a5aSJoe Perches } 35505f8ef48dSStephen Hemminger 35511da177e4SLinus Torvalds if (optlen < sizeof(int)) 35521da177e4SLinus Torvalds return -EINVAL; 35531da177e4SLinus Torvalds 3554d38d2b00SChristoph Hellwig if (copy_from_sockptr(&val, optval, sizeof(val))) 35551da177e4SLinus Torvalds return -EFAULT; 35561da177e4SLinus Torvalds 35571da177e4SLinus Torvalds lock_sock(sk); 35581da177e4SLinus Torvalds 35591da177e4SLinus Torvalds switch (optname) { 35601da177e4SLinus Torvalds case TCP_MAXSEG: 35611da177e4SLinus Torvalds /* Values greater than interface MTU won't take effect. 
However 35621da177e4SLinus Torvalds * at the point when this call is done we typically don't yet 3563a777f715SRohit Chavan * know which interface is going to be used 3564a777f715SRohit Chavan */ 3565cfc62d87SGao Feng if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { 35661da177e4SLinus Torvalds err = -EINVAL; 35671da177e4SLinus Torvalds break; 35681da177e4SLinus Torvalds } 35691da177e4SLinus Torvalds tp->rx_opt.user_mss = val; 35701da177e4SLinus Torvalds break; 35711da177e4SLinus Torvalds 35721da177e4SLinus Torvalds case TCP_NODELAY: 357312abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, val); 35741da177e4SLinus Torvalds break; 35751da177e4SLinus Torvalds 357636e31b0aSAndreas Petlund case TCP_THIN_LINEAR_TIMEOUTS: 357736e31b0aSAndreas Petlund if (val < 0 || val > 1) 357836e31b0aSAndreas Petlund err = -EINVAL; 357936e31b0aSAndreas Petlund else 358036e31b0aSAndreas Petlund tp->thin_lto = val; 358136e31b0aSAndreas Petlund break; 358236e31b0aSAndreas Petlund 35837e380175SAndreas Petlund case TCP_THIN_DUPACK: 35847e380175SAndreas Petlund if (val < 0 || val > 1) 35857e380175SAndreas Petlund err = -EINVAL; 35867e380175SAndreas Petlund break; 35877e380175SAndreas Petlund 3588ee995283SPavel Emelyanov case TCP_REPAIR: 3589ee995283SPavel Emelyanov if (!tcp_can_repair_sock(sk)) 3590ee995283SPavel Emelyanov err = -EPERM; 359131048d7aSStefan Baranoff else if (val == TCP_REPAIR_ON) { 3592ee995283SPavel Emelyanov tp->repair = 1; 3593ee995283SPavel Emelyanov sk->sk_reuse = SK_FORCE_REUSE; 3594ee995283SPavel Emelyanov tp->repair_queue = TCP_NO_QUEUE; 359531048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF) { 3596ee995283SPavel Emelyanov tp->repair = 0; 3597ee995283SPavel Emelyanov sk->sk_reuse = SK_NO_REUSE; 3598ee995283SPavel Emelyanov tcp_send_window_probe(sk); 359931048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF_NO_WP) { 360031048d7aSStefan Baranoff tp->repair = 0; 360131048d7aSStefan Baranoff sk->sk_reuse = SK_NO_REUSE; 3602ee995283SPavel Emelyanov } else 3603ee995283SPavel Emelyanov err = -EINVAL; 3604ee995283SPavel Emelyanov 3605ee995283SPavel Emelyanov break; 3606ee995283SPavel Emelyanov 3607ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE: 3608ee995283SPavel Emelyanov if (!tp->repair) 3609ee995283SPavel Emelyanov err = -EPERM; 3610bf2acc94SEric Dumazet else if ((unsigned int)val < TCP_QUEUES_NR) 3611ee995283SPavel Emelyanov tp->repair_queue = val; 3612ee995283SPavel Emelyanov else 3613ee995283SPavel Emelyanov err = -EINVAL; 3614ee995283SPavel Emelyanov break; 3615ee995283SPavel Emelyanov 3616ee995283SPavel Emelyanov case TCP_QUEUE_SEQ: 36178811f4a9SEric Dumazet if (sk->sk_state != TCP_CLOSE) { 3618ee995283SPavel Emelyanov err = -EPERM; 36198811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_SEND_QUEUE) { 36208811f4a9SEric Dumazet if (!tcp_rtx_queue_empty(sk)) 36218811f4a9SEric Dumazet err = -EPERM; 36228811f4a9SEric Dumazet else 36230f317464SEric Dumazet WRITE_ONCE(tp->write_seq, val); 36248811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_RECV_QUEUE) { 36258811f4a9SEric Dumazet if (tp->rcv_nxt != tp->copied_seq) { 36268811f4a9SEric Dumazet err = -EPERM; 36278811f4a9SEric Dumazet } else { 3628dba7d9b8SEric Dumazet WRITE_ONCE(tp->rcv_nxt, val); 36296cd6cbf5SEric Dumazet WRITE_ONCE(tp->copied_seq, val); 36306cd6cbf5SEric Dumazet } 36318811f4a9SEric Dumazet } else { 3632ee995283SPavel Emelyanov err = -EINVAL; 36338811f4a9SEric Dumazet } 3634ee995283SPavel Emelyanov break; 3635ee995283SPavel Emelyanov 3636b139ba4eSPavel Emelyanov case TCP_REPAIR_OPTIONS: 
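/* Illustrative sketch, not part of the original file: the TCP_REPAIR*
 * options above and TCP_REPAIR_OPTIONS here are intended for
 * checkpoint/restore tools (CRIU being the best-known user). A
 * hypothetical restore sequence on a fresh socket fd might look
 * roughly like:
 *
 *	int on = 1, q = TCP_SEND_QUEUE;
 *	setsockopt(fd, SOL_TCP, TCP_REPAIR, &on, sizeof(on));
 *	setsockopt(fd, SOL_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
 *	setsockopt(fd, SOL_TCP, TCP_QUEUE_SEQ, &snd_seq, sizeof(snd_seq));
 *	connect(fd, ...);		(no handshake is performed in repair mode)
 *	setsockopt(fd, SOL_TCP, TCP_REPAIR_OPTIONS, opts, opts_len);
 *
 * where snd_seq, opts and opts_len stand for state captured at
 * checkpoint time; as the code below enforces, TCP_REPAIR_OPTIONS is
 * only accepted while tp->repair is set and the connection is
 * ESTABLISHED.
 */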
3637b139ba4eSPavel Emelyanov if (!tp->repair) 3638b139ba4eSPavel Emelyanov err = -EINVAL; 3639b139ba4eSPavel Emelyanov else if (sk->sk_state == TCP_ESTABLISHED) 3640d38d2b00SChristoph Hellwig err = tcp_repair_options_est(sk, optval, optlen); 3641b139ba4eSPavel Emelyanov else 3642b139ba4eSPavel Emelyanov err = -EPERM; 3643b139ba4eSPavel Emelyanov break; 3644b139ba4eSPavel Emelyanov 36451da177e4SLinus Torvalds case TCP_CORK: 3646db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, val); 36471da177e4SLinus Torvalds break; 36481da177e4SLinus Torvalds 36491da177e4SLinus Torvalds case TCP_KEEPIDLE: 3650aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val); 36511da177e4SLinus Torvalds break; 36521da177e4SLinus Torvalds case TCP_KEEPINTVL: 36531da177e4SLinus Torvalds if (val < 1 || val > MAX_TCP_KEEPINTVL) 36541da177e4SLinus Torvalds err = -EINVAL; 36551da177e4SLinus Torvalds else 36561da177e4SLinus Torvalds tp->keepalive_intvl = val * HZ; 36571da177e4SLinus Torvalds break; 36581da177e4SLinus Torvalds case TCP_KEEPCNT: 36591da177e4SLinus Torvalds if (val < 1 || val > MAX_TCP_KEEPCNT) 36601da177e4SLinus Torvalds err = -EINVAL; 36611da177e4SLinus Torvalds else 36621da177e4SLinus Torvalds tp->keepalive_probes = val; 36631da177e4SLinus Torvalds break; 36641da177e4SLinus Torvalds case TCP_SYNCNT: 36651da177e4SLinus Torvalds if (val < 1 || val > MAX_TCP_SYNCNT) 36661da177e4SLinus Torvalds err = -EINVAL; 36671da177e4SLinus Torvalds else 3668463c84b9SArnaldo Carvalho de Melo icsk->icsk_syn_retries = val; 36691da177e4SLinus Torvalds break; 36701da177e4SLinus Torvalds 3671cd8ae852SEric Dumazet case TCP_SAVE_SYN: 3672267cf9faSMartin KaFai Lau /* 0: disable, 1: enable, 2: start from ether_header */ 3673267cf9faSMartin KaFai Lau if (val < 0 || val > 2) 3674cd8ae852SEric Dumazet err = -EINVAL; 3675cd8ae852SEric Dumazet else 3676cd8ae852SEric Dumazet tp->save_syn = val; 3677cd8ae852SEric Dumazet break; 3678cd8ae852SEric Dumazet 36791da177e4SLinus Torvalds case TCP_LINGER2: 36801da177e4SLinus Torvalds if (val < 0) 36811da177e4SLinus Torvalds tp->linger2 = -1; 3682f0628c52SCambda Zhu else if (val > TCP_FIN_TIMEOUT_MAX / HZ) 3683f0628c52SCambda Zhu tp->linger2 = TCP_FIN_TIMEOUT_MAX; 36841da177e4SLinus Torvalds else 36851da177e4SLinus Torvalds tp->linger2 = val * HZ; 36861da177e4SLinus Torvalds break; 36871da177e4SLinus Torvalds 36881da177e4SLinus Torvalds case TCP_DEFER_ACCEPT: 3689b103cf34SJulian Anastasov /* Translate value in seconds to number of retransmits */ 3690b103cf34SJulian Anastasov icsk->icsk_accept_queue.rskq_defer_accept = 3691b103cf34SJulian Anastasov secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 3692b103cf34SJulian Anastasov TCP_RTO_MAX / HZ); 36931da177e4SLinus Torvalds break; 36941da177e4SLinus Torvalds 36951da177e4SLinus Torvalds case TCP_WINDOW_CLAMP: 3696cb811109SPrankur gupta err = tcp_set_window_clamp(sk, val); 36971da177e4SLinus Torvalds break; 36981da177e4SLinus Torvalds 36991da177e4SLinus Torvalds case TCP_QUICKACK: 3700ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val); 37011da177e4SLinus Torvalds break; 37021da177e4SLinus Torvalds 3703cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 3704cfb6eeb4SYOSHIFUJI Hideaki case TCP_MD5SIG: 37058917a777SIvan Delalande case TCP_MD5SIG_EXT: 3706d38d2b00SChristoph Hellwig err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 3707cfb6eeb4SYOSHIFUJI Hideaki break; 3708cfb6eeb4SYOSHIFUJI Hideaki #endif 3709dca43c75SJerry Chu case TCP_USER_TIMEOUT: 3710b248230cSYuchung Cheng /* Cap the max time in ms TCP will retry or probe 
the window 3711dca43c75SJerry Chu * before giving up and aborting (ETIMEDOUT) a connection. 3712dca43c75SJerry Chu */ 371342493570SHangbin Liu if (val < 0) 371442493570SHangbin Liu err = -EINVAL; 371542493570SHangbin Liu else 37169bcc66e1SJon Maxwell icsk->icsk_user_timeout = val; 3717dca43c75SJerry Chu break; 37188336886fSJerry Chu 37198336886fSJerry Chu case TCP_FASTOPEN: 37208336886fSJerry Chu if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 3721dfea2aa6SChristoph Paasch TCPF_LISTEN))) { 372243713848SHaishuang Yan tcp_fastopen_init_key_once(net); 3723dfea2aa6SChristoph Paasch 37240536fcc0SEric Dumazet fastopen_queue_tune(sk, val); 3725dfea2aa6SChristoph Paasch } else { 37268336886fSJerry Chu err = -EINVAL; 3727dfea2aa6SChristoph Paasch } 37288336886fSJerry Chu break; 372919f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT: 373019f6d3f3SWei Wang if (val > 1 || val < 0) { 373119f6d3f3SWei Wang err = -EINVAL; 37325a542133SKuniyuki Iwashima } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & 37335a542133SKuniyuki Iwashima TFO_CLIENT_ENABLE) { 373419f6d3f3SWei Wang if (sk->sk_state == TCP_CLOSE) 373519f6d3f3SWei Wang tp->fastopen_connect = val; 373619f6d3f3SWei Wang else 373719f6d3f3SWei Wang err = -EINVAL; 373819f6d3f3SWei Wang } else { 373919f6d3f3SWei Wang err = -EOPNOTSUPP; 374019f6d3f3SWei Wang } 374119f6d3f3SWei Wang break; 374271c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE: 374371c02379SChristoph Paasch if (val > 1 || val < 0) 374471c02379SChristoph Paasch err = -EINVAL; 374571c02379SChristoph Paasch else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 374671c02379SChristoph Paasch err = -EINVAL; 374771c02379SChristoph Paasch else 374871c02379SChristoph Paasch tp->fastopen_no_cookie = val; 374971c02379SChristoph Paasch break; 375093be6ce0SAndrey Vagin case TCP_TIMESTAMP: 375193be6ce0SAndrey Vagin if (!tp->repair) 375293be6ce0SAndrey Vagin err = -EPERM; 375393be6ce0SAndrey Vagin else 37549a568de4SEric Dumazet tp->tsoffset = val - tcp_time_stamp_raw(); 375593be6ce0SAndrey Vagin break; 3756b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: 3757b1ed4c4fSAndrey Vagin err = tcp_repair_set_window(tp, optval, optlen); 3758b1ed4c4fSAndrey Vagin break; 3759c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT: 3760c9bee3b7SEric Dumazet tp->notsent_lowat = val; 3761c9bee3b7SEric Dumazet sk->sk_write_space(sk); 3762c9bee3b7SEric Dumazet break; 3763b75eba76SSoheil Hassas Yeganeh case TCP_INQ: 3764b75eba76SSoheil Hassas Yeganeh if (val > 1 || val < 0) 3765b75eba76SSoheil Hassas Yeganeh err = -EINVAL; 3766b75eba76SSoheil Hassas Yeganeh else 3767b75eba76SSoheil Hassas Yeganeh tp->recvmsg_inq = val; 3768b75eba76SSoheil Hassas Yeganeh break; 3769a842fe14SEric Dumazet case TCP_TX_DELAY: 3770a842fe14SEric Dumazet if (val) 3771a842fe14SEric Dumazet tcp_enable_tx_delay(); 3772a842fe14SEric Dumazet tp->tcp_tx_delay = val; 3773a842fe14SEric Dumazet break; 37741da177e4SLinus Torvalds default: 37751da177e4SLinus Torvalds err = -ENOPROTOOPT; 37761da177e4SLinus Torvalds break; 37773ff50b79SStephen Hemminger } 37783ff50b79SStephen Hemminger 37791da177e4SLinus Torvalds release_sock(sk); 37801da177e4SLinus Torvalds return err; 37811da177e4SLinus Torvalds } 37821da177e4SLinus Torvalds 3783a7b75c5aSChristoph Hellwig int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 3784b7058842SDavid S. 
Miller unsigned int optlen) 37853fdadf7dSDmitry Mishin { 3786cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 37873fdadf7dSDmitry Mishin 37883fdadf7dSDmitry Mishin if (level != SOL_TCP) 37893fdadf7dSDmitry Mishin return icsk->icsk_af_ops->setsockopt(sk, level, optname, 37903fdadf7dSDmitry Mishin optval, optlen); 3791a7b75c5aSChristoph Hellwig return do_tcp_setsockopt(sk, level, optname, optval, optlen); 37923fdadf7dSDmitry Mishin } 37934bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_setsockopt); 37943fdadf7dSDmitry Mishin 3795efd90174SFrancis Yan static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 3796efd90174SFrancis Yan struct tcp_info *info) 3797efd90174SFrancis Yan { 3798efd90174SFrancis Yan u64 stats[__TCP_CHRONO_MAX], total = 0; 3799efd90174SFrancis Yan enum tcp_chrono i; 3800efd90174SFrancis Yan 3801efd90174SFrancis Yan for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { 3802efd90174SFrancis Yan stats[i] = tp->chrono_stat[i - 1]; 3803efd90174SFrancis Yan if (i == tp->chrono_type) 3804628174ccSEric Dumazet stats[i] += tcp_jiffies32 - tp->chrono_start; 3805efd90174SFrancis Yan stats[i] *= USEC_PER_SEC / HZ; 3806efd90174SFrancis Yan total += stats[i]; 3807efd90174SFrancis Yan } 3808efd90174SFrancis Yan 3809efd90174SFrancis Yan info->tcpi_busy_time = total; 3810efd90174SFrancis Yan info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; 3811efd90174SFrancis Yan info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; 3812efd90174SFrancis Yan } 3813efd90174SFrancis Yan 38141da177e4SLinus Torvalds /* Return information about state of tcp endpoint in API format. */ 38150df48c26SEric Dumazet void tcp_get_info(struct sock *sk, struct tcp_info *info) 38161da177e4SLinus Torvalds { 381735ac838aSCraig Gallek const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 3818463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 381976a9ebe8SEric Dumazet unsigned long rate; 38200263598cSWei Wang u32 now; 3821ff5d7497SEric Dumazet u64 rate64; 382267db3e4bSEric Dumazet bool slow; 38231da177e4SLinus Torvalds 38241da177e4SLinus Torvalds memset(info, 0, sizeof(*info)); 382535ac838aSCraig Gallek if (sk->sk_type != SOCK_STREAM) 382635ac838aSCraig Gallek return; 38271da177e4SLinus Torvalds 3828986ffdfdSYafang Shao info->tcpi_state = inet_sk_state_load(sk); 382900fd38d9SEric Dumazet 3830ccbf3bfaSEric Dumazet /* Report meaningful fields for all TCP states, including listeners */ 3831ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_pacing_rate); 383276a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL; 3833f522a5fcSEric Dumazet info->tcpi_pacing_rate = rate64; 3834ccbf3bfaSEric Dumazet 3835ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_max_pacing_rate); 383676a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 3837f522a5fcSEric Dumazet info->tcpi_max_pacing_rate = rate64; 3838ccbf3bfaSEric Dumazet 3839ccbf3bfaSEric Dumazet info->tcpi_reordering = tp->reordering; 384040570375SEric Dumazet info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); 3841ccbf3bfaSEric Dumazet 3842ccbf3bfaSEric Dumazet if (info->tcpi_state == TCP_LISTEN) { 3843ccbf3bfaSEric Dumazet /* listeners aliased fields : 3844ccbf3bfaSEric Dumazet * tcpi_unacked -> Number of children ready for accept() 3845ccbf3bfaSEric Dumazet * tcpi_sacked -> max backlog 3846ccbf3bfaSEric Dumazet */ 3847288efe86SEric Dumazet info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); 3848099ecf59SEric Dumazet info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); 3849ccbf3bfaSEric Dumazet return; 3850ccbf3bfaSEric Dumazet } 3851b369e7fdSEric Dumazet 3852b369e7fdSEric Dumazet slow = lock_sock_fast(sk); 3853b369e7fdSEric Dumazet 38546687e988SArnaldo Carvalho de Melo info->tcpi_ca_state = icsk->icsk_ca_state; 3855463c84b9SArnaldo Carvalho de Melo info->tcpi_retransmits = icsk->icsk_retransmits; 38566687e988SArnaldo Carvalho de Melo info->tcpi_probes = icsk->icsk_probes_out; 3857463c84b9SArnaldo Carvalho de Melo info->tcpi_backoff = icsk->icsk_backoff; 38581da177e4SLinus Torvalds 38591da177e4SLinus Torvalds if (tp->rx_opt.tstamp_ok) 38601da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 3861e60402d0SIlpo Järvinen if (tcp_is_sack(tp)) 38621da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_SACK; 38631da177e4SLinus Torvalds if (tp->rx_opt.wscale_ok) { 38641da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_WSCALE; 38651da177e4SLinus Torvalds info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 38661da177e4SLinus Torvalds info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 38671da177e4SLinus Torvalds } 38681da177e4SLinus Torvalds 38691da177e4SLinus Torvalds if (tp->ecn_flags & TCP_ECN_OK) 38701da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_ECN; 3871b5c5693bSEric Dumazet if (tp->ecn_flags & TCP_ECN_SEEN) 3872b5c5693bSEric Dumazet info->tcpi_options |= TCPI_OPT_ECN_SEEN; 38736f73601eSYuchung Cheng if (tp->syn_data_acked) 38746f73601eSYuchung Cheng info->tcpi_options |= TCPI_OPT_SYN_DATA; 38751da177e4SLinus Torvalds 3876463c84b9SArnaldo Carvalho de Melo info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 3877463c84b9SArnaldo Carvalho de Melo info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 3878c1b4a7e6SDavid S. 
Miller info->tcpi_snd_mss = tp->mss_cache; 3879463c84b9SArnaldo Carvalho de Melo info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 38801da177e4SLinus Torvalds 38811da177e4SLinus Torvalds info->tcpi_unacked = tp->packets_out; 38821da177e4SLinus Torvalds info->tcpi_sacked = tp->sacked_out; 3883ccbf3bfaSEric Dumazet 38841da177e4SLinus Torvalds info->tcpi_lost = tp->lost_out; 38851da177e4SLinus Torvalds info->tcpi_retrans = tp->retrans_out; 38861da177e4SLinus Torvalds 3887d635fbe2SEric Dumazet now = tcp_jiffies32; 38881da177e4SLinus Torvalds info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 3889463c84b9SArnaldo Carvalho de Melo info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 38901da177e4SLinus Torvalds info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 38911da177e4SLinus Torvalds 3892d83d8461SArnaldo Carvalho de Melo info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 38931da177e4SLinus Torvalds info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 3894740b0f18SEric Dumazet info->tcpi_rtt = tp->srtt_us >> 3; 3895740b0f18SEric Dumazet info->tcpi_rttvar = tp->mdev_us >> 2; 38961da177e4SLinus Torvalds info->tcpi_snd_ssthresh = tp->snd_ssthresh; 38971da177e4SLinus Torvalds info->tcpi_advmss = tp->advmss; 38981da177e4SLinus Torvalds 3899645f4c6fSEric Dumazet info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; 39001da177e4SLinus Torvalds info->tcpi_rcv_space = tp->rcvq_space.space; 39011da177e4SLinus Torvalds 39021da177e4SLinus Torvalds info->tcpi_total_retrans = tp->total_retrans; 3903977cb0ecSEric Dumazet 3904f522a5fcSEric Dumazet info->tcpi_bytes_acked = tp->bytes_acked; 3905f522a5fcSEric Dumazet info->tcpi_bytes_received = tp->bytes_received; 390667db3e4bSEric Dumazet info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); 3907efd90174SFrancis Yan tcp_get_info_chrono_stats(tp, info); 390867db3e4bSEric Dumazet 39092efd055cSMarcelo Ricardo Leitner info->tcpi_segs_out = tp->segs_out; 39100307a0b7SEric Dumazet 39110307a0b7SEric Dumazet /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ 39120307a0b7SEric Dumazet info->tcpi_segs_in = READ_ONCE(tp->segs_in); 39130307a0b7SEric Dumazet info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); 3914cd9b2660SEric Dumazet 3915cd9b2660SEric Dumazet info->tcpi_min_rtt = tcp_min_rtt(tp); 3916a44d6eacSMartin KaFai Lau info->tcpi_data_segs_out = tp->data_segs_out; 3917eb8329e0SYuchung Cheng 3918eb8329e0SYuchung Cheng info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 
1 : 0; 39190263598cSWei Wang rate64 = tcp_compute_delivery_rate(tp); 39200263598cSWei Wang if (rate64) 3921f522a5fcSEric Dumazet info->tcpi_delivery_rate = rate64; 3922feb5f2ecSYuchung Cheng info->tcpi_delivered = tp->delivered; 3923feb5f2ecSYuchung Cheng info->tcpi_delivered_ce = tp->delivered_ce; 3924ba113c3aSWei Wang info->tcpi_bytes_sent = tp->bytes_sent; 3925fb31c9b9SWei Wang info->tcpi_bytes_retrans = tp->bytes_retrans; 39267e10b655SWei Wang info->tcpi_dsack_dups = tp->dsack_dups; 39277ec65372SWei Wang info->tcpi_reord_seen = tp->reord_seen; 3928f9af2dbbSThomas Higdon info->tcpi_rcv_ooopack = tp->rcv_ooopack; 39298f7baad7SThomas Higdon info->tcpi_snd_wnd = tp->snd_wnd; 393048027478SJason Baron info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; 3931b369e7fdSEric Dumazet unlock_sock_fast(sk, slow); 39321da177e4SLinus Torvalds } 39331da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info); 39341da177e4SLinus Torvalds 3935984988aaSWei Wang static size_t tcp_opt_stats_get_size(void) 3936984988aaSWei Wang { 3937984988aaSWei Wang return 3938984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ 3939984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ 3940984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ 3941984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ 3942984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ 3943984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ 3944984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ 3945984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ 3946984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ 3947984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ 3948984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ 3949984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ 3950984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ 3951984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ 3952984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ 3953984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ 3954984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ 3955ba113c3aSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ 3956fb31c9b9SWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ 39577e10b655SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ 39587ec65372SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ 3959e8bd8fcaSYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ 396032efcc06SAbdul Kabbani nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ 3961e08ab0b3SYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ 396248040793SYousuk Seung nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */ 3963e7ed11eeSYousuk Seung nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */ 3964984988aaSWei Wang 0; 3965984988aaSWei Wang } 3966984988aaSWei Wang 3967e7ed11eeSYousuk Seung /* Returns TTL or hop limit of an incoming packet from skb. 
*/ 3968e7ed11eeSYousuk Seung static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb) 3969e7ed11eeSYousuk Seung { 3970e7ed11eeSYousuk Seung if (skb->protocol == htons(ETH_P_IP)) 3971e7ed11eeSYousuk Seung return ip_hdr(skb)->ttl; 3972e7ed11eeSYousuk Seung else if (skb->protocol == htons(ETH_P_IPV6)) 3973e7ed11eeSYousuk Seung return ipv6_hdr(skb)->hop_limit; 3974e7ed11eeSYousuk Seung else 3975e7ed11eeSYousuk Seung return 0; 3976e7ed11eeSYousuk Seung } 3977e7ed11eeSYousuk Seung 397848040793SYousuk Seung struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, 3979e7ed11eeSYousuk Seung const struct sk_buff *orig_skb, 3980e7ed11eeSYousuk Seung const struct sk_buff *ack_skb) 39811c885808SFrancis Yan { 39821c885808SFrancis Yan const struct tcp_sock *tp = tcp_sk(sk); 39831c885808SFrancis Yan struct sk_buff *stats; 39841c885808SFrancis Yan struct tcp_info info; 398576a9ebe8SEric Dumazet unsigned long rate; 3986bb7c19f9SWei Wang u64 rate64; 39871c885808SFrancis Yan 3988984988aaSWei Wang stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); 39891c885808SFrancis Yan if (!stats) 39901c885808SFrancis Yan return NULL; 39911c885808SFrancis Yan 39921c885808SFrancis Yan tcp_get_info_chrono_stats(tp, &info); 39931c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_BUSY, 39941c885808SFrancis Yan info.tcpi_busy_time, TCP_NLA_PAD); 39951c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, 39961c885808SFrancis Yan info.tcpi_rwnd_limited, TCP_NLA_PAD); 39971c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, 39981c885808SFrancis Yan info.tcpi_sndbuf_limited, TCP_NLA_PAD); 39997e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, 40007e98102fSYuchung Cheng tp->data_segs_out, TCP_NLA_PAD); 40017e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, 40027e98102fSYuchung Cheng tp->total_retrans, TCP_NLA_PAD); 4003bb7c19f9SWei Wang 4004bb7c19f9SWei Wang rate = READ_ONCE(sk->sk_pacing_rate); 400576a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 4006bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); 4007bb7c19f9SWei Wang 4008bb7c19f9SWei Wang rate64 = tcp_compute_delivery_rate(tp); 4009bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); 4010bb7c19f9SWei Wang 401140570375SEric Dumazet nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); 4012bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); 4013bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); 4014bb7c19f9SWei Wang 4015bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); 4016bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); 40177156d194SYousuk Seung nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); 4018feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); 4019feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); 402087ecc95dSPriyaranjan Jha 402187ecc95dSPriyaranjan Jha nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); 4022be631892SPriyaranjan Jha nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); 4023feb5f2ecSYuchung Cheng 4024ba113c3aSWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, 4025ba113c3aSWei Wang TCP_NLA_PAD); 4026fb31c9b9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, 4027fb31c9b9SWei Wang TCP_NLA_PAD); 40287e10b655SWei Wang nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); 40297ec65372SWei Wang nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); 4030e8bd8fcaSYousuk Seung nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); 403132efcc06SAbdul Kabbani nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); 4032e08ab0b3SYousuk Seung nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, 4033e08ab0b3SYousuk Seung max_t(int, 0, tp->write_seq - tp->snd_nxt)); 403448040793SYousuk Seung nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, 403548040793SYousuk Seung TCP_NLA_PAD); 4036e7ed11eeSYousuk Seung if (ack_skb) 4037e7ed11eeSYousuk Seung nla_put_u8(stats, TCP_NLA_TTL, 4038e7ed11eeSYousuk Seung tcp_skb_ttl_or_hop_limit(ack_skb)); 4039ba113c3aSWei Wang 40401c885808SFrancis Yan return stats; 40411c885808SFrancis Yan } 40421c885808SFrancis Yan 40433fdadf7dSDmitry Mishin static int do_tcp_getsockopt(struct sock *sk, int level, 40443fdadf7dSDmitry Mishin int optname, char __user *optval, int __user *optlen) 40451da177e4SLinus Torvalds { 4046295f7324SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 40471da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 40486fa25166SNikolay Borisov struct net *net = sock_net(sk); 40491da177e4SLinus Torvalds int val, len; 40501da177e4SLinus Torvalds 40511da177e4SLinus Torvalds if (get_user(len, optlen)) 40521da177e4SLinus Torvalds return -EFAULT; 40531da177e4SLinus Torvalds 40541da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(int)); 40551da177e4SLinus Torvalds 40561da177e4SLinus Torvalds if (len < 0) 40571da177e4SLinus Torvalds return -EINVAL; 40581da177e4SLinus Torvalds 40591da177e4SLinus Torvalds switch (optname) { 40601da177e4SLinus Torvalds case TCP_MAXSEG: 4061c1b4a7e6SDavid S. 
Miller val = tp->mss_cache; 40621da177e4SLinus Torvalds if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 40631da177e4SLinus Torvalds val = tp->rx_opt.user_mss; 40645e6a3ce6SPavel Emelyanov if (tp->repair) 40655e6a3ce6SPavel Emelyanov val = tp->rx_opt.mss_clamp; 40661da177e4SLinus Torvalds break; 40671da177e4SLinus Torvalds case TCP_NODELAY: 40681da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_OFF); 40691da177e4SLinus Torvalds break; 40701da177e4SLinus Torvalds case TCP_CORK: 40711da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_CORK); 40721da177e4SLinus Torvalds break; 40731da177e4SLinus Torvalds case TCP_KEEPIDLE: 4074df19a626SEric Dumazet val = keepalive_time_when(tp) / HZ; 40751da177e4SLinus Torvalds break; 40761da177e4SLinus Torvalds case TCP_KEEPINTVL: 4077df19a626SEric Dumazet val = keepalive_intvl_when(tp) / HZ; 40781da177e4SLinus Torvalds break; 40791da177e4SLinus Torvalds case TCP_KEEPCNT: 4080df19a626SEric Dumazet val = keepalive_probes(tp); 40811da177e4SLinus Torvalds break; 40821da177e4SLinus Torvalds case TCP_SYNCNT: 408320a3b1c0SKuniyuki Iwashima val = icsk->icsk_syn_retries ? : 408420a3b1c0SKuniyuki Iwashima READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); 40851da177e4SLinus Torvalds break; 40861da177e4SLinus Torvalds case TCP_LINGER2: 40871da177e4SLinus Torvalds val = tp->linger2; 40881da177e4SLinus Torvalds if (val >= 0) 408939e24435SKuniyuki Iwashima val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; 40901da177e4SLinus Torvalds break; 40911da177e4SLinus Torvalds case TCP_DEFER_ACCEPT: 4092b103cf34SJulian Anastasov val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, 4093b103cf34SJulian Anastasov TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); 40941da177e4SLinus Torvalds break; 40951da177e4SLinus Torvalds case TCP_WINDOW_CLAMP: 40961da177e4SLinus Torvalds val = tp->window_clamp; 40971da177e4SLinus Torvalds break; 40981da177e4SLinus Torvalds case TCP_INFO: { 40991da177e4SLinus Torvalds struct tcp_info info; 41001da177e4SLinus Torvalds 41011da177e4SLinus Torvalds if (get_user(len, optlen)) 41021da177e4SLinus Torvalds return -EFAULT; 41031da177e4SLinus Torvalds 41041da177e4SLinus Torvalds tcp_get_info(sk, &info); 41051da177e4SLinus Torvalds 41061da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(info)); 41071da177e4SLinus Torvalds if (put_user(len, optlen)) 41081da177e4SLinus Torvalds return -EFAULT; 41091da177e4SLinus Torvalds if (copy_to_user(optval, &info, len)) 41101da177e4SLinus Torvalds return -EFAULT; 41111da177e4SLinus Torvalds return 0; 41121da177e4SLinus Torvalds } 41136e9250f5SEric Dumazet case TCP_CC_INFO: { 41146e9250f5SEric Dumazet const struct tcp_congestion_ops *ca_ops; 41156e9250f5SEric Dumazet union tcp_cc_info info; 41166e9250f5SEric Dumazet size_t sz = 0; 41176e9250f5SEric Dumazet int attr; 41186e9250f5SEric Dumazet 41196e9250f5SEric Dumazet if (get_user(len, optlen)) 41206e9250f5SEric Dumazet return -EFAULT; 41216e9250f5SEric Dumazet 41226e9250f5SEric Dumazet ca_ops = icsk->icsk_ca_ops; 41236e9250f5SEric Dumazet if (ca_ops && ca_ops->get_info) 41246e9250f5SEric Dumazet sz = ca_ops->get_info(sk, ~0U, &attr, &info); 41256e9250f5SEric Dumazet 41266e9250f5SEric Dumazet len = min_t(unsigned int, len, sz); 41276e9250f5SEric Dumazet if (put_user(len, optlen)) 41286e9250f5SEric Dumazet return -EFAULT; 41296e9250f5SEric Dumazet if (copy_to_user(optval, &info, len)) 41306e9250f5SEric Dumazet return -EFAULT; 41316e9250f5SEric Dumazet return 0; 41326e9250f5SEric Dumazet } 41331da177e4SLinus Torvalds case 
TCP_QUICKACK: 413431954cd8SWei Wang val = !inet_csk_in_pingpong_mode(sk); 41351da177e4SLinus Torvalds break; 41365f8ef48dSStephen Hemminger 41375f8ef48dSStephen Hemminger case TCP_CONGESTION: 41385f8ef48dSStephen Hemminger if (get_user(len, optlen)) 41395f8ef48dSStephen Hemminger return -EFAULT; 41405f8ef48dSStephen Hemminger len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 41415f8ef48dSStephen Hemminger if (put_user(len, optlen)) 41425f8ef48dSStephen Hemminger return -EFAULT; 41436687e988SArnaldo Carvalho de Melo if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) 41445f8ef48dSStephen Hemminger return -EFAULT; 41455f8ef48dSStephen Hemminger return 0; 4146e56fb50fSWilliam Allen Simpson 4147734942ccSDave Watson case TCP_ULP: 4148734942ccSDave Watson if (get_user(len, optlen)) 4149734942ccSDave Watson return -EFAULT; 4150734942ccSDave Watson len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); 4151d97af30fSDave Watson if (!icsk->icsk_ulp_ops) { 4152d97af30fSDave Watson if (put_user(0, optlen)) 4153d97af30fSDave Watson return -EFAULT; 4154d97af30fSDave Watson return 0; 4155d97af30fSDave Watson } 4156734942ccSDave Watson if (put_user(len, optlen)) 4157734942ccSDave Watson return -EFAULT; 4158734942ccSDave Watson if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len)) 4159734942ccSDave Watson return -EFAULT; 4160734942ccSDave Watson return 0; 4161734942ccSDave Watson 41621fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: { 4163f19008e6SJason Baron u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; 4164f19008e6SJason Baron unsigned int key_len; 41651fba70e5SYuchung Cheng 41661fba70e5SYuchung Cheng if (get_user(len, optlen)) 41671fba70e5SYuchung Cheng return -EFAULT; 41681fba70e5SYuchung Cheng 4169f19008e6SJason Baron key_len = tcp_fastopen_get_cipher(net, icsk, key) * 41700f1ce023SJason Baron TCP_FASTOPEN_KEY_LENGTH; 41710f1ce023SJason Baron len = min_t(unsigned int, len, key_len); 41721fba70e5SYuchung Cheng if (put_user(len, optlen)) 41731fba70e5SYuchung Cheng return -EFAULT; 41741fba70e5SYuchung Cheng if (copy_to_user(optval, key, len)) 41751fba70e5SYuchung Cheng return -EFAULT; 41761fba70e5SYuchung Cheng return 0; 41771fba70e5SYuchung Cheng } 41783c0fef0bSJosh Hunt case TCP_THIN_LINEAR_TIMEOUTS: 41793c0fef0bSJosh Hunt val = tp->thin_lto; 41803c0fef0bSJosh Hunt break; 41814a7f6009SYuchung Cheng 41823c0fef0bSJosh Hunt case TCP_THIN_DUPACK: 41834a7f6009SYuchung Cheng val = 0; 41843c0fef0bSJosh Hunt break; 4185dca43c75SJerry Chu 4186ee995283SPavel Emelyanov case TCP_REPAIR: 4187ee995283SPavel Emelyanov val = tp->repair; 4188ee995283SPavel Emelyanov break; 4189ee995283SPavel Emelyanov 4190ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE: 4191ee995283SPavel Emelyanov if (tp->repair) 4192ee995283SPavel Emelyanov val = tp->repair_queue; 4193ee995283SPavel Emelyanov else 4194ee995283SPavel Emelyanov return -EINVAL; 4195ee995283SPavel Emelyanov break; 4196ee995283SPavel Emelyanov 4197b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: { 4198b1ed4c4fSAndrey Vagin struct tcp_repair_window opt; 4199b1ed4c4fSAndrey Vagin 4200b1ed4c4fSAndrey Vagin if (get_user(len, optlen)) 4201b1ed4c4fSAndrey Vagin return -EFAULT; 4202b1ed4c4fSAndrey Vagin 4203b1ed4c4fSAndrey Vagin if (len != sizeof(opt)) 4204b1ed4c4fSAndrey Vagin return -EINVAL; 4205b1ed4c4fSAndrey Vagin 4206b1ed4c4fSAndrey Vagin if (!tp->repair) 4207b1ed4c4fSAndrey Vagin return -EPERM; 4208b1ed4c4fSAndrey Vagin 4209b1ed4c4fSAndrey Vagin opt.snd_wl1 = tp->snd_wl1; 4210b1ed4c4fSAndrey Vagin opt.snd_wnd = tp->snd_wnd; 4211b1ed4c4fSAndrey Vagin opt.max_window = 
tp->max_window; 4212b1ed4c4fSAndrey Vagin opt.rcv_wnd = tp->rcv_wnd; 4213b1ed4c4fSAndrey Vagin opt.rcv_wup = tp->rcv_wup; 4214b1ed4c4fSAndrey Vagin 4215b1ed4c4fSAndrey Vagin if (copy_to_user(optval, &opt, len)) 4216b1ed4c4fSAndrey Vagin return -EFAULT; 4217b1ed4c4fSAndrey Vagin return 0; 4218b1ed4c4fSAndrey Vagin } 4219ee995283SPavel Emelyanov case TCP_QUEUE_SEQ: 4220ee995283SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE) 4221ee995283SPavel Emelyanov val = tp->write_seq; 4222ee995283SPavel Emelyanov else if (tp->repair_queue == TCP_RECV_QUEUE) 4223ee995283SPavel Emelyanov val = tp->rcv_nxt; 4224ee995283SPavel Emelyanov else 4225ee995283SPavel Emelyanov return -EINVAL; 4226ee995283SPavel Emelyanov break; 4227ee995283SPavel Emelyanov 4228dca43c75SJerry Chu case TCP_USER_TIMEOUT: 42299bcc66e1SJon Maxwell val = icsk->icsk_user_timeout; 4230dca43c75SJerry Chu break; 42311536e285SKenjiro Nakayama 42321536e285SKenjiro Nakayama case TCP_FASTOPEN: 42330536fcc0SEric Dumazet val = icsk->icsk_accept_queue.fastopenq.max_qlen; 42341536e285SKenjiro Nakayama break; 42351536e285SKenjiro Nakayama 423619f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT: 423719f6d3f3SWei Wang val = tp->fastopen_connect; 423819f6d3f3SWei Wang break; 423919f6d3f3SWei Wang 424071c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE: 424171c02379SChristoph Paasch val = tp->fastopen_no_cookie; 424271c02379SChristoph Paasch break; 424371c02379SChristoph Paasch 4244a842fe14SEric Dumazet case TCP_TX_DELAY: 4245a842fe14SEric Dumazet val = tp->tcp_tx_delay; 4246a842fe14SEric Dumazet break; 4247a842fe14SEric Dumazet 424893be6ce0SAndrey Vagin case TCP_TIMESTAMP: 42499a568de4SEric Dumazet val = tcp_time_stamp_raw() + tp->tsoffset; 425093be6ce0SAndrey Vagin break; 4251c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT: 4252c9bee3b7SEric Dumazet val = tp->notsent_lowat; 4253c9bee3b7SEric Dumazet break; 4254b75eba76SSoheil Hassas Yeganeh case TCP_INQ: 4255b75eba76SSoheil Hassas Yeganeh val = tp->recvmsg_inq; 4256b75eba76SSoheil Hassas Yeganeh break; 4257cd8ae852SEric Dumazet case TCP_SAVE_SYN: 4258cd8ae852SEric Dumazet val = tp->save_syn; 4259cd8ae852SEric Dumazet break; 4260cd8ae852SEric Dumazet case TCP_SAVED_SYN: { 4261cd8ae852SEric Dumazet if (get_user(len, optlen)) 4262cd8ae852SEric Dumazet return -EFAULT; 4263cd8ae852SEric Dumazet 4264cd8ae852SEric Dumazet lock_sock(sk); 4265cd8ae852SEric Dumazet if (tp->saved_syn) { 426670a217f1SMartin KaFai Lau if (len < tcp_saved_syn_len(tp->saved_syn)) { 426770a217f1SMartin KaFai Lau if (put_user(tcp_saved_syn_len(tp->saved_syn), 426870a217f1SMartin KaFai Lau optlen)) { 4269aea0929eSEric B Munson release_sock(sk); 4270aea0929eSEric B Munson return -EFAULT; 4271aea0929eSEric B Munson } 4272aea0929eSEric B Munson release_sock(sk); 4273aea0929eSEric B Munson return -EINVAL; 4274aea0929eSEric B Munson } 427570a217f1SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn); 4276cd8ae852SEric Dumazet if (put_user(len, optlen)) { 4277cd8ae852SEric Dumazet release_sock(sk); 4278cd8ae852SEric Dumazet return -EFAULT; 4279cd8ae852SEric Dumazet } 428070a217f1SMartin KaFai Lau if (copy_to_user(optval, tp->saved_syn->data, len)) { 4281cd8ae852SEric Dumazet release_sock(sk); 4282cd8ae852SEric Dumazet return -EFAULT; 4283cd8ae852SEric Dumazet } 4284cd8ae852SEric Dumazet tcp_saved_syn_free(tp); 4285cd8ae852SEric Dumazet release_sock(sk); 4286cd8ae852SEric Dumazet } else { 4287cd8ae852SEric Dumazet release_sock(sk); 4288cd8ae852SEric Dumazet len = 0; 4289cd8ae852SEric Dumazet if (put_user(len, optlen)) 4290cd8ae852SEric 
Dumazet return -EFAULT; 4291cd8ae852SEric Dumazet } 4292cd8ae852SEric Dumazet return 0; 4293cd8ae852SEric Dumazet } 429405255b82SEric Dumazet #ifdef CONFIG_MMU 429505255b82SEric Dumazet case TCP_ZEROCOPY_RECEIVE: { 42967eeba170SArjun Roy struct scm_timestamping_internal tss; 4297e0fecb28SArjun Roy struct tcp_zerocopy_receive zc = {}; 429805255b82SEric Dumazet int err; 429905255b82SEric Dumazet 430005255b82SEric Dumazet if (get_user(len, optlen)) 430105255b82SEric Dumazet return -EFAULT; 43022107d45fSArjun Roy if (len < 0 || 43032107d45fSArjun Roy len < offsetofend(struct tcp_zerocopy_receive, length)) 430405255b82SEric Dumazet return -EINVAL; 43053c5a2fd0SArjun Roy if (unlikely(len > sizeof(zc))) { 43063c5a2fd0SArjun Roy err = check_zeroed_user(optval + sizeof(zc), 43073c5a2fd0SArjun Roy len - sizeof(zc)); 43083c5a2fd0SArjun Roy if (err < 1) 43093c5a2fd0SArjun Roy return err == 0 ? -EINVAL : err; 4310c8856c05SArjun Roy len = sizeof(zc); 43110b7f41f6SArjun Roy if (put_user(len, optlen)) 43120b7f41f6SArjun Roy return -EFAULT; 43130b7f41f6SArjun Roy } 431405255b82SEric Dumazet if (copy_from_user(&zc, optval, len)) 431505255b82SEric Dumazet return -EFAULT; 43163c5a2fd0SArjun Roy if (zc.reserved) 43173c5a2fd0SArjun Roy return -EINVAL; 43183c5a2fd0SArjun Roy if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) 43193c5a2fd0SArjun Roy return -EINVAL; 432005255b82SEric Dumazet lock_sock(sk); 43217eeba170SArjun Roy err = tcp_zerocopy_receive(sk, &zc, &tss); 43229cacf81fSStanislav Fomichev err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, 43239cacf81fSStanislav Fomichev &zc, &len, err); 432405255b82SEric Dumazet release_sock(sk); 43257eeba170SArjun Roy if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) 43267eeba170SArjun Roy goto zerocopy_rcv_cmsg; 4327c8856c05SArjun Roy switch (len) { 43287eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_flags): 43297eeba170SArjun Roy goto zerocopy_rcv_cmsg; 43307eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_controllen): 43317eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_control): 43327eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, flags): 43337eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_len): 43347eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_address): 433533946518SArjun Roy case offsetofend(struct tcp_zerocopy_receive, err): 433633946518SArjun Roy goto zerocopy_rcv_sk_err; 4337c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, inq): 4338c8856c05SArjun Roy goto zerocopy_rcv_inq; 4339c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, length): 4340c8856c05SArjun Roy default: 4341c8856c05SArjun Roy goto zerocopy_rcv_out; 4342c8856c05SArjun Roy } 43437eeba170SArjun Roy zerocopy_rcv_cmsg: 43447eeba170SArjun Roy if (zc.msg_flags & TCP_CMSG_TS) 43457eeba170SArjun Roy tcp_zc_finalize_rx_tstamp(sk, &zc, &tss); 43467eeba170SArjun Roy else 43477eeba170SArjun Roy zc.msg_flags = 0; 434833946518SArjun Roy zerocopy_rcv_sk_err: 434933946518SArjun Roy if (!err) 435033946518SArjun Roy zc.err = sock_error(sk); 4351c8856c05SArjun Roy zerocopy_rcv_inq: 4352c8856c05SArjun Roy zc.inq = tcp_inq_hint(sk); 4353c8856c05SArjun Roy zerocopy_rcv_out: 435405255b82SEric Dumazet if (!err && copy_to_user(optval, &zc, len)) 435505255b82SEric Dumazet err = -EFAULT; 435605255b82SEric Dumazet return err; 435705255b82SEric Dumazet } 435805255b82SEric Dumazet #endif 43591da177e4SLinus Torvalds default: 43601da177e4SLinus 
Torvalds return -ENOPROTOOPT; 43613ff50b79SStephen Hemminger } 43621da177e4SLinus Torvalds 43631da177e4SLinus Torvalds if (put_user(len, optlen)) 43641da177e4SLinus Torvalds return -EFAULT; 43651da177e4SLinus Torvalds if (copy_to_user(optval, &val, len)) 43661da177e4SLinus Torvalds return -EFAULT; 43671da177e4SLinus Torvalds return 0; 43681da177e4SLinus Torvalds } 43691da177e4SLinus Torvalds 43709cacf81fSStanislav Fomichev bool tcp_bpf_bypass_getsockopt(int level, int optname) 43719cacf81fSStanislav Fomichev { 43729cacf81fSStanislav Fomichev /* TCP do_tcp_getsockopt has optimized getsockopt implementation 43739cacf81fSStanislav Fomichev * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE. 43749cacf81fSStanislav Fomichev */ 43759cacf81fSStanislav Fomichev if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE) 43769cacf81fSStanislav Fomichev return true; 43779cacf81fSStanislav Fomichev 43789cacf81fSStanislav Fomichev return false; 43799cacf81fSStanislav Fomichev } 43809cacf81fSStanislav Fomichev EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt); 43819cacf81fSStanislav Fomichev 43823fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 43833fdadf7dSDmitry Mishin int __user *optlen) 43843fdadf7dSDmitry Mishin { 43853fdadf7dSDmitry Mishin struct inet_connection_sock *icsk = inet_csk(sk); 43863fdadf7dSDmitry Mishin 43873fdadf7dSDmitry Mishin if (level != SOL_TCP) 43883fdadf7dSDmitry Mishin return icsk->icsk_af_ops->getsockopt(sk, level, optname, 43893fdadf7dSDmitry Mishin optval, optlen); 43903fdadf7dSDmitry Mishin return do_tcp_getsockopt(sk, level, optname, optval, optlen); 43913fdadf7dSDmitry Mishin } 43924bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_getsockopt); 43933fdadf7dSDmitry Mishin 4394cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 4395349ce993SEric Dumazet static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); 439671cea17eSEric Dumazet static DEFINE_MUTEX(tcp_md5sig_mutex); 4397349ce993SEric Dumazet static bool tcp_md5sig_pool_populated = false; 4398cfb6eeb4SYOSHIFUJI Hideaki 439971cea17eSEric Dumazet static void __tcp_alloc_md5sig_pool(void) 4400cfb6eeb4SYOSHIFUJI Hideaki { 4401cf80e0e4SHerbert Xu struct crypto_ahash *hash; 4402cfb6eeb4SYOSHIFUJI Hideaki int cpu; 4403cfb6eeb4SYOSHIFUJI Hideaki 4404cf80e0e4SHerbert Xu hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); 44051eea84b7SInsu Yun if (IS_ERR(hash)) 4406349ce993SEric Dumazet return; 4407cf80e0e4SHerbert Xu 4408cf80e0e4SHerbert Xu for_each_possible_cpu(cpu) { 440919689e38SEric Dumazet void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch; 4410cf80e0e4SHerbert Xu struct ahash_request *req; 4411cf80e0e4SHerbert Xu 441219689e38SEric Dumazet if (!scratch) { 441319689e38SEric Dumazet scratch = kmalloc_node(sizeof(union tcp_md5sum_block) + 441419689e38SEric Dumazet sizeof(struct tcphdr), 441519689e38SEric Dumazet GFP_KERNEL, 441619689e38SEric Dumazet cpu_to_node(cpu)); 441719689e38SEric Dumazet if (!scratch) 441819689e38SEric Dumazet return; 441919689e38SEric Dumazet per_cpu(tcp_md5sig_pool, cpu).scratch = scratch; 442019689e38SEric Dumazet } 4421cf80e0e4SHerbert Xu if (per_cpu(tcp_md5sig_pool, cpu).md5_req) 4422cf80e0e4SHerbert Xu continue; 4423cf80e0e4SHerbert Xu 4424cf80e0e4SHerbert Xu req = ahash_request_alloc(hash, GFP_KERNEL); 4425cf80e0e4SHerbert Xu if (!req) 4426cf80e0e4SHerbert Xu return; 4427cf80e0e4SHerbert Xu 4428cf80e0e4SHerbert Xu ahash_request_set_callback(req, 0, NULL, NULL); 4429cf80e0e4SHerbert Xu 4430cf80e0e4SHerbert Xu per_cpu(tcp_md5sig_pool, cpu).md5_req = 
req; 4431349ce993SEric Dumazet } 4432349ce993SEric Dumazet /* before setting tcp_md5sig_pool_populated, we must commit all writes 4433349ce993SEric Dumazet * to memory. See smp_rmb() in tcp_get_md5sig_pool() 443471cea17eSEric Dumazet */ 443571cea17eSEric Dumazet smp_wmb(); 4436349ce993SEric Dumazet tcp_md5sig_pool_populated = true; 4437cfb6eeb4SYOSHIFUJI Hideaki } 4438cfb6eeb4SYOSHIFUJI Hideaki 443971cea17eSEric Dumazet bool tcp_alloc_md5sig_pool(void) 4440cfb6eeb4SYOSHIFUJI Hideaki { 4441349ce993SEric Dumazet if (unlikely(!tcp_md5sig_pool_populated)) { 444271cea17eSEric Dumazet mutex_lock(&tcp_md5sig_mutex); 4443cfb6eeb4SYOSHIFUJI Hideaki 44446015c71eSEric Dumazet if (!tcp_md5sig_pool_populated) { 444571cea17eSEric Dumazet __tcp_alloc_md5sig_pool(); 44466015c71eSEric Dumazet if (tcp_md5sig_pool_populated) 4447921f9a0fSEric Dumazet static_branch_inc(&tcp_md5_needed); 44486015c71eSEric Dumazet } 4449cfb6eeb4SYOSHIFUJI Hideaki 445071cea17eSEric Dumazet mutex_unlock(&tcp_md5sig_mutex); 4451cfb6eeb4SYOSHIFUJI Hideaki } 4452349ce993SEric Dumazet return tcp_md5sig_pool_populated; 4453cfb6eeb4SYOSHIFUJI Hideaki } 4454cfb6eeb4SYOSHIFUJI Hideaki EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 4455cfb6eeb4SYOSHIFUJI Hideaki 445635790c04SEric Dumazet 445735790c04SEric Dumazet /** 445835790c04SEric Dumazet * tcp_get_md5sig_pool - get md5sig_pool for this user 445935790c04SEric Dumazet * 446035790c04SEric Dumazet * We use percpu structure, so if we succeed, we exit with preemption 446135790c04SEric Dumazet * and BH disabled, to make sure another thread or softirq handling 446235790c04SEric Dumazet * wont try to get same context. 446335790c04SEric Dumazet */ 446435790c04SEric Dumazet struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 4465cfb6eeb4SYOSHIFUJI Hideaki { 446635790c04SEric Dumazet local_bh_disable(); 446735790c04SEric Dumazet 4468349ce993SEric Dumazet if (tcp_md5sig_pool_populated) { 4469349ce993SEric Dumazet /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ 4470349ce993SEric Dumazet smp_rmb(); 4471349ce993SEric Dumazet return this_cpu_ptr(&tcp_md5sig_pool); 4472349ce993SEric Dumazet } 447335790c04SEric Dumazet local_bh_enable(); 447435790c04SEric Dumazet return NULL; 4475cfb6eeb4SYOSHIFUJI Hideaki } 447635790c04SEric Dumazet EXPORT_SYMBOL(tcp_get_md5sig_pool); 4477cfb6eeb4SYOSHIFUJI Hideaki 447849a72dfbSAdam Langley int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, 4479cf533ea5SEric Dumazet const struct sk_buff *skb, unsigned int header_len) 448049a72dfbSAdam Langley { 448149a72dfbSAdam Langley struct scatterlist sg; 448249a72dfbSAdam Langley const struct tcphdr *tp = tcp_hdr(skb); 4483cf80e0e4SHerbert Xu struct ahash_request *req = hp->md5_req; 448495c96174SEric Dumazet unsigned int i; 448595c96174SEric Dumazet const unsigned int head_data_len = skb_headlen(skb) > header_len ? 
448649a72dfbSAdam Langley skb_headlen(skb) - header_len : 0; 448749a72dfbSAdam Langley const struct skb_shared_info *shi = skb_shinfo(skb); 4488d7fd1b57SEric Dumazet struct sk_buff *frag_iter; 448949a72dfbSAdam Langley 449049a72dfbSAdam Langley sg_init_table(&sg, 1); 449149a72dfbSAdam Langley 449249a72dfbSAdam Langley sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); 4493cf80e0e4SHerbert Xu ahash_request_set_crypt(req, &sg, NULL, head_data_len); 4494cf80e0e4SHerbert Xu if (crypto_ahash_update(req)) 449549a72dfbSAdam Langley return 1; 449649a72dfbSAdam Langley 449749a72dfbSAdam Langley for (i = 0; i < shi->nr_frags; ++i) { 4498d8e18a51SMatthew Wilcox (Oracle) const skb_frag_t *f = &shi->frags[i]; 4499b54c9d5bSJonathan Lemon unsigned int offset = skb_frag_off(f); 450054d27fcbSEric Dumazet struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); 450154d27fcbSEric Dumazet 450254d27fcbSEric Dumazet sg_set_page(&sg, page, skb_frag_size(f), 450354d27fcbSEric Dumazet offset_in_page(offset)); 4504cf80e0e4SHerbert Xu ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f)); 4505cf80e0e4SHerbert Xu if (crypto_ahash_update(req)) 450649a72dfbSAdam Langley return 1; 450749a72dfbSAdam Langley } 450849a72dfbSAdam Langley 4509d7fd1b57SEric Dumazet skb_walk_frags(skb, frag_iter) 4510d7fd1b57SEric Dumazet if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) 4511d7fd1b57SEric Dumazet return 1; 4512d7fd1b57SEric Dumazet 451349a72dfbSAdam Langley return 0; 451449a72dfbSAdam Langley } 451549a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_skb_data); 451649a72dfbSAdam Langley 4517cf533ea5SEric Dumazet int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) 451849a72dfbSAdam Langley { 4519e6ced831SEric Dumazet u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ 452049a72dfbSAdam Langley struct scatterlist sg; 452149a72dfbSAdam Langley 45226a2febecSEric Dumazet sg_init_one(&sg, key->key, keylen); 45236a2febecSEric Dumazet ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen); 4524e6ced831SEric Dumazet 4525e6ced831SEric Dumazet /* We use data_race() because tcp_md5_do_add() might change key->key under us */ 4526e6ced831SEric Dumazet return data_race(crypto_ahash_update(hp->md5_req)); 452749a72dfbSAdam Langley } 452849a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_key); 452949a72dfbSAdam Langley 45307bbb765bSDmitry Safonov /* Called with rcu_read_lock() */ 45311330b6efSJakub Kicinski enum skb_drop_reason 45321330b6efSJakub Kicinski tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, 45337bbb765bSDmitry Safonov const void *saddr, const void *daddr, 45347bbb765bSDmitry Safonov int family, int dif, int sdif) 45357bbb765bSDmitry Safonov { 45367bbb765bSDmitry Safonov /* 45377bbb765bSDmitry Safonov * This gets called for each TCP segment that arrives 45387bbb765bSDmitry Safonov * so we want to be efficient. 45397bbb765bSDmitry Safonov * We have 3 drop cases: 45407bbb765bSDmitry Safonov * o No MD5 hash and one expected. 45417bbb765bSDmitry Safonov * o MD5 hash and we're not expecting one. 45427bbb765bSDmitry Safonov * o MD5 hash and its wrong. 
/* Called with rcu_read_lock() */
enum skb_drop_reason
tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
		     const void *saddr, const void *daddr,
		     int family, int dif, int sdif)
{
	/*
	 * This gets called for each TCP segment that arrives,
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcp_sock *tp = tcp_sk(sk);
	int genhash, l3index;
	u8 newhash[16];

	/* sdif set means the packet ingressed via a device
	 * in an L3 domain, and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	hash_expected = tcp_md5_do_lookup(sk, l3index, saddr, family);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return SKB_NOT_DROPPED_YET;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return SKB_DROP_REASON_TCP_MD5NOTFOUND;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
	}

	/* Check the signature.
	 * To support dual stack listeners, we need to handle
	 * the IPv4-mapped case.
	 */
	if (family == AF_INET)
		genhash = tcp_v4_md5_hash_skb(newhash,
					      hash_expected,
					      NULL, skb);
	else
		genhash = tp->af_specific->calc_md5_hash(newhash,
							 hash_expected,
							 NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		if (family == AF_INET) {
			net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
					     saddr, ntohs(th->source),
					     daddr, ntohs(th->dest),
					     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "", l3index);
		} else {
			net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
					     genhash ? "failed" : "mismatch",
					     saddr, ntohs(th->source),
					     daddr, ntohs(th->dest), l3index);
		}
		return SKB_DROP_REASON_TCP_MD5FAILURE;
	}
	return SKB_NOT_DROPPED_YET;
}
EXPORT_SYMBOL(tcp_inbound_md5_hash);

#endif /* CONFIG_TCP_MD5SIG */

void tcp_done(struct sock *sk)
{
	struct request_sock *req;

	/* We might be called with a new socket, after
	 * inet_csk_prepare_forced_close() has been called,
	 * so we cannot use lockdep_sock_is_held(sk)
	 */
	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);

	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);
	if (req)
		reqsk_fastopen_remove(sk, req, false);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);

int tcp_abort(struct sock *sk, int err)
{
	int state = inet_sk_state_load(sk);

	if (state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);

		local_bh_disable();
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		local_bh_enable();
		return 0;
	}
	if (state == TCP_TIME_WAIT) {
		struct inet_timewait_sock *tw = inet_twsk(sk);

		refcount_inc(&tw->tw_refcnt);
		local_bh_disable();
		inet_twsk_deschedule_put(tw);
		local_bh_enable();
		return 0;
	}

	/* Don't race with userspace socket closes such as tcp_close. */
	lock_sock(sk);

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);
		inet_csk_listen_stop(sk);
	}

	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
	local_bh_disable();
	bh_lock_sock(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_err = err;
		/* This barrier is coupled with smp_rmb() in tcp_poll() */
		smp_wmb();
		sk_error_report(sk);
		if (tcp_need_reset(sk->sk_state))
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
	}

	bh_unlock_sock(sk);
	local_bh_enable();
	tcp_write_queue_purge(sk);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_abort);

extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &thash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("thash_entries=", set_thash_entries);

static void __init tcp_init_mem(void)
{
	unsigned long limit = nr_free_buffer_pages() / 16;

	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
}

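/* Worked example for the percentages above (a sketch; the exact numbers
 * depend on nr_free_buffer_pages() at boot).  limit is 1/16 = 6.25% of the
 * free buffer pages, so:
 *
 *	sysctl_tcp_mem[0] = limit * 3/4  ->  3/64 ~ 4.68% of pages
 *	sysctl_tcp_mem[1] = limit        ->  1/16 = 6.25% of pages
 *	sysctl_tcp_mem[2] = 2 * mem[0]   ->  6/64 ~ 9.37% of pages
 *
 * These become the boot-time defaults of the net.ipv4.tcp_mem sysctl
 * (values are expressed in pages, not bytes).
 */
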
void __init tcp_init(void)
{
	int max_rshare, max_wshare, cnt;
	unsigned long limit;
	unsigned int i;

	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);

	timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);

	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
			    thash_entries, 21,  /* one slot per 2 MB */
			    0, 64 * 1024);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				  SLAB_ACCOUNT,
				  NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					17, /* one slot per 128 KB of memory */
					0,
					NULL,
					&tcp_hashinfo.ehash_mask,
					0,
					thash_entries ? 0 : 512 * 1024);
	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_mask + 1,
					17, /* one slot per 128 KB of memory */
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					0,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	cnt = tcp_hashinfo.ehash_mask + 1;
	sysctl_tcp_max_orphans = cnt / 2;

	tcp_init_mem();
	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
	max_wshare = min(4UL*1024*1024, limit);
	max_rshare = min(6UL*1024*1024, limit);

	init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE;
	init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
	init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);

	init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE;
	init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
	init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);

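	/* Illustrative note (assumes 4 KB pages and enough memory that the
	 * 1/128-of-memory cap above is not the limiting factor): the
	 * assignments above yield the familiar per-socket buffer defaults
	 * reported under /proc/sys/net/ipv4, in bytes (min, default, max):
	 *
	 *	tcp_wmem = 4096  16384   4194304
	 *	tcp_rmem = 4096  131072  6291456
	 *
	 * Smaller machines end up with correspondingly smaller maxima.
	 */
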
	pr_info("Hash tables configured (established %u bind %u)\n",
		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);

	tcp_v4_init();
	tcp_metrics_init();
	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
	tcp_tasklet_init();
	mptcp_init();
}