// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *	Charles Hedrick		:	TCP fixes
 *	Toomas Tamm		:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *	Charles Hedrick		:	Rewrote most of it to actually work
 *	Linus			:	Rewrote tcp_read() and URG handling
 *					completely
 *	Gerhard Koerting	:	Fixed some missing timer handling
 *	Matthew Dillon		:	Reworked TCP machine states as per RFC
 *	Gerhard Koerting	:	PC/TCP workarounds
 *	Adam Caldwell		:	Assorted timer/timing errors
 *	Matthew Dillon		:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *	Arnt Gulbrandsen	:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *	Michael Riepe		:	Bug in tcp_check()
 *	Matt Dillon		:	More TCP improvements and RST bug fixes
 *	Matt Dillon		:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *	Michael Pall		:	Handle poll() after URG properly in
 *					all cases.
 *	Michael Pall		:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *	Michael Pall		:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *	Michael Pall		:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *	Yury Shevchuk		:	Really fixed accept() blocking problem.
 *	Craig I. Hagan		:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *	Ross Biro		:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *	A.N.Kuznetsov		:	Don't time wait on completion of tidy
 *					close.
 *	Linus Torvalds		:	Fin/Shutdown & copied_seq changes.
 *	Linus Torvalds		:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *	Jorge Cwik		:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *	Matt Day		:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *	Marc Tamsky		:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *	Mark Yarvis		:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *	Marc Tamsky		:	Closing in closing fixes.
 *	Mike Shaver		:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *	Alexey Kuznetsov	:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *	Marc Tamsky		:	TCP_CLOSE fix.
 *	Colin (G3TNE)		:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *	Pedro Roque		:	Better TCP window handling, delayed ack.
 *	Joerg Reuter		:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *	Eric Schenk		:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *	Keith Owens		:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *	Eric Schenk		:	Fix fast close down bug with
 *					shutdown() followed by close().
 *	Andi Kleen		:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
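/* Illustrative close sequences implied by the state list above (a summary
 * only; RFC 793 remains the authoritative state diagram):
 *
 *	active close:	ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE
 *	passive close:	ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE
 *	simultaneous:	ESTABLISHED -> FIN_WAIT1 -> CLOSING -> TIME_WAIT -> CLOSE
 */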
#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;

	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
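/* Worked example for the two helpers above (illustrative arithmetic only):
 * with an initial timeout of 1 and rto_max of 120, in any consistent time
 * unit, the accumulated waiting time after each retransmit follows the
 * exponential backoff 1, 3, 7, 15, ... so:
 *
 *	secs_to_retrans(9, 1, 120) == 4	   (1 + 2 + 4 + 8 = 15 >= 9)
 *	retrans_to_secs(4, 1, 120) == 15   (the inverse mapping)
 *
 * Once the doubled timeout would exceed rto_max, every further retransmit
 * adds exactly rto_max to the total period.
 */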
static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}
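/* Worked example (illustrative numbers only): with rate_delivered = 100
 * packets, mss_cache = 1460 bytes and rate_interval_us = 100000 (100 ms),
 * the sampled delivery rate is
 *
 *	100 * 1460 * USEC_PER_SEC / 100000 = 1460000 bytes/sec
 *
 * i.e. delivered packets are first scaled to bytes using the MSS, then
 * normalized to a one second interval.
 */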
/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	icsk->icsk_rto_min = TCP_RTO_MIN;
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;
	tp->rate_app_limited = 1;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
	tcp_scaling_ratio_init(sk);

	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	sk_sockets_allocated_inc(sk);
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}

static bool tcp_stream_is_readable(struct sock *sk, int target)
{
	if (tcp_epollin_ready(sk, target))
		return true;
	return sk_is_readable(sk);
}

/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	u8 shutdown;
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. EPOLLHUP is an _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making poll() on write()
	 * impossible in state CLOSE_WAIT. One solution is evident --- to set
	 * EPOLLHUP if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look at how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);
		u16 urg_data = READ_ONCE(tp->urg_data);

		if (unlikely(urg_data) &&
		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
		    !sock_flag(sk, SOCK_URGINLINE))
			target++;

		if (tcp_stream_is_readable(sk, target))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(shutdown & SEND_SHUTDOWN)) {
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (__sk_stream_is_writeable(sk, 1))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT &&
		   inet_test_bit(DEFER_CONNECT, sk)) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);
int tcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = READ_ONCE(tp->urg_data) &&
		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) -
			       READ_ONCE(tp->snd_nxt);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	*karg = answ;
	return 0;
}
EXPORT_SYMBOL(tcp_ioctl);
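/* Userspace view of the commands above (illustrative sketch, not part of
 * this file):
 *
 *	int unread, unsent;
 *	ioctl(fd, SIOCINQ, &unread);	// bytes readable from the socket
 *	ioctl(fd, SIOCOUTQ, &unsent);	// bytes not yet acknowledged
 *
 * SIOCOUTQNSD differs from SIOCOUTQ by counting only bytes not yet sent
 * (write_seq - snd_nxt) rather than all unacknowledged bytes
 * (write_seq - snd_una).
 */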
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues:
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
	       tcp_skb_can_collapse_to(skb);
}

void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}
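/* Example of the autocork decision above (illustrative scenario): if an
 * application issues two back-to-back small write() calls while an earlier
 * data packet is still sitting in the Qdisc/NIC queues (sk_wmem_alloc is
 * larger than the tail skb's truesize), the second payload is kept in the
 * partially filled tail skb instead of producing another small packet, and
 * the flush is triggered by TX completion rather than by a timer.
 */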
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 * tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			sk_wait_data(sk, &timeo, NULL);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!tss.len || !timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
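/* Userspace view (illustrative sketch, not part of this file): this is the
 * receive half of a zero-copy relay built on splice(2), e.g.
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(sockfd, NULL, pfd[1], NULL, 65536,
 *			   SPLICE_F_MOVE | SPLICE_F_MORE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, outfd, NULL, n, SPLICE_F_MOVE);
 *
 * The socket side of the first splice() call lands here through the
 * proto_ops ->splice_read hook.
 */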
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}

int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}
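/* Worked example for tcp_xmit_size_goal() (illustrative numbers only):
 * with mss_now = 1448 and a window-bounded new_size_goal of 65536, the
 * goal is rounded down to a whole number of segments:
 *
 *	gso_segs  = 65536 / 1448 = 45
 *	size_goal = 45 * 1448	 = 65160 bytes
 *
 * The cached gso_segs is only recomputed when the goal drifts by at least
 * one full MSS, which keeps divides off the fast path.
 */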
	 */
	left = sock_net(sk)->ipv4.sysctl_tcp_wmem[0] - sk->sk_wmem_queued;
	if (left > 0)
		sk_forced_mem_schedule(sk, min(left, copy));
	return min(copy, sk->sk_forward_alloc);
}

void tcp_free_fastopen_req(struct tcp_sock *tp)
{
	if (tp->fastopen_req) {
		kfree(tp->fastopen_req);
		tp->fastopen_req = NULL;
	}
}

int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sockaddr *uaddr = msg->msg_name;
	int err, flags;

	if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
	      TFO_CLIENT_ENABLE) ||
	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
	     uaddr->sa_family == AF_UNSPEC))
		return -EOPNOTSUPP;
	if (tp->fastopen_req)
		return -EALREADY; /* Another Fast Open is in progress */

	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
				   sk->sk_allocation);
	if (unlikely(!tp->fastopen_req))
		return -ENOBUFS;
	tp->fastopen_req->data = msg;
	tp->fastopen_req->size = size;
	tp->fastopen_req->uarg = uarg;

	if (inet_test_bit(DEFER_CONNECT, sk)) {
		err = tcp_connect(sk);
		/* Same failure procedure as in tcp_v4/6_connect */
		if (err) {
			tcp_set_state(sk, TCP_CLOSE);
			inet->inet_dport = 0;
			sk->sk_route_caps = 0;
		}
	}
	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
	err = __inet_stream_connect(sk->sk_socket, uaddr,
				    msg->msg_namelen, flags, 1);
	/* fastopen_req could already be freed in __inet_stream_connect
	 * if the connection times out or gets rst
	 */
	if (tp->fastopen_req) {
		*copied = tp->fastopen_req->copied;
		tcp_free_fastopen_req(tp);
		inet_clear_bit(DEFER_CONNECT, sk);
	}
	return err;
}
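/* Userspace view (illustrative sketch, not part of this file): a client
 * reaches this path either with the MSG_FASTOPEN flag, e.g.
 *
 *	sendto(fd, data, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&daddr, sizeof(daddr));
 *
 * or by enabling the TCP_FASTOPEN_CONNECT socket option and then calling
 * connect() followed by send(); in both cases the payload may ride on
 * the SYN.
 */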
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;
	struct sockcm_cookie sockc;
	int flags, err, copied = 0;
	int mss_now = 0, size_goal, copied_syn = 0;
	int process_backlog = 0;
	int zc = 0;
	long timeo;

	flags = msg->msg_flags;

	if ((flags & MSG_ZEROCOPY) && size) {
		if (msg->msg_ubuf) {
			uarg = msg->msg_ubuf;
			if (sk->sk_route_caps & NETIF_F_SG)
				zc = MSG_ZEROCOPY;
		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
			skb = tcp_write_queue_tail(sk);
			uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
			if (!uarg) {
				err = -ENOBUFS;
				goto out_err;
			}
			if (sk->sk_route_caps & NETIF_F_SG)
				zc = MSG_ZEROCOPY;
			else
				uarg_to_msgzc(uarg)->zerocopy = 0;
		}
	} else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) {
		if (sk->sk_route_caps & NETIF_F_SG)
			zc = MSG_SPLICE_PAGES;
	}

	if (unlikely(flags & MSG_FASTOPEN ||
		     inet_test_bit(DEFER_CONNECT, sk)) &&
	    !tp->repair) {
		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
		if (err == -EINPROGRESS && copied_syn > 0)
			goto out;
		else if (err)
			goto out_err;
	}

	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	tcp_rate_check_app_limited(sk); /* is sending application-limited? */

	/* Wait for a connection to finish. One exception is TCP Fast Open
	 * (passive side) where data is allowed to be sent before a connection
	 * is fully established.
10878336886fSJerry Chu */ 10888336886fSJerry Chu if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && 10898336886fSJerry Chu !tcp_passive_fastopen(sk)) { 1090686a5624SYuvaraja Mariappan err = sk_stream_wait_connect(sk, &timeo); 1091686a5624SYuvaraja Mariappan if (err != 0) 1092cf60af03SYuchung Cheng goto do_error; 10938336886fSJerry Chu } 10941da177e4SLinus Torvalds 1095c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) { 1096c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_RECV_QUEUE) { 1097c0e88ff0SPavel Emelyanov copied = tcp_send_rcvq(sk, msg, size); 10985924f17aSChristoph Paasch goto out_nopush; 1099c0e88ff0SPavel Emelyanov } 1100c0e88ff0SPavel Emelyanov 1101c0e88ff0SPavel Emelyanov err = -EINVAL; 1102c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE) 1103c0e88ff0SPavel Emelyanov goto out_err; 1104c0e88ff0SPavel Emelyanov 1105c0e88ff0SPavel Emelyanov /* 'common' sending to sendq */ 1106c0e88ff0SPavel Emelyanov } 1107c0e88ff0SPavel Emelyanov 1108657a0667SWillem de Bruijn sockcm_init(&sockc, sk); 1109c14ac945SSoheil Hassas Yeganeh if (msg->msg_controllen) { 1110c14ac945SSoheil Hassas Yeganeh err = sock_cmsg_send(sk, msg, &sockc); 1111c14ac945SSoheil Hassas Yeganeh if (unlikely(err)) { 1112c14ac945SSoheil Hassas Yeganeh err = -EINVAL; 1113c14ac945SSoheil Hassas Yeganeh goto out_err; 1114c14ac945SSoheil Hassas Yeganeh } 1115c14ac945SSoheil Hassas Yeganeh } 1116c14ac945SSoheil Hassas Yeganeh 11171da177e4SLinus Torvalds /* This should be in poll */ 11189cd3e072SEric Dumazet sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 11191da177e4SLinus Torvalds 11201da177e4SLinus Torvalds /* Ok commence sending. */ 11211da177e4SLinus Torvalds copied = 0; 11221da177e4SLinus Torvalds 1123d41a69f1SEric Dumazet restart: 1124d41a69f1SEric Dumazet mss_now = tcp_send_mss(sk, &size_goal, flags); 1125d41a69f1SEric Dumazet 11261da177e4SLinus Torvalds err = -EPIPE; 11271da177e4SLinus Torvalds if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 112879d8665bSEric Dumazet goto do_error; 11291da177e4SLinus Torvalds 113001e97e65SAl Viro while (msg_data_left(msg)) { 1131270a1c3dSDavid Howells ssize_t copy = 0; 11321da177e4SLinus Torvalds 1133fe067e8aSDavid S. Miller skb = tcp_write_queue_tail(sk); 113465ec6097SEric Dumazet if (skb) 113565ec6097SEric Dumazet copy = size_goal - skb->len; 11361da177e4SLinus Torvalds 1137c134ecb8SMartin KaFai Lau if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) { 11383613b3dbSEric Dumazet bool first_skb; 11393613b3dbSEric Dumazet 11401da177e4SLinus Torvalds new_segment: 11411da177e4SLinus Torvalds if (!sk_stream_memory_free(sk)) 1142afb83012SSoheil Hassas Yeganeh goto wait_for_space; 11431da177e4SLinus Torvalds 11441a991488SEric Dumazet if (unlikely(process_backlog >= 16)) { 11451a991488SEric Dumazet process_backlog = 0; 11461a991488SEric Dumazet if (sk_flush_backlog(sk)) 1147d41a69f1SEric Dumazet goto restart; 1148d4011239SEric Dumazet } 114975c119afSEric Dumazet first_skb = tcp_rtx_and_write_queues_empty(sk); 11505882efffSEric Dumazet skb = tcp_stream_alloc_skb(sk, sk->sk_allocation, 11513613b3dbSEric Dumazet first_skb); 11521da177e4SLinus Torvalds if (!skb) 1153afb83012SSoheil Hassas Yeganeh goto wait_for_space; 11541da177e4SLinus Torvalds 11551a991488SEric Dumazet process_backlog++; 11561da177e4SLinus Torvalds 115704d8825cSPaolo Abeni tcp_skb_entail(sk, skb); 1158c1b4a7e6SDavid S. Miller copy = size_goal; 11599d186cacSAndrey Vagin 11609d186cacSAndrey Vagin /* All packets are restored as if they have 1161d3edd06eSEric Dumazet * already been sent. 
skb_mstamp_ns isn't set to 11629d186cacSAndrey Vagin * avoid wrong rtt estimation. 11639d186cacSAndrey Vagin */ 11649d186cacSAndrey Vagin if (tp->repair) 11659d186cacSAndrey Vagin TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED; 11661da177e4SLinus Torvalds } 11671da177e4SLinus Torvalds 11681da177e4SLinus Torvalds /* Try to append data to the end of skb. */ 116901e97e65SAl Viro if (copy > msg_data_left(msg)) 117001e97e65SAl Viro copy = msg_data_left(msg); 11711da177e4SLinus Torvalds 1172270a1c3dSDavid Howells if (zc == 0) { 11735640f768SEric Dumazet bool merge = true; 11741da177e4SLinus Torvalds int i = skb_shinfo(skb)->nr_frags; 11755640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk); 1176761965eaSEric Dumazet 11775640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag)) 1178afb83012SSoheil Hassas Yeganeh goto wait_for_space; 1179761965eaSEric Dumazet 11805640f768SEric Dumazet if (!skb_can_coalesce(skb, i, pfrag->page, 11815640f768SEric Dumazet pfrag->offset)) { 1182657b991aSKuniyuki Iwashima if (i >= READ_ONCE(sysctl_max_skb_frags)) { 11831da177e4SLinus Torvalds tcp_mark_push(tp, skb); 11841da177e4SLinus Torvalds goto new_segment; 11851da177e4SLinus Torvalds } 11865640f768SEric Dumazet merge = false; 11875640f768SEric Dumazet } 1188ef015786SHerbert Xu 11895640f768SEric Dumazet copy = min_t(int, copy, pfrag->size - pfrag->offset); 1190ef015786SHerbert Xu 1191eb315a7dSPavel Begunkov if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) { 1192849b425cSEric Dumazet if (tcp_downgrade_zcopy_pure(sk, skb)) 1193849b425cSEric Dumazet goto wait_for_space; 1194eb315a7dSPavel Begunkov skb_zcopy_downgrade_managed(skb); 1195eb315a7dSPavel Begunkov } 1196849b425cSEric Dumazet 1197849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy); 1198849b425cSEric Dumazet if (!copy) 1199afb83012SSoheil Hassas Yeganeh goto wait_for_space; 12001da177e4SLinus Torvalds 120157be5bdaSAl Viro err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, 12025640f768SEric Dumazet pfrag->page, 12035640f768SEric Dumazet pfrag->offset, 12045640f768SEric Dumazet copy); 12055640f768SEric Dumazet if (err) 12061da177e4SLinus Torvalds goto do_error; 12071da177e4SLinus Torvalds 12081da177e4SLinus Torvalds /* Update the skb. 
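 * Either the last fragment is grown in place (merge) or pfrag->page
 * is installed as a new fragment with an extra page reference held
 * by the skb.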
*/ 12091da177e4SLinus Torvalds if (merge) { 12109e903e08SEric Dumazet skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 12111da177e4SLinus Torvalds } else { 12125640f768SEric Dumazet skb_fill_page_desc(skb, i, pfrag->page, 12135640f768SEric Dumazet pfrag->offset, copy); 12144e33e346SEric Dumazet page_ref_inc(pfrag->page); 12151da177e4SLinus Torvalds } 12165640f768SEric Dumazet pfrag->offset += copy; 1217270a1c3dSDavid Howells } else if (zc == MSG_ZEROCOPY) { 12189b65b17dSTalal Ahmad /* First append to a fragless skb builds initial 12199b65b17dSTalal Ahmad * pure zerocopy skb 12209b65b17dSTalal Ahmad */ 12219b65b17dSTalal Ahmad if (!skb->len) 12229b65b17dSTalal Ahmad skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY; 12239b65b17dSTalal Ahmad 12249b65b17dSTalal Ahmad if (!skb_zcopy_pure(skb)) { 1225849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy); 1226849b425cSEric Dumazet if (!copy) 1227358ed624STalal Ahmad goto wait_for_space; 12289b65b17dSTalal Ahmad } 1229358ed624STalal Ahmad 1230f214f915SWillem de Bruijn err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); 1231111856c7SWillem de Bruijn if (err == -EMSGSIZE || err == -EEXIST) { 1232111856c7SWillem de Bruijn tcp_mark_push(tp, skb); 1233f214f915SWillem de Bruijn goto new_segment; 1234111856c7SWillem de Bruijn } 1235f214f915SWillem de Bruijn if (err < 0) 1236f214f915SWillem de Bruijn goto do_error; 1237f214f915SWillem de Bruijn copy = err; 1238270a1c3dSDavid Howells } else if (zc == MSG_SPLICE_PAGES) { 1239270a1c3dSDavid Howells /* Splice in data if we can; copy if we can't. */ 1240270a1c3dSDavid Howells if (tcp_downgrade_zcopy_pure(sk, skb)) 1241270a1c3dSDavid Howells goto wait_for_space; 1242270a1c3dSDavid Howells copy = tcp_wmem_schedule(sk, copy); 1243270a1c3dSDavid Howells if (!copy) 1244270a1c3dSDavid Howells goto wait_for_space; 1245270a1c3dSDavid Howells 1246270a1c3dSDavid Howells err = skb_splice_from_iter(skb, &msg->msg_iter, copy, 1247270a1c3dSDavid Howells sk->sk_allocation); 1248270a1c3dSDavid Howells if (err < 0) { 1249270a1c3dSDavid Howells if (err == -EMSGSIZE) { 1250270a1c3dSDavid Howells tcp_mark_push(tp, skb); 1251270a1c3dSDavid Howells goto new_segment; 1252270a1c3dSDavid Howells } 1253270a1c3dSDavid Howells goto do_error; 1254270a1c3dSDavid Howells } 1255270a1c3dSDavid Howells copy = err; 1256270a1c3dSDavid Howells 1257270a1c3dSDavid Howells if (!(flags & MSG_NO_SHARED_FRAGS)) 1258270a1c3dSDavid Howells skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG; 1259270a1c3dSDavid Howells 1260270a1c3dSDavid Howells sk_wmem_queued_add(sk, copy); 1261270a1c3dSDavid Howells sk_mem_charge(sk, copy); 12621da177e4SLinus Torvalds } 12631da177e4SLinus Torvalds 12641da177e4SLinus Torvalds if (!copied) 12654de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; 12661da177e4SLinus Torvalds 12670f317464SEric Dumazet WRITE_ONCE(tp->write_seq, tp->write_seq + copy); 12681da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq += copy; 1269cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, 0); 12701da177e4SLinus Torvalds 12711da177e4SLinus Torvalds copied += copy; 127201e97e65SAl Viro if (!msg_data_left(msg)) { 1273c134ecb8SMartin KaFai Lau if (unlikely(flags & MSG_EOR)) 1274c134ecb8SMartin KaFai Lau TCP_SKB_CB(skb)->eor = 1; 12751da177e4SLinus Torvalds goto out; 12764ed2d765SWillem de Bruijn } 12771da177e4SLinus Torvalds 127865ec6097SEric Dumazet if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair)) 12791da177e4SLinus Torvalds continue; 12801da177e4SLinus Torvalds 12811da177e4SLinus Torvalds if (forced_push(tp)) { 
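/* forced_push() is true once more than half of the largest window
 * ever advertised by the peer is sitting unpushed in the write
 * queue; set PSH and flush everything rather than keep batching.
 */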
12821da177e4SLinus Torvalds tcp_mark_push(tp, skb); 12839e412ba7SIlpo Järvinen __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); 1284fe067e8aSDavid S. Miller } else if (skb == tcp_send_head(sk)) 12851da177e4SLinus Torvalds tcp_push_one(sk, mss_now); 12861da177e4SLinus Torvalds continue; 12871da177e4SLinus Torvalds 1288afb83012SSoheil Hassas Yeganeh wait_for_space: 12891da177e4SLinus Torvalds set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1290ec342325SAndrew Vagin if (copied) 1291f54b3111SEric Dumazet tcp_push(sk, flags & ~MSG_MORE, mss_now, 1292f54b3111SEric Dumazet TCP_NAGLE_PUSH, size_goal); 12931da177e4SLinus Torvalds 1294686a5624SYuvaraja Mariappan err = sk_stream_wait_memory(sk, &timeo); 1295686a5624SYuvaraja Mariappan if (err != 0) 12961da177e4SLinus Torvalds goto do_error; 12971da177e4SLinus Torvalds 12980c54b85fSIlpo Järvinen mss_now = tcp_send_mss(sk, &size_goal, flags); 12991da177e4SLinus Torvalds } 13001da177e4SLinus Torvalds 13011da177e4SLinus Torvalds out: 1302ad02c4f5SSoheil Hassas Yeganeh if (copied) { 13034e8cc228SEric Dumazet tcp_tx_timestamp(sk, sockc.tsflags); 1304f54b3111SEric Dumazet tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1305ad02c4f5SSoheil Hassas Yeganeh } 13065924f17aSChristoph Paasch out_nopush: 1307a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1308a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf) 13098e044917SJonathan Lemon net_zcopy_put(uarg); 1310cf60af03SYuchung Cheng return copied + copied_syn; 13111da177e4SLinus Torvalds 13121da177e4SLinus Torvalds do_error: 131327728ba8SEric Dumazet tcp_remove_empty_skb(sk); 1314fdfc5c85SEric Dumazet 1315cf60af03SYuchung Cheng if (copied + copied_syn) 13161da177e4SLinus Torvalds goto out; 13171da177e4SLinus Torvalds out_err: 1318a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */ 1319a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf) 13208e044917SJonathan Lemon net_zcopy_put_abort(uarg, true); 13211da177e4SLinus Torvalds err = sk_stream_error(sk, flags, err); 1322ce5ec440SJason Baron /* make sure we wake any epoll edge trigger waiter */ 1323216808c6SEric Dumazet if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) { 1324ce5ec440SJason Baron sk->sk_write_space(sk); 1325b0f71bd3SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); 1326b0f71bd3SFrancis Yan } 13271da177e4SLinus Torvalds return err; 13281da177e4SLinus Torvalds } 1329774c4673SDavid S. 
Miller EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); 1330306b13ebSTom Herbert 1331306b13ebSTom Herbert int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 1332306b13ebSTom Herbert { 1333306b13ebSTom Herbert int ret; 1334306b13ebSTom Herbert 1335306b13ebSTom Herbert lock_sock(sk); 1336306b13ebSTom Herbert ret = tcp_sendmsg_locked(sk, msg, size); 1337306b13ebSTom Herbert release_sock(sk); 1338306b13ebSTom Herbert 1339306b13ebSTom Herbert return ret; 1340306b13ebSTom Herbert } 13414bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sendmsg); 13421da177e4SLinus Torvalds 13431d7e4538SDavid Howells void tcp_splice_eof(struct socket *sock) 13441d7e4538SDavid Howells { 13451d7e4538SDavid Howells struct sock *sk = sock->sk; 13461d7e4538SDavid Howells struct tcp_sock *tp = tcp_sk(sk); 13471d7e4538SDavid Howells int mss_now, size_goal; 13481d7e4538SDavid Howells 13491d7e4538SDavid Howells if (!tcp_write_queue_tail(sk)) 13501d7e4538SDavid Howells return; 13511d7e4538SDavid Howells 13521d7e4538SDavid Howells lock_sock(sk); 13531d7e4538SDavid Howells mss_now = tcp_send_mss(sk, &size_goal, 0); 13541d7e4538SDavid Howells tcp_push(sk, 0, mss_now, tp->nonagle, size_goal); 13551d7e4538SDavid Howells release_sock(sk); 13561d7e4538SDavid Howells } 13571d7e4538SDavid Howells EXPORT_SYMBOL_GPL(tcp_splice_eof); 13581d7e4538SDavid Howells 13591da177e4SLinus Torvalds /* 13601da177e4SLinus Torvalds * Handle reading urgent data. BSD has very simple semantics for 13611da177e4SLinus Torvalds * this, no blocking and very strange errors 8) 13621da177e4SLinus Torvalds */ 13631da177e4SLinus Torvalds 1364377f0a08SRami Rosen static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags) 13651da177e4SLinus Torvalds { 13661da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 13671da177e4SLinus Torvalds 13681da177e4SLinus Torvalds /* No URG data to read. */ 13691da177e4SLinus Torvalds if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || 13701da177e4SLinus Torvalds tp->urg_data == TCP_URG_READ) 13711da177e4SLinus Torvalds return -EINVAL; /* Yes this is right ! */ 13721da177e4SLinus Torvalds 13731da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE)) 13741da177e4SLinus Torvalds return -ENOTCONN; 13751da177e4SLinus Torvalds 13761da177e4SLinus Torvalds if (tp->urg_data & TCP_URG_VALID) { 13771da177e4SLinus Torvalds int err = 0; 13781da177e4SLinus Torvalds char c = tp->urg_data; 13791da177e4SLinus Torvalds 13801da177e4SLinus Torvalds if (!(flags & MSG_PEEK)) 13817b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, TCP_URG_READ); 13821da177e4SLinus Torvalds 13831da177e4SLinus Torvalds /* Read urgent data. */ 13841da177e4SLinus Torvalds msg->msg_flags |= MSG_OOB; 13851da177e4SLinus Torvalds 13861da177e4SLinus Torvalds if (len > 0) { 13871da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) 13887eab8d9eSAl Viro err = memcpy_to_msg(msg, &c, 1); 13891da177e4SLinus Torvalds len = 1; 13901da177e4SLinus Torvalds } else 13911da177e4SLinus Torvalds msg->msg_flags |= MSG_TRUNC; 13921da177e4SLinus Torvalds 13931da177e4SLinus Torvalds return err ? -EFAULT : len; 13941da177e4SLinus Torvalds } 13951da177e4SLinus Torvalds 13961da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN)) 13971da177e4SLinus Torvalds return 0; 13981da177e4SLinus Torvalds 13991da177e4SLinus Torvalds /* Fixed the recv(..., MSG_OOB) behaviour. 
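 * (Userspace view, a hedged sketch: char c; recv(fd, &c, 1, MSG_OOB);
 * returns 1 once the urgent byte has arrived, fails with EAGAIN while
 * it is still in flight, and fails with EINVAL if none was announced,
 * it was already consumed, or SO_OOBINLINE is set, matching
 * tcp_recv_urg() above.)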
BSD docs and 14001da177e4SLinus Torvalds * the available implementations agree in this case: 14011da177e4SLinus Torvalds * this call should never block, independent of the 14021da177e4SLinus Torvalds * blocking state of the socket. 14031da177e4SLinus Torvalds * Mike <pall@rz.uni-karlsruhe.de> 14041da177e4SLinus Torvalds */ 14051da177e4SLinus Torvalds return -EAGAIN; 14061da177e4SLinus Torvalds } 14071da177e4SLinus Torvalds 1408c0e88ff0SPavel Emelyanov static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) 1409c0e88ff0SPavel Emelyanov { 1410c0e88ff0SPavel Emelyanov struct sk_buff *skb; 1411c0e88ff0SPavel Emelyanov int copied = 0, err = 0; 1412c0e88ff0SPavel Emelyanov 1413c0e88ff0SPavel Emelyanov /* XXX -- need to support SO_PEEK_OFF */ 1414c0e88ff0SPavel Emelyanov 141575c119afSEric Dumazet skb_rbtree_walk(skb, &sk->tcp_rtx_queue) { 141675c119afSEric Dumazet err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 141775c119afSEric Dumazet if (err) 141875c119afSEric Dumazet return err; 141975c119afSEric Dumazet copied += skb->len; 142075c119afSEric Dumazet } 142175c119afSEric Dumazet 1422c0e88ff0SPavel Emelyanov skb_queue_walk(&sk->sk_write_queue, skb) { 142351f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, 0, msg, skb->len); 1424c0e88ff0SPavel Emelyanov if (err) 1425c0e88ff0SPavel Emelyanov break; 1426c0e88ff0SPavel Emelyanov 1427c0e88ff0SPavel Emelyanov copied += skb->len; 1428c0e88ff0SPavel Emelyanov } 1429c0e88ff0SPavel Emelyanov 1430c0e88ff0SPavel Emelyanov return err ?: copied; 1431c0e88ff0SPavel Emelyanov } 1432c0e88ff0SPavel Emelyanov 14331da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user, 14341da177e4SLinus Torvalds * then send an ACK if necessary. COPIED is the number of bytes 14351da177e4SLinus Torvalds * tcp_recvmsg has given to the user so far, it speeds up the 14361da177e4SLinus Torvalds * calculation of whether or not we must ACK for the sake of 14371da177e4SLinus Torvalds * a window update. 14381da177e4SLinus Torvalds */ 1439e5c6de5fSJohn Fastabend void __tcp_cleanup_rbuf(struct sock *sk, int copied) 14401da177e4SLinus Torvalds { 14411da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 1442a2a385d6SEric Dumazet bool time_to_ack = false; 14431da177e4SLinus Torvalds 1444463c84b9SArnaldo Carvalho de Melo if (inet_csk_ack_scheduled(sk)) { 1445463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 1446b6b6d653SEric Dumazet 1447b6b6d653SEric Dumazet if (/* Once-per-two-segments ACK was not sent by tcp_input.c */ 1448463c84b9SArnaldo Carvalho de Melo tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 14491da177e4SLinus Torvalds /* 14501da177e4SLinus Torvalds * If this read emptied read buffer, we send ACK, if 14511da177e4SLinus Torvalds * connection is not bidirectional, user drained 14521da177e4SLinus Torvalds * receive buffer and there was a small segment 14531da177e4SLinus Torvalds * in queue. 
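 * Since nothing flows in the other direction, no piggybacked ACK
 * will refresh the peer's window; send one explicitly now that
 * the receive buffer has been drained.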
14541da177e4SLinus Torvalds */ 14551ef9696cSAlexey Kuznetsov (copied > 0 && 14561ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) || 14571ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 145831954cd8SWei Wang !inet_csk_in_pingpong_mode(sk))) && 14591ef9696cSAlexey Kuznetsov !atomic_read(&sk->sk_rmem_alloc))) 1460a2a385d6SEric Dumazet time_to_ack = true; 14611da177e4SLinus Torvalds } 14621da177e4SLinus Torvalds 14631da177e4SLinus Torvalds /* We send an ACK if we can now advertise a non-zero window 14641da177e4SLinus Torvalds * which has been raised "significantly". 14651da177e4SLinus Torvalds * 14661da177e4SLinus Torvalds * Even if window raised up to infinity, do not send window open ACK 14671da177e4SLinus Torvalds * in states, where we will not receive more. It is useless. 14681da177e4SLinus Torvalds */ 14691da177e4SLinus Torvalds if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { 14701da177e4SLinus Torvalds __u32 rcv_window_now = tcp_receive_window(tp); 14711da177e4SLinus Torvalds 14721da177e4SLinus Torvalds /* Optimize, __tcp_select_window() is not cheap. */ 14731da177e4SLinus Torvalds if (2*rcv_window_now <= tp->window_clamp) { 14741da177e4SLinus Torvalds __u32 new_window = __tcp_select_window(sk); 14751da177e4SLinus Torvalds 14761da177e4SLinus Torvalds /* Send ACK now, if this read freed lots of space 14771da177e4SLinus Torvalds * in our buffer. Certainly, new_window is new window. 14781da177e4SLinus Torvalds * We can advertise it now, if it is not less than current one. 14791da177e4SLinus Torvalds * "Lots" means "at least twice" here. 14801da177e4SLinus Torvalds */ 14811da177e4SLinus Torvalds if (new_window && new_window >= 2 * rcv_window_now) 1482a2a385d6SEric Dumazet time_to_ack = true; 14831da177e4SLinus Torvalds } 14841da177e4SLinus Torvalds } 14851da177e4SLinus Torvalds if (time_to_ack) 14861da177e4SLinus Torvalds tcp_send_ack(sk); 14871da177e4SLinus Torvalds } 14881da177e4SLinus Torvalds 1489c457985aSCong Wang void tcp_cleanup_rbuf(struct sock *sk, int copied) 1490c457985aSCong Wang { 1491c457985aSCong Wang struct sk_buff *skb = skb_peek(&sk->sk_receive_queue); 1492c457985aSCong Wang struct tcp_sock *tp = tcp_sk(sk); 1493c457985aSCong Wang 1494c457985aSCong Wang WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq), 1495c457985aSCong Wang "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n", 1496c457985aSCong Wang tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt); 1497c457985aSCong Wang __tcp_cleanup_rbuf(sk, copied); 1498c457985aSCong Wang } 1499c457985aSCong Wang 15003df684c1SEric Dumazet static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb) 15013df684c1SEric Dumazet { 1502f35f8219SEric Dumazet __skb_unlink(skb, &sk->sk_receive_queue); 15033df684c1SEric Dumazet if (likely(skb->destructor == sock_rfree)) { 15043df684c1SEric Dumazet sock_rfree(skb); 15053df684c1SEric Dumazet skb->destructor = NULL; 15063df684c1SEric Dumazet skb->sk = NULL; 150768822bdfSEric Dumazet return skb_attempt_defer_free(skb); 1508f35f8219SEric Dumazet } 1509f35f8219SEric Dumazet __kfree_skb(skb); 15103df684c1SEric Dumazet } 15113df684c1SEric Dumazet 15123f92a64eSJakub Kicinski struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) 15131da177e4SLinus Torvalds { 15141da177e4SLinus Torvalds struct sk_buff *skb; 15151da177e4SLinus Torvalds u32 offset; 15161da177e4SLinus Torvalds 1517f26845b4SEric Dumazet while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) { 15181da177e4SLinus Torvalds offset = seq - 
TCP_SKB_CB(skb)->seq; 15199d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 15209d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__); 15211da177e4SLinus Torvalds offset--; 15229d691539SEric Dumazet } 1523e11ecddfSEric Dumazet if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) { 15241da177e4SLinus Torvalds *off = offset; 15251da177e4SLinus Torvalds return skb; 15261da177e4SLinus Torvalds } 1527f26845b4SEric Dumazet /* This looks weird, but this can happen if TCP collapsing 1528f26845b4SEric Dumazet * split a fat GRO packet while we released the socket lock 1529f26845b4SEric Dumazet * in skb_splice_bits() 1530f26845b4SEric Dumazet */ 15313df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 15321da177e4SLinus Torvalds } 15331da177e4SLinus Torvalds return NULL; 15341da177e4SLinus Torvalds } 15353f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_recv_skb); 15361da177e4SLinus Torvalds 15371da177e4SLinus Torvalds /* 15381da177e4SLinus Torvalds * This routine provides an alternative to tcp_recvmsg() for routines 15391da177e4SLinus Torvalds * that would like to handle copying from skbuffs directly in 'sendfile' 15401da177e4SLinus Torvalds * fashion. 15411da177e4SLinus Torvalds * Note: 15421da177e4SLinus Torvalds * - It is assumed that the socket was locked by the caller. 15431da177e4SLinus Torvalds * - The routine does not block. 15441da177e4SLinus Torvalds * - At present, there is no support for reading OOB data 15451da177e4SLinus Torvalds * or for 'peeking' the socket using this routine 15461da177e4SLinus Torvalds * (although both would be easy to implement). 15471da177e4SLinus Torvalds */ 15481da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 15491da177e4SLinus Torvalds sk_read_actor_t recv_actor) 15501da177e4SLinus Torvalds { 15511da177e4SLinus Torvalds struct sk_buff *skb; 15521da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 15531da177e4SLinus Torvalds u32 seq = tp->copied_seq; 15541da177e4SLinus Torvalds u32 offset; 15551da177e4SLinus Torvalds int copied = 0; 15561da177e4SLinus Torvalds 15571da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) 15581da177e4SLinus Torvalds return -ENOTCONN; 15591da177e4SLinus Torvalds while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 15601da177e4SLinus Torvalds if (offset < skb->len) { 1561374e7b59SOctavian Purdila int used; 1562374e7b59SOctavian Purdila size_t len; 15631da177e4SLinus Torvalds 15641da177e4SLinus Torvalds len = skb->len - offset; 15651da177e4SLinus Torvalds /* Stop reading if we hit a patch of urgent data */ 1566b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) { 15671da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - seq; 15681da177e4SLinus Torvalds if (urg_offset < len) 15691da177e4SLinus Torvalds len = urg_offset; 15701da177e4SLinus Torvalds if (!len) 15711da177e4SLinus Torvalds break; 15721da177e4SLinus Torvalds } 15731da177e4SLinus Torvalds used = recv_actor(desc, skb, offset, len); 1574ff905b1eSEric Dumazet if (used <= 0) { 1575ddb61a57SJens Axboe if (!copied) 1576ddb61a57SJens Axboe copied = used; 1577ddb61a57SJens Axboe break; 1578e3d5ea2cSEric Dumazet } 1579e3d5ea2cSEric Dumazet if (WARN_ON_ONCE(used > len)) 1580e3d5ea2cSEric Dumazet used = len; 15811da177e4SLinus Torvalds seq += used; 15821da177e4SLinus Torvalds copied += used; 15831da177e4SLinus Torvalds offset += used; 1584e3d5ea2cSEric Dumazet 158502275a2eSWilly Tarreau /* If recv_actor drops the lock (e.g.
TCP splice 1586293ad604SOctavian Purdila * receive) the skb pointer might be invalid when 1587293ad604SOctavian Purdila * getting here: tcp_collapse might have deleted it 1588293ad604SOctavian Purdila * while aggregating skbs from the socket queue. 1589293ad604SOctavian Purdila */ 1590293ad604SOctavian Purdila skb = tcp_recv_skb(sk, seq - 1, &offset); 159102275a2eSWilly Tarreau if (!skb) 15921da177e4SLinus Torvalds break; 159302275a2eSWilly Tarreau /* TCP coalescing might have appended data to the skb. 159402275a2eSWilly Tarreau * Try to splice more frags 159502275a2eSWilly Tarreau */ 159602275a2eSWilly Tarreau if (offset + 1 != skb->len) 159702275a2eSWilly Tarreau continue; 15981da177e4SLinus Torvalds } 1599e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 16003df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 16011da177e4SLinus Torvalds ++seq; 16021da177e4SLinus Torvalds break; 16031da177e4SLinus Torvalds } 16043df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 16051da177e4SLinus Torvalds if (!desc->count) 16061da177e4SLinus Torvalds break; 16077db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 16081da177e4SLinus Torvalds } 16097db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 16101da177e4SLinus Torvalds 16111da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 16121da177e4SLinus Torvalds 16131da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */ 1614f26845b4SEric Dumazet if (copied > 0) { 1615f26845b4SEric Dumazet tcp_recv_skb(sk, seq, &offset); 16160e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied); 1617f26845b4SEric Dumazet } 16181da177e4SLinus Torvalds return copied; 16191da177e4SLinus Torvalds } 16204bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_read_sock); 16211da177e4SLinus Torvalds 1622965b57b4SCong Wang int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) 162304919bedSCong Wang { 162404919bedSCong Wang struct tcp_sock *tp = tcp_sk(sk); 162504919bedSCong Wang u32 seq = tp->copied_seq; 162604919bedSCong Wang struct sk_buff *skb; 162704919bedSCong Wang int copied = 0; 162804919bedSCong Wang u32 offset; 162904919bedSCong Wang 163004919bedSCong Wang if (sk->sk_state == TCP_LISTEN) 163104919bedSCong Wang return -ENOTCONN; 163204919bedSCong Wang 1633db4192a7SCong Wang while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 1634db4192a7SCong Wang u8 tcp_flags; 1635db4192a7SCong Wang int used; 163604919bedSCong Wang 163704919bedSCong Wang __skb_unlink(skb, &sk->sk_receive_queue); 163896628951SPeilin Ye WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk)); 1639db4192a7SCong Wang tcp_flags = TCP_SKB_CB(skb)->tcp_flags; 1640db4192a7SCong Wang used = recv_actor(sk, skb); 1641db4192a7SCong Wang if (used < 0) { 1642db4192a7SCong Wang if (!copied) 1643db4192a7SCong Wang copied = used; 1644db4192a7SCong Wang break; 1645db4192a7SCong Wang } 1646db4192a7SCong Wang seq += used; 1647db4192a7SCong Wang copied += used; 1648db4192a7SCong Wang 1649db4192a7SCong Wang if (tcp_flags & TCPHDR_FIN) { 1650db4192a7SCong Wang ++seq; 1651db4192a7SCong Wang break; 1652db4192a7SCong Wang } 1653db4192a7SCong Wang } 165404919bedSCong Wang return copied; 165504919bedSCong Wang } 165604919bedSCong Wang EXPORT_SYMBOL(tcp_read_skb); 165704919bedSCong Wang 16583f92a64eSJakub Kicinski void tcp_read_done(struct sock *sk, size_t len) 16593f92a64eSJakub Kicinski { 16603f92a64eSJakub Kicinski struct tcp_sock *tp = tcp_sk(sk); 16613f92a64eSJakub Kicinski u32 seq = tp->copied_seq; 16623f92a64eSJakub Kicinski struct sk_buff *skb; 16633f92a64eSJakub Kicinski size_t left; 
16643f92a64eSJakub Kicinski u32 offset; 16653f92a64eSJakub Kicinski 16663f92a64eSJakub Kicinski if (sk->sk_state == TCP_LISTEN) 16673f92a64eSJakub Kicinski return; 16683f92a64eSJakub Kicinski 16693f92a64eSJakub Kicinski left = len; 16703f92a64eSJakub Kicinski while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { 16713f92a64eSJakub Kicinski int used; 16723f92a64eSJakub Kicinski 16733f92a64eSJakub Kicinski used = min_t(size_t, skb->len - offset, left); 16743f92a64eSJakub Kicinski seq += used; 16753f92a64eSJakub Kicinski left -= used; 16763f92a64eSJakub Kicinski 16773f92a64eSJakub Kicinski if (skb->len > offset + used) 16783f92a64eSJakub Kicinski break; 16793f92a64eSJakub Kicinski 16803f92a64eSJakub Kicinski if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) { 16813f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb); 16823f92a64eSJakub Kicinski ++seq; 16833f92a64eSJakub Kicinski break; 16843f92a64eSJakub Kicinski } 16853f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb); 16863f92a64eSJakub Kicinski } 16873f92a64eSJakub Kicinski WRITE_ONCE(tp->copied_seq, seq); 16883f92a64eSJakub Kicinski 16893f92a64eSJakub Kicinski tcp_rcv_space_adjust(sk); 16903f92a64eSJakub Kicinski 16913f92a64eSJakub Kicinski /* Clean up data we have read: This will do ACK frames. */ 16923f92a64eSJakub Kicinski if (left != len) 16933f92a64eSJakub Kicinski tcp_cleanup_rbuf(sk, len - left); 16943f92a64eSJakub Kicinski } 16953f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_read_done); 16963f92a64eSJakub Kicinski 169732035585STom Herbert int tcp_peek_len(struct socket *sock) 169832035585STom Herbert { 169932035585STom Herbert return tcp_inq(sock->sk); 170032035585STom Herbert } 170132035585STom Herbert EXPORT_SYMBOL(tcp_peek_len); 170232035585STom Herbert 1703d1361840SEric Dumazet /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */ 1704d1361840SEric Dumazet int tcp_set_rcvlowat(struct sock *sk, int val) 1705d1361840SEric Dumazet { 1706dfa2f048SEric Dumazet int space, cap; 1707867f816bSSoheil Hassas Yeganeh 1708867f816bSSoheil Hassas Yeganeh if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1709867f816bSSoheil Hassas Yeganeh cap = sk->sk_rcvbuf >> 1; 1710867f816bSSoheil Hassas Yeganeh else 171102739545SKuniyuki Iwashima cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1; 1712867f816bSSoheil Hassas Yeganeh val = min(val, cap); 1713eac66402SEric Dumazet WRITE_ONCE(sk->sk_rcvlowat, val ? 
: 1); 171403f45c88SEric Dumazet 171503f45c88SEric Dumazet /* Check if we need to signal EPOLLIN right now */ 171603f45c88SEric Dumazet tcp_data_ready(sk); 171703f45c88SEric Dumazet 1718d1361840SEric Dumazet if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) 1719d1361840SEric Dumazet return 0; 1720d1361840SEric Dumazet 1721dfa2f048SEric Dumazet space = tcp_space_from_win(sk, val); 1722dfa2f048SEric Dumazet if (space > sk->sk_rcvbuf) { 1723dfa2f048SEric Dumazet WRITE_ONCE(sk->sk_rcvbuf, space); 1724dfa2f048SEric Dumazet tcp_sk(sk)->window_clamp = val; 1725d1361840SEric Dumazet } 1726d1361840SEric Dumazet return 0; 1727d1361840SEric Dumazet } 1728d1361840SEric Dumazet EXPORT_SYMBOL(tcp_set_rcvlowat); 1729d1361840SEric Dumazet 1730892bfd3dSFlorian Westphal void tcp_update_recv_tstamps(struct sk_buff *skb, 17317eeba170SArjun Roy struct scm_timestamping_internal *tss) 17327eeba170SArjun Roy { 17337eeba170SArjun Roy if (skb->tstamp) 17347eeba170SArjun Roy tss->ts[0] = ktime_to_timespec64(skb->tstamp); 17357eeba170SArjun Roy else 17367eeba170SArjun Roy tss->ts[0] = (struct timespec64) {0}; 17377eeba170SArjun Roy 17387eeba170SArjun Roy if (skb_hwtstamps(skb)->hwtstamp) 17397eeba170SArjun Roy tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp); 17407eeba170SArjun Roy else 17417eeba170SArjun Roy tss->ts[2] = (struct timespec64) {0}; 17427eeba170SArjun Roy } 17437eeba170SArjun Roy 174405255b82SEric Dumazet #ifdef CONFIG_MMU 17457a7f0946SArjun Roy const struct vm_operations_struct tcp_vm_ops = { 174605255b82SEric Dumazet }; 174705255b82SEric Dumazet 174893ab6cc6SEric Dumazet int tcp_mmap(struct file *file, struct socket *sock, 174993ab6cc6SEric Dumazet struct vm_area_struct *vma) 175093ab6cc6SEric Dumazet { 175105255b82SEric Dumazet if (vma->vm_flags & (VM_WRITE | VM_EXEC)) 175205255b82SEric Dumazet return -EPERM; 17531c71222eSSuren Baghdasaryan vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC); 175405255b82SEric Dumazet 17553e4e28c5SMichel Lespinasse /* Instruct vm_insert_page() to not mmap_read_lock(mm) */ 17561c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_MIXEDMAP); 175705255b82SEric Dumazet 175805255b82SEric Dumazet vma->vm_ops = &tcp_vm_ops; 175905255b82SEric Dumazet return 0; 176005255b82SEric Dumazet } 176105255b82SEric Dumazet EXPORT_SYMBOL(tcp_mmap); 176205255b82SEric Dumazet 17637fba5309SArjun Roy static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb, 17647fba5309SArjun Roy u32 *offset_frag) 17657fba5309SArjun Roy { 17667fba5309SArjun Roy skb_frag_t *frag; 17677fba5309SArjun Roy 176870701b83SArjun Roy if (unlikely(offset_skb >= skb->len)) 176970701b83SArjun Roy return NULL; 177070701b83SArjun Roy 17717fba5309SArjun Roy offset_skb -= skb_headlen(skb); 17727fba5309SArjun Roy if ((int)offset_skb < 0 || skb_has_frag_list(skb)) 17737fba5309SArjun Roy return NULL; 17747fba5309SArjun Roy 17757fba5309SArjun Roy frag = skb_shinfo(skb)->frags; 17767fba5309SArjun Roy while (offset_skb) { 17777fba5309SArjun Roy if (skb_frag_size(frag) > offset_skb) { 17787fba5309SArjun Roy *offset_frag = offset_skb; 17797fba5309SArjun Roy return frag; 17807fba5309SArjun Roy } 17817fba5309SArjun Roy offset_skb -= skb_frag_size(frag); 17827fba5309SArjun Roy ++frag; 17837fba5309SArjun Roy } 17847fba5309SArjun Roy *offset_frag = 0; 17857fba5309SArjun Roy return frag; 17867fba5309SArjun Roy } 17877fba5309SArjun Roy 178898917cf0SArjun Roy static bool can_map_frag(const skb_frag_t *frag) 178998917cf0SArjun Roy { 179098917cf0SArjun Roy return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag); 
179198917cf0SArjun Roy } 179298917cf0SArjun Roy 179398917cf0SArjun Roy static int find_next_mappable_frag(const skb_frag_t *frag, 179498917cf0SArjun Roy int remaining_in_skb) 179598917cf0SArjun Roy { 179698917cf0SArjun Roy int offset = 0; 179798917cf0SArjun Roy 179898917cf0SArjun Roy if (likely(can_map_frag(frag))) 179998917cf0SArjun Roy return 0; 180098917cf0SArjun Roy 180198917cf0SArjun Roy while (offset < remaining_in_skb && !can_map_frag(frag)) { 180298917cf0SArjun Roy offset += skb_frag_size(frag); 180398917cf0SArjun Roy ++frag; 180498917cf0SArjun Roy } 180598917cf0SArjun Roy return offset; 180698917cf0SArjun Roy } 180798917cf0SArjun Roy 18080c3936d3SArjun Roy static void tcp_zerocopy_set_hint_for_skb(struct sock *sk, 18090c3936d3SArjun Roy struct tcp_zerocopy_receive *zc, 18100c3936d3SArjun Roy struct sk_buff *skb, u32 offset) 18110c3936d3SArjun Roy { 18120c3936d3SArjun Roy u32 frag_offset, partial_frag_remainder = 0; 18130c3936d3SArjun Roy int mappable_offset; 18140c3936d3SArjun Roy skb_frag_t *frag; 18150c3936d3SArjun Roy 18160c3936d3SArjun Roy /* worst case: skip to next skb. try to improve on this case below */ 18170c3936d3SArjun Roy zc->recv_skip_hint = skb->len - offset; 18180c3936d3SArjun Roy 18190c3936d3SArjun Roy /* Find the frag containing this offset (and how far into that frag) */ 18200c3936d3SArjun Roy frag = skb_advance_to_frag(skb, offset, &frag_offset); 18210c3936d3SArjun Roy if (!frag) 18220c3936d3SArjun Roy return; 18230c3936d3SArjun Roy 18240c3936d3SArjun Roy if (frag_offset) { 18250c3936d3SArjun Roy struct skb_shared_info *info = skb_shinfo(skb); 18260c3936d3SArjun Roy 18270c3936d3SArjun Roy /* We read part of the last frag, must recvmsg() rest of skb. */ 18280c3936d3SArjun Roy if (frag == &info->frags[info->nr_frags - 1]) 18290c3936d3SArjun Roy return; 18300c3936d3SArjun Roy 18310c3936d3SArjun Roy /* Else, we must at least read the remainder in this frag. */ 18320c3936d3SArjun Roy partial_frag_remainder = skb_frag_size(frag) - frag_offset; 18330c3936d3SArjun Roy zc->recv_skip_hint -= partial_frag_remainder; 18340c3936d3SArjun Roy ++frag; 18350c3936d3SArjun Roy } 18360c3936d3SArjun Roy 18370c3936d3SArjun Roy /* partial_frag_remainder: If part way through a frag, must read rest. 18380c3936d3SArjun Roy * mappable_offset: Bytes till next mappable frag, *not* counting bytes 18390c3936d3SArjun Roy * in partial_frag_remainder. 
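 * Worked example with hypothetical numbers: 1024 bytes left in a
 * partially-read frag plus 3072 bytes of unmappable frags after it
 * yield recv_skip_hint = 3072 + 1024 = 4096, i.e. userspace must
 * recvmsg() 4K before the mmap path can continue.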
18400c3936d3SArjun Roy */ 18410c3936d3SArjun Roy mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint); 18420c3936d3SArjun Roy zc->recv_skip_hint = mappable_offset + partial_frag_remainder; 18430c3936d3SArjun Roy } 18440c3936d3SArjun Roy 1845f21a3c48SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 1846ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss, 1847f21a3c48SArjun Roy int *cmsg_flags); 1848f21a3c48SArjun Roy static int receive_fallback_to_copy(struct sock *sk, 18497eeba170SArjun Roy struct tcp_zerocopy_receive *zc, int inq, 18507eeba170SArjun Roy struct scm_timestamping_internal *tss) 1851f21a3c48SArjun Roy { 1852f21a3c48SArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address; 1853f21a3c48SArjun Roy struct msghdr msg = {}; 1854f21a3c48SArjun Roy struct iovec iov; 18557eeba170SArjun Roy int err; 1856f21a3c48SArjun Roy 1857f21a3c48SArjun Roy zc->length = 0; 1858f21a3c48SArjun Roy zc->recv_skip_hint = 0; 1859f21a3c48SArjun Roy 1860f21a3c48SArjun Roy if (copy_address != zc->copybuf_address) 1861f21a3c48SArjun Roy return -EINVAL; 1862f21a3c48SArjun Roy 1863de4eda9dSAl Viro err = import_single_range(ITER_DEST, (void __user *)copy_address, 1864f21a3c48SArjun Roy inq, &iov, &msg.msg_iter); 1865f21a3c48SArjun Roy if (err) 1866f21a3c48SArjun Roy return err; 1867f21a3c48SArjun Roy 1868ec095263SOliver Hartkopp err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT, 18697eeba170SArjun Roy tss, &zc->msg_flags); 1870f21a3c48SArjun Roy if (err < 0) 1871f21a3c48SArjun Roy return err; 1872f21a3c48SArjun Roy 1873f21a3c48SArjun Roy zc->copybuf_len = err; 18740c3936d3SArjun Roy if (likely(zc->copybuf_len)) { 18750c3936d3SArjun Roy struct sk_buff *skb; 18760c3936d3SArjun Roy u32 offset; 18770c3936d3SArjun Roy 18780c3936d3SArjun Roy skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset); 18790c3936d3SArjun Roy if (skb) 18800c3936d3SArjun Roy tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset); 18810c3936d3SArjun Roy } 1882f21a3c48SArjun Roy return 0; 1883f21a3c48SArjun Roy } 1884f21a3c48SArjun Roy 188518fb76edSArjun Roy static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc, 188618fb76edSArjun Roy struct sk_buff *skb, u32 copylen, 188718fb76edSArjun Roy u32 *offset, u32 *seq) 188818fb76edSArjun Roy { 188918fb76edSArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address; 189018fb76edSArjun Roy struct msghdr msg = {}; 189118fb76edSArjun Roy struct iovec iov; 189218fb76edSArjun Roy int err; 189318fb76edSArjun Roy 189418fb76edSArjun Roy if (copy_address != zc->copybuf_address) 189518fb76edSArjun Roy return -EINVAL; 189618fb76edSArjun Roy 1897de4eda9dSAl Viro err = import_single_range(ITER_DEST, (void __user *)copy_address, 189818fb76edSArjun Roy copylen, &iov, &msg.msg_iter); 189918fb76edSArjun Roy if (err) 190018fb76edSArjun Roy return err; 190118fb76edSArjun Roy err = skb_copy_datagram_msg(skb, *offset, &msg, copylen); 190218fb76edSArjun Roy if (err) 190318fb76edSArjun Roy return err; 190418fb76edSArjun Roy zc->recv_skip_hint -= copylen; 190518fb76edSArjun Roy *offset += copylen; 190618fb76edSArjun Roy *seq += copylen; 190718fb76edSArjun Roy return (__s32)copylen; 190818fb76edSArjun Roy } 190918fb76edSArjun Roy 19107eeba170SArjun Roy static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc, 191118fb76edSArjun Roy struct sock *sk, 191218fb76edSArjun Roy struct sk_buff *skb, 191318fb76edSArjun Roy u32 *seq, 19147eeba170SArjun Roy s32 copybuf_len, 19157eeba170SArjun Roy struct 
scm_timestamping_internal *tss) 191618fb76edSArjun Roy { 191718fb76edSArjun Roy u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint); 191818fb76edSArjun Roy 191918fb76edSArjun Roy if (!copylen) 192018fb76edSArjun Roy return 0; 192118fb76edSArjun Roy /* skb is null if inq < PAGE_SIZE. */ 19227eeba170SArjun Roy if (skb) { 192318fb76edSArjun Roy offset = *seq - TCP_SKB_CB(skb)->seq; 19247eeba170SArjun Roy } else { 192518fb76edSArjun Roy skb = tcp_recv_skb(sk, *seq, &offset); 19267eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) { 19277eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss); 19287eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS; 19297eeba170SArjun Roy } 19307eeba170SArjun Roy } 193118fb76edSArjun Roy 193218fb76edSArjun Roy zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset, 193318fb76edSArjun Roy seq); 193418fb76edSArjun Roy return zc->copybuf_len < 0 ? 0 : copylen; 193518fb76edSArjun Roy } 193618fb76edSArjun Roy 193794ab9eb9SArjun Roy static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma, 193894ab9eb9SArjun Roy struct page **pending_pages, 193994ab9eb9SArjun Roy unsigned long pages_remaining, 194094ab9eb9SArjun Roy unsigned long *address, 194194ab9eb9SArjun Roy u32 *length, 194294ab9eb9SArjun Roy u32 *seq, 194394ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc, 194494ab9eb9SArjun Roy u32 total_bytes_to_map, 194594ab9eb9SArjun Roy int err) 194694ab9eb9SArjun Roy { 194794ab9eb9SArjun Roy /* At least one page did not map. Try zapping if we skipped earlier. */ 194894ab9eb9SArjun Roy if (err == -EBUSY && 194994ab9eb9SArjun Roy zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) { 195094ab9eb9SArjun Roy u32 maybe_zap_len; 195194ab9eb9SArjun Roy 195294ab9eb9SArjun Roy maybe_zap_len = total_bytes_to_map - /* All bytes to map */ 195394ab9eb9SArjun Roy *length + /* Mapped or pending */ 195494ab9eb9SArjun Roy (pages_remaining * PAGE_SIZE); /* Failed map. */ 1955e9adcfecSMike Kravetz zap_page_range_single(vma, *address, maybe_zap_len, NULL); 195694ab9eb9SArjun Roy err = 0; 195794ab9eb9SArjun Roy } 195894ab9eb9SArjun Roy 195994ab9eb9SArjun Roy if (!err) { 196094ab9eb9SArjun Roy unsigned long leftover_pages = pages_remaining; 196194ab9eb9SArjun Roy int bytes_mapped; 196294ab9eb9SArjun Roy 1963e9adcfecSMike Kravetz /* We called zap_page_range_single, try to reinsert. */ 196494ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, 196594ab9eb9SArjun Roy pending_pages, 196694ab9eb9SArjun Roy &pages_remaining); 196794ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining); 196894ab9eb9SArjun Roy *seq += bytes_mapped; 196994ab9eb9SArjun Roy *address += bytes_mapped; 197094ab9eb9SArjun Roy } 197194ab9eb9SArjun Roy if (err) { 197294ab9eb9SArjun Roy /* Either we were unable to zap, OR we zapped, retried an 197394ab9eb9SArjun Roy * insert, and still had an issue. Either way, pages_remaining 197494ab9eb9SArjun Roy * is the number of pages we were unable to map, and we unroll 197594ab9eb9SArjun Roy * some state we speculatively touched before.
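 * The unroll itself is plain arithmetic: PAGE_SIZE * pages_remaining
 * bytes come off *length and are returned to userspace through
 * recv_skip_hint, so the unmapped span is read with recvmsg() instead.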
197694ab9eb9SArjun Roy */ 197794ab9eb9SArjun Roy const int bytes_not_mapped = PAGE_SIZE * pages_remaining; 197894ab9eb9SArjun Roy 197994ab9eb9SArjun Roy *length -= bytes_not_mapped; 198094ab9eb9SArjun Roy zc->recv_skip_hint += bytes_not_mapped; 198194ab9eb9SArjun Roy } 198294ab9eb9SArjun Roy return err; 198394ab9eb9SArjun Roy } 198494ab9eb9SArjun Roy 19853763a24cSArjun Roy static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma, 19863763a24cSArjun Roy struct page **pages, 198794ab9eb9SArjun Roy unsigned int pages_to_map, 198894ab9eb9SArjun Roy unsigned long *address, 198994ab9eb9SArjun Roy u32 *length, 19903763a24cSArjun Roy u32 *seq, 199194ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc, 199294ab9eb9SArjun Roy u32 total_bytes_to_map) 19933763a24cSArjun Roy { 19943763a24cSArjun Roy unsigned long pages_remaining = pages_to_map; 199594ab9eb9SArjun Roy unsigned int pages_mapped; 199694ab9eb9SArjun Roy unsigned int bytes_mapped; 199794ab9eb9SArjun Roy int err; 19983763a24cSArjun Roy 199994ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, pages, &pages_remaining); 200094ab9eb9SArjun Roy pages_mapped = pages_to_map - (unsigned int)pages_remaining; 200194ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * pages_mapped; 20023763a24cSArjun Roy /* Even if vm_insert_pages fails, it may have partially succeeded in 20033763a24cSArjun Roy * mapping (some but not all of the pages). 20043763a24cSArjun Roy */ 20053763a24cSArjun Roy *seq += bytes_mapped; 200694ab9eb9SArjun Roy *address += bytes_mapped; 200794ab9eb9SArjun Roy 200894ab9eb9SArjun Roy if (likely(!err)) 200994ab9eb9SArjun Roy return 0; 201094ab9eb9SArjun Roy 201194ab9eb9SArjun Roy /* Error: maybe zap and retry + rollback state for failed inserts. */ 201294ab9eb9SArjun Roy return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped, 201394ab9eb9SArjun Roy pages_remaining, address, length, seq, zc, total_bytes_to_map, 201494ab9eb9SArjun Roy err); 20153763a24cSArjun Roy } 20163763a24cSArjun Roy 20173c5a2fd0SArjun Roy #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS) 20187eeba170SArjun Roy static void tcp_zc_finalize_rx_tstamp(struct sock *sk, 20197eeba170SArjun Roy struct tcp_zerocopy_receive *zc, 20207eeba170SArjun Roy struct scm_timestamping_internal *tss) 20217eeba170SArjun Roy { 20227eeba170SArjun Roy unsigned long msg_control_addr; 20237eeba170SArjun Roy struct msghdr cmsg_dummy; 20247eeba170SArjun Roy 20257eeba170SArjun Roy msg_control_addr = (unsigned long)zc->msg_control; 2026c39ef213SKevin Brodsky cmsg_dummy.msg_control_user = (void __user *)msg_control_addr; 20277eeba170SArjun Roy cmsg_dummy.msg_controllen = 20287eeba170SArjun Roy (__kernel_size_t)zc->msg_controllen; 20297eeba170SArjun Roy cmsg_dummy.msg_flags = in_compat_syscall() 20307eeba170SArjun Roy ? 
MSG_CMSG_COMPAT : 0; 2031a6f8ee58SArjun Roy cmsg_dummy.msg_control_is_user = true; 20327eeba170SArjun Roy zc->msg_flags = 0; 20337eeba170SArjun Roy if (zc->msg_control == msg_control_addr && 20347eeba170SArjun Roy zc->msg_controllen == cmsg_dummy.msg_controllen) { 20357eeba170SArjun Roy tcp_recv_timestamp(&cmsg_dummy, sk, tss); 20367eeba170SArjun Roy zc->msg_control = (__u64) 2037c39ef213SKevin Brodsky ((uintptr_t)cmsg_dummy.msg_control_user); 20387eeba170SArjun Roy zc->msg_controllen = 20397eeba170SArjun Roy (__u64)cmsg_dummy.msg_controllen; 20407eeba170SArjun Roy zc->msg_flags = (__u32)cmsg_dummy.msg_flags; 20417eeba170SArjun Roy } 20427eeba170SArjun Roy } 20437eeba170SArjun Roy 20447a7f0946SArjun Roy static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm, 20457a7f0946SArjun Roy unsigned long address, 20467a7f0946SArjun Roy bool *mmap_locked) 20477a7f0946SArjun Roy { 20487a7f0946SArjun Roy struct vm_area_struct *vma = NULL; 20497a7f0946SArjun Roy 20507a7f0946SArjun Roy #ifdef CONFIG_PER_VMA_LOCK 20517a7f0946SArjun Roy vma = lock_vma_under_rcu(mm, address); 20527a7f0946SArjun Roy #endif 20537a7f0946SArjun Roy if (vma) { 20547a7f0946SArjun Roy if (!vma_is_tcp(vma)) { 20557a7f0946SArjun Roy vma_end_read(vma); 20567a7f0946SArjun Roy return NULL; 20577a7f0946SArjun Roy } 20587a7f0946SArjun Roy *mmap_locked = false; 20597a7f0946SArjun Roy return vma; 20607a7f0946SArjun Roy } 20617a7f0946SArjun Roy 20627a7f0946SArjun Roy mmap_read_lock(mm); 20637a7f0946SArjun Roy vma = vma_lookup(mm, address); 20647a7f0946SArjun Roy if (!vma || !vma_is_tcp(vma)) { 20657a7f0946SArjun Roy mmap_read_unlock(mm); 20667a7f0946SArjun Roy return NULL; 20677a7f0946SArjun Roy } 20687a7f0946SArjun Roy *mmap_locked = true; 20697a7f0946SArjun Roy return vma; 20707a7f0946SArjun Roy } 20717a7f0946SArjun Roy 207294ab9eb9SArjun Roy #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32 207305255b82SEric Dumazet static int tcp_zerocopy_receive(struct sock *sk, 20747eeba170SArjun Roy struct tcp_zerocopy_receive *zc, 20757eeba170SArjun Roy struct scm_timestamping_internal *tss) 207605255b82SEric Dumazet { 207794ab9eb9SArjun Roy u32 length = 0, offset, vma_len, avail_len, copylen = 0; 207805255b82SEric Dumazet unsigned long address = (unsigned long)zc->address; 207994ab9eb9SArjun Roy struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE]; 208018fb76edSArjun Roy s32 copybuf_len = zc->copybuf_len; 208118fb76edSArjun Roy struct tcp_sock *tp = tcp_sk(sk); 208205255b82SEric Dumazet const skb_frag_t *frags = NULL; 208394ab9eb9SArjun Roy unsigned int pages_to_map = 0; 208405255b82SEric Dumazet struct vm_area_struct *vma; 208505255b82SEric Dumazet struct sk_buff *skb = NULL; 208618fb76edSArjun Roy u32 seq = tp->copied_seq; 208794ab9eb9SArjun Roy u32 total_bytes_to_map; 208818fb76edSArjun Roy int inq = tcp_inq(sk); 20897a7f0946SArjun Roy bool mmap_locked; 209093ab6cc6SEric Dumazet int ret; 209193ab6cc6SEric Dumazet 209218fb76edSArjun Roy zc->copybuf_len = 0; 20937eeba170SArjun Roy zc->msg_flags = 0; 209418fb76edSArjun Roy 209505255b82SEric Dumazet if (address & (PAGE_SIZE - 1) || address != zc->address) 209693ab6cc6SEric Dumazet return -EINVAL; 209793ab6cc6SEric Dumazet 209893ab6cc6SEric Dumazet if (sk->sk_state == TCP_LISTEN) 209905255b82SEric Dumazet return -ENOTCONN; 210093ab6cc6SEric Dumazet 210193ab6cc6SEric Dumazet sock_rps_record_flow(sk); 210293ab6cc6SEric Dumazet 2103f21a3c48SArjun Roy if (inq && inq <= copybuf_len) 21047eeba170SArjun Roy return receive_fallback_to_copy(sk, zc, inq, tss); 2105f21a3c48SArjun Roy 2106936ced41SArjun Roy if (inq < 
PAGE_SIZE) { 2107936ced41SArjun Roy zc->length = 0; 2108936ced41SArjun Roy zc->recv_skip_hint = inq; 2109936ced41SArjun Roy if (!inq && sock_flag(sk, SOCK_DONE)) 2110936ced41SArjun Roy return -EIO; 2111936ced41SArjun Roy return 0; 2112936ced41SArjun Roy } 2113936ced41SArjun Roy 21147a7f0946SArjun Roy vma = find_tcp_vma(current->mm, address, &mmap_locked); 21157a7f0946SArjun Roy if (!vma) 2116e776af60SEric Dumazet return -EINVAL; 21177a7f0946SArjun Roy 211818fb76edSArjun Roy vma_len = min_t(unsigned long, zc->length, vma->vm_end - address); 211918fb76edSArjun Roy avail_len = min_t(u32, vma_len, inq); 212094ab9eb9SArjun Roy total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1); 212194ab9eb9SArjun Roy if (total_bytes_to_map) { 212294ab9eb9SArjun Roy if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT)) 2123e9adcfecSMike Kravetz zap_page_range_single(vma, address, total_bytes_to_map, 2124e9adcfecSMike Kravetz NULL); 212594ab9eb9SArjun Roy zc->length = total_bytes_to_map; 212605255b82SEric Dumazet zc->recv_skip_hint = 0; 21278f2b0293SSoheil Hassas Yeganeh } else { 212818fb76edSArjun Roy zc->length = avail_len; 212918fb76edSArjun Roy zc->recv_skip_hint = avail_len; 21308f2b0293SSoheil Hassas Yeganeh } 213105255b82SEric Dumazet ret = 0; 213205255b82SEric Dumazet while (length + PAGE_SIZE <= zc->length) { 213398917cf0SArjun Roy int mappable_offset; 213494ab9eb9SArjun Roy struct page *page; 213598917cf0SArjun Roy 213605255b82SEric Dumazet if (zc->recv_skip_hint < PAGE_SIZE) { 21377fba5309SArjun Roy u32 offset_frag; 21387fba5309SArjun Roy 213905255b82SEric Dumazet if (skb) { 21400e627190SArjun Roy if (zc->recv_skip_hint > 0) 21410e627190SArjun Roy break; 214205255b82SEric Dumazet skb = skb->next; 214305255b82SEric Dumazet offset = seq - TCP_SKB_CB(skb)->seq; 214405255b82SEric Dumazet } else { 214593ab6cc6SEric Dumazet skb = tcp_recv_skb(sk, seq, &offset); 214605255b82SEric Dumazet } 21477eeba170SArjun Roy 21487eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) { 21497eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss); 21507eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS; 21517eeba170SArjun Roy } 215205255b82SEric Dumazet zc->recv_skip_hint = skb->len - offset; 21537fba5309SArjun Roy frags = skb_advance_to_frag(skb, offset, &offset_frag); 21547fba5309SArjun Roy if (!frags || offset_frag) 215505255b82SEric Dumazet break; 215605255b82SEric Dumazet } 2157789762ceSSoheil Hassas Yeganeh 215898917cf0SArjun Roy mappable_offset = find_next_mappable_frag(frags, 215998917cf0SArjun Roy zc->recv_skip_hint); 216098917cf0SArjun Roy if (mappable_offset) { 216198917cf0SArjun Roy zc->recv_skip_hint = mappable_offset; 216205255b82SEric Dumazet break; 2163789762ceSSoheil Hassas Yeganeh } 216494ab9eb9SArjun Roy page = skb_frag_page(frags); 216594ab9eb9SArjun Roy prefetchw(page); 216694ab9eb9SArjun Roy pages[pages_to_map++] = page; 216705255b82SEric Dumazet length += PAGE_SIZE; 216805255b82SEric Dumazet zc->recv_skip_hint -= PAGE_SIZE; 216905255b82SEric Dumazet frags++; 217094ab9eb9SArjun Roy if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE || 217194ab9eb9SArjun Roy zc->recv_skip_hint < PAGE_SIZE) { 217294ab9eb9SArjun Roy /* Either full batch, or we're about to go to next skb 217394ab9eb9SArjun Roy * (and we cannot unroll failed ops across skbs). 
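 * Up to TCP_ZEROCOPY_PAGE_BATCH_SIZE (32) pages go into one
 * vm_insert_pages() call. The whole path is driven from userspace
 * roughly as in this hedged sketch (error handling elided; see
 * tools/testing/selftests/net/tcp_mmap.c for a complete user):
 *
 *	addr = mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0);
 *	struct tcp_zerocopy_receive zc = {
 *		.address = (__u64)(unsigned long)addr,
 *		.length  = chunk,
 *	};
 *	socklen_t zlen = sizeof(zc);
 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zlen);
 *
 * after which zc.length bytes are readable at addr and the remaining
 * zc.recv_skip_hint bytes must be fetched with recvmsg().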
217494ab9eb9SArjun Roy */ 217594ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, 217694ab9eb9SArjun Roy pages_to_map, 217794ab9eb9SArjun Roy &address, &length, 217894ab9eb9SArjun Roy &seq, zc, 217994ab9eb9SArjun Roy total_bytes_to_map); 21803763a24cSArjun Roy if (ret) 21813763a24cSArjun Roy goto out; 218294ab9eb9SArjun Roy pages_to_map = 0; 21833763a24cSArjun Roy } 21843763a24cSArjun Roy } 218594ab9eb9SArjun Roy if (pages_to_map) { 218694ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map, 218794ab9eb9SArjun Roy &address, &length, &seq, 218894ab9eb9SArjun Roy zc, total_bytes_to_map); 218993ab6cc6SEric Dumazet } 219005255b82SEric Dumazet out: 21917a7f0946SArjun Roy if (mmap_locked) 2192d8ed45c5SMichel Lespinasse mmap_read_unlock(current->mm); 21937a7f0946SArjun Roy else 21947a7f0946SArjun Roy vma_end_read(vma); 219518fb76edSArjun Roy /* Try to copy straggler data. */ 219618fb76edSArjun Roy if (!ret) 21977eeba170SArjun Roy copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss); 219818fb76edSArjun Roy 219918fb76edSArjun Roy if (length + copylen) { 22007db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq); 220193ab6cc6SEric Dumazet tcp_rcv_space_adjust(sk); 220293ab6cc6SEric Dumazet 220393ab6cc6SEric Dumazet /* Clean up data we have read: This will do ACK frames. */ 220493ab6cc6SEric Dumazet tcp_recv_skb(sk, seq, &offset); 220518fb76edSArjun Roy tcp_cleanup_rbuf(sk, length + copylen); 220693ab6cc6SEric Dumazet ret = 0; 220705255b82SEric Dumazet if (length == zc->length) 220805255b82SEric Dumazet zc->recv_skip_hint = 0; 220905255b82SEric Dumazet } else { 221005255b82SEric Dumazet if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE)) 221105255b82SEric Dumazet ret = -EIO; 221205255b82SEric Dumazet } 221305255b82SEric Dumazet zc->length = length; 221493ab6cc6SEric Dumazet return ret; 221593ab6cc6SEric Dumazet } 221605255b82SEric Dumazet #endif 221793ab6cc6SEric Dumazet 221898aaa913SMike Maloney /* Similar to __sock_recv_timestamp, but does not require an skb */ 2219892bfd3dSFlorian Westphal void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, 22209718475eSDeepa Dinamani struct scm_timestamping_internal *tss) 222198aaa913SMike Maloney { 2222887feae3SDeepa Dinamani int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW); 222398aaa913SMike Maloney bool has_timestamping = false; 222498aaa913SMike Maloney 222598aaa913SMike Maloney if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { 222698aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMP)) { 222798aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { 2228887feae3SDeepa Dinamani if (new_tstamp) { 2229df1b4ba9SArnd Bergmann struct __kernel_timespec kts = { 2230df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2231df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec, 2232df1b4ba9SArnd Bergmann }; 2233887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW, 2234887feae3SDeepa Dinamani sizeof(kts), &kts); 2235887feae3SDeepa Dinamani } else { 2236df1b4ba9SArnd Bergmann struct __kernel_old_timespec ts_old = { 2237df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2238df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec, 2239df1b4ba9SArnd Bergmann }; 22407f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD, 22419718475eSDeepa Dinamani sizeof(ts_old), &ts_old); 2242887feae3SDeepa Dinamani } 224398aaa913SMike Maloney } else { 2244887feae3SDeepa Dinamani if (new_tstamp) { 2245df1b4ba9SArnd Bergmann struct __kernel_sock_timeval stv = { 2246df1b4ba9SArnd Bergmann .tv_sec = 
tss->ts[0].tv_sec, 2247df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000, 2248df1b4ba9SArnd Bergmann }; 2249887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, 2250887feae3SDeepa Dinamani sizeof(stv), &stv); 2251887feae3SDeepa Dinamani } else { 2252df1b4ba9SArnd Bergmann struct __kernel_old_timeval tv = { 2253df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec, 2254df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000, 2255df1b4ba9SArnd Bergmann }; 22567f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, 225798aaa913SMike Maloney sizeof(tv), &tv); 225898aaa913SMike Maloney } 225998aaa913SMike Maloney } 2260887feae3SDeepa Dinamani } 226198aaa913SMike Maloney 2262*e3390b30SEric Dumazet if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE) 226398aaa913SMike Maloney has_timestamping = true; 226498aaa913SMike Maloney else 22659718475eSDeepa Dinamani tss->ts[0] = (struct timespec64) {0}; 226698aaa913SMike Maloney } 226798aaa913SMike Maloney 226898aaa913SMike Maloney if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { 2269*e3390b30SEric Dumazet if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE) 227098aaa913SMike Maloney has_timestamping = true; 227198aaa913SMike Maloney else 22729718475eSDeepa Dinamani tss->ts[2] = (struct timespec64) {0}; 227398aaa913SMike Maloney } 227498aaa913SMike Maloney 227598aaa913SMike Maloney if (has_timestamping) { 22769718475eSDeepa Dinamani tss->ts[1] = (struct timespec64) {0}; 22779718475eSDeepa Dinamani if (sock_flag(sk, SOCK_TSTAMP_NEW)) 22789718475eSDeepa Dinamani put_cmsg_scm_timestamping64(msg, tss); 22799718475eSDeepa Dinamani else 22809718475eSDeepa Dinamani put_cmsg_scm_timestamping(msg, tss); 228198aaa913SMike Maloney } 228298aaa913SMike Maloney } 228398aaa913SMike Maloney 2284b75eba76SSoheil Hassas Yeganeh static int tcp_inq_hint(struct sock *sk) 2285b75eba76SSoheil Hassas Yeganeh { 2286b75eba76SSoheil Hassas Yeganeh const struct tcp_sock *tp = tcp_sk(sk); 2287b75eba76SSoheil Hassas Yeganeh u32 copied_seq = READ_ONCE(tp->copied_seq); 2288b75eba76SSoheil Hassas Yeganeh u32 rcv_nxt = READ_ONCE(tp->rcv_nxt); 2289b75eba76SSoheil Hassas Yeganeh int inq; 2290b75eba76SSoheil Hassas Yeganeh 2291b75eba76SSoheil Hassas Yeganeh inq = rcv_nxt - copied_seq; 2292b75eba76SSoheil Hassas Yeganeh if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) { 2293b75eba76SSoheil Hassas Yeganeh lock_sock(sk); 2294b75eba76SSoheil Hassas Yeganeh inq = tp->rcv_nxt - tp->copied_seq; 2295b75eba76SSoheil Hassas Yeganeh release_sock(sk); 2296b75eba76SSoheil Hassas Yeganeh } 22976466e715SSoheil Hassas Yeganeh /* After receiving a FIN, tell the user-space to continue reading 22986466e715SSoheil Hassas Yeganeh * by returning a non-zero inq. 22996466e715SSoheil Hassas Yeganeh */ 23006466e715SSoheil Hassas Yeganeh if (inq == 0 && sock_flag(sk, SOCK_DONE)) 23016466e715SSoheil Hassas Yeganeh inq = 1; 2302b75eba76SSoheil Hassas Yeganeh return inq; 2303b75eba76SSoheil Hassas Yeganeh } 2304b75eba76SSoheil Hassas Yeganeh 23051da177e4SLinus Torvalds /* 23061da177e4SLinus Torvalds * This routine copies from a sock struct into the user buffer. 23071da177e4SLinus Torvalds * 23081da177e4SLinus Torvalds * Technical note: in 2.3 we work on _locked_ socket, so that 23091da177e4SLinus Torvalds * tricks with *seq access order and skb->users are not required. 23101da177e4SLinus Torvalds * Probably, code can be easily improved even more. 
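 *
 * With the TCP_INQ socket option set, tp->recvmsg_inq makes this path
 * attach an in-queue hint (computed by tcp_inq_hint() above) as a
 * control message. Hedged userspace sketch:
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	then walk CMSG_FIRSTHDR()/CMSG_NXTHDR() for cmsg_level ==
 *	SOL_TCP and cmsg_type == TCP_INQ to read the residual byte
 *	count without an extra ioctl(FIONREAD).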
23111da177e4SLinus Torvalds */ 23121da177e4SLinus Torvalds 23132cd81161SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len, 2314ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss, 23152cd81161SArjun Roy int *cmsg_flags) 23161da177e4SLinus Torvalds { 23171da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 23181da177e4SLinus Torvalds int copied = 0; 23191da177e4SLinus Torvalds u32 peek_seq; 23201da177e4SLinus Torvalds u32 *seq; 23211da177e4SLinus Torvalds unsigned long used; 23222cd81161SArjun Roy int err; 23231da177e4SLinus Torvalds int target; /* Read at least this many bytes */ 23241da177e4SLinus Torvalds long timeo; 2325dfbafc99SSabrina Dubroca struct sk_buff *skb, *last; 232677527313SIlpo Järvinen u32 urg_hole = 0; 23271da177e4SLinus Torvalds 23281da177e4SLinus Torvalds err = -ENOTCONN; 23291da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) 23301da177e4SLinus Torvalds goto out; 23311da177e4SLinus Torvalds 2332f94fd25cSJens Axboe if (tp->recvmsg_inq) { 2333925bba24SArjun Roy *cmsg_flags = TCP_CMSG_INQ; 2334f94fd25cSJens Axboe msg->msg_get_inq = 1; 2335f94fd25cSJens Axboe } 2336ec095263SOliver Hartkopp timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 23371da177e4SLinus Torvalds 23381da177e4SLinus Torvalds /* Urgent data needs to be handled specially. */ 23391da177e4SLinus Torvalds if (flags & MSG_OOB) 23401da177e4SLinus Torvalds goto recv_urg; 23411da177e4SLinus Torvalds 2342c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) { 2343c0e88ff0SPavel Emelyanov err = -EPERM; 2344c0e88ff0SPavel Emelyanov if (!(flags & MSG_PEEK)) 2345c0e88ff0SPavel Emelyanov goto out; 2346c0e88ff0SPavel Emelyanov 2347c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE) 2348c0e88ff0SPavel Emelyanov goto recv_sndq; 2349c0e88ff0SPavel Emelyanov 2350c0e88ff0SPavel Emelyanov err = -EINVAL; 2351c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE) 2352c0e88ff0SPavel Emelyanov goto out; 2353c0e88ff0SPavel Emelyanov 2354c0e88ff0SPavel Emelyanov /* 'common' recv queue MSG_PEEK-ing */ 2355c0e88ff0SPavel Emelyanov } 2356c0e88ff0SPavel Emelyanov 23571da177e4SLinus Torvalds seq = &tp->copied_seq; 23581da177e4SLinus Torvalds if (flags & MSG_PEEK) { 23591da177e4SLinus Torvalds peek_seq = tp->copied_seq; 23601da177e4SLinus Torvalds seq = &peek_seq; 23611da177e4SLinus Torvalds } 23621da177e4SLinus Torvalds 23631da177e4SLinus Torvalds target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); 23641da177e4SLinus Torvalds 23651da177e4SLinus Torvalds do { 23661da177e4SLinus Torvalds u32 offset; 23671da177e4SLinus Torvalds 23681da177e4SLinus Torvalds /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ 2369b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && tp->urg_seq == *seq) { 23701da177e4SLinus Torvalds if (copied) 23711da177e4SLinus Torvalds break; 23721da177e4SLinus Torvalds if (signal_pending(current)) { 23731da177e4SLinus Torvalds copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; 23741da177e4SLinus Torvalds break; 23751da177e4SLinus Torvalds } 23761da177e4SLinus Torvalds } 23771da177e4SLinus Torvalds 23781da177e4SLinus Torvalds /* Next get a buffer. */ 23791da177e4SLinus Torvalds 2380dfbafc99SSabrina Dubroca last = skb_peek_tail(&sk->sk_receive_queue); 238191521944SDavid S. Miller skb_queue_walk(&sk->sk_receive_queue, skb) { 2382dfbafc99SSabrina Dubroca last = skb; 23831da177e4SLinus Torvalds /* Now that we have two receive queues this 23841da177e4SLinus Torvalds * shouldn't happen. 
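 * Since segments are queued to sk_receive_queue strictly in order,
 * *seq falling before the head skb's starting sequence would mean a
 * gap of bytes that can never be copied; the WARN() below flags such
 * an accounting bug instead of silently looping.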
23851da177e4SLinus Torvalds */ 2386d792c100SIlpo Järvinen if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), 2387e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n", 23882af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, 2389d792c100SIlpo Järvinen flags)) 23901da177e4SLinus Torvalds break; 2391d792c100SIlpo Järvinen 23921da177e4SLinus Torvalds offset = *seq - TCP_SKB_CB(skb)->seq; 23939d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { 23949d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__); 23951da177e4SLinus Torvalds offset--; 23969d691539SEric Dumazet } 23971da177e4SLinus Torvalds if (offset < skb->len) 23981da177e4SLinus Torvalds goto found_ok_skb; 2399e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 24001da177e4SLinus Torvalds goto found_fin_ok; 24012af6fd8bSJoe Perches WARN(!(flags & MSG_PEEK), 2402e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n", 24032af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); 240491521944SDavid S. Miller } 24051da177e4SLinus Torvalds 24061da177e4SLinus Torvalds /* Well, if we have backlog, try to process it now yet. */ 24071da177e4SLinus Torvalds 24089ed498c6SEric Dumazet if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) 24091da177e4SLinus Torvalds break; 24101da177e4SLinus Torvalds 24111da177e4SLinus Torvalds if (copied) { 24128bd172b7SEric Dumazet if (!timeo || 24138bd172b7SEric Dumazet sk->sk_err || 24141da177e4SLinus Torvalds sk->sk_state == TCP_CLOSE || 24151da177e4SLinus Torvalds (sk->sk_shutdown & RCV_SHUTDOWN) || 2416518a09efSDavid S. Miller signal_pending(current)) 24171da177e4SLinus Torvalds break; 24181da177e4SLinus Torvalds } else { 24191da177e4SLinus Torvalds if (sock_flag(sk, SOCK_DONE)) 24201da177e4SLinus Torvalds break; 24211da177e4SLinus Torvalds 24221da177e4SLinus Torvalds if (sk->sk_err) { 24231da177e4SLinus Torvalds copied = sock_error(sk); 24241da177e4SLinus Torvalds break; 24251da177e4SLinus Torvalds } 24261da177e4SLinus Torvalds 24271da177e4SLinus Torvalds if (sk->sk_shutdown & RCV_SHUTDOWN) 24281da177e4SLinus Torvalds break; 24291da177e4SLinus Torvalds 24301da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE) { 24311da177e4SLinus Torvalds /* This occurs when user tries to read 24321da177e4SLinus Torvalds * from never connected socket. 24331da177e4SLinus Torvalds */ 24341da177e4SLinus Torvalds copied = -ENOTCONN; 24351da177e4SLinus Torvalds break; 24361da177e4SLinus Torvalds } 24371da177e4SLinus Torvalds 24381da177e4SLinus Torvalds if (!timeo) { 24391da177e4SLinus Torvalds copied = -EAGAIN; 24401da177e4SLinus Torvalds break; 24411da177e4SLinus Torvalds } 24421da177e4SLinus Torvalds 24431da177e4SLinus Torvalds if (signal_pending(current)) { 24441da177e4SLinus Torvalds copied = sock_intr_errno(timeo); 24451da177e4SLinus Torvalds break; 24461da177e4SLinus Torvalds } 24471da177e4SLinus Torvalds } 24481da177e4SLinus Torvalds 24491da177e4SLinus Torvalds if (copied >= target) { 24501da177e4SLinus Torvalds /* Do not sleep, just process backlog. 
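 * Enough data has been copied to satisfy the caller (copied >= target),
 * so instead of blocking in sk_wait_data() just drain whatever piled up
 * in the socket backlog while we were copying under the lock.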
*/ 245193afcfd1SEric Dumazet __sk_flush_backlog(sk); 2452dfbafc99SSabrina Dubroca } else { 245329fbc26eSEric Dumazet tcp_cleanup_rbuf(sk, copied); 2454dfbafc99SSabrina Dubroca sk_wait_data(sk, &timeo, last); 2455dfbafc99SSabrina Dubroca } 24561da177e4SLinus Torvalds 245777527313SIlpo Järvinen if ((flags & MSG_PEEK) && 245877527313SIlpo Järvinen (peek_seq - copied - urg_hole != tp->copied_seq)) { 2459e87cc472SJoe Perches net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", 2460e87cc472SJoe Perches current->comm, 2461e87cc472SJoe Perches task_pid_nr(current)); 24621da177e4SLinus Torvalds peek_seq = tp->copied_seq; 24631da177e4SLinus Torvalds } 24641da177e4SLinus Torvalds continue; 24651da177e4SLinus Torvalds 24661da177e4SLinus Torvalds found_ok_skb: 24671da177e4SLinus Torvalds /* Ok so how much can we use? */ 24681da177e4SLinus Torvalds used = skb->len - offset; 24691da177e4SLinus Torvalds if (len < used) 24701da177e4SLinus Torvalds used = len; 24711da177e4SLinus Torvalds 24721da177e4SLinus Torvalds /* Do we have urgent data here? */ 2473b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) { 24741da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - *seq; 24751da177e4SLinus Torvalds if (urg_offset < used) { 24761da177e4SLinus Torvalds if (!urg_offset) { 24771da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_URGINLINE)) { 24787db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1); 247977527313SIlpo Järvinen urg_hole++; 24801da177e4SLinus Torvalds offset++; 24811da177e4SLinus Torvalds used--; 24821da177e4SLinus Torvalds if (!used) 24831da177e4SLinus Torvalds goto skip_copy; 24841da177e4SLinus Torvalds } 24851da177e4SLinus Torvalds } else 24861da177e4SLinus Torvalds used = urg_offset; 24871da177e4SLinus Torvalds } 24881da177e4SLinus Torvalds } 24891da177e4SLinus Torvalds 24901da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) { 249151f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, offset, msg, used); 24921da177e4SLinus Torvalds if (err) { 24931da177e4SLinus Torvalds /* Exception. Bailout! */ 24941da177e4SLinus Torvalds if (!copied) 24951da177e4SLinus Torvalds copied = -EFAULT; 24961da177e4SLinus Torvalds break; 24971da177e4SLinus Torvalds } 24981da177e4SLinus Torvalds } 24991da177e4SLinus Torvalds 25007db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + used); 25011da177e4SLinus Torvalds copied += used; 25021da177e4SLinus Torvalds len -= used; 25031da177e4SLinus Torvalds 25041da177e4SLinus Torvalds tcp_rcv_space_adjust(sk); 25051da177e4SLinus Torvalds 25061da177e4SLinus Torvalds skip_copy: 2507b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) { 25087b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0); 250931770e34SFlorian Westphal tcp_fast_path_check(sk); 251031770e34SFlorian Westphal } 25111da177e4SLinus Torvalds 251298aaa913SMike Maloney if (TCP_SKB_CB(skb)->has_rxtstamp) { 25132cd81161SArjun Roy tcp_update_recv_tstamps(skb, tss); 2514925bba24SArjun Roy *cmsg_flags |= TCP_CMSG_TS; 251598aaa913SMike Maloney } 2516cc4de047SKelly Littlepage 2517cc4de047SKelly Littlepage if (used + offset < skb->len) 2518cc4de047SKelly Littlepage continue; 2519cc4de047SKelly Littlepage 2520e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 25211da177e4SLinus Torvalds goto found_fin_ok; 25227bced397SDan Williams if (!(flags & MSG_PEEK)) 25233df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 25241da177e4SLinus Torvalds continue; 25251da177e4SLinus Torvalds 25261da177e4SLinus Torvalds found_fin_ok: 25271da177e4SLinus Torvalds /* Process the FIN. 
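 * A FIN consumes one unit of sequence space, so step the copied
 * sequence past it before releasing the skb (unless we are peeking).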
*/ 25287db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1); 25297bced397SDan Williams if (!(flags & MSG_PEEK)) 25303df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb); 25311da177e4SLinus Torvalds break; 25321da177e4SLinus Torvalds } while (len > 0); 25331da177e4SLinus Torvalds 25341da177e4SLinus Torvalds /* According to UNIX98, msg_name/msg_namelen are ignored 25351da177e4SLinus Torvalds * on connected socket. I was just happy when found this 8) --ANK 25361da177e4SLinus Torvalds */ 25371da177e4SLinus Torvalds 25381da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */ 25390e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied); 25401da177e4SLinus Torvalds return copied; 25411da177e4SLinus Torvalds 25421da177e4SLinus Torvalds out: 25431da177e4SLinus Torvalds return err; 25441da177e4SLinus Torvalds 25451da177e4SLinus Torvalds recv_urg: 2546377f0a08SRami Rosen err = tcp_recv_urg(sk, msg, len, flags); 25471da177e4SLinus Torvalds goto out; 2548c0e88ff0SPavel Emelyanov 2549c0e88ff0SPavel Emelyanov recv_sndq: 2550c0e88ff0SPavel Emelyanov err = tcp_peek_sndq(sk, msg, len); 2551c0e88ff0SPavel Emelyanov goto out; 25521da177e4SLinus Torvalds } 25532cd81161SArjun Roy 2554ec095263SOliver Hartkopp int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, 2555ec095263SOliver Hartkopp int *addr_len) 25562cd81161SArjun Roy { 2557f94fd25cSJens Axboe int cmsg_flags = 0, ret; 25582cd81161SArjun Roy struct scm_timestamping_internal tss; 25592cd81161SArjun Roy 25602cd81161SArjun Roy if (unlikely(flags & MSG_ERRQUEUE)) 25612cd81161SArjun Roy return inet_recv_error(sk, msg, len, addr_len); 25622cd81161SArjun Roy 25632cd81161SArjun Roy if (sk_can_busy_loop(sk) && 25642cd81161SArjun Roy skb_queue_empty_lockless(&sk->sk_receive_queue) && 25652cd81161SArjun Roy sk->sk_state == TCP_ESTABLISHED) 2566ec095263SOliver Hartkopp sk_busy_loop(sk, flags & MSG_DONTWAIT); 25672cd81161SArjun Roy 25682cd81161SArjun Roy lock_sock(sk); 2569ec095263SOliver Hartkopp ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags); 25702cd81161SArjun Roy release_sock(sk); 25712cd81161SArjun Roy 2572f94fd25cSJens Axboe if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) { 2573925bba24SArjun Roy if (cmsg_flags & TCP_CMSG_TS) 25742cd81161SArjun Roy tcp_recv_timestamp(msg, sk, &tss); 2575f94fd25cSJens Axboe if (msg->msg_get_inq) { 2576f94fd25cSJens Axboe msg->msg_inq = tcp_inq_hint(sk); 2577f94fd25cSJens Axboe if (cmsg_flags & TCP_CMSG_INQ) 2578f94fd25cSJens Axboe put_cmsg(msg, SOL_TCP, TCP_CM_INQ, 2579f94fd25cSJens Axboe sizeof(msg->msg_inq), &msg->msg_inq); 25802cd81161SArjun Roy } 25812cd81161SArjun Roy } 25822cd81161SArjun Roy return ret; 25832cd81161SArjun Roy } 25844bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_recvmsg); 25851da177e4SLinus Torvalds 2586490d5046SIlpo Järvinen void tcp_set_state(struct sock *sk, int state) 2587490d5046SIlpo Järvinen { 2588490d5046SIlpo Järvinen int oldstate = sk->sk_state; 2589490d5046SIlpo Järvinen 2590d4487491SLawrence Brakmo /* We defined a new enum for TCP states that are exported in BPF 2591d4487491SLawrence Brakmo * so as not force the internal TCP states to be frozen. The 2592d4487491SLawrence Brakmo * following checks will detect if an internal state value ever 2593d4487491SLawrence Brakmo * differs from the BPF value. If this ever happens, then we will 2594d4487491SLawrence Brakmo * need to remap the internal value to the BPF value before calling 2595d4487491SLawrence Brakmo * tcp_call_bpf_2arg. 
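 * The BUILD_BUG_ON()s below turn any such divergence between the two
 * enums into a compile-time failure rather than a silent runtime
 * mismatch.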
2596d4487491SLawrence Brakmo */ 2597d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED); 2598d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT); 2599d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV); 2600d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1); 2601d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2); 2602d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT); 2603d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE); 2604d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT); 2605d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK); 2606d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN); 2607d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING); 2608d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV); 2609d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES); 2610d4487491SLawrence Brakmo 261197a19cafSYonghong Song /* bpf uapi header bpf.h defines an anonymous enum with values 261297a19cafSYonghong Song * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux 261397a19cafSYonghong Song * is able to emit this enum in DWARF due to the above BUILD_BUG_ON. 261497a19cafSYonghong Song * But clang built vmlinux does not have this enum in DWARF 261597a19cafSYonghong Song * since clang removes the above code before generating IR/debuginfo. 261697a19cafSYonghong Song * Let us explicitly emit the type debuginfo to ensure the 261797a19cafSYonghong Song * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF 261897a19cafSYonghong Song * regardless of which compiler is used. 261997a19cafSYonghong Song */ 262097a19cafSYonghong Song BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED); 262197a19cafSYonghong Song 2622d4487491SLawrence Brakmo if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG)) 2623d4487491SLawrence Brakmo tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state); 2624e8fce239SSong Liu 2625490d5046SIlpo Järvinen switch (state) { 2626490d5046SIlpo Järvinen case TCP_ESTABLISHED: 2627490d5046SIlpo Järvinen if (oldstate != TCP_ESTABLISHED) 262881cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2629490d5046SIlpo Järvinen break; 2630490d5046SIlpo Järvinen 2631490d5046SIlpo Järvinen case TCP_CLOSE: 2632490d5046SIlpo Järvinen if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) 263381cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); 2634490d5046SIlpo Järvinen 2635490d5046SIlpo Järvinen sk->sk_prot->unhash(sk); 2636490d5046SIlpo Järvinen if (inet_csk(sk)->icsk_bind_hash && 2637490d5046SIlpo Järvinen !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 2638ab1e0a13SArnaldo Carvalho de Melo inet_put_port(sk); 2639a8eceea8SJoe Perches fallthrough; 2640490d5046SIlpo Järvinen default: 2641490d5046SIlpo Järvinen if (oldstate == TCP_ESTABLISHED) 264274688e48SPavel Emelyanov TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); 2643490d5046SIlpo Järvinen } 2644490d5046SIlpo Järvinen 2645490d5046SIlpo Järvinen /* Change state AFTER socket is unhashed to avoid closed 2646490d5046SIlpo Järvinen * socket sitting in hash tables. 
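 * inet_sk_state_store() publishes the new state with store-release
 * semantics, pairing with lockless inet_sk_state_load() readers.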
2647490d5046SIlpo Järvinen */ 2648563e0bb0SYafang Shao inet_sk_state_store(sk, state); 2649490d5046SIlpo Järvinen } 2650490d5046SIlpo Järvinen EXPORT_SYMBOL_GPL(tcp_set_state); 2651490d5046SIlpo Järvinen 26521da177e4SLinus Torvalds /* 26531da177e4SLinus Torvalds * State processing on a close. This implements the state shift for 26541da177e4SLinus Torvalds * sending our FIN frame. Note that we only send a FIN for some 26551da177e4SLinus Torvalds * states. A shutdown() may have already sent the FIN, or we may be 26561da177e4SLinus Torvalds * closed. 26571da177e4SLinus Torvalds */ 26581da177e4SLinus Torvalds 26599b5b5cffSArjan van de Ven static const unsigned char new_state[16] = { 26601da177e4SLinus Torvalds /* current state: new state: action: */ 26610980c1e3SEric Dumazet [0 /* (Invalid) */] = TCP_CLOSE, 26620980c1e3SEric Dumazet [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 26630980c1e3SEric Dumazet [TCP_SYN_SENT] = TCP_CLOSE, 26640980c1e3SEric Dumazet [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, 26650980c1e3SEric Dumazet [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, 26660980c1e3SEric Dumazet [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, 26670980c1e3SEric Dumazet [TCP_TIME_WAIT] = TCP_CLOSE, 26680980c1e3SEric Dumazet [TCP_CLOSE] = TCP_CLOSE, 26690980c1e3SEric Dumazet [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, 26700980c1e3SEric Dumazet [TCP_LAST_ACK] = TCP_LAST_ACK, 26710980c1e3SEric Dumazet [TCP_LISTEN] = TCP_CLOSE, 26720980c1e3SEric Dumazet [TCP_CLOSING] = TCP_CLOSING, 26730980c1e3SEric Dumazet [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ 26741da177e4SLinus Torvalds }; 26751da177e4SLinus Torvalds 26761da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk) 26771da177e4SLinus Torvalds { 26781da177e4SLinus Torvalds int next = (int)new_state[sk->sk_state]; 26791da177e4SLinus Torvalds int ns = next & TCP_STATE_MASK; 26801da177e4SLinus Torvalds 26811da177e4SLinus Torvalds tcp_set_state(sk, ns); 26821da177e4SLinus Torvalds 26831da177e4SLinus Torvalds return next & TCP_ACTION_FIN; 26841da177e4SLinus Torvalds } 26851da177e4SLinus Torvalds 26861da177e4SLinus Torvalds /* 26871da177e4SLinus Torvalds * Shutdown the sending side of a connection. Much like close except 26881f29b058SSatoru SATOH * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD). 26891da177e4SLinus Torvalds */ 26901da177e4SLinus Torvalds 26911da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how) 26921da177e4SLinus Torvalds { 26931da177e4SLinus Torvalds /* We need to grab some memory, and put together a FIN, 26941da177e4SLinus Torvalds * and then put it into the queue to be sent. 26951da177e4SLinus Torvalds * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. 26961da177e4SLinus Torvalds */ 26971da177e4SLinus Torvalds if (!(how & SEND_SHUTDOWN)) 26981da177e4SLinus Torvalds return; 26991da177e4SLinus Torvalds 27001da177e4SLinus Torvalds /* If we've already sent a FIN, or it's a closed state, skip this. */ 27011da177e4SLinus Torvalds if ((1 << sk->sk_state) & 27021da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_SYN_SENT | 27031da177e4SLinus Torvalds TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { 27041da177e4SLinus Torvalds /* Clear out any half completed packets. FIN if needed. 
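 * tcp_close_state() switches sk to the proper next state and returns
 * nonzero iff new_state[] has TCP_ACTION_FIN set for this transition,
 * i.e. a FIN still needs to go out.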
*/ 27051da177e4SLinus Torvalds if (tcp_close_state(sk)) 27061da177e4SLinus Torvalds tcp_send_fin(sk); 27071da177e4SLinus Torvalds } 27081da177e4SLinus Torvalds } 27094bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_shutdown); 27101da177e4SLinus Torvalds 271119757cebSEric Dumazet int tcp_orphan_count_sum(void) 271219757cebSEric Dumazet { 271319757cebSEric Dumazet int i, total = 0; 271419757cebSEric Dumazet 271519757cebSEric Dumazet for_each_possible_cpu(i) 271619757cebSEric Dumazet total += per_cpu(tcp_orphan_count, i); 271719757cebSEric Dumazet 271819757cebSEric Dumazet return max(total, 0); 271919757cebSEric Dumazet } 272019757cebSEric Dumazet 272119757cebSEric Dumazet static int tcp_orphan_cache; 272219757cebSEric Dumazet static struct timer_list tcp_orphan_timer; 272319757cebSEric Dumazet #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100) 272419757cebSEric Dumazet 272519757cebSEric Dumazet static void tcp_orphan_update(struct timer_list *unused) 272619757cebSEric Dumazet { 272719757cebSEric Dumazet WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum()); 272819757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD); 272919757cebSEric Dumazet } 273019757cebSEric Dumazet 273119757cebSEric Dumazet static bool tcp_too_many_orphans(int shift) 273219757cebSEric Dumazet { 273347e6ab24SKuniyuki Iwashima return READ_ONCE(tcp_orphan_cache) << shift > 273447e6ab24SKuniyuki Iwashima READ_ONCE(sysctl_tcp_max_orphans); 273519757cebSEric Dumazet } 273619757cebSEric Dumazet 2737efcdbf24SArun Sharma bool tcp_check_oom(struct sock *sk, int shift) 2738efcdbf24SArun Sharma { 2739efcdbf24SArun Sharma bool too_many_orphans, out_of_socket_memory; 2740efcdbf24SArun Sharma 274119757cebSEric Dumazet too_many_orphans = tcp_too_many_orphans(shift); 2742efcdbf24SArun Sharma out_of_socket_memory = tcp_out_of_memory(sk); 2743efcdbf24SArun Sharma 2744e87cc472SJoe Perches if (too_many_orphans) 2745e87cc472SJoe Perches net_info_ratelimited("too many orphaned sockets\n"); 2746e87cc472SJoe Perches if (out_of_socket_memory) 2747e87cc472SJoe Perches net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); 2748efcdbf24SArun Sharma return too_many_orphans || out_of_socket_memory; 2749efcdbf24SArun Sharma } 2750efcdbf24SArun Sharma 275177c3c956SPaolo Abeni void __tcp_close(struct sock *sk, long timeout) 27521da177e4SLinus Torvalds { 27531da177e4SLinus Torvalds struct sk_buff *skb; 27541da177e4SLinus Torvalds int data_was_unread = 0; 275575c2d907SHerbert Xu int state; 27561da177e4SLinus Torvalds 2757e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 27581da177e4SLinus Torvalds 27591da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) { 27601da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 27611da177e4SLinus Torvalds 27621da177e4SLinus Torvalds /* Special case. */ 27630a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk); 27641da177e4SLinus Torvalds 27651da177e4SLinus Torvalds goto adjudge_to_death; 27661da177e4SLinus Torvalds } 27671da177e4SLinus Torvalds 27681da177e4SLinus Torvalds /* We need to flush the recv. buffs. We do this only on the 27691da177e4SLinus Torvalds * descriptor close, not protocol-sourced closes, because the 27701da177e4SLinus Torvalds * reader process may not have drained the data yet! 
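 * A FIN occupies one sequence number but carries no user data, which
 * is why the loop below does not count it in data_was_unread.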
27711da177e4SLinus Torvalds */ 27721da177e4SLinus Torvalds while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { 2773e11ecddfSEric Dumazet u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq; 2774e11ecddfSEric Dumazet 2775e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 2776e11ecddfSEric Dumazet len--; 27771da177e4SLinus Torvalds data_was_unread += len; 27781da177e4SLinus Torvalds __kfree_skb(skb); 27791da177e4SLinus Torvalds } 27801da177e4SLinus Torvalds 2781565b7b2dSKonstantin Khorenko /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */ 2782565b7b2dSKonstantin Khorenko if (sk->sk_state == TCP_CLOSE) 2783565b7b2dSKonstantin Khorenko goto adjudge_to_death; 2784565b7b2dSKonstantin Khorenko 278565bb723cSGerrit Renker /* As outlined in RFC 2525, section 2.17, we send a RST here because 278665bb723cSGerrit Renker * data was lost. To witness the awful effects of the old behavior of 278765bb723cSGerrit Renker * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 278865bb723cSGerrit Renker * GET in an FTP client, suspend the process, wait for the client to 278965bb723cSGerrit Renker * advertise a zero window, then kill -9 the FTP client, wheee... 279065bb723cSGerrit Renker * Note: timeout is always zero in such a case. 27911da177e4SLinus Torvalds */ 2792ee995283SPavel Emelyanov if (unlikely(tcp_sk(sk)->repair)) { 2793ee995283SPavel Emelyanov sk->sk_prot->disconnect(sk, 0); 2794ee995283SPavel Emelyanov } else if (data_was_unread) { 27951da177e4SLinus Torvalds /* Unread data was tossed, zap the connection. */ 27966aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 27971da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 2798aa133076SWu Fengguang tcp_send_active_reset(sk, sk->sk_allocation); 27991da177e4SLinus Torvalds } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 28001da177e4SLinus Torvalds /* Check zero linger _after_ checking for unread data. */ 28011da177e4SLinus Torvalds sk->sk_prot->disconnect(sk, 0); 28026aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); 28031da177e4SLinus Torvalds } else if (tcp_close_state(sk)) { 28041da177e4SLinus Torvalds /* We FIN if the application ate all the data before 28051da177e4SLinus Torvalds * zapping the connection. 28061da177e4SLinus Torvalds */ 28071da177e4SLinus Torvalds 28081da177e4SLinus Torvalds /* RED-PEN. Formally speaking, we have broken TCP state 28091da177e4SLinus Torvalds * machine. State transitions: 28101da177e4SLinus Torvalds * 28111da177e4SLinus Torvalds * TCP_ESTABLISHED -> TCP_FIN_WAIT1 28121da177e4SLinus Torvalds * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) 28131da177e4SLinus Torvalds * TCP_CLOSE_WAIT -> TCP_LAST_ACK 28141da177e4SLinus Torvalds * 28151da177e4SLinus Torvalds * are legal only when FIN has been sent (i.e. in window), 28161da177e4SLinus Torvalds * rather than queued out of window. Purists blame. 28171da177e4SLinus Torvalds * 28181da177e4SLinus Torvalds * F.e. "RFC state" is ESTABLISHED, 28191da177e4SLinus Torvalds * if Linux state is FIN-WAIT-1, but FIN is still not sent. 
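 * (e.g. tcp_close_state() has already moved us to FIN-WAIT-1 while the
 * FIN queued by tcp_send_fin() below may still sit outside the send
 * window, unsent.)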
28201da177e4SLinus Torvalds * 28211da177e4SLinus Torvalds * The visible declinations are that sometimes 28221da177e4SLinus Torvalds * we enter time-wait state, when it is not required really 28231da177e4SLinus Torvalds * (harmless), do not send active resets, when they are 28241da177e4SLinus Torvalds * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when 28251da177e4SLinus Torvalds * they look as CLOSING or LAST_ACK for Linux) 28261da177e4SLinus Torvalds * Probably, I missed some more holelets. 28271da177e4SLinus Torvalds * --ANK 28288336886fSJerry Chu * XXX (TFO) - To start off we don't support SYN+ACK+FIN 28298336886fSJerry Chu * in a single packet! (May consider it later but will 28308336886fSJerry Chu * probably need API support or TCP_CORK SYN-ACK until 28318336886fSJerry Chu * data is written and socket is closed.) 28321da177e4SLinus Torvalds */ 28331da177e4SLinus Torvalds tcp_send_fin(sk); 28341da177e4SLinus Torvalds } 28351da177e4SLinus Torvalds 28361da177e4SLinus Torvalds sk_stream_wait_close(sk, timeout); 28371da177e4SLinus Torvalds 28381da177e4SLinus Torvalds adjudge_to_death: 283975c2d907SHerbert Xu state = sk->sk_state; 284075c2d907SHerbert Xu sock_hold(sk); 284175c2d907SHerbert Xu sock_orphan(sk); 284275c2d907SHerbert Xu 28431da177e4SLinus Torvalds local_bh_disable(); 28441da177e4SLinus Torvalds bh_lock_sock(sk); 28458873c064SEric Dumazet /* remove backlog if any, without releasing ownership. */ 28468873c064SEric Dumazet __release_sock(sk); 28471da177e4SLinus Torvalds 284819757cebSEric Dumazet this_cpu_inc(tcp_orphan_count); 2849eb4dea58SHerbert Xu 285075c2d907SHerbert Xu /* Have we already been destroyed by a softirq or backlog? */ 285175c2d907SHerbert Xu if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) 285275c2d907SHerbert Xu goto out; 28531da177e4SLinus Torvalds 28541da177e4SLinus Torvalds /* This is a (useful) BSD violating of the RFC. There is a 28551da177e4SLinus Torvalds * problem with TCP as specified in that the other end could 28561da177e4SLinus Torvalds * keep a socket open forever with no application left this end. 2857b10bd54cSJesper Juhl * We use a 1 minute timeout (about the same as BSD) then kill 28581da177e4SLinus Torvalds * our end. If they send after that then tough - BUT: long enough 28591da177e4SLinus Torvalds * that we won't make the old 4*rto = almost no time - whoops 28601da177e4SLinus Torvalds * reset mistake. 28611da177e4SLinus Torvalds * 28621da177e4SLinus Torvalds * Nope, it was not mistake. It is really desired behaviour 28631da177e4SLinus Torvalds * f.e. on http servers, when such sockets are useless, but 28641da177e4SLinus Torvalds * consume significant resources. Let's do it with special 28651da177e4SLinus Torvalds * linger2 option. --ANK 28661da177e4SLinus Torvalds */ 28671da177e4SLinus Torvalds 28681da177e4SLinus Torvalds if (sk->sk_state == TCP_FIN_WAIT2) { 28691da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 2870a81722ddSEric Dumazet if (READ_ONCE(tp->linger2) < 0) { 28711da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 28721da177e4SLinus Torvalds tcp_send_active_reset(sk, GFP_ATOMIC); 287302a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), 2874de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONLINGER); 28751da177e4SLinus Torvalds } else { 2876463c84b9SArnaldo Carvalho de Melo const int tmo = tcp_fin_time(sk); 28771da177e4SLinus Torvalds 28781da177e4SLinus Torvalds if (tmo > TCP_TIMEWAIT_LEN) { 287952499afeSDavid S. Miller inet_csk_reset_keepalive_timer(sk, 288052499afeSDavid S. 
Miller tmo - TCP_TIMEWAIT_LEN); 28811da177e4SLinus Torvalds } else { 28821da177e4SLinus Torvalds tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 28831da177e4SLinus Torvalds goto out; 28841da177e4SLinus Torvalds } 28851da177e4SLinus Torvalds } 28861da177e4SLinus Torvalds } 28871da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) { 2888efcdbf24SArun Sharma if (tcp_check_oom(sk, 0)) { 28891da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 28901da177e4SLinus Torvalds tcp_send_active_reset(sk, GFP_ATOMIC); 289102a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), 2892de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONMEMORY); 28934ee806d5SDan Streetman } else if (!check_net(sock_net(sk))) { 28944ee806d5SDan Streetman /* Not possible to send reset; just close */ 28954ee806d5SDan Streetman tcp_set_state(sk, TCP_CLOSE); 28961da177e4SLinus Torvalds } 28971da177e4SLinus Torvalds } 28981da177e4SLinus Torvalds 28998336886fSJerry Chu if (sk->sk_state == TCP_CLOSE) { 2900d983ea6fSEric Dumazet struct request_sock *req; 2901d983ea6fSEric Dumazet 2902d983ea6fSEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 2903d983ea6fSEric Dumazet lockdep_sock_is_held(sk)); 29048336886fSJerry Chu /* We could get here with a non-NULL req if the socket is 29058336886fSJerry Chu * aborted (e.g., closed with unread data) before 3WHS 29068336886fSJerry Chu * finishes. 29078336886fSJerry Chu */ 290800db4124SIan Morris if (req) 29098336886fSJerry Chu reqsk_fastopen_remove(sk, req, false); 29100a5578cfSArnaldo Carvalho de Melo inet_csk_destroy_sock(sk); 29118336886fSJerry Chu } 29121da177e4SLinus Torvalds /* Otherwise, socket is reprieved until protocol close. */ 29131da177e4SLinus Torvalds 29141da177e4SLinus Torvalds out: 29151da177e4SLinus Torvalds bh_unlock_sock(sk); 29161da177e4SLinus Torvalds local_bh_enable(); 291777c3c956SPaolo Abeni } 291877c3c956SPaolo Abeni 291977c3c956SPaolo Abeni void tcp_close(struct sock *sk, long timeout) 292077c3c956SPaolo Abeni { 292177c3c956SPaolo Abeni lock_sock(sk); 292277c3c956SPaolo Abeni __tcp_close(sk, timeout); 29238873c064SEric Dumazet release_sock(sk); 29241da177e4SLinus Torvalds sock_put(sk); 29251da177e4SLinus Torvalds } 29264bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_close); 29271da177e4SLinus Torvalds 29281da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */ 29291da177e4SLinus Torvalds 2930a2a385d6SEric Dumazet static inline bool tcp_need_reset(int state) 29311da177e4SLinus Torvalds { 29321da177e4SLinus Torvalds return (1 << state) & 29331da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | 2934a7150e38SEric Dumazet TCPF_FIN_WAIT2 | TCPF_SYN_RECV); 29351da177e4SLinus Torvalds } 29361da177e4SLinus Torvalds 293775c119afSEric Dumazet static void tcp_rtx_queue_purge(struct sock *sk) 293875c119afSEric Dumazet { 293975c119afSEric Dumazet struct rb_node *p = rb_first(&sk->tcp_rtx_queue); 294075c119afSEric Dumazet 29412bec445fSEric Dumazet tcp_sk(sk)->highest_sack = NULL; 294275c119afSEric Dumazet while (p) { 294375c119afSEric Dumazet struct sk_buff *skb = rb_to_skb(p); 294475c119afSEric Dumazet 294575c119afSEric Dumazet p = rb_next(p); 294675c119afSEric Dumazet /* Since we are deleting whole queue, no need to 294775c119afSEric Dumazet * list_del(&skb->tcp_tsorted_anchor) 294875c119afSEric Dumazet */ 294975c119afSEric Dumazet tcp_rtx_queue_unlink(skb, sk); 295003271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 295175c119afSEric Dumazet } 295275c119afSEric Dumazet } 295375c119afSEric Dumazet 2954ac3f09baSEric Dumazet void 
tcp_write_queue_purge(struct sock *sk) 2955ac3f09baSEric Dumazet { 2956ac3f09baSEric Dumazet struct sk_buff *skb; 2957ac3f09baSEric Dumazet 2958ac3f09baSEric Dumazet tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 2959ac3f09baSEric Dumazet while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 2960ac3f09baSEric Dumazet tcp_skb_tsorted_anchor_cleanup(skb); 296103271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb); 2962ac3f09baSEric Dumazet } 296375c119afSEric Dumazet tcp_rtx_queue_purge(sk); 2964ac3f09baSEric Dumazet INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); 2965ac3f09baSEric Dumazet tcp_clear_all_retrans_hints(tcp_sk(sk)); 2966bffd168cSSoheil Hassas Yeganeh tcp_sk(sk)->packets_out = 0; 296704c03114SEric Dumazet inet_csk(sk)->icsk_backoff = 0; 2968ac3f09baSEric Dumazet } 2969ac3f09baSEric Dumazet 29701da177e4SLinus Torvalds int tcp_disconnect(struct sock *sk, int flags) 29711da177e4SLinus Torvalds { 29721da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk); 2973463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 29741da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 29751da177e4SLinus Torvalds int old_state = sk->sk_state; 29760f317464SEric Dumazet u32 seq; 29771da177e4SLinus Torvalds 29784faeee0cSEric Dumazet /* Deny disconnect if other threads are blocked in sk_wait_event() 29794faeee0cSEric Dumazet * or inet_wait_for_connect(). 29804faeee0cSEric Dumazet */ 29814faeee0cSEric Dumazet if (sk->sk_wait_pending) 29824faeee0cSEric Dumazet return -EBUSY; 29834faeee0cSEric Dumazet 29841da177e4SLinus Torvalds if (old_state != TCP_CLOSE) 29851da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE); 29861da177e4SLinus Torvalds 29871da177e4SLinus Torvalds /* ABORT function of RFC793 */ 29881da177e4SLinus Torvalds if (old_state == TCP_LISTEN) { 29890a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk); 2990ee995283SPavel Emelyanov } else if (unlikely(tp->repair)) { 2991e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNABORTED); 29921da177e4SLinus Torvalds } else if (tcp_need_reset(old_state) || 29931da177e4SLinus Torvalds (tp->snd_nxt != tp->write_seq && 29941da177e4SLinus Torvalds (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 2995caa20d9aSStephen Hemminger /* The last check adjusts for discrepancy of Linux wrt. RFC 29961da177e4SLinus Torvalds * states 29971da177e4SLinus Torvalds */ 29981da177e4SLinus Torvalds tcp_send_active_reset(sk, gfp_any()); 2999e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET); 3000a7150e38SEric Dumazet } else if (old_state == TCP_SYN_SENT) 3001e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET); 30021da177e4SLinus Torvalds 30031da177e4SLinus Torvalds tcp_clear_xmit_timers(sk); 30041da177e4SLinus Torvalds __skb_queue_purge(&sk->sk_receive_queue); 30057db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); 30067b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0); 3007fe067e8aSDavid S. 
Miller tcp_write_queue_purge(sk); 3008cf1ef3f0SWei Wang tcp_fastopen_active_disable_ofo_check(sk); 30099f5afeaeSYaogong Wang skb_rbtree_purge(&tp->out_of_order_queue); 30101da177e4SLinus Torvalds 3011c720c7e8SEric Dumazet inet->inet_dport = 0; 30121da177e4SLinus Torvalds 3013e0833d1fSKuniyuki Iwashima inet_bhash2_reset_saddr(sk); 30141da177e4SLinus Torvalds 3015e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, 0); 30161da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE); 3017740b0f18SEric Dumazet tp->srtt_us = 0; 3018b9e2e689SEric Dumazet tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); 30193f6c65d6SWei Wang tp->rcv_rtt_last_tsecr = 0; 30200f317464SEric Dumazet 30210f317464SEric Dumazet seq = tp->write_seq + tp->max_window + 2; 30220f317464SEric Dumazet if (!seq) 30230f317464SEric Dumazet seq = 1; 30240f317464SEric Dumazet WRITE_ONCE(tp->write_seq, seq); 30250f317464SEric Dumazet 3026463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0; 30276687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0; 30289d9b1ee0SEnke Chen icsk->icsk_probes_tstamp = 0; 30296a408147SEric Dumazet icsk->icsk_rto = TCP_TIMEOUT_INIT; 3030ca584ba0SMartin KaFai Lau icsk->icsk_rto_min = TCP_RTO_MIN; 30312b8ee4f0SMartin KaFai Lau icsk->icsk_delack_max = TCP_DELACK_MAX; 30320b6a05c1SIlpo Järvinen tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 303340570375SEric Dumazet tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 30341da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0; 3035f4ce91ceSNeal Cardwell tp->is_cwnd_limited = 0; 3036f4ce91ceSNeal Cardwell tp->max_packets_out = 0; 30371fdf475aSEric Dumazet tp->window_clamp = 0; 30382fbdd562SEric Dumazet tp->delivered = 0; 3039e21db6f6SYuchung Cheng tp->delivered_ce = 0; 3040ce69e563SChristoph Paasch if (icsk->icsk_ca_ops->release) 3041ce69e563SChristoph Paasch icsk->icsk_ca_ops->release(sk); 3042ce69e563SChristoph Paasch memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); 30438919a9b3SNeal Cardwell icsk->icsk_ca_initialized = 0; 30446687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open); 3045d4761754SYousuk Seung tp->is_sack_reneg = 0; 30461da177e4SLinus Torvalds tcp_clear_retrans(tp); 3047c13c48c0SEric Dumazet tp->total_retrans = 0; 3048463c84b9SArnaldo Carvalho de Melo inet_csk_delack_init(sk); 3049499350a5SWei Wang /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 3050499350a5SWei Wang * issue in __tcp_select_window() 3051499350a5SWei Wang */ 3052499350a5SWei Wang icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; 3053b40b4f79SSrinivas Aji memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 30541da177e4SLinus Torvalds __sk_dst_reset(sk); 30558f905c0eSEric Dumazet dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); 305617c3060bSEric Dumazet tcp_saved_syn_free(tp); 30575d9f4262SEric Dumazet tp->compressed_ack = 0; 3058784f8344SEric Dumazet tp->segs_in = 0; 3059784f8344SEric Dumazet tp->segs_out = 0; 3060ba113c3aSWei Wang tp->bytes_sent = 0; 3061e858faf5SChristoph Paasch tp->bytes_acked = 0; 3062e858faf5SChristoph Paasch tp->bytes_received = 0; 3063fb31c9b9SWei Wang tp->bytes_retrans = 0; 3064db7ffee6SEric Dumazet tp->data_segs_in = 0; 3065db7ffee6SEric Dumazet tp->data_segs_out = 0; 30667788174eSYuchung Cheng tp->duplicate_sack[0].start_seq = 0; 30677788174eSYuchung Cheng tp->duplicate_sack[0].end_seq = 0; 30687e10b655SWei Wang tp->dsack_dups = 0; 30697ec65372SWei Wang tp->reord_seen = 0; 30705c701549SEric Dumazet tp->retrans_out = 0; 30715c701549SEric Dumazet tp->sacked_out = 0; 30725c701549SEric Dumazet tp->tlp_high_seq = 0; 30735c701549SEric Dumazet 
tp->last_oow_ack_time = 0; 307429c1c446SMubashir Adnan Qureshi tp->plb_rehash = 0; 30756cda8b74SEric Dumazet /* There's a bubble in the pipe until at least the first ACK. */ 30766cda8b74SEric Dumazet tp->app_limited = ~0U; 3077300b655dSDavid Morley tp->rate_app_limited = 1; 3078792c4354SEric Dumazet tp->rack.mstamp = 0; 3079792c4354SEric Dumazet tp->rack.advanced = 0; 3080792c4354SEric Dumazet tp->rack.reo_wnd_steps = 1; 3081792c4354SEric Dumazet tp->rack.last_delivered = 0; 3082792c4354SEric Dumazet tp->rack.reo_wnd_persist = 0; 3083792c4354SEric Dumazet tp->rack.dsack_seen = 0; 30846bcdc40dSEric Dumazet tp->syn_data_acked = 0; 30856bcdc40dSEric Dumazet tp->rx_opt.saw_tstamp = 0; 30866bcdc40dSEric Dumazet tp->rx_opt.dsack = 0; 30876bcdc40dSEric Dumazet tp->rx_opt.num_sacks = 0; 3088f9af2dbbSThomas Higdon tp->rcv_ooopack = 0; 30896cda8b74SEric Dumazet 30901da177e4SLinus Torvalds 30917db92362SWei Wang /* Clean up fastopen related fields */ 30927db92362SWei Wang tcp_free_fastopen_req(tp); 309308e39c0dSEric Dumazet inet_clear_bit(DEFER_CONNECT, sk); 309448027478SJason Baron tp->fastopen_client_fail = 0; 30957db92362SWei Wang 3096c720c7e8SEric Dumazet WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); 30971da177e4SLinus Torvalds 30989b42d55aSLi RongQing if (sk->sk_frag.page) { 30999b42d55aSLi RongQing put_page(sk->sk_frag.page); 31009b42d55aSLi RongQing sk->sk_frag.page = NULL; 31019b42d55aSLi RongQing sk->sk_frag.offset = 0; 31029b42d55aSLi RongQing } 3103e3ae2365SAlexander Aring sk_error_report(sk); 3104a01512b1SYueHaibing return 0; 31051da177e4SLinus Torvalds } 31064bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_disconnect); 31071da177e4SLinus Torvalds 3108a2a385d6SEric Dumazet static inline bool tcp_can_repair_sock(const struct sock *sk) 3109ee995283SPavel Emelyanov { 3110cb388e7eSMartin KaFai Lau return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && 3111319b0534SAndrey Vagin (sk->sk_state != TCP_LISTEN); 3112ee995283SPavel Emelyanov } 3113ee995283SPavel Emelyanov 3114d38d2b00SChristoph Hellwig static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len) 3115b1ed4c4fSAndrey Vagin { 3116b1ed4c4fSAndrey Vagin struct tcp_repair_window opt; 3117b1ed4c4fSAndrey Vagin 3118b1ed4c4fSAndrey Vagin if (!tp->repair) 3119b1ed4c4fSAndrey Vagin return -EPERM; 3120b1ed4c4fSAndrey Vagin 3121b1ed4c4fSAndrey Vagin if (len != sizeof(opt)) 3122b1ed4c4fSAndrey Vagin return -EINVAL; 3123b1ed4c4fSAndrey Vagin 3124d38d2b00SChristoph Hellwig if (copy_from_sockptr(&opt, optbuf, sizeof(opt))) 3125b1ed4c4fSAndrey Vagin return -EFAULT; 3126b1ed4c4fSAndrey Vagin 3127b1ed4c4fSAndrey Vagin if (opt.max_window < opt.snd_wnd) 3128b1ed4c4fSAndrey Vagin return -EINVAL; 3129b1ed4c4fSAndrey Vagin 3130b1ed4c4fSAndrey Vagin if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd)) 3131b1ed4c4fSAndrey Vagin return -EINVAL; 3132b1ed4c4fSAndrey Vagin 3133b1ed4c4fSAndrey Vagin if (after(opt.rcv_wup, tp->rcv_nxt)) 3134b1ed4c4fSAndrey Vagin return -EINVAL; 3135b1ed4c4fSAndrey Vagin 3136b1ed4c4fSAndrey Vagin tp->snd_wl1 = opt.snd_wl1; 3137b1ed4c4fSAndrey Vagin tp->snd_wnd = opt.snd_wnd; 3138b1ed4c4fSAndrey Vagin tp->max_window = opt.max_window; 3139b1ed4c4fSAndrey Vagin 3140b1ed4c4fSAndrey Vagin tp->rcv_wnd = opt.rcv_wnd; 3141b1ed4c4fSAndrey Vagin tp->rcv_wup = opt.rcv_wup; 3142b1ed4c4fSAndrey Vagin 3143b1ed4c4fSAndrey Vagin return 0; 3144b1ed4c4fSAndrey Vagin } 3145b1ed4c4fSAndrey Vagin 3146d38d2b00SChristoph Hellwig static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf, 3147d38d2b00SChristoph Hellwig 
unsigned int len) 3148b139ba4eSPavel Emelyanov { 314915e56515SDouglas Caetano dos Santos struct tcp_sock *tp = tcp_sk(sk); 3150de248a75SPavel Emelyanov struct tcp_repair_opt opt; 3151d3c48151SChristoph Hellwig size_t offset = 0; 3152b139ba4eSPavel Emelyanov 3153de248a75SPavel Emelyanov while (len >= sizeof(opt)) { 3154d3c48151SChristoph Hellwig if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt))) 3155b139ba4eSPavel Emelyanov return -EFAULT; 3156b139ba4eSPavel Emelyanov 3157d3c48151SChristoph Hellwig offset += sizeof(opt); 3158de248a75SPavel Emelyanov len -= sizeof(opt); 3159b139ba4eSPavel Emelyanov 3160de248a75SPavel Emelyanov switch (opt.opt_code) { 3161de248a75SPavel Emelyanov case TCPOPT_MSS: 3162de248a75SPavel Emelyanov tp->rx_opt.mss_clamp = opt.opt_val; 316315e56515SDouglas Caetano dos Santos tcp_mtup_init(sk); 3164b139ba4eSPavel Emelyanov break; 3165de248a75SPavel Emelyanov case TCPOPT_WINDOW: 3166bc26ccd8SAndrey Vagin { 3167bc26ccd8SAndrey Vagin u16 snd_wscale = opt.opt_val & 0xFFFF; 3168bc26ccd8SAndrey Vagin u16 rcv_wscale = opt.opt_val >> 16; 3169bc26ccd8SAndrey Vagin 3170589c49cbSGao Feng if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE) 3171b139ba4eSPavel Emelyanov return -EFBIG; 3172b139ba4eSPavel Emelyanov 3173bc26ccd8SAndrey Vagin tp->rx_opt.snd_wscale = snd_wscale; 3174bc26ccd8SAndrey Vagin tp->rx_opt.rcv_wscale = rcv_wscale; 3175bc26ccd8SAndrey Vagin tp->rx_opt.wscale_ok = 1; 3176bc26ccd8SAndrey Vagin } 3177b139ba4eSPavel Emelyanov break; 3178b139ba4eSPavel Emelyanov case TCPOPT_SACK_PERM: 3179de248a75SPavel Emelyanov if (opt.opt_val != 0) 3180de248a75SPavel Emelyanov return -EINVAL; 3181de248a75SPavel Emelyanov 3182b139ba4eSPavel Emelyanov tp->rx_opt.sack_ok |= TCP_SACK_SEEN; 3183b139ba4eSPavel Emelyanov break; 3184b139ba4eSPavel Emelyanov case TCPOPT_TIMESTAMP: 3185de248a75SPavel Emelyanov if (opt.opt_val != 0) 3186de248a75SPavel Emelyanov return -EINVAL; 3187de248a75SPavel Emelyanov 3188b139ba4eSPavel Emelyanov tp->rx_opt.tstamp_ok = 1; 3189b139ba4eSPavel Emelyanov break; 3190b139ba4eSPavel Emelyanov } 3191b139ba4eSPavel Emelyanov } 3192b139ba4eSPavel Emelyanov 3193b139ba4eSPavel Emelyanov return 0; 3194b139ba4eSPavel Emelyanov } 3195b139ba4eSPavel Emelyanov 3196a842fe14SEric Dumazet DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 3197a842fe14SEric Dumazet EXPORT_SYMBOL(tcp_tx_delay_enabled); 3198a842fe14SEric Dumazet 3199a842fe14SEric Dumazet static void tcp_enable_tx_delay(void) 3200a842fe14SEric Dumazet { 3201a842fe14SEric Dumazet if (!static_branch_unlikely(&tcp_tx_delay_enabled)) { 3202a842fe14SEric Dumazet static int __tcp_tx_delay_enabled = 0; 3203a842fe14SEric Dumazet 3204a842fe14SEric Dumazet if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) { 3205a842fe14SEric Dumazet static_branch_enable(&tcp_tx_delay_enabled); 3206a842fe14SEric Dumazet pr_info("TCP_TX_DELAY enabled\n"); 3207a842fe14SEric Dumazet } 3208a842fe14SEric Dumazet } 3209a842fe14SEric Dumazet } 3210a842fe14SEric Dumazet 3211db10538aSChristoph Hellwig /* When set indicates to always queue non-full frames. Later the user clears 3212db10538aSChristoph Hellwig * this option and we transmit any pending partial frames in the queue. This is 3213db10538aSChristoph Hellwig * meant to be used alongside sendfile() to get properly filled frames when the 3214db10538aSChristoph Hellwig * user (for example) must write out headers with a write() call first and then 3215db10538aSChristoph Hellwig * use sendfile to send out the data parts. 
3216db10538aSChristoph Hellwig * 3217db10538aSChristoph Hellwig * TCP_CORK can be set together with TCP_NODELAY and it is stronger than 3218db10538aSChristoph Hellwig * TCP_NODELAY. 3219db10538aSChristoph Hellwig */ 32206fadaa56SMaxim Galaganov void __tcp_sock_set_cork(struct sock *sk, bool on) 3221db10538aSChristoph Hellwig { 3222db10538aSChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk); 3223db10538aSChristoph Hellwig 3224db10538aSChristoph Hellwig if (on) { 3225db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_CORK; 3226db10538aSChristoph Hellwig } else { 3227db10538aSChristoph Hellwig tp->nonagle &= ~TCP_NAGLE_CORK; 3228db10538aSChristoph Hellwig if (tp->nonagle & TCP_NAGLE_OFF) 3229db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_PUSH; 3230db10538aSChristoph Hellwig tcp_push_pending_frames(sk); 3231db10538aSChristoph Hellwig } 3232db10538aSChristoph Hellwig } 3233db10538aSChristoph Hellwig 3234db10538aSChristoph Hellwig void tcp_sock_set_cork(struct sock *sk, bool on) 3235db10538aSChristoph Hellwig { 3236db10538aSChristoph Hellwig lock_sock(sk); 3237db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, on); 3238db10538aSChristoph Hellwig release_sock(sk); 3239db10538aSChristoph Hellwig } 3240db10538aSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_cork); 3241db10538aSChristoph Hellwig 324212abc5eeSChristoph Hellwig /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is 324312abc5eeSChristoph Hellwig * remembered, but it is not activated until cork is cleared. 324412abc5eeSChristoph Hellwig * 324512abc5eeSChristoph Hellwig * However, when TCP_NODELAY is set we make an explicit push, which overrides 324612abc5eeSChristoph Hellwig * even TCP_CORK for currently queued segments. 324712abc5eeSChristoph Hellwig */ 32486fadaa56SMaxim Galaganov void __tcp_sock_set_nodelay(struct sock *sk, bool on) 324912abc5eeSChristoph Hellwig { 325012abc5eeSChristoph Hellwig if (on) { 325112abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; 325212abc5eeSChristoph Hellwig tcp_push_pending_frames(sk); 325312abc5eeSChristoph Hellwig } else { 325412abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF; 325512abc5eeSChristoph Hellwig } 325612abc5eeSChristoph Hellwig } 325712abc5eeSChristoph Hellwig 325812abc5eeSChristoph Hellwig void tcp_sock_set_nodelay(struct sock *sk) 325912abc5eeSChristoph Hellwig { 326012abc5eeSChristoph Hellwig lock_sock(sk); 326112abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, true); 326212abc5eeSChristoph Hellwig release_sock(sk); 326312abc5eeSChristoph Hellwig } 326412abc5eeSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_nodelay); 326512abc5eeSChristoph Hellwig 3266ddd061b8SChristoph Hellwig static void __tcp_sock_set_quickack(struct sock *sk, int val) 3267ddd061b8SChristoph Hellwig { 3268ddd061b8SChristoph Hellwig if (!val) { 3269ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk); 3270ddd061b8SChristoph Hellwig return; 3271ddd061b8SChristoph Hellwig } 3272ddd061b8SChristoph Hellwig 3273ddd061b8SChristoph Hellwig inet_csk_exit_pingpong_mode(sk); 3274ddd061b8SChristoph Hellwig if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 3275ddd061b8SChristoph Hellwig inet_csk_ack_scheduled(sk)) { 3276ddd061b8SChristoph Hellwig inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED; 3277ddd061b8SChristoph Hellwig tcp_cleanup_rbuf(sk, 1); 3278ddd061b8SChristoph Hellwig if (!(val & 1)) 3279ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk); 3280ddd061b8SChristoph Hellwig } 3281ddd061b8SChristoph Hellwig } 
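/* Usage sketch (user-space view, not part of this file): the in-kernel
 * helpers above mirror what applications reach via setsockopt(). Toggling
 * quick ACKs from an application, assuming "fd" is a connected TCP socket,
 * would look like:
 *
 *	int on = 1;
 *
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &on, sizeof(on)) < 0)
 *		perror("setsockopt(TCP_QUICKACK)");
 *
 * As with the kernel helper, the setting is not permanent: it only leaves
 * pingpong mode until subsequent ACK scheduling re-enters it.
 */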
3282ddd061b8SChristoph Hellwig 3283ddd061b8SChristoph Hellwig void tcp_sock_set_quickack(struct sock *sk, int val) 3284ddd061b8SChristoph Hellwig { 3285ddd061b8SChristoph Hellwig lock_sock(sk); 3286ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val); 3287ddd061b8SChristoph Hellwig release_sock(sk); 3288ddd061b8SChristoph Hellwig } 3289ddd061b8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_quickack); 3290ddd061b8SChristoph Hellwig 3291557eadfcSChristoph Hellwig int tcp_sock_set_syncnt(struct sock *sk, int val) 3292557eadfcSChristoph Hellwig { 3293557eadfcSChristoph Hellwig if (val < 1 || val > MAX_TCP_SYNCNT) 3294557eadfcSChristoph Hellwig return -EINVAL; 3295557eadfcSChristoph Hellwig 32963a037f0fSEric Dumazet WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val); 3297557eadfcSChristoph Hellwig return 0; 3298557eadfcSChristoph Hellwig } 3299557eadfcSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_syncnt); 3300557eadfcSChristoph Hellwig 3301d58f2e15SEric Dumazet int tcp_sock_set_user_timeout(struct sock *sk, int val) 3302c488aeadSChristoph Hellwig { 3303d58f2e15SEric Dumazet /* Cap the max time in ms TCP will retry or probe the window 3304d58f2e15SEric Dumazet * before giving up and aborting (ETIMEDOUT) a connection. 3305d58f2e15SEric Dumazet */ 3306d58f2e15SEric Dumazet if (val < 0) 3307d58f2e15SEric Dumazet return -EINVAL; 3308d58f2e15SEric Dumazet 330926023e91SEric Dumazet WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val); 3310d58f2e15SEric Dumazet return 0; 3311c488aeadSChristoph Hellwig } 3312c488aeadSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_user_timeout); 3313c488aeadSChristoph Hellwig 3314aad4a0a9SDmitry Yakunin int tcp_sock_set_keepidle_locked(struct sock *sk, int val) 331571c48eb8SChristoph Hellwig { 331671c48eb8SChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk); 331771c48eb8SChristoph Hellwig 331871c48eb8SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPIDLE) 331971c48eb8SChristoph Hellwig return -EINVAL; 332071c48eb8SChristoph Hellwig 33214164245cSEric Dumazet /* Paired with WRITE_ONCE() in keepalive_time_when() */ 33224164245cSEric Dumazet WRITE_ONCE(tp->keepalive_time, val * HZ); 332371c48eb8SChristoph Hellwig if (sock_flag(sk, SOCK_KEEPOPEN) && 332471c48eb8SChristoph Hellwig !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { 332571c48eb8SChristoph Hellwig u32 elapsed = keepalive_time_elapsed(tp); 332671c48eb8SChristoph Hellwig 332771c48eb8SChristoph Hellwig if (tp->keepalive_time > elapsed) 332871c48eb8SChristoph Hellwig elapsed = tp->keepalive_time - elapsed; 332971c48eb8SChristoph Hellwig else 333071c48eb8SChristoph Hellwig elapsed = 0; 333171c48eb8SChristoph Hellwig inet_csk_reset_keepalive_timer(sk, elapsed); 333271c48eb8SChristoph Hellwig } 333371c48eb8SChristoph Hellwig 333471c48eb8SChristoph Hellwig return 0; 333571c48eb8SChristoph Hellwig } 333671c48eb8SChristoph Hellwig 333771c48eb8SChristoph Hellwig int tcp_sock_set_keepidle(struct sock *sk, int val) 333871c48eb8SChristoph Hellwig { 333971c48eb8SChristoph Hellwig int err; 334071c48eb8SChristoph Hellwig 334171c48eb8SChristoph Hellwig lock_sock(sk); 3342aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val); 334371c48eb8SChristoph Hellwig release_sock(sk); 334471c48eb8SChristoph Hellwig return err; 334571c48eb8SChristoph Hellwig } 334671c48eb8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepidle); 334771c48eb8SChristoph Hellwig 3348d41ecaacSChristoph Hellwig int tcp_sock_set_keepintvl(struct sock *sk, int val) 3349d41ecaacSChristoph Hellwig { 3350d41ecaacSChristoph Hellwig if (val < 1 || val > 
MAX_TCP_KEEPINTVL) 3351d41ecaacSChristoph Hellwig return -EINVAL; 3352d41ecaacSChristoph Hellwig 33535ecf9d4fSEric Dumazet WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ); 3354d41ecaacSChristoph Hellwig return 0; 3355d41ecaacSChristoph Hellwig } 3356d41ecaacSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepintvl); 3357d41ecaacSChristoph Hellwig 3358480aeb96SChristoph Hellwig int tcp_sock_set_keepcnt(struct sock *sk, int val) 3359480aeb96SChristoph Hellwig { 3360480aeb96SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPCNT) 3361480aeb96SChristoph Hellwig return -EINVAL; 3362480aeb96SChristoph Hellwig 33636e5e1de6SEric Dumazet /* Paired with READ_ONCE() in keepalive_probes() */ 33646e5e1de6SEric Dumazet WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val); 3365480aeb96SChristoph Hellwig return 0; 3366480aeb96SChristoph Hellwig } 3367480aeb96SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepcnt); 3368480aeb96SChristoph Hellwig 3369cb811109SPrankur gupta int tcp_set_window_clamp(struct sock *sk, int val) 3370cb811109SPrankur gupta { 3371cb811109SPrankur gupta struct tcp_sock *tp = tcp_sk(sk); 3372cb811109SPrankur gupta 3373cb811109SPrankur gupta if (!val) { 3374cb811109SPrankur gupta if (sk->sk_state != TCP_CLOSE) 3375cb811109SPrankur gupta return -EINVAL; 3376cb811109SPrankur gupta tp->window_clamp = 0; 3377cb811109SPrankur gupta } else { 3378cb811109SPrankur gupta tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? 3379cb811109SPrankur gupta SOCK_MIN_RCVBUF / 2 : val; 33803aa7857fSNeil Spring tp->rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp); 3381cb811109SPrankur gupta } 3382cb811109SPrankur gupta return 0; 3383cb811109SPrankur gupta } 3384cb811109SPrankur gupta 33851da177e4SLinus Torvalds /* 33861da177e4SLinus Torvalds * Socket option code for TCP. 33871da177e4SLinus Torvalds */ 33880c751f70SMartin KaFai Lau int do_tcp_setsockopt(struct sock *sk, int level, int optname, 3389d38d2b00SChristoph Hellwig sockptr_t optval, unsigned int optlen) 33901da177e4SLinus Torvalds { 33911da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 3392463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 33931e579caaSNikolay Borisov struct net *net = sock_net(sk); 33941da177e4SLinus Torvalds int val; 33951da177e4SLinus Torvalds int err = 0; 33961da177e4SLinus Torvalds 3397e56fb50fSWilliam Allen Simpson /* These are data/string values, all the others are ints */ 3398e56fb50fSWilliam Allen Simpson switch (optname) { 3399e56fb50fSWilliam Allen Simpson case TCP_CONGESTION: { 34005f8ef48dSStephen Hemminger char name[TCP_CA_NAME_MAX]; 34015f8ef48dSStephen Hemminger 34025f8ef48dSStephen Hemminger if (optlen < 1) 34035f8ef48dSStephen Hemminger return -EINVAL; 34045f8ef48dSStephen Hemminger 3405d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval, 34064fdb78d3SAndrew Morton min_t(long, TCP_CA_NAME_MAX-1, optlen)); 34075f8ef48dSStephen Hemminger if (val < 0) 34085f8ef48dSStephen Hemminger return -EFAULT; 34095f8ef48dSStephen Hemminger name[val] = 0; 34105f8ef48dSStephen Hemminger 3411cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 341284e5a0f2SMartin KaFai Lau err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(), 3413cb388e7eSMartin KaFai Lau sockopt_ns_capable(sock_net(sk)->user_ns, 34148d650cdeSEric Dumazet CAP_NET_ADMIN)); 3415cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 34165f8ef48dSStephen Hemminger return err; 34175f8ef48dSStephen Hemminger } 3418734942ccSDave Watson case TCP_ULP: { 3419734942ccSDave Watson char name[TCP_ULP_NAME_MAX]; 3420734942ccSDave 
Watson 3421734942ccSDave Watson if (optlen < 1) 3422734942ccSDave Watson return -EINVAL; 3423734942ccSDave Watson 3424d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval, 3425734942ccSDave Watson min_t(long, TCP_ULP_NAME_MAX - 1, 3426734942ccSDave Watson optlen)); 3427734942ccSDave Watson if (val < 0) 3428734942ccSDave Watson return -EFAULT; 3429734942ccSDave Watson name[val] = 0; 3430734942ccSDave Watson 3431cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 3432734942ccSDave Watson err = tcp_set_ulp(sk, name); 3433cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 3434734942ccSDave Watson return err; 3435734942ccSDave Watson } 34361fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: { 34370f1ce023SJason Baron __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH]; 34380f1ce023SJason Baron __u8 *backup_key = NULL; 34391fba70e5SYuchung Cheng 34400f1ce023SJason Baron /* Allow a backup key as well to facilitate key rotation 34410f1ce023SJason Baron * First key is the active one. 34420f1ce023SJason Baron */ 34430f1ce023SJason Baron if (optlen != TCP_FASTOPEN_KEY_LENGTH && 34440f1ce023SJason Baron optlen != TCP_FASTOPEN_KEY_BUF_LENGTH) 34451fba70e5SYuchung Cheng return -EINVAL; 34461fba70e5SYuchung Cheng 3447d38d2b00SChristoph Hellwig if (copy_from_sockptr(key, optval, optlen)) 34481fba70e5SYuchung Cheng return -EFAULT; 34491fba70e5SYuchung Cheng 34500f1ce023SJason Baron if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH) 34510f1ce023SJason Baron backup_key = key + TCP_FASTOPEN_KEY_LENGTH; 34520f1ce023SJason Baron 3453438ac880SArd Biesheuvel return tcp_fastopen_reset_cipher(net, sk, key, backup_key); 34541fba70e5SYuchung Cheng } 3455e56fb50fSWilliam Allen Simpson default: 3456e56fb50fSWilliam Allen Simpson /* fallthru */ 3457e56fb50fSWilliam Allen Simpson break; 3458ccbd6a5aSJoe Perches } 34595f8ef48dSStephen Hemminger 34601da177e4SLinus Torvalds if (optlen < sizeof(int)) 34611da177e4SLinus Torvalds return -EINVAL; 34621da177e4SLinus Torvalds 3463d38d2b00SChristoph Hellwig if (copy_from_sockptr(&val, optval, sizeof(val))) 34641da177e4SLinus Torvalds return -EFAULT; 34651da177e4SLinus Torvalds 3466d44fd4a7SEric Dumazet /* Handle options that can be set without locking the socket. 
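 * These handlers pair WRITE_ONCE() stores with READ_ONCE() loads on the
 * reader side, so they can run safely without the socket lock.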
*/ 3467d44fd4a7SEric Dumazet switch (optname) { 3468d44fd4a7SEric Dumazet case TCP_SYNCNT: 3469d44fd4a7SEric Dumazet return tcp_sock_set_syncnt(sk, val); 3470d58f2e15SEric Dumazet case TCP_USER_TIMEOUT: 3471d58f2e15SEric Dumazet return tcp_sock_set_user_timeout(sk, val); 34726fd70a6bSEric Dumazet case TCP_KEEPINTVL: 34736fd70a6bSEric Dumazet return tcp_sock_set_keepintvl(sk, val); 347484485080SEric Dumazet case TCP_KEEPCNT: 347584485080SEric Dumazet return tcp_sock_set_keepcnt(sk, val); 3476a81722ddSEric Dumazet case TCP_LINGER2: 3477a81722ddSEric Dumazet if (val < 0) 3478a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, -1); 3479a81722ddSEric Dumazet else if (val > TCP_FIN_TIMEOUT_MAX / HZ) 3480a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX); 3481a81722ddSEric Dumazet else 3482a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, val * HZ); 3483a81722ddSEric Dumazet return 0; 34846e97ba55SEric Dumazet case TCP_DEFER_ACCEPT: 34856e97ba55SEric Dumazet /* Translate value in seconds to number of retransmits */ 34866e97ba55SEric Dumazet WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept, 34876e97ba55SEric Dumazet secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, 34886e97ba55SEric Dumazet TCP_RTO_MAX / HZ)); 34896e97ba55SEric Dumazet return 0; 3490d44fd4a7SEric Dumazet } 3491d44fd4a7SEric Dumazet 3492cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk); 34931da177e4SLinus Torvalds 34941da177e4SLinus Torvalds switch (optname) { 34951da177e4SLinus Torvalds case TCP_MAXSEG: 34961da177e4SLinus Torvalds /* Values greater than interface MTU won't take effect. However 34971da177e4SLinus Torvalds * at the point when this call is done we typically don't yet 3498a777f715SRohit Chavan * know which interface is going to be used 3499a777f715SRohit Chavan */ 3500cfc62d87SGao Feng if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) { 35011da177e4SLinus Torvalds err = -EINVAL; 35021da177e4SLinus Torvalds break; 35031da177e4SLinus Torvalds } 35041da177e4SLinus Torvalds tp->rx_opt.user_mss = val; 35051da177e4SLinus Torvalds break; 35061da177e4SLinus Torvalds 35071da177e4SLinus Torvalds case TCP_NODELAY: 350812abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, val); 35091da177e4SLinus Torvalds break; 35101da177e4SLinus Torvalds 351136e31b0aSAndreas Petlund case TCP_THIN_LINEAR_TIMEOUTS: 351236e31b0aSAndreas Petlund if (val < 0 || val > 1) 351336e31b0aSAndreas Petlund err = -EINVAL; 351436e31b0aSAndreas Petlund else 351536e31b0aSAndreas Petlund tp->thin_lto = val; 351636e31b0aSAndreas Petlund break; 351736e31b0aSAndreas Petlund 35187e380175SAndreas Petlund case TCP_THIN_DUPACK: 35197e380175SAndreas Petlund if (val < 0 || val > 1) 35207e380175SAndreas Petlund err = -EINVAL; 35217e380175SAndreas Petlund break; 35227e380175SAndreas Petlund 3523ee995283SPavel Emelyanov case TCP_REPAIR: 3524ee995283SPavel Emelyanov if (!tcp_can_repair_sock(sk)) 3525ee995283SPavel Emelyanov err = -EPERM; 352631048d7aSStefan Baranoff else if (val == TCP_REPAIR_ON) { 3527ee995283SPavel Emelyanov tp->repair = 1; 3528ee995283SPavel Emelyanov sk->sk_reuse = SK_FORCE_REUSE; 3529ee995283SPavel Emelyanov tp->repair_queue = TCP_NO_QUEUE; 353031048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF) { 3531ee995283SPavel Emelyanov tp->repair = 0; 3532ee995283SPavel Emelyanov sk->sk_reuse = SK_NO_REUSE; 3533ee995283SPavel Emelyanov tcp_send_window_probe(sk); 353431048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF_NO_WP) { 353531048d7aSStefan Baranoff tp->repair = 0; 353631048d7aSStefan Baranoff sk->sk_reuse = SK_NO_REUSE; 
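		/* Illustrative sketch, not kernel code: TCP_REPAIR is driven
		 * from userspace by checkpoint/restore tools such as CRIU.
		 * Assuming a connected socket fd and CAP_NET_ADMIN:
		 *
		 *	int on = TCP_REPAIR_ON;
		 *	int off = TCP_REPAIR_OFF;
		 *
		 *	setsockopt(fd, SOL_TCP, TCP_REPAIR, &on, sizeof(on));
		 *	... dump or restore queue state via TCP_REPAIR_QUEUE,
		 *	    TCP_QUEUE_SEQ and TCP_REPAIR_OPTIONS ...
		 *	setsockopt(fd, SOL_TCP, TCP_REPAIR, &off, sizeof(off));
		 */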
3537ee995283SPavel Emelyanov } else 3538ee995283SPavel Emelyanov err = -EINVAL; 3539ee995283SPavel Emelyanov 3540ee995283SPavel Emelyanov break; 3541ee995283SPavel Emelyanov 3542ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE: 3543ee995283SPavel Emelyanov if (!tp->repair) 3544ee995283SPavel Emelyanov err = -EPERM; 3545bf2acc94SEric Dumazet else if ((unsigned int)val < TCP_QUEUES_NR) 3546ee995283SPavel Emelyanov tp->repair_queue = val; 3547ee995283SPavel Emelyanov else 3548ee995283SPavel Emelyanov err = -EINVAL; 3549ee995283SPavel Emelyanov break; 3550ee995283SPavel Emelyanov 3551ee995283SPavel Emelyanov case TCP_QUEUE_SEQ: 35528811f4a9SEric Dumazet if (sk->sk_state != TCP_CLOSE) { 3553ee995283SPavel Emelyanov err = -EPERM; 35548811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_SEND_QUEUE) { 35558811f4a9SEric Dumazet if (!tcp_rtx_queue_empty(sk)) 35568811f4a9SEric Dumazet err = -EPERM; 35578811f4a9SEric Dumazet else 35580f317464SEric Dumazet WRITE_ONCE(tp->write_seq, val); 35598811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_RECV_QUEUE) { 35608811f4a9SEric Dumazet if (tp->rcv_nxt != tp->copied_seq) { 35618811f4a9SEric Dumazet err = -EPERM; 35628811f4a9SEric Dumazet } else { 3563dba7d9b8SEric Dumazet WRITE_ONCE(tp->rcv_nxt, val); 35646cd6cbf5SEric Dumazet WRITE_ONCE(tp->copied_seq, val); 35656cd6cbf5SEric Dumazet } 35668811f4a9SEric Dumazet } else { 3567ee995283SPavel Emelyanov err = -EINVAL; 35688811f4a9SEric Dumazet } 3569ee995283SPavel Emelyanov break; 3570ee995283SPavel Emelyanov 3571b139ba4eSPavel Emelyanov case TCP_REPAIR_OPTIONS: 3572b139ba4eSPavel Emelyanov if (!tp->repair) 3573b139ba4eSPavel Emelyanov err = -EINVAL; 35740c175da7SLu Wei else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent) 3575d38d2b00SChristoph Hellwig err = tcp_repair_options_est(sk, optval, optlen); 3576b139ba4eSPavel Emelyanov else 3577b139ba4eSPavel Emelyanov err = -EPERM; 3578b139ba4eSPavel Emelyanov break; 3579b139ba4eSPavel Emelyanov 35801da177e4SLinus Torvalds case TCP_CORK: 3581db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, val); 35821da177e4SLinus Torvalds break; 35831da177e4SLinus Torvalds 35841da177e4SLinus Torvalds case TCP_KEEPIDLE: 3585aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val); 35861da177e4SLinus Torvalds break; 3587cd8ae852SEric Dumazet case TCP_SAVE_SYN: 3588267cf9faSMartin KaFai Lau /* 0: disable, 1: enable, 2: start from ether_header */ 3589267cf9faSMartin KaFai Lau if (val < 0 || val > 2) 3590cd8ae852SEric Dumazet err = -EINVAL; 3591cd8ae852SEric Dumazet else 3592cd8ae852SEric Dumazet tp->save_syn = val; 3593cd8ae852SEric Dumazet break; 3594cd8ae852SEric Dumazet 35951da177e4SLinus Torvalds case TCP_WINDOW_CLAMP: 3596cb811109SPrankur gupta err = tcp_set_window_clamp(sk, val); 35971da177e4SLinus Torvalds break; 35981da177e4SLinus Torvalds 35991da177e4SLinus Torvalds case TCP_QUICKACK: 3600ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val); 36011da177e4SLinus Torvalds break; 36021da177e4SLinus Torvalds 3603cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 3604cfb6eeb4SYOSHIFUJI Hideaki case TCP_MD5SIG: 36058917a777SIvan Delalande case TCP_MD5SIG_EXT: 3606d38d2b00SChristoph Hellwig err = tp->af_specific->md5_parse(sk, optname, optval, optlen); 3607cfb6eeb4SYOSHIFUJI Hideaki break; 3608cfb6eeb4SYOSHIFUJI Hideaki #endif 36098336886fSJerry Chu case TCP_FASTOPEN: 36108336886fSJerry Chu if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE | 3611dfea2aa6SChristoph Paasch TCPF_LISTEN))) { 361243713848SHaishuang Yan tcp_fastopen_init_key_once(net); 
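			/* Usage sketch (userspace, not kernel code): a server
			 * enables Fast Open before listen(), with val bounding
			 * the queue of not-yet-accepted TFO connections; fd and
			 * qlen here are illustrative:
			 *
			 *	int qlen = 16;
			 *
			 *	setsockopt(fd, SOL_TCP, TCP_FASTOPEN,
			 *		   &qlen, sizeof(qlen));
			 *	listen(fd, SOMAXCONN);
			 */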
3613dfea2aa6SChristoph Paasch 36140536fcc0SEric Dumazet fastopen_queue_tune(sk, val); 3615dfea2aa6SChristoph Paasch } else { 36168336886fSJerry Chu err = -EINVAL; 3617dfea2aa6SChristoph Paasch } 36188336886fSJerry Chu break; 361919f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT: 362019f6d3f3SWei Wang if (val > 1 || val < 0) { 362119f6d3f3SWei Wang err = -EINVAL; 36225a542133SKuniyuki Iwashima } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) & 36235a542133SKuniyuki Iwashima TFO_CLIENT_ENABLE) { 362419f6d3f3SWei Wang if (sk->sk_state == TCP_CLOSE) 362519f6d3f3SWei Wang tp->fastopen_connect = val; 362619f6d3f3SWei Wang else 362719f6d3f3SWei Wang err = -EINVAL; 362819f6d3f3SWei Wang } else { 362919f6d3f3SWei Wang err = -EOPNOTSUPP; 363019f6d3f3SWei Wang } 363119f6d3f3SWei Wang break; 363271c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE: 363371c02379SChristoph Paasch if (val > 1 || val < 0) 363471c02379SChristoph Paasch err = -EINVAL; 363571c02379SChristoph Paasch else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 363671c02379SChristoph Paasch err = -EINVAL; 363771c02379SChristoph Paasch else 363871c02379SChristoph Paasch tp->fastopen_no_cookie = val; 363971c02379SChristoph Paasch break; 364093be6ce0SAndrey Vagin case TCP_TIMESTAMP: 364193be6ce0SAndrey Vagin if (!tp->repair) 364293be6ce0SAndrey Vagin err = -EPERM; 364393be6ce0SAndrey Vagin else 3644dd23c9f1SEric Dumazet WRITE_ONCE(tp->tsoffset, val - tcp_time_stamp_raw()); 364593be6ce0SAndrey Vagin break; 3646b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: 3647b1ed4c4fSAndrey Vagin err = tcp_repair_set_window(tp, optval, optlen); 3648b1ed4c4fSAndrey Vagin break; 3649c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT: 36501aeb87bcSEric Dumazet WRITE_ONCE(tp->notsent_lowat, val); 3651c9bee3b7SEric Dumazet sk->sk_write_space(sk); 3652c9bee3b7SEric Dumazet break; 3653b75eba76SSoheil Hassas Yeganeh case TCP_INQ: 3654b75eba76SSoheil Hassas Yeganeh if (val > 1 || val < 0) 3655b75eba76SSoheil Hassas Yeganeh err = -EINVAL; 3656b75eba76SSoheil Hassas Yeganeh else 3657b75eba76SSoheil Hassas Yeganeh tp->recvmsg_inq = val; 3658b75eba76SSoheil Hassas Yeganeh break; 3659a842fe14SEric Dumazet case TCP_TX_DELAY: 3660a842fe14SEric Dumazet if (val) 3661a842fe14SEric Dumazet tcp_enable_tx_delay(); 3662348b81b6SEric Dumazet WRITE_ONCE(tp->tcp_tx_delay, val); 3663a842fe14SEric Dumazet break; 36641da177e4SLinus Torvalds default: 36651da177e4SLinus Torvalds err = -ENOPROTOOPT; 36661da177e4SLinus Torvalds break; 36673ff50b79SStephen Hemminger } 36683ff50b79SStephen Hemminger 3669cb388e7eSMartin KaFai Lau sockopt_release_sock(sk); 36701da177e4SLinus Torvalds return err; 36711da177e4SLinus Torvalds } 36721da177e4SLinus Torvalds 3673a7b75c5aSChristoph Hellwig int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, 3674b7058842SDavid S. 
Miller unsigned int optlen) 36753fdadf7dSDmitry Mishin { 3676cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk); 36773fdadf7dSDmitry Mishin 36783fdadf7dSDmitry Mishin if (level != SOL_TCP) 3679f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 3680f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname, 36813fdadf7dSDmitry Mishin optval, optlen); 3682a7b75c5aSChristoph Hellwig return do_tcp_setsockopt(sk, level, optname, optval, optlen); 36833fdadf7dSDmitry Mishin } 36844bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_setsockopt); 36853fdadf7dSDmitry Mishin 3686efd90174SFrancis Yan static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, 3687efd90174SFrancis Yan struct tcp_info *info) 3688efd90174SFrancis Yan { 3689efd90174SFrancis Yan u64 stats[__TCP_CHRONO_MAX], total = 0; 3690efd90174SFrancis Yan enum tcp_chrono i; 3691efd90174SFrancis Yan 3692efd90174SFrancis Yan for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) { 3693efd90174SFrancis Yan stats[i] = tp->chrono_stat[i - 1]; 3694efd90174SFrancis Yan if (i == tp->chrono_type) 3695628174ccSEric Dumazet stats[i] += tcp_jiffies32 - tp->chrono_start; 3696efd90174SFrancis Yan stats[i] *= USEC_PER_SEC / HZ; 3697efd90174SFrancis Yan total += stats[i]; 3698efd90174SFrancis Yan } 3699efd90174SFrancis Yan 3700efd90174SFrancis Yan info->tcpi_busy_time = total; 3701efd90174SFrancis Yan info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED]; 3702efd90174SFrancis Yan info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED]; 3703efd90174SFrancis Yan } 3704efd90174SFrancis Yan 37051da177e4SLinus Torvalds /* Return information about state of tcp endpoint in API format. */ 37060df48c26SEric Dumazet void tcp_get_info(struct sock *sk, struct tcp_info *info) 37071da177e4SLinus Torvalds { 370835ac838aSCraig Gallek const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ 3709463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 371076a9ebe8SEric Dumazet unsigned long rate; 37110263598cSWei Wang u32 now; 3712ff5d7497SEric Dumazet u64 rate64; 371367db3e4bSEric Dumazet bool slow; 37141da177e4SLinus Torvalds 37151da177e4SLinus Torvalds memset(info, 0, sizeof(*info)); 371635ac838aSCraig Gallek if (sk->sk_type != SOCK_STREAM) 371735ac838aSCraig Gallek return; 37181da177e4SLinus Torvalds 3719986ffdfdSYafang Shao info->tcpi_state = inet_sk_state_load(sk); 372000fd38d9SEric Dumazet 3721ccbf3bfaSEric Dumazet /* Report meaningful fields for all TCP states, including listeners */ 3722ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_pacing_rate); 372376a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL; 3724f522a5fcSEric Dumazet info->tcpi_pacing_rate = rate64; 3725ccbf3bfaSEric Dumazet 3726ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_max_pacing_rate); 372776a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 3728f522a5fcSEric Dumazet info->tcpi_max_pacing_rate = rate64; 3729ccbf3bfaSEric Dumazet 3730ccbf3bfaSEric Dumazet info->tcpi_reordering = tp->reordering; 373140570375SEric Dumazet info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); 3732ccbf3bfaSEric Dumazet 3733ccbf3bfaSEric Dumazet if (info->tcpi_state == TCP_LISTEN) { 3734ccbf3bfaSEric Dumazet /* listeners aliased fields : 3735ccbf3bfaSEric Dumazet * tcpi_unacked -> Number of children ready for accept() 3736ccbf3bfaSEric Dumazet * tcpi_sacked -> max backlog 3737ccbf3bfaSEric Dumazet */ 3738288efe86SEric Dumazet info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog); 3739099ecf59SEric Dumazet info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog); 3740ccbf3bfaSEric Dumazet return; 3741ccbf3bfaSEric Dumazet } 3742b369e7fdSEric Dumazet 3743b369e7fdSEric Dumazet slow = lock_sock_fast(sk); 3744b369e7fdSEric Dumazet 37456687e988SArnaldo Carvalho de Melo info->tcpi_ca_state = icsk->icsk_ca_state; 3746463c84b9SArnaldo Carvalho de Melo info->tcpi_retransmits = icsk->icsk_retransmits; 37476687e988SArnaldo Carvalho de Melo info->tcpi_probes = icsk->icsk_probes_out; 3748463c84b9SArnaldo Carvalho de Melo info->tcpi_backoff = icsk->icsk_backoff; 37491da177e4SLinus Torvalds 37501da177e4SLinus Torvalds if (tp->rx_opt.tstamp_ok) 37511da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_TIMESTAMPS; 3752e60402d0SIlpo Järvinen if (tcp_is_sack(tp)) 37531da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_SACK; 37541da177e4SLinus Torvalds if (tp->rx_opt.wscale_ok) { 37551da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_WSCALE; 37561da177e4SLinus Torvalds info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; 37571da177e4SLinus Torvalds info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; 37581da177e4SLinus Torvalds } 37591da177e4SLinus Torvalds 37601da177e4SLinus Torvalds if (tp->ecn_flags & TCP_ECN_OK) 37611da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_ECN; 3762b5c5693bSEric Dumazet if (tp->ecn_flags & TCP_ECN_SEEN) 3763b5c5693bSEric Dumazet info->tcpi_options |= TCPI_OPT_ECN_SEEN; 37646f73601eSYuchung Cheng if (tp->syn_data_acked) 37656f73601eSYuchung Cheng info->tcpi_options |= TCPI_OPT_SYN_DATA; 37661da177e4SLinus Torvalds 3767463c84b9SArnaldo Carvalho de Melo info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 3768463c84b9SArnaldo Carvalho de Melo info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 3769c1b4a7e6SDavid S. 
Miller info->tcpi_snd_mss = tp->mss_cache; 3770463c84b9SArnaldo Carvalho de Melo info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 37711da177e4SLinus Torvalds 37721da177e4SLinus Torvalds info->tcpi_unacked = tp->packets_out; 37731da177e4SLinus Torvalds info->tcpi_sacked = tp->sacked_out; 3774ccbf3bfaSEric Dumazet 37751da177e4SLinus Torvalds info->tcpi_lost = tp->lost_out; 37761da177e4SLinus Torvalds info->tcpi_retrans = tp->retrans_out; 37771da177e4SLinus Torvalds 3778d635fbe2SEric Dumazet now = tcp_jiffies32; 37791da177e4SLinus Torvalds info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 3780463c84b9SArnaldo Carvalho de Melo info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 37811da177e4SLinus Torvalds info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 37821da177e4SLinus Torvalds 3783d83d8461SArnaldo Carvalho de Melo info->tcpi_pmtu = icsk->icsk_pmtu_cookie; 37841da177e4SLinus Torvalds info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; 3785740b0f18SEric Dumazet info->tcpi_rtt = tp->srtt_us >> 3; 3786740b0f18SEric Dumazet info->tcpi_rttvar = tp->mdev_us >> 2; 37871da177e4SLinus Torvalds info->tcpi_snd_ssthresh = tp->snd_ssthresh; 37881da177e4SLinus Torvalds info->tcpi_advmss = tp->advmss; 37891da177e4SLinus Torvalds 3790645f4c6fSEric Dumazet info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3; 37911da177e4SLinus Torvalds info->tcpi_rcv_space = tp->rcvq_space.space; 37921da177e4SLinus Torvalds 37931da177e4SLinus Torvalds info->tcpi_total_retrans = tp->total_retrans; 3794977cb0ecSEric Dumazet 3795f522a5fcSEric Dumazet info->tcpi_bytes_acked = tp->bytes_acked; 3796f522a5fcSEric Dumazet info->tcpi_bytes_received = tp->bytes_received; 379767db3e4bSEric Dumazet info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt); 3798efd90174SFrancis Yan tcp_get_info_chrono_stats(tp, info); 379967db3e4bSEric Dumazet 38002efd055cSMarcelo Ricardo Leitner info->tcpi_segs_out = tp->segs_out; 38010307a0b7SEric Dumazet 38020307a0b7SEric Dumazet /* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */ 38030307a0b7SEric Dumazet info->tcpi_segs_in = READ_ONCE(tp->segs_in); 38040307a0b7SEric Dumazet info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in); 3805cd9b2660SEric Dumazet 3806cd9b2660SEric Dumazet info->tcpi_min_rtt = tcp_min_rtt(tp); 3807a44d6eacSMartin KaFai Lau info->tcpi_data_segs_out = tp->data_segs_out; 3808eb8329e0SYuchung Cheng 3809eb8329e0SYuchung Cheng info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 
1 : 0; 38100263598cSWei Wang rate64 = tcp_compute_delivery_rate(tp); 38110263598cSWei Wang if (rate64) 3812f522a5fcSEric Dumazet info->tcpi_delivery_rate = rate64; 3813feb5f2ecSYuchung Cheng info->tcpi_delivered = tp->delivered; 3814feb5f2ecSYuchung Cheng info->tcpi_delivered_ce = tp->delivered_ce; 3815ba113c3aSWei Wang info->tcpi_bytes_sent = tp->bytes_sent; 3816fb31c9b9SWei Wang info->tcpi_bytes_retrans = tp->bytes_retrans; 38177e10b655SWei Wang info->tcpi_dsack_dups = tp->dsack_dups; 38187ec65372SWei Wang info->tcpi_reord_seen = tp->reord_seen; 3819f9af2dbbSThomas Higdon info->tcpi_rcv_ooopack = tp->rcv_ooopack; 38208f7baad7SThomas Higdon info->tcpi_snd_wnd = tp->snd_wnd; 382171fc7047SMubashir Adnan Qureshi info->tcpi_rcv_wnd = tp->rcv_wnd; 382271fc7047SMubashir Adnan Qureshi info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash; 382348027478SJason Baron info->tcpi_fastopen_client_fail = tp->fastopen_client_fail; 3824b369e7fdSEric Dumazet unlock_sock_fast(sk, slow); 38251da177e4SLinus Torvalds } 38261da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info); 38271da177e4SLinus Torvalds 3828984988aaSWei Wang static size_t tcp_opt_stats_get_size(void) 3829984988aaSWei Wang { 3830984988aaSWei Wang return 3831984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */ 3832984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */ 3833984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */ 3834984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */ 3835984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */ 3836984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */ 3837984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */ 3838984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */ 3839984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */ 3840984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */ 3841984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */ 3842984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */ 3843984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */ 3844984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */ 3845984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */ 3846984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */ 3847984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */ 3848ba113c3aSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */ 3849fb31c9b9SWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */ 38507e10b655SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */ 38517ec65372SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */ 3852e8bd8fcaSYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */ 385332efcc06SAbdul Kabbani nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */ 3854e08ab0b3SYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */ 385548040793SYousuk Seung nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */ 3856e7ed11eeSYousuk Seung nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */ 385729c1c446SMubashir Adnan Qureshi nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */ 3858984988aaSWei Wang 0; 3859984988aaSWei Wang } 3860984988aaSWei Wang 3861e7ed11eeSYousuk Seung /* Returns TTL or hop limit of an incoming packet from skb. 
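 * Returns 0 when the packet is neither IPv4 nor IPv6.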
*/ 3862e7ed11eeSYousuk Seung static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb) 3863e7ed11eeSYousuk Seung { 3864e7ed11eeSYousuk Seung if (skb->protocol == htons(ETH_P_IP)) 3865e7ed11eeSYousuk Seung return ip_hdr(skb)->ttl; 3866e7ed11eeSYousuk Seung else if (skb->protocol == htons(ETH_P_IPV6)) 3867e7ed11eeSYousuk Seung return ipv6_hdr(skb)->hop_limit; 3868e7ed11eeSYousuk Seung else 3869e7ed11eeSYousuk Seung return 0; 3870e7ed11eeSYousuk Seung } 3871e7ed11eeSYousuk Seung 387248040793SYousuk Seung struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk, 3873e7ed11eeSYousuk Seung const struct sk_buff *orig_skb, 3874e7ed11eeSYousuk Seung const struct sk_buff *ack_skb) 38751c885808SFrancis Yan { 38761c885808SFrancis Yan const struct tcp_sock *tp = tcp_sk(sk); 38771c885808SFrancis Yan struct sk_buff *stats; 38781c885808SFrancis Yan struct tcp_info info; 387976a9ebe8SEric Dumazet unsigned long rate; 3880bb7c19f9SWei Wang u64 rate64; 38811c885808SFrancis Yan 3882984988aaSWei Wang stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC); 38831c885808SFrancis Yan if (!stats) 38841c885808SFrancis Yan return NULL; 38851c885808SFrancis Yan 38861c885808SFrancis Yan tcp_get_info_chrono_stats(tp, &info); 38871c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_BUSY, 38881c885808SFrancis Yan info.tcpi_busy_time, TCP_NLA_PAD); 38891c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED, 38901c885808SFrancis Yan info.tcpi_rwnd_limited, TCP_NLA_PAD); 38911c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED, 38921c885808SFrancis Yan info.tcpi_sndbuf_limited, TCP_NLA_PAD); 38937e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT, 38947e98102fSYuchung Cheng tp->data_segs_out, TCP_NLA_PAD); 38957e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, 38967e98102fSYuchung Cheng tp->total_retrans, TCP_NLA_PAD); 3897bb7c19f9SWei Wang 3898bb7c19f9SWei Wang rate = READ_ONCE(sk->sk_pacing_rate); 389976a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? 
rate : ~0ULL; 3900bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); 3901bb7c19f9SWei Wang 3902bb7c19f9SWei Wang rate64 = tcp_compute_delivery_rate(tp); 3903bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); 3904bb7c19f9SWei Wang 390540570375SEric Dumazet nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); 3906bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); 3907bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); 3908bb7c19f9SWei Wang 3909bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); 3910bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); 39117156d194SYousuk Seung nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh); 3912feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered); 3913feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce); 391487ecc95dSPriyaranjan Jha 391587ecc95dSPriyaranjan Jha nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una); 3916be631892SPriyaranjan Jha nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state); 3917feb5f2ecSYuchung Cheng 3918ba113c3aSWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent, 3919ba113c3aSWei Wang TCP_NLA_PAD); 3920fb31c9b9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans, 3921fb31c9b9SWei Wang TCP_NLA_PAD); 39227e10b655SWei Wang nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups); 39237ec65372SWei Wang nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen); 3924e8bd8fcaSYousuk Seung nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3); 392532efcc06SAbdul Kabbani nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash); 3926e08ab0b3SYousuk Seung nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT, 3927e08ab0b3SYousuk Seung max_t(int, 0, tp->write_seq - tp->snd_nxt)); 392848040793SYousuk Seung nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns, 392948040793SYousuk Seung TCP_NLA_PAD); 3930e7ed11eeSYousuk Seung if (ack_skb) 3931e7ed11eeSYousuk Seung nla_put_u8(stats, TCP_NLA_TTL, 3932e7ed11eeSYousuk Seung tcp_skb_ttl_or_hop_limit(ack_skb)); 3933ba113c3aSWei Wang 393429c1c446SMubashir Adnan Qureshi nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash); 39351c885808SFrancis Yan return stats; 39361c885808SFrancis Yan } 39371c885808SFrancis Yan 3938273b7f0fSMartin KaFai Lau int do_tcp_getsockopt(struct sock *sk, int level, 393934704ef0SMartin KaFai Lau int optname, sockptr_t optval, sockptr_t optlen) 39401da177e4SLinus Torvalds { 3941295f7324SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 39421da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk); 39436fa25166SNikolay Borisov struct net *net = sock_net(sk); 39441da177e4SLinus Torvalds int val, len; 39451da177e4SLinus Torvalds 394634704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 39471da177e4SLinus Torvalds return -EFAULT; 39481da177e4SLinus Torvalds 39491da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(int)); 39501da177e4SLinus Torvalds 39511da177e4SLinus Torvalds if (len < 0) 39521da177e4SLinus Torvalds return -EINVAL; 39531da177e4SLinus Torvalds 39541da177e4SLinus Torvalds switch (optname) { 39551da177e4SLinus Torvalds case TCP_MAXSEG: 3956c1b4a7e6SDavid S. 
Miller val = tp->mss_cache; 395734dfde4aSCambda Zhu if (tp->rx_opt.user_mss && 395834dfde4aSCambda Zhu ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 39591da177e4SLinus Torvalds val = tp->rx_opt.user_mss; 39605e6a3ce6SPavel Emelyanov if (tp->repair) 39615e6a3ce6SPavel Emelyanov val = tp->rx_opt.mss_clamp; 39621da177e4SLinus Torvalds break; 39631da177e4SLinus Torvalds case TCP_NODELAY: 39641da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_OFF); 39651da177e4SLinus Torvalds break; 39661da177e4SLinus Torvalds case TCP_CORK: 39671da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_CORK); 39681da177e4SLinus Torvalds break; 39691da177e4SLinus Torvalds case TCP_KEEPIDLE: 3970df19a626SEric Dumazet val = keepalive_time_when(tp) / HZ; 39711da177e4SLinus Torvalds break; 39721da177e4SLinus Torvalds case TCP_KEEPINTVL: 3973df19a626SEric Dumazet val = keepalive_intvl_when(tp) / HZ; 39741da177e4SLinus Torvalds break; 39751da177e4SLinus Torvalds case TCP_KEEPCNT: 3976df19a626SEric Dumazet val = keepalive_probes(tp); 39771da177e4SLinus Torvalds break; 39781da177e4SLinus Torvalds case TCP_SYNCNT: 39793a037f0fSEric Dumazet val = READ_ONCE(icsk->icsk_syn_retries) ? : 398020a3b1c0SKuniyuki Iwashima READ_ONCE(net->ipv4.sysctl_tcp_syn_retries); 39811da177e4SLinus Torvalds break; 39821da177e4SLinus Torvalds case TCP_LINGER2: 39839df5335cSEric Dumazet val = READ_ONCE(tp->linger2); 39841da177e4SLinus Torvalds if (val >= 0) 398539e24435SKuniyuki Iwashima val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ; 39861da177e4SLinus Torvalds break; 39871da177e4SLinus Torvalds case TCP_DEFER_ACCEPT: 3988ae488c74SEric Dumazet val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept); 3989ae488c74SEric Dumazet val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ, 3990ae488c74SEric Dumazet TCP_RTO_MAX / HZ); 39911da177e4SLinus Torvalds break; 39921da177e4SLinus Torvalds case TCP_WINDOW_CLAMP: 39931da177e4SLinus Torvalds val = tp->window_clamp; 39941da177e4SLinus Torvalds break; 39951da177e4SLinus Torvalds case TCP_INFO: { 39961da177e4SLinus Torvalds struct tcp_info info; 39971da177e4SLinus Torvalds 399834704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 39991da177e4SLinus Torvalds return -EFAULT; 40001da177e4SLinus Torvalds 40011da177e4SLinus Torvalds tcp_get_info(sk, &info); 40021da177e4SLinus Torvalds 40031da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(info)); 400434704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 40051da177e4SLinus Torvalds return -EFAULT; 400634704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len)) 40071da177e4SLinus Torvalds return -EFAULT; 40081da177e4SLinus Torvalds return 0; 40091da177e4SLinus Torvalds } 40106e9250f5SEric Dumazet case TCP_CC_INFO: { 40116e9250f5SEric Dumazet const struct tcp_congestion_ops *ca_ops; 40126e9250f5SEric Dumazet union tcp_cc_info info; 40136e9250f5SEric Dumazet size_t sz = 0; 40146e9250f5SEric Dumazet int attr; 40156e9250f5SEric Dumazet 401634704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 40176e9250f5SEric Dumazet return -EFAULT; 40186e9250f5SEric Dumazet 40196e9250f5SEric Dumazet ca_ops = icsk->icsk_ca_ops; 40206e9250f5SEric Dumazet if (ca_ops && ca_ops->get_info) 40216e9250f5SEric Dumazet sz = ca_ops->get_info(sk, ~0U, &attr, &info); 40226e9250f5SEric Dumazet 40236e9250f5SEric Dumazet len = min_t(unsigned int, len, sz); 402434704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 40256e9250f5SEric Dumazet return -EFAULT; 
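		/* Same contract as TCP_INFO above: the kernel copies back
		 * min(len, sz) bytes and shrinks *optlen to match. A minimal
		 * userspace sketch for the analogous TCP_INFO query, assuming
		 * a connected fd:
		 *
		 *	struct tcp_info ti;
		 *	socklen_t len = sizeof(ti);
		 *
		 *	if (!getsockopt(fd, SOL_TCP, TCP_INFO, &ti, &len))
		 *		printf("rtt %u us cwnd %u\n",
		 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd);
		 */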
402634704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len)) 40276e9250f5SEric Dumazet return -EFAULT; 40286e9250f5SEric Dumazet return 0; 40296e9250f5SEric Dumazet } 40301da177e4SLinus Torvalds case TCP_QUICKACK: 403131954cd8SWei Wang val = !inet_csk_in_pingpong_mode(sk); 40321da177e4SLinus Torvalds break; 40335f8ef48dSStephen Hemminger 40345f8ef48dSStephen Hemminger case TCP_CONGESTION: 403534704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 40365f8ef48dSStephen Hemminger return -EFAULT; 40375f8ef48dSStephen Hemminger len = min_t(unsigned int, len, TCP_CA_NAME_MAX); 403834704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 40395f8ef48dSStephen Hemminger return -EFAULT; 404034704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len)) 40415f8ef48dSStephen Hemminger return -EFAULT; 40425f8ef48dSStephen Hemminger return 0; 4043e56fb50fSWilliam Allen Simpson 4044734942ccSDave Watson case TCP_ULP: 404534704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 4046734942ccSDave Watson return -EFAULT; 4047734942ccSDave Watson len = min_t(unsigned int, len, TCP_ULP_NAME_MAX); 4048d97af30fSDave Watson if (!icsk->icsk_ulp_ops) { 404934704ef0SMartin KaFai Lau len = 0; 405034704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4051d97af30fSDave Watson return -EFAULT; 4052d97af30fSDave Watson return 0; 4053d97af30fSDave Watson } 405434704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4055734942ccSDave Watson return -EFAULT; 405634704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len)) 4057734942ccSDave Watson return -EFAULT; 4058734942ccSDave Watson return 0; 4059734942ccSDave Watson 40601fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: { 4061f19008e6SJason Baron u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)]; 4062f19008e6SJason Baron unsigned int key_len; 40631fba70e5SYuchung Cheng 406434704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 40651fba70e5SYuchung Cheng return -EFAULT; 40661fba70e5SYuchung Cheng 4067f19008e6SJason Baron key_len = tcp_fastopen_get_cipher(net, icsk, key) * 40680f1ce023SJason Baron TCP_FASTOPEN_KEY_LENGTH; 40690f1ce023SJason Baron len = min_t(unsigned int, len, key_len); 407034704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 40711fba70e5SYuchung Cheng return -EFAULT; 407234704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, key, len)) 40731fba70e5SYuchung Cheng return -EFAULT; 40741fba70e5SYuchung Cheng return 0; 40751fba70e5SYuchung Cheng } 40763c0fef0bSJosh Hunt case TCP_THIN_LINEAR_TIMEOUTS: 40773c0fef0bSJosh Hunt val = tp->thin_lto; 40783c0fef0bSJosh Hunt break; 40794a7f6009SYuchung Cheng 40803c0fef0bSJosh Hunt case TCP_THIN_DUPACK: 40814a7f6009SYuchung Cheng val = 0; 40823c0fef0bSJosh Hunt break; 4083dca43c75SJerry Chu 4084ee995283SPavel Emelyanov case TCP_REPAIR: 4085ee995283SPavel Emelyanov val = tp->repair; 4086ee995283SPavel Emelyanov break; 4087ee995283SPavel Emelyanov 4088ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE: 4089ee995283SPavel Emelyanov if (tp->repair) 4090ee995283SPavel Emelyanov val = tp->repair_queue; 4091ee995283SPavel Emelyanov else 4092ee995283SPavel Emelyanov return -EINVAL; 4093ee995283SPavel Emelyanov break; 4094ee995283SPavel Emelyanov 4095b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: { 4096b1ed4c4fSAndrey Vagin struct tcp_repair_window opt; 4097b1ed4c4fSAndrey Vagin 409834704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, 
sizeof(int))) 4099b1ed4c4fSAndrey Vagin return -EFAULT; 4100b1ed4c4fSAndrey Vagin 4101b1ed4c4fSAndrey Vagin if (len != sizeof(opt)) 4102b1ed4c4fSAndrey Vagin return -EINVAL; 4103b1ed4c4fSAndrey Vagin 4104b1ed4c4fSAndrey Vagin if (!tp->repair) 4105b1ed4c4fSAndrey Vagin return -EPERM; 4106b1ed4c4fSAndrey Vagin 4107b1ed4c4fSAndrey Vagin opt.snd_wl1 = tp->snd_wl1; 4108b1ed4c4fSAndrey Vagin opt.snd_wnd = tp->snd_wnd; 4109b1ed4c4fSAndrey Vagin opt.max_window = tp->max_window; 4110b1ed4c4fSAndrey Vagin opt.rcv_wnd = tp->rcv_wnd; 4111b1ed4c4fSAndrey Vagin opt.rcv_wup = tp->rcv_wup; 4112b1ed4c4fSAndrey Vagin 411334704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &opt, len)) 4114b1ed4c4fSAndrey Vagin return -EFAULT; 4115b1ed4c4fSAndrey Vagin return 0; 4116b1ed4c4fSAndrey Vagin } 4117ee995283SPavel Emelyanov case TCP_QUEUE_SEQ: 4118ee995283SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE) 4119ee995283SPavel Emelyanov val = tp->write_seq; 4120ee995283SPavel Emelyanov else if (tp->repair_queue == TCP_RECV_QUEUE) 4121ee995283SPavel Emelyanov val = tp->rcv_nxt; 4122ee995283SPavel Emelyanov else 4123ee995283SPavel Emelyanov return -EINVAL; 4124ee995283SPavel Emelyanov break; 4125ee995283SPavel Emelyanov 4126dca43c75SJerry Chu case TCP_USER_TIMEOUT: 412726023e91SEric Dumazet val = READ_ONCE(icsk->icsk_user_timeout); 4128dca43c75SJerry Chu break; 41291536e285SKenjiro Nakayama 41301536e285SKenjiro Nakayama case TCP_FASTOPEN: 413170f360ddSEric Dumazet val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen); 41321536e285SKenjiro Nakayama break; 41331536e285SKenjiro Nakayama 413419f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT: 413519f6d3f3SWei Wang val = tp->fastopen_connect; 413619f6d3f3SWei Wang break; 413719f6d3f3SWei Wang 413871c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE: 413971c02379SChristoph Paasch val = tp->fastopen_no_cookie; 414071c02379SChristoph Paasch break; 414171c02379SChristoph Paasch 4142a842fe14SEric Dumazet case TCP_TX_DELAY: 4143348b81b6SEric Dumazet val = READ_ONCE(tp->tcp_tx_delay); 4144a842fe14SEric Dumazet break; 4145a842fe14SEric Dumazet 414693be6ce0SAndrey Vagin case TCP_TIMESTAMP: 4147dd23c9f1SEric Dumazet val = tcp_time_stamp_raw() + READ_ONCE(tp->tsoffset); 414893be6ce0SAndrey Vagin break; 4149c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT: 41501aeb87bcSEric Dumazet val = READ_ONCE(tp->notsent_lowat); 4151c9bee3b7SEric Dumazet break; 4152b75eba76SSoheil Hassas Yeganeh case TCP_INQ: 4153b75eba76SSoheil Hassas Yeganeh val = tp->recvmsg_inq; 4154b75eba76SSoheil Hassas Yeganeh break; 4155cd8ae852SEric Dumazet case TCP_SAVE_SYN: 4156cd8ae852SEric Dumazet val = tp->save_syn; 4157cd8ae852SEric Dumazet break; 4158cd8ae852SEric Dumazet case TCP_SAVED_SYN: { 415934704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 4160cd8ae852SEric Dumazet return -EFAULT; 4161cd8ae852SEric Dumazet 4162d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk); 4163cd8ae852SEric Dumazet if (tp->saved_syn) { 416470a217f1SMartin KaFai Lau if (len < tcp_saved_syn_len(tp->saved_syn)) { 416534704ef0SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn); 416634704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) { 4167d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4168aea0929eSEric B Munson return -EFAULT; 4169aea0929eSEric B Munson } 4170d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4171aea0929eSEric B Munson return -EINVAL; 4172aea0929eSEric B Munson } 417370a217f1SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn); 417434704ef0SMartin KaFai Lau if 
(copy_to_sockptr(optlen, &len, sizeof(int))) { 4175d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4176cd8ae852SEric Dumazet return -EFAULT; 4177cd8ae852SEric Dumazet } 417834704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, tp->saved_syn->data, len)) { 4179d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4180cd8ae852SEric Dumazet return -EFAULT; 4181cd8ae852SEric Dumazet } 4182cd8ae852SEric Dumazet tcp_saved_syn_free(tp); 4183d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4184cd8ae852SEric Dumazet } else { 4185d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 4186cd8ae852SEric Dumazet len = 0; 418734704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 4188cd8ae852SEric Dumazet return -EFAULT; 4189cd8ae852SEric Dumazet } 4190cd8ae852SEric Dumazet return 0; 4191cd8ae852SEric Dumazet } 419205255b82SEric Dumazet #ifdef CONFIG_MMU 419305255b82SEric Dumazet case TCP_ZEROCOPY_RECEIVE: { 41947eeba170SArjun Roy struct scm_timestamping_internal tss; 4195e0fecb28SArjun Roy struct tcp_zerocopy_receive zc = {}; 419605255b82SEric Dumazet int err; 419705255b82SEric Dumazet 419834704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int))) 419905255b82SEric Dumazet return -EFAULT; 42002107d45fSArjun Roy if (len < 0 || 42012107d45fSArjun Roy len < offsetofend(struct tcp_zerocopy_receive, length)) 420205255b82SEric Dumazet return -EINVAL; 42033c5a2fd0SArjun Roy if (unlikely(len > sizeof(zc))) { 420434704ef0SMartin KaFai Lau err = check_zeroed_sockptr(optval, sizeof(zc), 42053c5a2fd0SArjun Roy len - sizeof(zc)); 42063c5a2fd0SArjun Roy if (err < 1) 42073c5a2fd0SArjun Roy return err == 0 ? -EINVAL : err; 4208c8856c05SArjun Roy len = sizeof(zc); 420934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 42100b7f41f6SArjun Roy return -EFAULT; 42110b7f41f6SArjun Roy } 421234704ef0SMartin KaFai Lau if (copy_from_sockptr(&zc, optval, len)) 421305255b82SEric Dumazet return -EFAULT; 42143c5a2fd0SArjun Roy if (zc.reserved) 42153c5a2fd0SArjun Roy return -EINVAL; 42163c5a2fd0SArjun Roy if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS)) 42173c5a2fd0SArjun Roy return -EINVAL; 4218d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk); 42197eeba170SArjun Roy err = tcp_zerocopy_receive(sk, &zc, &tss); 42209cacf81fSStanislav Fomichev err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname, 42219cacf81fSStanislav Fomichev &zc, &len, err); 4222d51bbff2SMartin KaFai Lau sockopt_release_sock(sk); 42237eeba170SArjun Roy if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags)) 42247eeba170SArjun Roy goto zerocopy_rcv_cmsg; 4225c8856c05SArjun Roy switch (len) { 42267eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_flags): 42277eeba170SArjun Roy goto zerocopy_rcv_cmsg; 42287eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_controllen): 42297eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_control): 42307eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, flags): 42317eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_len): 42327eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_address): 423333946518SArjun Roy case offsetofend(struct tcp_zerocopy_receive, err): 423433946518SArjun Roy goto zerocopy_rcv_sk_err; 4235c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, inq): 4236c8856c05SArjun Roy goto zerocopy_rcv_inq; 4237c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, length): 4238c8856c05SArjun Roy default: 4239c8856c05SArjun Roy 
goto zerocopy_rcv_out; 4240c8856c05SArjun Roy } 42417eeba170SArjun Roy zerocopy_rcv_cmsg: 42427eeba170SArjun Roy if (zc.msg_flags & TCP_CMSG_TS) 42437eeba170SArjun Roy tcp_zc_finalize_rx_tstamp(sk, &zc, &tss); 42447eeba170SArjun Roy else 42457eeba170SArjun Roy zc.msg_flags = 0; 424633946518SArjun Roy zerocopy_rcv_sk_err: 424733946518SArjun Roy if (!err) 424833946518SArjun Roy zc.err = sock_error(sk); 4249c8856c05SArjun Roy zerocopy_rcv_inq: 4250c8856c05SArjun Roy zc.inq = tcp_inq_hint(sk); 4251c8856c05SArjun Roy zerocopy_rcv_out: 425234704ef0SMartin KaFai Lau if (!err && copy_to_sockptr(optval, &zc, len)) 425305255b82SEric Dumazet err = -EFAULT; 425405255b82SEric Dumazet return err; 425505255b82SEric Dumazet } 425605255b82SEric Dumazet #endif 42571da177e4SLinus Torvalds default: 42581da177e4SLinus Torvalds return -ENOPROTOOPT; 42593ff50b79SStephen Hemminger } 42601da177e4SLinus Torvalds 426134704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) 42621da177e4SLinus Torvalds return -EFAULT; 426334704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &val, len)) 42641da177e4SLinus Torvalds return -EFAULT; 42651da177e4SLinus Torvalds return 0; 42661da177e4SLinus Torvalds } 42671da177e4SLinus Torvalds 42689cacf81fSStanislav Fomichev bool tcp_bpf_bypass_getsockopt(int level, int optname) 42699cacf81fSStanislav Fomichev { 42709cacf81fSStanislav Fomichev /* TCP do_tcp_getsockopt has optimized getsockopt implementation 42719cacf81fSStanislav Fomichev * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE. 42729cacf81fSStanislav Fomichev */ 42739cacf81fSStanislav Fomichev if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE) 42749cacf81fSStanislav Fomichev return true; 42759cacf81fSStanislav Fomichev 42769cacf81fSStanislav Fomichev return false; 42779cacf81fSStanislav Fomichev } 42789cacf81fSStanislav Fomichev EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt); 42799cacf81fSStanislav Fomichev 42803fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, 42813fdadf7dSDmitry Mishin int __user *optlen) 42823fdadf7dSDmitry Mishin { 42833fdadf7dSDmitry Mishin struct inet_connection_sock *icsk = inet_csk(sk); 42843fdadf7dSDmitry Mishin 42853fdadf7dSDmitry Mishin if (level != SOL_TCP) 4286f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */ 4287f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname, 42883fdadf7dSDmitry Mishin optval, optlen); 428934704ef0SMartin KaFai Lau return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval), 429034704ef0SMartin KaFai Lau USER_SOCKPTR(optlen)); 42913fdadf7dSDmitry Mishin } 42924bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_getsockopt); 42933fdadf7dSDmitry Mishin 4294cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 4295349ce993SEric Dumazet static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); 429671cea17eSEric Dumazet static DEFINE_MUTEX(tcp_md5sig_mutex); 4297349ce993SEric Dumazet static bool tcp_md5sig_pool_populated = false; 4298cfb6eeb4SYOSHIFUJI Hideaki 429971cea17eSEric Dumazet static void __tcp_alloc_md5sig_pool(void) 4300cfb6eeb4SYOSHIFUJI Hideaki { 4301cf80e0e4SHerbert Xu struct crypto_ahash *hash; 4302cfb6eeb4SYOSHIFUJI Hideaki int cpu; 4303cfb6eeb4SYOSHIFUJI Hideaki 4304cf80e0e4SHerbert Xu hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC); 43051eea84b7SInsu Yun if (IS_ERR(hash)) 4306349ce993SEric Dumazet return; 4307cf80e0e4SHerbert Xu 4308cf80e0e4SHerbert Xu for_each_possible_cpu(cpu) { 
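		/* For each possible CPU, reuse or allocate a scratch buffer
		 * sized for a pseudo-header block plus tcphdr, then attach one
		 * ahash request. Any allocation failure returns early and
		 * leaves the populated flag unset, so a later
		 * tcp_alloc_md5sig_pool() call retries under the mutex.
		 */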
430919689e38SEric Dumazet void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch; 4310cf80e0e4SHerbert Xu struct ahash_request *req; 4311cf80e0e4SHerbert Xu 431219689e38SEric Dumazet if (!scratch) { 431319689e38SEric Dumazet scratch = kmalloc_node(sizeof(union tcp_md5sum_block) + 431419689e38SEric Dumazet sizeof(struct tcphdr), 431519689e38SEric Dumazet GFP_KERNEL, 431619689e38SEric Dumazet cpu_to_node(cpu)); 431719689e38SEric Dumazet if (!scratch) 431819689e38SEric Dumazet return; 431919689e38SEric Dumazet per_cpu(tcp_md5sig_pool, cpu).scratch = scratch; 432019689e38SEric Dumazet } 4321cf80e0e4SHerbert Xu if (per_cpu(tcp_md5sig_pool, cpu).md5_req) 4322cf80e0e4SHerbert Xu continue; 4323cf80e0e4SHerbert Xu 4324cf80e0e4SHerbert Xu req = ahash_request_alloc(hash, GFP_KERNEL); 4325cf80e0e4SHerbert Xu if (!req) 4326cf80e0e4SHerbert Xu return; 4327cf80e0e4SHerbert Xu 4328cf80e0e4SHerbert Xu ahash_request_set_callback(req, 0, NULL, NULL); 4329cf80e0e4SHerbert Xu 4330cf80e0e4SHerbert Xu per_cpu(tcp_md5sig_pool, cpu).md5_req = req; 4331349ce993SEric Dumazet } 4332349ce993SEric Dumazet /* before setting tcp_md5sig_pool_populated, we must commit all writes 4333349ce993SEric Dumazet * to memory. See smp_rmb() in tcp_get_md5sig_pool() 433471cea17eSEric Dumazet */ 433571cea17eSEric Dumazet smp_wmb(); 4336aacd467cSEric Dumazet /* Paired with READ_ONCE() from tcp_alloc_md5sig_pool() 4337aacd467cSEric Dumazet * and tcp_get_md5sig_pool(). 4338aacd467cSEric Dumazet */ 4339aacd467cSEric Dumazet WRITE_ONCE(tcp_md5sig_pool_populated, true); 4340cfb6eeb4SYOSHIFUJI Hideaki } 4341cfb6eeb4SYOSHIFUJI Hideaki 434271cea17eSEric Dumazet bool tcp_alloc_md5sig_pool(void) 4343cfb6eeb4SYOSHIFUJI Hideaki { 4344aacd467cSEric Dumazet /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ 4345aacd467cSEric Dumazet if (unlikely(!READ_ONCE(tcp_md5sig_pool_populated))) { 434671cea17eSEric Dumazet mutex_lock(&tcp_md5sig_mutex); 4347cfb6eeb4SYOSHIFUJI Hideaki 4348459837b5SDmitry Safonov if (!tcp_md5sig_pool_populated) 434971cea17eSEric Dumazet __tcp_alloc_md5sig_pool(); 4350cfb6eeb4SYOSHIFUJI Hideaki 435171cea17eSEric Dumazet mutex_unlock(&tcp_md5sig_mutex); 4352cfb6eeb4SYOSHIFUJI Hideaki } 4353aacd467cSEric Dumazet /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ 4354aacd467cSEric Dumazet return READ_ONCE(tcp_md5sig_pool_populated); 4355cfb6eeb4SYOSHIFUJI Hideaki } 4356cfb6eeb4SYOSHIFUJI Hideaki EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 4357cfb6eeb4SYOSHIFUJI Hideaki 435835790c04SEric Dumazet 435935790c04SEric Dumazet /** 436035790c04SEric Dumazet * tcp_get_md5sig_pool - get md5sig_pool for this user 436135790c04SEric Dumazet * 436235790c04SEric Dumazet * We use percpu structure, so if we succeed, we exit with preemption 436335790c04SEric Dumazet * and BH disabled, to make sure another thread or softirq handling 436435790c04SEric Dumazet * wont try to get same context. 
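 * On success the caller runs with BH disabled and should drop that with
 * tcp_put_md5sig_pool() once finished with the per-cpu context.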
436535790c04SEric Dumazet */ 436635790c04SEric Dumazet struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 4367cfb6eeb4SYOSHIFUJI Hideaki { 436835790c04SEric Dumazet local_bh_disable(); 436935790c04SEric Dumazet 4370aacd467cSEric Dumazet /* Paired with WRITE_ONCE() from __tcp_alloc_md5sig_pool() */ 4371aacd467cSEric Dumazet if (READ_ONCE(tcp_md5sig_pool_populated)) { 4372349ce993SEric Dumazet /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ 4373349ce993SEric Dumazet smp_rmb(); 4374349ce993SEric Dumazet return this_cpu_ptr(&tcp_md5sig_pool); 4375349ce993SEric Dumazet } 437635790c04SEric Dumazet local_bh_enable(); 437735790c04SEric Dumazet return NULL; 4378cfb6eeb4SYOSHIFUJI Hideaki } 437935790c04SEric Dumazet EXPORT_SYMBOL(tcp_get_md5sig_pool); 4380cfb6eeb4SYOSHIFUJI Hideaki 438149a72dfbSAdam Langley int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, 4382cf533ea5SEric Dumazet const struct sk_buff *skb, unsigned int header_len) 438349a72dfbSAdam Langley { 438449a72dfbSAdam Langley struct scatterlist sg; 438549a72dfbSAdam Langley const struct tcphdr *tp = tcp_hdr(skb); 4386cf80e0e4SHerbert Xu struct ahash_request *req = hp->md5_req; 438795c96174SEric Dumazet unsigned int i; 438895c96174SEric Dumazet const unsigned int head_data_len = skb_headlen(skb) > header_len ? 438949a72dfbSAdam Langley skb_headlen(skb) - header_len : 0; 439049a72dfbSAdam Langley const struct skb_shared_info *shi = skb_shinfo(skb); 4391d7fd1b57SEric Dumazet struct sk_buff *frag_iter; 439249a72dfbSAdam Langley 439349a72dfbSAdam Langley sg_init_table(&sg, 1); 439449a72dfbSAdam Langley 439549a72dfbSAdam Langley sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); 4396cf80e0e4SHerbert Xu ahash_request_set_crypt(req, &sg, NULL, head_data_len); 4397cf80e0e4SHerbert Xu if (crypto_ahash_update(req)) 439849a72dfbSAdam Langley return 1; 439949a72dfbSAdam Langley 440049a72dfbSAdam Langley for (i = 0; i < shi->nr_frags; ++i) { 4401d8e18a51SMatthew Wilcox (Oracle) const skb_frag_t *f = &shi->frags[i]; 4402b54c9d5bSJonathan Lemon unsigned int offset = skb_frag_off(f); 440354d27fcbSEric Dumazet struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT); 440454d27fcbSEric Dumazet 440554d27fcbSEric Dumazet sg_set_page(&sg, page, skb_frag_size(f), 440654d27fcbSEric Dumazet offset_in_page(offset)); 4407cf80e0e4SHerbert Xu ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f)); 4408cf80e0e4SHerbert Xu if (crypto_ahash_update(req)) 440949a72dfbSAdam Langley return 1; 441049a72dfbSAdam Langley } 441149a72dfbSAdam Langley 4412d7fd1b57SEric Dumazet skb_walk_frags(skb, frag_iter) 4413d7fd1b57SEric Dumazet if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) 4414d7fd1b57SEric Dumazet return 1; 4415d7fd1b57SEric Dumazet 441649a72dfbSAdam Langley return 0; 441749a72dfbSAdam Langley } 441849a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_skb_data); 441949a72dfbSAdam Langley 4420cf533ea5SEric Dumazet int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) 442149a72dfbSAdam Langley { 4422e6ced831SEric Dumazet u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */ 442349a72dfbSAdam Langley struct scatterlist sg; 442449a72dfbSAdam Langley 44256a2febecSEric Dumazet sg_init_one(&sg, key->key, keylen); 44266a2febecSEric Dumazet ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen); 4427e6ced831SEric Dumazet 4428e6ced831SEric Dumazet /* We use data_race() because tcp_md5_do_add() might change key->key under us */ 4429e6ced831SEric Dumazet return 
data_race(crypto_ahash_update(hp->md5_req)); 443049a72dfbSAdam Langley } 443149a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_key); 443249a72dfbSAdam Langley 44337bbb765bSDmitry Safonov /* Called with rcu_read_lock() */ 44341330b6efSJakub Kicinski enum skb_drop_reason 44351330b6efSJakub Kicinski tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, 44367bbb765bSDmitry Safonov const void *saddr, const void *daddr, 44377bbb765bSDmitry Safonov int family, int dif, int sdif) 44387bbb765bSDmitry Safonov { 44397bbb765bSDmitry Safonov /* 44407bbb765bSDmitry Safonov * This gets called for each TCP segment that arrives 44417bbb765bSDmitry Safonov * so we want to be efficient. 44427bbb765bSDmitry Safonov * We have 3 drop cases: 44437bbb765bSDmitry Safonov * o No MD5 hash and one expected. 44447bbb765bSDmitry Safonov * o MD5 hash and we're not expecting one. 44457bbb765bSDmitry Safonov * o MD5 hash and its wrong. 44467bbb765bSDmitry Safonov */ 44477bbb765bSDmitry Safonov const __u8 *hash_location = NULL; 44487bbb765bSDmitry Safonov struct tcp_md5sig_key *hash_expected; 44497bbb765bSDmitry Safonov const struct tcphdr *th = tcp_hdr(skb); 4450e9d9da91SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 44517bbb765bSDmitry Safonov int genhash, l3index; 44527bbb765bSDmitry Safonov u8 newhash[16]; 44537bbb765bSDmitry Safonov 44547bbb765bSDmitry Safonov /* sdif set, means packet ingressed via a device 44557bbb765bSDmitry Safonov * in an L3 domain and dif is set to the l3mdev 44567bbb765bSDmitry Safonov */ 44577bbb765bSDmitry Safonov l3index = sdif ? dif : 0; 44587bbb765bSDmitry Safonov 44597bbb765bSDmitry Safonov hash_expected = tcp_md5_do_lookup(sk, l3index, saddr, family); 44607bbb765bSDmitry Safonov hash_location = tcp_parse_md5sig_option(th); 44617bbb765bSDmitry Safonov 44627bbb765bSDmitry Safonov /* We've parsed the options - do we have a hash? */ 44637bbb765bSDmitry Safonov if (!hash_expected && !hash_location) 44641330b6efSJakub Kicinski return SKB_NOT_DROPPED_YET; 44657bbb765bSDmitry Safonov 44667bbb765bSDmitry Safonov if (hash_expected && !hash_location) { 44677bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); 44681330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5NOTFOUND; 44697bbb765bSDmitry Safonov } 44707bbb765bSDmitry Safonov 44717bbb765bSDmitry Safonov if (!hash_expected && hash_location) { 44727bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); 44731330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5UNEXPECTED; 44747bbb765bSDmitry Safonov } 44757bbb765bSDmitry Safonov 4476e62d2e11SEric Dumazet /* Check the signature. 4477e62d2e11SEric Dumazet * To support dual stack listeners, we need to handle 4478e62d2e11SEric Dumazet * IPv4-mapped case. 
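 * An AF_INET6 socket can carry an IPv4-mapped peer, whose segments must
 * be hashed with the IPv4 routine even though the socket is v6.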
4479e62d2e11SEric Dumazet */ 4480e62d2e11SEric Dumazet if (family == AF_INET) 4481e62d2e11SEric Dumazet genhash = tcp_v4_md5_hash_skb(newhash, 4482e62d2e11SEric Dumazet hash_expected, 4483e62d2e11SEric Dumazet NULL, skb); 4484e62d2e11SEric Dumazet else 4485e62d2e11SEric Dumazet genhash = tp->af_specific->calc_md5_hash(newhash, 4486e62d2e11SEric Dumazet hash_expected, 44877bbb765bSDmitry Safonov NULL, skb); 44887bbb765bSDmitry Safonov 44897bbb765bSDmitry Safonov if (genhash || memcmp(hash_location, newhash, 16) != 0) { 44907bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); 44917bbb765bSDmitry Safonov if (family == AF_INET) { 44927bbb765bSDmitry Safonov net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n", 44937bbb765bSDmitry Safonov saddr, ntohs(th->source), 44947bbb765bSDmitry Safonov daddr, ntohs(th->dest), 44957bbb765bSDmitry Safonov genhash ? " tcp_v4_calc_md5_hash failed" 44967bbb765bSDmitry Safonov : "", l3index); 44977bbb765bSDmitry Safonov } else { 44987bbb765bSDmitry Safonov net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n", 44997bbb765bSDmitry Safonov genhash ? "failed" : "mismatch", 45007bbb765bSDmitry Safonov saddr, ntohs(th->source), 45017bbb765bSDmitry Safonov daddr, ntohs(th->dest), l3index); 45027bbb765bSDmitry Safonov } 45031330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5FAILURE; 45047bbb765bSDmitry Safonov } 45051330b6efSJakub Kicinski return SKB_NOT_DROPPED_YET; 45067bbb765bSDmitry Safonov } 45077bbb765bSDmitry Safonov EXPORT_SYMBOL(tcp_inbound_md5_hash); 45087bbb765bSDmitry Safonov 4509cfb6eeb4SYOSHIFUJI Hideaki #endif 4510cfb6eeb4SYOSHIFUJI Hideaki 45114ac02babSAndi Kleen void tcp_done(struct sock *sk) 45124ac02babSAndi Kleen { 4513d983ea6fSEric Dumazet struct request_sock *req; 45148336886fSJerry Chu 4515cab209e5SEric Dumazet /* We might be called with a new socket, after 4516cab209e5SEric Dumazet * inet_csk_prepare_forced_close() has been called 4517cab209e5SEric Dumazet * so we can not use lockdep_sock_is_held(sk) 4518cab209e5SEric Dumazet */ 4519cab209e5SEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1); 45204ac02babSAndi Kleen 45214ac02babSAndi Kleen if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) 4522c10d9310SEric Dumazet TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); 45234ac02babSAndi Kleen 45244ac02babSAndi Kleen tcp_set_state(sk, TCP_CLOSE); 45254ac02babSAndi Kleen tcp_clear_xmit_timers(sk); 452600db4124SIan Morris if (req) 45278336886fSJerry Chu reqsk_fastopen_remove(sk, req, false); 45284ac02babSAndi Kleen 4529e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK); 45304ac02babSAndi Kleen 45314ac02babSAndi Kleen if (!sock_flag(sk, SOCK_DEAD)) 45324ac02babSAndi Kleen sk->sk_state_change(sk); 45334ac02babSAndi Kleen else 45344ac02babSAndi Kleen inet_csk_destroy_sock(sk); 45354ac02babSAndi Kleen } 45364ac02babSAndi Kleen EXPORT_SYMBOL_GPL(tcp_done); 45374ac02babSAndi Kleen 4538c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err) 4539c1e64e29SLorenzo Colitti { 4540af9784d0SEric Dumazet int state = inet_sk_state_load(sk); 4541af9784d0SEric Dumazet 4542af9784d0SEric Dumazet if (state == TCP_NEW_SYN_RECV) { 454307f6f4a3SEric Dumazet struct request_sock *req = inet_reqsk(sk); 454407f6f4a3SEric Dumazet 454507f6f4a3SEric Dumazet local_bh_disable(); 4546acc2cf4eSLorenzo Colitti inet_csk_reqsk_queue_drop(req->rsk_listener, req); 454707f6f4a3SEric Dumazet local_bh_enable(); 454807f6f4a3SEric Dumazet return 0; 
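		/* For NEW_SYN_RECV, dropping the request from the listener's
		 * queue above is the entire abort; there is no full socket
		 * state to reset. tcp_abort() itself is reached from the
		 * inet_diag SOCK_DESTROY command (e.g. ss -K) and from BPF
		 * socket-iterator contexts.
		 */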
int tcp_abort(struct sock *sk, int err)
{
	int state = inet_sk_state_load(sk);

	if (state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);

		local_bh_disable();
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		local_bh_enable();
		return 0;
	}
	if (state == TCP_TIME_WAIT) {
		struct inet_timewait_sock *tw = inet_twsk(sk);

		refcount_inc(&tw->tw_refcnt);
		local_bh_disable();
		inet_twsk_deschedule_put(tw);
		local_bh_enable();
		return 0;
	}

	/* BPF context ensures sock locking. */
	if (!has_current_bpf_ctx())
		/* Don't race with userspace socket closes such as tcp_close. */
		lock_sock(sk);

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);
		inet_csk_listen_stop(sk);
	}

	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
	local_bh_disable();
	bh_lock_sock(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		WRITE_ONCE(sk->sk_err, err);
		/* This barrier is coupled with smp_rmb() in tcp_poll() */
		smp_wmb();
		sk_error_report(sk);
		if (tcp_need_reset(sk->sk_state))
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
	}

	bh_unlock_sock(sk);
	local_bh_enable();
	tcp_write_queue_purge(sk);
	if (!has_current_bpf_ctx())
		release_sock(sk);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_abort);
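/* A minimal sketch of how a destroyer such as the inet_diag
 * SOCK_DESTROY handler might drive tcp_abort() once it has looked a
 * socket up; the function name and permission check shown here are
 * illustrative placeholders, not the real diag code:
 *
 *	static int example_destroy(struct sock *sk)
 *	{
 *		if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
 *			return -EPERM;
 *		return tcp_abort(sk, ECONNABORTED);
 *	}
 *
 * The err passed in (ECONNABORTED above) is what the socket's owner
 * then observes as sk_err.
 */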
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &thash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("thash_entries=", set_thash_entries);

static void __init tcp_init_mem(void)
{
	unsigned long limit = nr_free_buffer_pages() / 16;

	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
}

void __init tcp_init(void)
{
	int max_rshare, max_wshare, cnt;
	unsigned long limit;
	unsigned int i;

	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);

	timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);

	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
			    thash_entries, 21,  /* one slot per 2 MB */
			    0, 64 * 1024);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				  SLAB_ACCOUNT,
				  NULL);
	tcp_hashinfo.bind2_bucket_cachep =
		kmem_cache_create("tcp_bind2_bucket",
				  sizeof(struct inet_bind2_bucket), 0,
				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
				  SLAB_ACCOUNT,
				  NULL);
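	/* Two slab caches back the bind tables: "tcp_bind_bucket" is
	 * keyed by port alone, while "tcp_bind2_bucket" is additionally
	 * keyed by the bound address, which (as the bhash/bhash2 setup
	 * further down suggests) lets bind(2) conflict checks skip
	 * unrelated addresses that merely share a port.
	 */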
	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					17, /* one slot per 128 KB of memory */
					0,
					NULL,
					&tcp_hashinfo.ehash_mask,
					0,
					thash_entries ? 0 : 512 * 1024);
	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					2 * sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_mask + 1,
					17, /* one slot per 128 KB of memory */
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					0,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
	tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
		spin_lock_init(&tcp_hashinfo.bhash2[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain);
	}

	tcp_hashinfo.pernet = false;

	cnt = tcp_hashinfo.ehash_mask + 1;
	sysctl_tcp_max_orphans = cnt / 2;

	tcp_init_mem();
	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
	max_wshare = min(4UL*1024*1024, limit);
	max_rshare = min(6UL*1024*1024, limit);

	init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE;
	init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
	init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);

	init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE;
	init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
	init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);

	pr_info("Hash tables configured (established %u bind %u)\n",
		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);

	tcp_v4_init();
	tcp_metrics_init();
	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
	tcp_tasklet_init();
	mptcp_init();
}
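/* Usage note: the size of the established hash table can be forced at
 * boot via the thash_entries parameter parsed by set_thash_entries()
 * above, e.g. on the kernel command line:
 *
 *	thash_entries=524288
 *
 * With thash_entries unset, alloc_large_system_hash() scales the table
 * to available memory (one slot per 128 KB), capped at 512 * 1024
 * entries as the high_limit argument above shows.
 */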