// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick :	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick :	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
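
/* Illustrative walk through the map above: an active close goes
 * ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE, while
 * the peer performing the passive close goes ESTABLISHED -> CLOSE_WAIT
 * -> LAST_ACK -> CLOSE (per RFC 793).
 */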

#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>
#include <linux/errqueue.h>
#include <linux/static_key.h>
#include <linux/btf.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include <net/proto_memory.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/rstreason.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>
#include <net/hotdata.h>
#include <trace/events/tcp.h>
#include <net/rps.h>

#include "../core/devmem.h"

/* Track pending CMSGs. */
enum {
	TCP_CMSG_INQ = 1,
	TCP_CMSG_TS = 2
};

DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);

DEFINE_PER_CPU(u32, tcp_tw_isn);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn);

long sysctl_tcp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_mem);

atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);
DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);

#if IS_ENABLED(CONFIG_SMC)
DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
EXPORT_SYMBOL(tcp_have_smc);
#endif

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
unsigned long tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL_GPL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (READ_ONCE(tcp_memory_pressure))
		return;
	val = jiffies;

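	/* tcp_memory_pressure holds the jiffies timestamp at which pressure
	 * began; 0 means "no pressure".  If jiffies happens to be 0 right
	 * now, the decrement below turns it into ULONG_MAX so the flag
	 * still reads as set.
	 */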
	if (!val)
		val--;
	if (!cmpxchg(&tcp_memory_pressure, 0, val))
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
}
EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);

void tcp_leave_memory_pressure(struct sock *sk)
{
	unsigned long val;

	if (!READ_ONCE(tcp_memory_pressure))
		return;
	val = xchg(&tcp_memory_pressure, 0);
	if (val)
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
			      jiffies_to_msecs(jiffies - val));
}
EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}
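
/* Worked example (illustrative): with timeout = 1 s and rto_max = 120 s,
 * the cumulative backoff after each retransmit is 1, 3, 7, 15, ... seconds
 * (doubling until capped), so seconds = 10 maps to res = 4 retransmits.
 */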

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
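
/* Inverse of the above (illustrative): retrans = 4 with timeout = 1 s and
 * rto_max = 120 s accumulates 1 + 2 + 4 + 8 = 15 s of total backoff.
 */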

static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
{
	u32 rate = READ_ONCE(tp->rate_delivered);
	u32 intv = READ_ONCE(tp->rate_interval_us);
	u64 rate64 = 0;

	if (rate && intv) {
		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
		do_div(rate64, intv);
	}
	return rate64;
}
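
/* Illustrative numbers: rate_delivered = 10 packets over
 * rate_interval_us = 10000 us with mss_cache = 1448 gives
 * 10 * 1448 * 1000000 / 10000 = 1448000 bytes/sec (~1.4 MB/s).
 */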

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things set to zero explicitly by call to
 * sk_alloc() so need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int rto_min_us;

	tp->out_of_order_queue = RB_ROOT;
	sk->tcp_rtx_queue = RB_ROOT;
	tcp_init_xmit_timers(sk);
	INIT_LIST_HEAD(&tp->tsq_node);
	INIT_LIST_HEAD(&tp->tsorted_sent_queue);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us);
	icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us);
	icsk->icsk_delack_max = TCP_DELACK_MAX;
	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);

	/* There's a bubble in the pipe until at least the first ACK. */
	tp->app_limited = ~0U;
	tp->rate_app_limited = 1;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
	tcp_assign_congestion_control(sk);

	tp->tsoffset = 0;
	tp->rack.reo_wnd_steps = 1;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
	tcp_scaling_ratio_init(sk);

	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
	sk_sockets_allocated_inc(sk);
	xa_init_flags(&sk->sk_user_frags, XA_FLAGS_ALLOC1);
}
EXPORT_SYMBOL(tcp_init_sock);

static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (tsflags && skb) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
			tcb->txstamp_ack = 1;
		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
	}
}

static bool tcp_stream_is_readable(struct sock *sk, int target)
{
	if (tcp_epollin_ready(sk, target))
		return true;
	return sk_is_readable(sk);
}

/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	__poll_t mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);
	u8 shutdown;
	int state;

	sock_poll_wait(file, sock, wait);

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * EPOLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that EPOLLHUP is incompatible
	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1.  EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
		mask |= EPOLLHUP;
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;

	/* Connected or passive Fast Open socket? */
	if (state != TCP_SYN_SENT &&
	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);
		u16 urg_data = READ_ONCE(tp->urg_data);

		if (unlikely(urg_data) &&
		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
		    !sock_flag(sk, SOCK_URGINLINE))
			target++;

		if (tcp_stream_is_readable(sk, target))
			mask |= EPOLLIN | EPOLLRDNORM;

		if (!(shutdown & SEND_SHUTDOWN)) {
			if (__sk_stream_is_writeable(sk, 1)) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {  /* send SIGIO later */
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost. Memory barrier
				 * pairs with the input side.
				 */
				smp_mb__after_atomic();
				if (__sk_stream_is_writeable(sk, 1))
					mask |= EPOLLOUT | EPOLLWRNORM;
			}
		} else
			mask |= EPOLLOUT | EPOLLWRNORM;

		if (urg_data & TCP_URG_VALID)
			mask |= EPOLLPRI;
	} else if (state == TCP_SYN_SENT &&
		   inet_test_bit(DEFER_CONNECT, sk)) {
		/* Active TCP fastopen socket with defer_connect
		 * Return EPOLLOUT so application can call write()
		 * in order for kernel to generate SYN+data
		 */
		mask |= EPOLLOUT | EPOLLWRNORM;
	}
	/* This barrier is coupled with smp_wmb() in tcp_done_with_error() */
	smp_rmb();
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);
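
/* Illustrative userspace view (not built here): a peer that half-closed
 * its side yields EPOLLIN | EPOLLRDHUP from the mask built above, while a
 * fully closed socket adds EPOLLHUP, e.g.:
 *
 *	struct pollfd pfd = { .fd = tcp_fd, .events = POLLIN | POLLRDHUP };
 *	poll(&pfd, 1, -1);	// pfd.revents reflects the mask built above
 */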

int tcp_ioctl(struct sock *sk, int cmd, int *karg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;
	bool slow;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		slow = lock_sock_fast(sk);
		answ = tcp_inq(sk);
		unlock_sock_fast(sk, slow);
		break;
	case SIOCATMARK:
		answ = READ_ONCE(tp->urg_data) &&
		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = READ_ONCE(tp->write_seq) -
			       READ_ONCE(tp->snd_nxt);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	*karg = answ;
	return 0;
}
EXPORT_SYMBOL(tcp_ioctl);
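
/* Illustrative userspace usage (not built here): SIOCINQ reports unread
 * receive-queue bytes, SIOCOUTQ bytes queued but not yet acknowledged:
 *
 *	int inq, outq;
 *	ioctl(tcp_fd, SIOCINQ, &inq);
 *	ioctl(tcp_fd, SIOCOUTQ, &outq);
 */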

void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;

	tcp_slow_start_after_idle_check(sk);
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
				int size_goal)
{
	return skb->len < size_goal &&
	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
	       !tcp_rtx_queue_empty(sk) &&
	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
	       tcp_skb_can_collapse_to(skb);
}

void tcp_push(struct sock *sk, int flags, int mss_now,
	      int nonagle, int size_goal)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	skb = tcp_write_queue_tail(sk);
	if (!skb)
		return;
	if (!(flags & MSG_MORE) || forced_push(tp))
		tcp_mark_push(tp, skb);

	tcp_mark_urg(tp, flags);

	if (tcp_should_autocork(sk, skb, size_goal)) {

		/* avoid atomic op if TSQ_THROTTLED bit is already set */
		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
			smp_mb__after_atomic();
		}
		/* It is possible TX completion already happened
		 * before we set TSQ_THROTTLED.
		 */
		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
			return;
	}

	if (flags & MSG_MORE)
		nonagle = TCP_NAGLE_CORK;

	__tcp_push_pending_frames(sk, mss_now, nonagle);
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
			      min(rd_desc->count, len), tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			/* if __tcp_splice_read() got nothing while we have
			 * an skb in receive queue, we do not want to loop.
			 * This might happen with URG data.
			 */
			if (!skb_queue_empty(&sk->sk_receive_queue))
				break;
			ret = sk_wait_data(sk, &timeo, NULL);
			if (ret < 0)
				break;
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!tss.len || !timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);
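
/* Illustrative userspace usage (not built here): zero-copy relay of a TCP
 * stream into a file through a pipe, which ends up in tcp_splice_read():
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(tcp_fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *	if (n > 0)
 *		splice(pfd[0], NULL, file_fd, NULL, n, SPLICE_F_MOVE);
 */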

struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
	if (likely(skb)) {
		bool mem_scheduled;

		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
		if (force_schedule) {
			mem_scheduled = true;
			sk_forced_mem_schedule(sk, skb->truesize);
		} else {
			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
		}
		if (likely(mem_scheduled)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			skb->ip_summed = CHECKSUM_PARTIAL;
			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 new_size_goal, size_goal;

	if (!large_allowed)
		return mss_now;

	/* Note : tcp_tso_autosize() will eventually split this later */
	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);

	/* We try hard to avoid divides here */
	size_goal = tp->gso_segs * mss_now;
	if (unlikely(new_size_goal < size_goal ||
		     new_size_goal >= size_goal + mss_now)) {
		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
				     sk->sk_gso_max_segs);
		size_goal = tp->gso_segs * mss_now;
	}

	return max(size_goal, mss_now);
}
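
/* Illustrative numbers: with mss_now = 1448 and a cached tp->gso_segs of 45,
 * size_goal = 45 * 1448 = 65160 bytes; gso_segs is only recomputed (one
 * divide) when new_size_goal drifts outside [size_goal, size_goal + mss_now).
 */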
9330c54b85fSIlpo Järvinen
tcp_send_mss(struct sock * sk,int * size_goal,int flags)93435b2c321SMat Martineau int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
9350c54b85fSIlpo Järvinen {
9360c54b85fSIlpo Järvinen int mss_now;
9370c54b85fSIlpo Järvinen
9380c54b85fSIlpo Järvinen mss_now = tcp_current_mss(sk);
9390c54b85fSIlpo Järvinen *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
9400c54b85fSIlpo Järvinen
9410c54b85fSIlpo Järvinen return mss_now;
9420c54b85fSIlpo Järvinen }
9430c54b85fSIlpo Järvinen
94472bf4f17SEric Dumazet /* In some cases, sendmsg() could have added an skb to the write queue,
945dc97391eSDavid Howells * but failed adding payload on it. We need to remove it to consume less
946dc97391eSDavid Howells * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
94772bf4f17SEric Dumazet * epoll() users. Another reason is that tcp_write_xmit() does not like
94872bf4f17SEric Dumazet * finding an empty skb in the write queue.
949fdfc5c85SEric Dumazet */
tcp_remove_empty_skb(struct sock * sk)95027728ba8SEric Dumazet void tcp_remove_empty_skb(struct sock *sk)
951fdfc5c85SEric Dumazet {
95227728ba8SEric Dumazet struct sk_buff *skb = tcp_write_queue_tail(sk);
95327728ba8SEric Dumazet
954cf12e6f9SJon Maxwell if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
955fdfc5c85SEric Dumazet tcp_unlink_write_queue(skb, sk);
956fdfc5c85SEric Dumazet if (tcp_write_queue_empty(sk))
957fdfc5c85SEric Dumazet tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
95803271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb);
959fdfc5c85SEric Dumazet }
960fdfc5c85SEric Dumazet }
961fdfc5c85SEric Dumazet
962f8d9d938SEric Dumazet /* skb changing from pure zc to mixed, must charge zc */
tcp_downgrade_zcopy_pure(struct sock * sk,struct sk_buff * skb)963f8d9d938SEric Dumazet static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
964f8d9d938SEric Dumazet {
965f8d9d938SEric Dumazet if (unlikely(skb_zcopy_pure(skb))) {
966f8d9d938SEric Dumazet u32 extra = skb->truesize -
967f8d9d938SEric Dumazet SKB_TRUESIZE(skb_end_offset(skb));
968f8d9d938SEric Dumazet
969f8d9d938SEric Dumazet if (!sk_wmem_schedule(sk, extra))
970f8d9d938SEric Dumazet return -ENOMEM;
971f8d9d938SEric Dumazet
972f8d9d938SEric Dumazet sk_mem_charge(sk, extra);
973f8d9d938SEric Dumazet skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
974f8d9d938SEric Dumazet }
975f8d9d938SEric Dumazet return 0;
976f8d9d938SEric Dumazet }
977f8d9d938SEric Dumazet
978849b425cSEric Dumazet
tcp_wmem_schedule(struct sock * sk,int copy)979fbf93406SEric Dumazet int tcp_wmem_schedule(struct sock *sk, int copy)
980f54755f6SEric Dumazet {
981f54755f6SEric Dumazet int left;
982f54755f6SEric Dumazet
983f54755f6SEric Dumazet if (likely(sk_wmem_schedule(sk, copy)))
984f54755f6SEric Dumazet return copy;
985f54755f6SEric Dumazet
986f54755f6SEric Dumazet /* We could be in trouble if we have nothing queued.
987f54755f6SEric Dumazet * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
988f54755f6SEric Dumazet * to guarantee some progress.
989f54755f6SEric Dumazet */
990683a67daSJason Xing left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued;
991f54755f6SEric Dumazet if (left > 0)
992f54755f6SEric Dumazet sk_forced_mem_schedule(sk, min(left, copy));
993f54755f6SEric Dumazet return min(copy, sk->sk_forward_alloc);
994f54755f6SEric Dumazet }
995f54755f6SEric Dumazet
tcp_free_fastopen_req(struct tcp_sock * tp)996cf60af03SYuchung Cheng void tcp_free_fastopen_req(struct tcp_sock *tp)
997cf60af03SYuchung Cheng {
99800db4124SIan Morris if (tp->fastopen_req) {
999cf60af03SYuchung Cheng kfree(tp->fastopen_req);
1000cf60af03SYuchung Cheng tp->fastopen_req = NULL;
1001cf60af03SYuchung Cheng }
1002cf60af03SYuchung Cheng }
1003cf60af03SYuchung Cheng
tcp_sendmsg_fastopen(struct sock * sk,struct msghdr * msg,int * copied,size_t size,struct ubuf_info * uarg)10043242abebSBenjamin Hesmans int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
10053242abebSBenjamin Hesmans size_t size, struct ubuf_info *uarg)
1006cf60af03SYuchung Cheng {
1007cf60af03SYuchung Cheng struct tcp_sock *tp = tcp_sk(sk);
100819f6d3f3SWei Wang struct inet_sock *inet = inet_sk(sk);
1009ba615f67SWei Wang struct sockaddr *uaddr = msg->msg_name;
1010cf60af03SYuchung Cheng int err, flags;
1011cf60af03SYuchung Cheng
10125a542133SKuniyuki Iwashima if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
10135a542133SKuniyuki Iwashima TFO_CLIENT_ENABLE) ||
1014ba615f67SWei Wang (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
1015ba615f67SWei Wang uaddr->sa_family == AF_UNSPEC))
1016cf60af03SYuchung Cheng return -EOPNOTSUPP;
101700db4124SIan Morris if (tp->fastopen_req)
1018cf60af03SYuchung Cheng return -EALREADY; /* Another Fast Open is in progress */
1019cf60af03SYuchung Cheng
1020cf60af03SYuchung Cheng tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1021cf60af03SYuchung Cheng sk->sk_allocation);
102251456b29SIan Morris if (unlikely(!tp->fastopen_req))
1023cf60af03SYuchung Cheng return -ENOBUFS;
1024cf60af03SYuchung Cheng tp->fastopen_req->data = msg;
1025f5ddcbbbSEric Dumazet tp->fastopen_req->size = size;
1026f859a448SWillem de Bruijn tp->fastopen_req->uarg = uarg;
1027cf60af03SYuchung Cheng
102808e39c0dSEric Dumazet if (inet_test_bit(DEFER_CONNECT, sk)) {
102919f6d3f3SWei Wang err = tcp_connect(sk);
103019f6d3f3SWei Wang /* Same failure procedure as in tcp_v4/6_connect */
103119f6d3f3SWei Wang if (err) {
103219f6d3f3SWei Wang tcp_set_state(sk, TCP_CLOSE);
103319f6d3f3SWei Wang inet->inet_dport = 0;
103419f6d3f3SWei Wang sk->sk_route_caps = 0;
103519f6d3f3SWei Wang }
103619f6d3f3SWei Wang }
1037cf60af03SYuchung Cheng flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1038ba615f67SWei Wang err = __inet_stream_connect(sk->sk_socket, uaddr,
10393979ad7eSWilly Tarreau msg->msg_namelen, flags, 1);
10407db92362SWei Wang /* fastopen_req could already be freed in __inet_stream_connect
10417db92362SWei Wang * if the connection times out or gets reset (RST)
10427db92362SWei Wang */
10437db92362SWei Wang if (tp->fastopen_req) {
1044f5ddcbbbSEric Dumazet *copied = tp->fastopen_req->copied;
1045cf60af03SYuchung Cheng tcp_free_fastopen_req(tp);
104608e39c0dSEric Dumazet inet_clear_bit(DEFER_CONNECT, sk);
10477db92362SWei Wang }
1048cf60af03SYuchung Cheng return err;
1049cf60af03SYuchung Cheng }
1050cf60af03SYuchung Cheng
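/* Userspace view (illustrative, not part of this file): the two client
 * paths that reach tcp_sendmsg_fastopen(). Both assume the
 * net.ipv4.tcp_fastopen sysctl has TFO_CLIENT_ENABLE set.
 *
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 *
 * or, with deferred connect:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &on, sizeof(on));
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *	write(fd, buf, len);	// data rides on the SYN
 */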
1051306b13ebSTom Herbert int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
10521da177e4SLinus Torvalds {
10531da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
1054f214f915SWillem de Bruijn struct ubuf_info *uarg = NULL;
10551da177e4SLinus Torvalds struct sk_buff *skb;
1056c14ac945SSoheil Hassas Yeganeh struct sockcm_cookie sockc;
105757be5bdaSAl Viro int flags, err, copied = 0;
105857be5bdaSAl Viro int mss_now = 0, size_goal, copied_syn = 0;
10591a991488SEric Dumazet int process_backlog = 0;
1060270a1c3dSDavid Howells int zc = 0;
10611da177e4SLinus Torvalds long timeo;
10621da177e4SLinus Torvalds
10631da177e4SLinus Torvalds flags = msg->msg_flags;
1064f214f915SWillem de Bruijn
1065eb315a7dSPavel Begunkov if ((flags & MSG_ZEROCOPY) && size) {
1066eb315a7dSPavel Begunkov if (msg->msg_ubuf) {
1067eb315a7dSPavel Begunkov uarg = msg->msg_ubuf;
1068270a1c3dSDavid Howells if (sk->sk_route_caps & NETIF_F_SG)
1069270a1c3dSDavid Howells zc = MSG_ZEROCOPY;
1070eb315a7dSPavel Begunkov } else if (sock_flag(sk, SOCK_ZEROCOPY)) {
1071eea96a3eSPavel Begunkov skb = tcp_write_queue_tail(sk);
10728c793822SJonathan Lemon uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
1073f214f915SWillem de Bruijn if (!uarg) {
1074f214f915SWillem de Bruijn err = -ENOBUFS;
1075f214f915SWillem de Bruijn goto out_err;
1076f214f915SWillem de Bruijn }
1077270a1c3dSDavid Howells if (sk->sk_route_caps & NETIF_F_SG)
1078270a1c3dSDavid Howells zc = MSG_ZEROCOPY;
1079270a1c3dSDavid Howells else
1080e7d2b510SPavel Begunkov uarg_to_msgzc(uarg)->zerocopy = 0;
1081f214f915SWillem de Bruijn }
1082270a1c3dSDavid Howells } else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) {
1083270a1c3dSDavid Howells if (sk->sk_route_caps & NETIF_F_SG)
1084270a1c3dSDavid Howells zc = MSG_SPLICE_PAGES;
1085eb315a7dSPavel Begunkov }
1086f214f915SWillem de Bruijn
108708e39c0dSEric Dumazet if (unlikely(flags & MSG_FASTOPEN ||
108808e39c0dSEric Dumazet inet_test_bit(DEFER_CONNECT, sk)) &&
108916ae6aa1SYuchung Cheng !tp->repair) {
1090f859a448SWillem de Bruijn err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
1091cf60af03SYuchung Cheng if (err == -EINPROGRESS && copied_syn > 0)
1092cf60af03SYuchung Cheng goto out;
1093cf60af03SYuchung Cheng else if (err)
1094cf60af03SYuchung Cheng goto out_err;
1095cf60af03SYuchung Cheng }
1096cf60af03SYuchung Cheng
10971da177e4SLinus Torvalds timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
10981da177e4SLinus Torvalds
1099d7722e85SSoheil Hassas Yeganeh tcp_rate_check_app_limited(sk); /* is sending application-limited? */
1100d7722e85SSoheil Hassas Yeganeh
11018336886fSJerry Chu /* Wait for a connection to finish. One exception is TCP Fast Open
11028336886fSJerry Chu * (passive side) where data is allowed to be sent before a connection
11038336886fSJerry Chu * is fully established.
11048336886fSJerry Chu */
11058336886fSJerry Chu if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
11068336886fSJerry Chu !tcp_passive_fastopen(sk)) {
1107686a5624SYuvaraja Mariappan err = sk_stream_wait_connect(sk, &timeo);
1108686a5624SYuvaraja Mariappan if (err != 0)
1109cf60af03SYuchung Cheng goto do_error;
11108336886fSJerry Chu }
11111da177e4SLinus Torvalds
1112c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) {
1113c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_RECV_QUEUE) {
1114c0e88ff0SPavel Emelyanov copied = tcp_send_rcvq(sk, msg, size);
11155924f17aSChristoph Paasch goto out_nopush;
1116c0e88ff0SPavel Emelyanov }
1117c0e88ff0SPavel Emelyanov
1118c0e88ff0SPavel Emelyanov err = -EINVAL;
1119c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE)
1120c0e88ff0SPavel Emelyanov goto out_err;
1121c0e88ff0SPavel Emelyanov
1122c0e88ff0SPavel Emelyanov /* 'common' sending to sendq */
1123c0e88ff0SPavel Emelyanov }
1124c0e88ff0SPavel Emelyanov
1125657a0667SWillem de Bruijn sockcm_init(&sockc, sk);
1126c14ac945SSoheil Hassas Yeganeh if (msg->msg_controllen) {
1127c14ac945SSoheil Hassas Yeganeh err = sock_cmsg_send(sk, msg, &sockc);
1128c14ac945SSoheil Hassas Yeganeh if (unlikely(err)) {
1129c14ac945SSoheil Hassas Yeganeh err = -EINVAL;
1130c14ac945SSoheil Hassas Yeganeh goto out_err;
1131c14ac945SSoheil Hassas Yeganeh }
1132c14ac945SSoheil Hassas Yeganeh }
1133c14ac945SSoheil Hassas Yeganeh
11341da177e4SLinus Torvalds /* This should be in poll */
11359cd3e072SEric Dumazet sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
11361da177e4SLinus Torvalds
11371da177e4SLinus Torvalds /* Ok commence sending. */
11381da177e4SLinus Torvalds copied = 0;
11391da177e4SLinus Torvalds
1140d41a69f1SEric Dumazet restart:
1141d41a69f1SEric Dumazet mss_now = tcp_send_mss(sk, &size_goal, flags);
1142d41a69f1SEric Dumazet
11431da177e4SLinus Torvalds err = -EPIPE;
11441da177e4SLinus Torvalds if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
114579d8665bSEric Dumazet goto do_error;
11461da177e4SLinus Torvalds
114701e97e65SAl Viro while (msg_data_left(msg)) {
1148270a1c3dSDavid Howells ssize_t copy = 0;
11491da177e4SLinus Torvalds
1150fe067e8aSDavid S. Miller skb = tcp_write_queue_tail(sk);
115165ec6097SEric Dumazet if (skb)
115265ec6097SEric Dumazet copy = size_goal - skb->len;
11531da177e4SLinus Torvalds
1154c134ecb8SMartin KaFai Lau if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
11553613b3dbSEric Dumazet bool first_skb;
11563613b3dbSEric Dumazet
11571da177e4SLinus Torvalds new_segment:
11581da177e4SLinus Torvalds if (!sk_stream_memory_free(sk))
1159afb83012SSoheil Hassas Yeganeh goto wait_for_space;
11601da177e4SLinus Torvalds
11611a991488SEric Dumazet if (unlikely(process_backlog >= 16)) {
11621a991488SEric Dumazet process_backlog = 0;
11631a991488SEric Dumazet if (sk_flush_backlog(sk))
1164d41a69f1SEric Dumazet goto restart;
1165d4011239SEric Dumazet }
116675c119afSEric Dumazet first_skb = tcp_rtx_and_write_queues_empty(sk);
11675882efffSEric Dumazet skb = tcp_stream_alloc_skb(sk, sk->sk_allocation,
11683613b3dbSEric Dumazet first_skb);
11691da177e4SLinus Torvalds if (!skb)
1170afb83012SSoheil Hassas Yeganeh goto wait_for_space;
11711da177e4SLinus Torvalds
11721a991488SEric Dumazet process_backlog++;
11731da177e4SLinus Torvalds
1174a535d594SJakub Kicinski #ifdef CONFIG_SKB_DECRYPTED
1175a535d594SJakub Kicinski skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
1176a535d594SJakub Kicinski #endif
117704d8825cSPaolo Abeni tcp_skb_entail(sk, skb);
1178c1b4a7e6SDavid S. Miller copy = size_goal;
11799d186cacSAndrey Vagin
11809d186cacSAndrey Vagin /* All packets are restored as if they have
1181d3edd06eSEric Dumazet * already been sent. skb_mstamp_ns is left unset to
11829d186cacSAndrey Vagin * avoid a wrong rtt estimation.
11839d186cacSAndrey Vagin */
11849d186cacSAndrey Vagin if (tp->repair)
11859d186cacSAndrey Vagin TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
11861da177e4SLinus Torvalds }
11871da177e4SLinus Torvalds
11881da177e4SLinus Torvalds /* Try to append data to the end of skb. */
118901e97e65SAl Viro if (copy > msg_data_left(msg))
119001e97e65SAl Viro copy = msg_data_left(msg);
11911da177e4SLinus Torvalds
1192270a1c3dSDavid Howells if (zc == 0) {
11935640f768SEric Dumazet bool merge = true;
11941da177e4SLinus Torvalds int i = skb_shinfo(skb)->nr_frags;
11955640f768SEric Dumazet struct page_frag *pfrag = sk_page_frag(sk);
1196761965eaSEric Dumazet
11975640f768SEric Dumazet if (!sk_page_frag_refill(sk, pfrag))
1198afb83012SSoheil Hassas Yeganeh goto wait_for_space;
1199761965eaSEric Dumazet
12005640f768SEric Dumazet if (!skb_can_coalesce(skb, i, pfrag->page,
12015640f768SEric Dumazet pfrag->offset)) {
1202a86a0661SEric Dumazet if (i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
12031da177e4SLinus Torvalds tcp_mark_push(tp, skb);
12041da177e4SLinus Torvalds goto new_segment;
12051da177e4SLinus Torvalds }
12065640f768SEric Dumazet merge = false;
12075640f768SEric Dumazet }
1208ef015786SHerbert Xu
12095640f768SEric Dumazet copy = min_t(int, copy, pfrag->size - pfrag->offset);
1210ef015786SHerbert Xu
1211eb315a7dSPavel Begunkov if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) {
1212849b425cSEric Dumazet if (tcp_downgrade_zcopy_pure(sk, skb))
1213849b425cSEric Dumazet goto wait_for_space;
1214eb315a7dSPavel Begunkov skb_zcopy_downgrade_managed(skb);
1215eb315a7dSPavel Begunkov }
1216849b425cSEric Dumazet
1217849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy);
1218849b425cSEric Dumazet if (!copy)
1219afb83012SSoheil Hassas Yeganeh goto wait_for_space;
12201da177e4SLinus Torvalds
122157be5bdaSAl Viro err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
12225640f768SEric Dumazet pfrag->page,
12235640f768SEric Dumazet pfrag->offset,
12245640f768SEric Dumazet copy);
12255640f768SEric Dumazet if (err)
12261da177e4SLinus Torvalds goto do_error;
12271da177e4SLinus Torvalds
12281da177e4SLinus Torvalds /* Update the skb. */
12291da177e4SLinus Torvalds if (merge) {
12309e903e08SEric Dumazet skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
12311da177e4SLinus Torvalds } else {
12325640f768SEric Dumazet skb_fill_page_desc(skb, i, pfrag->page,
12335640f768SEric Dumazet pfrag->offset, copy);
12344e33e346SEric Dumazet page_ref_inc(pfrag->page);
12351da177e4SLinus Torvalds }
12365640f768SEric Dumazet pfrag->offset += copy;
1237270a1c3dSDavid Howells } else if (zc == MSG_ZEROCOPY) {
12389b65b17dSTalal Ahmad /* First append to a fragless skb builds initial
12399b65b17dSTalal Ahmad * pure zerocopy skb
12409b65b17dSTalal Ahmad */
12419b65b17dSTalal Ahmad if (!skb->len)
12429b65b17dSTalal Ahmad skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
12439b65b17dSTalal Ahmad
12449b65b17dSTalal Ahmad if (!skb_zcopy_pure(skb)) {
1245849b425cSEric Dumazet copy = tcp_wmem_schedule(sk, copy);
1246849b425cSEric Dumazet if (!copy)
1247358ed624STalal Ahmad goto wait_for_space;
12489b65b17dSTalal Ahmad }
1249358ed624STalal Ahmad
1250f214f915SWillem de Bruijn err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
1251111856c7SWillem de Bruijn if (err == -EMSGSIZE || err == -EEXIST) {
1252111856c7SWillem de Bruijn tcp_mark_push(tp, skb);
1253f214f915SWillem de Bruijn goto new_segment;
1254111856c7SWillem de Bruijn }
1255f214f915SWillem de Bruijn if (err < 0)
1256f214f915SWillem de Bruijn goto do_error;
1257f214f915SWillem de Bruijn copy = err;
1258270a1c3dSDavid Howells } else if (zc == MSG_SPLICE_PAGES) {
1259270a1c3dSDavid Howells /* Splice in data if we can; copy if we can't. */
1260270a1c3dSDavid Howells if (tcp_downgrade_zcopy_pure(sk, skb))
1261270a1c3dSDavid Howells goto wait_for_space;
1262270a1c3dSDavid Howells copy = tcp_wmem_schedule(sk, copy);
1263270a1c3dSDavid Howells if (!copy)
1264270a1c3dSDavid Howells goto wait_for_space;
1265270a1c3dSDavid Howells
1266270a1c3dSDavid Howells err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
1267270a1c3dSDavid Howells sk->sk_allocation);
1268270a1c3dSDavid Howells if (err < 0) {
1269270a1c3dSDavid Howells if (err == -EMSGSIZE) {
1270270a1c3dSDavid Howells tcp_mark_push(tp, skb);
1271270a1c3dSDavid Howells goto new_segment;
1272270a1c3dSDavid Howells }
1273270a1c3dSDavid Howells goto do_error;
1274270a1c3dSDavid Howells }
1275270a1c3dSDavid Howells copy = err;
1276270a1c3dSDavid Howells
1277270a1c3dSDavid Howells if (!(flags & MSG_NO_SHARED_FRAGS))
1278270a1c3dSDavid Howells skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
1279270a1c3dSDavid Howells
1280270a1c3dSDavid Howells sk_wmem_queued_add(sk, copy);
1281270a1c3dSDavid Howells sk_mem_charge(sk, copy);
12821da177e4SLinus Torvalds }
12831da177e4SLinus Torvalds
12841da177e4SLinus Torvalds if (!copied)
12854de075e0SEric Dumazet TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
12861da177e4SLinus Torvalds
12870f317464SEric Dumazet WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
12881da177e4SLinus Torvalds TCP_SKB_CB(skb)->end_seq += copy;
1289cd7d8498SEric Dumazet tcp_skb_pcount_set(skb, 0);
12901da177e4SLinus Torvalds
12911da177e4SLinus Torvalds copied += copy;
129201e97e65SAl Viro if (!msg_data_left(msg)) {
1293c134ecb8SMartin KaFai Lau if (unlikely(flags & MSG_EOR))
1294c134ecb8SMartin KaFai Lau TCP_SKB_CB(skb)->eor = 1;
12951da177e4SLinus Torvalds goto out;
12964ed2d765SWillem de Bruijn }
12971da177e4SLinus Torvalds
129865ec6097SEric Dumazet if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
12991da177e4SLinus Torvalds continue;
13001da177e4SLinus Torvalds
13011da177e4SLinus Torvalds if (forced_push(tp)) {
13021da177e4SLinus Torvalds tcp_mark_push(tp, skb);
13039e412ba7SIlpo Järvinen __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1304fe067e8aSDavid S. Miller } else if (skb == tcp_send_head(sk))
13051da177e4SLinus Torvalds tcp_push_one(sk, mss_now);
13061da177e4SLinus Torvalds continue;
13071da177e4SLinus Torvalds
1308afb83012SSoheil Hassas Yeganeh wait_for_space:
13091da177e4SLinus Torvalds set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
131072bf4f17SEric Dumazet tcp_remove_empty_skb(sk);
1311ec342325SAndrew Vagin if (copied)
1312f54b3111SEric Dumazet tcp_push(sk, flags & ~MSG_MORE, mss_now,
1313f54b3111SEric Dumazet TCP_NAGLE_PUSH, size_goal);
13141da177e4SLinus Torvalds
1315686a5624SYuvaraja Mariappan err = sk_stream_wait_memory(sk, &timeo);
1316686a5624SYuvaraja Mariappan if (err != 0)
13171da177e4SLinus Torvalds goto do_error;
13181da177e4SLinus Torvalds
13190c54b85fSIlpo Järvinen mss_now = tcp_send_mss(sk, &size_goal, flags);
13201da177e4SLinus Torvalds }
13211da177e4SLinus Torvalds
13221da177e4SLinus Torvalds out:
1323ad02c4f5SSoheil Hassas Yeganeh if (copied) {
13244e8cc228SEric Dumazet tcp_tx_timestamp(sk, sockc.tsflags);
1325f54b3111SEric Dumazet tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1326ad02c4f5SSoheil Hassas Yeganeh }
13275924f17aSChristoph Paasch out_nopush:
1328a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
1329a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf)
13308e044917SJonathan Lemon net_zcopy_put(uarg);
1331cf60af03SYuchung Cheng return copied + copied_syn;
13321da177e4SLinus Torvalds
13331da177e4SLinus Torvalds do_error:
133427728ba8SEric Dumazet tcp_remove_empty_skb(sk);
1335fdfc5c85SEric Dumazet
1336cf60af03SYuchung Cheng if (copied + copied_syn)
13371da177e4SLinus Torvalds goto out;
13381da177e4SLinus Torvalds out_err:
1339a7533584SPavel Begunkov /* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
1340a7533584SPavel Begunkov if (uarg && !msg->msg_ubuf)
13418e044917SJonathan Lemon net_zcopy_put_abort(uarg, true);
13421da177e4SLinus Torvalds err = sk_stream_error(sk, flags, err);
1343ce5ec440SJason Baron /* make sure we wake any epoll edge trigger waiter */
1344216808c6SEric Dumazet if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
1345ce5ec440SJason Baron sk->sk_write_space(sk);
1346b0f71bd3SFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1347b0f71bd3SFrancis Yan }
13481da177e4SLinus Torvalds return err;
13491da177e4SLinus Torvalds }
1350774c4673SDavid S. Miller EXPORT_SYMBOL_GPL(tcp_sendmsg_locked);
1351306b13ebSTom Herbert
1352306b13ebSTom Herbert int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1353306b13ebSTom Herbert {
1354306b13ebSTom Herbert int ret;
1355306b13ebSTom Herbert
1356306b13ebSTom Herbert lock_sock(sk);
1357306b13ebSTom Herbert ret = tcp_sendmsg_locked(sk, msg, size);
1358306b13ebSTom Herbert release_sock(sk);
1359306b13ebSTom Herbert
1360306b13ebSTom Herbert return ret;
1361306b13ebSTom Herbert }
13624bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sendmsg);
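/* Minimal sketch (hypothetical helper, not in this file): an in-kernel
 * caller can feed a buffer to tcp_sendmsg() by wrapping it in a
 * kvec-backed msghdr. Assumes 'sk' is a connected TCP socket and the
 * socket lock is not held, since tcp_sendmsg() takes it.
 */
static inline int tcp_example_send_buf(struct sock *sk, void *buf, size_t len)
{
	struct kvec vec = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &vec, 1, len);
	return tcp_sendmsg(sk, &msg, len);
}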
13631da177e4SLinus Torvalds
13641d7e4538SDavid Howells void tcp_splice_eof(struct socket *sock)
13651d7e4538SDavid Howells {
13661d7e4538SDavid Howells struct sock *sk = sock->sk;
13671d7e4538SDavid Howells struct tcp_sock *tp = tcp_sk(sk);
13681d7e4538SDavid Howells int mss_now, size_goal;
13691d7e4538SDavid Howells
13701d7e4538SDavid Howells if (!tcp_write_queue_tail(sk))
13711d7e4538SDavid Howells return;
13721d7e4538SDavid Howells
13731d7e4538SDavid Howells lock_sock(sk);
13741d7e4538SDavid Howells mss_now = tcp_send_mss(sk, &size_goal, 0);
13751d7e4538SDavid Howells tcp_push(sk, 0, mss_now, tp->nonagle, size_goal);
13761d7e4538SDavid Howells release_sock(sk);
13771d7e4538SDavid Howells }
13781d7e4538SDavid Howells EXPORT_SYMBOL_GPL(tcp_splice_eof);
13791d7e4538SDavid Howells
13801da177e4SLinus Torvalds /*
13811da177e4SLinus Torvalds * Handle reading urgent data. BSD has very simple semantics for
13821da177e4SLinus Torvalds * this, no blocking and very strange errors 8)
13831da177e4SLinus Torvalds */
13841da177e4SLinus Torvalds
1385377f0a08SRami Rosen static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
13861da177e4SLinus Torvalds {
13871da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
13881da177e4SLinus Torvalds
13891da177e4SLinus Torvalds /* No URG data to read. */
13901da177e4SLinus Torvalds if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
13911da177e4SLinus Torvalds tp->urg_data == TCP_URG_READ)
13921da177e4SLinus Torvalds return -EINVAL; /* Yes this is right ! */
13931da177e4SLinus Torvalds
13941da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
13951da177e4SLinus Torvalds return -ENOTCONN;
13961da177e4SLinus Torvalds
13971da177e4SLinus Torvalds if (tp->urg_data & TCP_URG_VALID) {
13981da177e4SLinus Torvalds int err = 0;
13991da177e4SLinus Torvalds char c = tp->urg_data;
14001da177e4SLinus Torvalds
14011da177e4SLinus Torvalds if (!(flags & MSG_PEEK))
14027b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, TCP_URG_READ);
14031da177e4SLinus Torvalds
14041da177e4SLinus Torvalds /* Read urgent data. */
14051da177e4SLinus Torvalds msg->msg_flags |= MSG_OOB;
14061da177e4SLinus Torvalds
14071da177e4SLinus Torvalds if (len > 0) {
14081da177e4SLinus Torvalds if (!(flags & MSG_TRUNC))
14097eab8d9eSAl Viro err = memcpy_to_msg(msg, &c, 1);
14101da177e4SLinus Torvalds len = 1;
14111da177e4SLinus Torvalds } else
14121da177e4SLinus Torvalds msg->msg_flags |= MSG_TRUNC;
14131da177e4SLinus Torvalds
14141da177e4SLinus Torvalds return err ? -EFAULT : len;
14151da177e4SLinus Torvalds }
14161da177e4SLinus Torvalds
14171da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
14181da177e4SLinus Torvalds return 0;
14191da177e4SLinus Torvalds
14201da177e4SLinus Torvalds /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
14211da177e4SLinus Torvalds * the available implementations agree in this case:
14221da177e4SLinus Torvalds * this call should never block, independent of the
14231da177e4SLinus Torvalds * blocking state of the socket.
14241da177e4SLinus Torvalds * Mike <pall@rz.uni-karlsruhe.de>
14251da177e4SLinus Torvalds */
14261da177e4SLinus Torvalds return -EAGAIN;
14271da177e4SLinus Torvalds }
14281da177e4SLinus Torvalds
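/* Userspace view (illustrative): the semantics implemented above. With
 * SO_OOBINLINE off, reading the single byte of urgent data never blocks:
 *
 *	char c;
 *	ssize_t n = recv(fd, &c, 1, MSG_OOB);
 *
 * n == 1 returns the urgent byte, EAGAIN means no urgent data is pending,
 * and EINVAL means it was already read (or SO_OOBINLINE is set).
 */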
1429c0e88ff0SPavel Emelyanov static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1430c0e88ff0SPavel Emelyanov {
1431c0e88ff0SPavel Emelyanov struct sk_buff *skb;
1432c0e88ff0SPavel Emelyanov int copied = 0, err = 0;
1433c0e88ff0SPavel Emelyanov
143475c119afSEric Dumazet skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
143575c119afSEric Dumazet err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
143675c119afSEric Dumazet if (err)
143775c119afSEric Dumazet return err;
143875c119afSEric Dumazet copied += skb->len;
143975c119afSEric Dumazet }
144075c119afSEric Dumazet
1441c0e88ff0SPavel Emelyanov skb_queue_walk(&sk->sk_write_queue, skb) {
144251f3d02bSDavid S. Miller err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1443c0e88ff0SPavel Emelyanov if (err)
1444c0e88ff0SPavel Emelyanov break;
1445c0e88ff0SPavel Emelyanov
1446c0e88ff0SPavel Emelyanov copied += skb->len;
1447c0e88ff0SPavel Emelyanov }
1448c0e88ff0SPavel Emelyanov
1449c0e88ff0SPavel Emelyanov return err ?: copied;
1450c0e88ff0SPavel Emelyanov }
1451c0e88ff0SPavel Emelyanov
14521da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user,
14531da177e4SLinus Torvalds * then send an ACK if necessary. COPIED is the number of bytes
14541da177e4SLinus Torvalds * tcp_recvmsg has given to the user so far; it speeds up the
14551da177e4SLinus Torvalds * calculation of whether or not we must ACK for the sake of
14561da177e4SLinus Torvalds * a window update.
14571da177e4SLinus Torvalds */
1458e5c6de5fSJohn Fastabend void __tcp_cleanup_rbuf(struct sock *sk, int copied)
14591da177e4SLinus Torvalds {
14601da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
1461a2a385d6SEric Dumazet bool time_to_ack = false;
14621da177e4SLinus Torvalds
1463463c84b9SArnaldo Carvalho de Melo if (inet_csk_ack_scheduled(sk)) {
1464463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk);
1465b6b6d653SEric Dumazet
1466b6b6d653SEric Dumazet if (/* Once-per-two-segments ACK was not sent by tcp_input.c */
1467463c84b9SArnaldo Carvalho de Melo tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
14681da177e4SLinus Torvalds /*
14691da177e4SLinus Torvalds * If this read emptied the read buffer, we send an ACK when
14701da177e4SLinus Torvalds * the connection is not bidirectional, the user has drained the
14711da177e4SLinus Torvalds * receive buffer and there was a small segment
14721da177e4SLinus Torvalds * in the queue.
14731da177e4SLinus Torvalds */
14741ef9696cSAlexey Kuznetsov (copied > 0 &&
14751ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
14761ef9696cSAlexey Kuznetsov ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
147731954cd8SWei Wang !inet_csk_in_pingpong_mode(sk))) &&
14781ef9696cSAlexey Kuznetsov !atomic_read(&sk->sk_rmem_alloc)))
1479a2a385d6SEric Dumazet time_to_ack = true;
14801da177e4SLinus Torvalds }
14811da177e4SLinus Torvalds
14821da177e4SLinus Torvalds /* We send an ACK if we can now advertise a non-zero window
14831da177e4SLinus Torvalds * which has been raised "significantly".
14841da177e4SLinus Torvalds *
14851da177e4SLinus Torvalds * Even if window raised up to infinity, do not send window open ACK
14861da177e4SLinus Torvalds * in states, where we will not receive more. It is useless.
14871da177e4SLinus Torvalds */
14881da177e4SLinus Torvalds if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
14891da177e4SLinus Torvalds __u32 rcv_window_now = tcp_receive_window(tp);
14901da177e4SLinus Torvalds
14911da177e4SLinus Torvalds /* Optimize, __tcp_select_window() is not cheap. */
14921da177e4SLinus Torvalds if (2*rcv_window_now <= tp->window_clamp) {
14931da177e4SLinus Torvalds __u32 new_window = __tcp_select_window(sk);
14941da177e4SLinus Torvalds
14951da177e4SLinus Torvalds /* Send ACK now, if this read freed lots of space
14961da177e4SLinus Torvalds * in our buffer. Here, new_window is the window we could advertise;
14971da177e4SLinus Torvalds * we can advertise it now if it is not less than the current one.
14981da177e4SLinus Torvalds * "Lots" means "at least twice" here.
14991da177e4SLinus Torvalds */
15001da177e4SLinus Torvalds if (new_window && new_window >= 2 * rcv_window_now)
1501a2a385d6SEric Dumazet time_to_ack = true;
15021da177e4SLinus Torvalds }
15031da177e4SLinus Torvalds }
15041da177e4SLinus Torvalds if (time_to_ack)
15051da177e4SLinus Torvalds tcp_send_ack(sk);
15061da177e4SLinus Torvalds }
15071da177e4SLinus Torvalds
1508c457985aSCong Wang void tcp_cleanup_rbuf(struct sock *sk, int copied)
1509c457985aSCong Wang {
1510c457985aSCong Wang struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1511c457985aSCong Wang struct tcp_sock *tp = tcp_sk(sk);
1512c457985aSCong Wang
1513c457985aSCong Wang WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1514c457985aSCong Wang "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1515c457985aSCong Wang tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1516c457985aSCong Wang __tcp_cleanup_rbuf(sk, copied);
1517c457985aSCong Wang }
1518c457985aSCong Wang
15193df684c1SEric Dumazet static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
15203df684c1SEric Dumazet {
1521f35f8219SEric Dumazet __skb_unlink(skb, &sk->sk_receive_queue);
15223df684c1SEric Dumazet if (likely(skb->destructor == sock_rfree)) {
15233df684c1SEric Dumazet sock_rfree(skb);
15243df684c1SEric Dumazet skb->destructor = NULL;
15253df684c1SEric Dumazet skb->sk = NULL;
152668822bdfSEric Dumazet return skb_attempt_defer_free(skb);
1527f35f8219SEric Dumazet }
1528f35f8219SEric Dumazet __kfree_skb(skb);
15293df684c1SEric Dumazet }
15303df684c1SEric Dumazet
15313f92a64eSJakub Kicinski struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
15321da177e4SLinus Torvalds {
15331da177e4SLinus Torvalds struct sk_buff *skb;
15341da177e4SLinus Torvalds u32 offset;
15351da177e4SLinus Torvalds
1536f26845b4SEric Dumazet while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
15371da177e4SLinus Torvalds offset = seq - TCP_SKB_CB(skb)->seq;
15389d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
15399d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__);
15401da177e4SLinus Torvalds offset--;
15419d691539SEric Dumazet }
1542e11ecddfSEric Dumazet if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
15431da177e4SLinus Torvalds *off = offset;
15441da177e4SLinus Torvalds return skb;
15451da177e4SLinus Torvalds }
1546f26845b4SEric Dumazet /* This looks weird, but this can happen if TCP collapsing
1547f26845b4SEric Dumazet * split a fat GRO packet, while we released the socket lock
1548f26845b4SEric Dumazet * in skb_splice_bits()
1549f26845b4SEric Dumazet */
15503df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb);
15511da177e4SLinus Torvalds }
15521da177e4SLinus Torvalds return NULL;
15531da177e4SLinus Torvalds }
15543f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_recv_skb);
15551da177e4SLinus Torvalds
15561da177e4SLinus Torvalds /*
15571da177e4SLinus Torvalds * This routine provides an alternative to tcp_recvmsg() for routines
15581da177e4SLinus Torvalds * that would like to handle copying from skbuffs directly in 'sendfile'
15591da177e4SLinus Torvalds * fashion.
15601da177e4SLinus Torvalds * Note:
15611da177e4SLinus Torvalds * - It is assumed that the socket was locked by the caller.
15621da177e4SLinus Torvalds * - The routine does not block.
15631da177e4SLinus Torvalds * - At present, there is no support for reading OOB data
15641da177e4SLinus Torvalds * or for 'peeking' the socket using this routine
15651da177e4SLinus Torvalds * (although both would be easy to implement).
15661da177e4SLinus Torvalds */
15671da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
15681da177e4SLinus Torvalds sk_read_actor_t recv_actor)
15691da177e4SLinus Torvalds {
15701da177e4SLinus Torvalds struct sk_buff *skb;
15711da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
15721da177e4SLinus Torvalds u32 seq = tp->copied_seq;
15731da177e4SLinus Torvalds u32 offset;
15741da177e4SLinus Torvalds int copied = 0;
15751da177e4SLinus Torvalds
15761da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN)
15771da177e4SLinus Torvalds return -ENOTCONN;
15781da177e4SLinus Torvalds while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
15791da177e4SLinus Torvalds if (offset < skb->len) {
1580374e7b59SOctavian Purdila int used;
1581374e7b59SOctavian Purdila size_t len;
15821da177e4SLinus Torvalds
15831da177e4SLinus Torvalds len = skb->len - offset;
15841da177e4SLinus Torvalds /* Stop reading if we hit a patch of urgent data */
1585b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) {
15861da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - seq;
15871da177e4SLinus Torvalds if (urg_offset < len)
15881da177e4SLinus Torvalds len = urg_offset;
15891da177e4SLinus Torvalds if (!len)
15901da177e4SLinus Torvalds break;
15911da177e4SLinus Torvalds }
15921da177e4SLinus Torvalds used = recv_actor(desc, skb, offset, len);
1593ff905b1eSEric Dumazet if (used <= 0) {
1594ddb61a57SJens Axboe if (!copied)
1595ddb61a57SJens Axboe copied = used;
1596ddb61a57SJens Axboe break;
1597e3d5ea2cSEric Dumazet }
1598e3d5ea2cSEric Dumazet if (WARN_ON_ONCE(used > len))
1599e3d5ea2cSEric Dumazet used = len;
16001da177e4SLinus Torvalds seq += used;
16011da177e4SLinus Torvalds copied += used;
16021da177e4SLinus Torvalds offset += used;
1603e3d5ea2cSEric Dumazet
160402275a2eSWilly Tarreau /* If recv_actor drops the lock (e.g. TCP splice
1605293ad604SOctavian Purdila * receive) the skb pointer might be invalid when
1606293ad604SOctavian Purdila * getting here: tcp_collapse might have deleted it
1607293ad604SOctavian Purdila * while aggregating skbs from the socket queue.
1608293ad604SOctavian Purdila */
1609293ad604SOctavian Purdila skb = tcp_recv_skb(sk, seq - 1, &offset);
161002275a2eSWilly Tarreau if (!skb)
16111da177e4SLinus Torvalds break;
161202275a2eSWilly Tarreau /* TCP coalescing might have appended data to the skb.
161302275a2eSWilly Tarreau * Try to splice more frags
161402275a2eSWilly Tarreau */
161502275a2eSWilly Tarreau if (offset + 1 != skb->len)
161602275a2eSWilly Tarreau continue;
16171da177e4SLinus Torvalds }
1618e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
16193df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb);
16201da177e4SLinus Torvalds ++seq;
16211da177e4SLinus Torvalds break;
16221da177e4SLinus Torvalds }
16233df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb);
16241da177e4SLinus Torvalds if (!desc->count)
16251da177e4SLinus Torvalds break;
16267db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq);
16271da177e4SLinus Torvalds }
16287db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq);
16291da177e4SLinus Torvalds
16301da177e4SLinus Torvalds tcp_rcv_space_adjust(sk);
16311da177e4SLinus Torvalds
16321da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */
1633f26845b4SEric Dumazet if (copied > 0) {
1634f26845b4SEric Dumazet tcp_recv_skb(sk, seq, &offset);
16350e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied);
1636f26845b4SEric Dumazet }
16371da177e4SLinus Torvalds return copied;
16381da177e4SLinus Torvalds }
16394bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_read_sock);
16401da177e4SLinus Torvalds
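/* Minimal sketch (hypothetical, not in this file): a recv_actor for
 * tcp_read_sock() that just consumes bytes against the caller's budget.
 * tcp_read_sock() stops when the actor returns <= 0 or when desc->count
 * reaches zero.
 */
static int tcp_example_count_actor(read_descriptor_t *desc, struct sk_buff *skb,
				   unsigned int offset, size_t len)
{
	size_t used = min_t(size_t, len, desc->count);

	desc->count -= used;
	return used;
}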
1641965b57b4SCong Wang int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
164204919bedSCong Wang {
164304919bedSCong Wang struct sk_buff *skb;
164404919bedSCong Wang int copied = 0;
164504919bedSCong Wang
164604919bedSCong Wang if (sk->sk_state == TCP_LISTEN)
164704919bedSCong Wang return -ENOTCONN;
164804919bedSCong Wang
16499b7177b1SJohn Fastabend while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1650db4192a7SCong Wang u8 tcp_flags;
1651db4192a7SCong Wang int used;
165204919bedSCong Wang
165304919bedSCong Wang __skb_unlink(skb, &sk->sk_receive_queue);
165496628951SPeilin Ye WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
1655db4192a7SCong Wang tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
1656db4192a7SCong Wang used = recv_actor(sk, skb);
1657db4192a7SCong Wang if (used < 0) {
1658db4192a7SCong Wang if (!copied)
1659db4192a7SCong Wang copied = used;
1660db4192a7SCong Wang break;
1661db4192a7SCong Wang }
1662db4192a7SCong Wang copied += used;
1663db4192a7SCong Wang
16649b7177b1SJohn Fastabend if (tcp_flags & TCPHDR_FIN)
1665db4192a7SCong Wang break;
1666db4192a7SCong Wang }
166704919bedSCong Wang return copied;
166804919bedSCong Wang }
166904919bedSCong Wang EXPORT_SYMBOL(tcp_read_skb);
167004919bedSCong Wang
16713f92a64eSJakub Kicinski void tcp_read_done(struct sock *sk, size_t len)
16723f92a64eSJakub Kicinski {
16733f92a64eSJakub Kicinski struct tcp_sock *tp = tcp_sk(sk);
16743f92a64eSJakub Kicinski u32 seq = tp->copied_seq;
16753f92a64eSJakub Kicinski struct sk_buff *skb;
16763f92a64eSJakub Kicinski size_t left;
16773f92a64eSJakub Kicinski u32 offset;
16783f92a64eSJakub Kicinski
16793f92a64eSJakub Kicinski if (sk->sk_state == TCP_LISTEN)
16803f92a64eSJakub Kicinski return;
16813f92a64eSJakub Kicinski
16823f92a64eSJakub Kicinski left = len;
16833f92a64eSJakub Kicinski while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
16843f92a64eSJakub Kicinski int used;
16853f92a64eSJakub Kicinski
16863f92a64eSJakub Kicinski used = min_t(size_t, skb->len - offset, left);
16873f92a64eSJakub Kicinski seq += used;
16883f92a64eSJakub Kicinski left -= used;
16893f92a64eSJakub Kicinski
16903f92a64eSJakub Kicinski if (skb->len > offset + used)
16913f92a64eSJakub Kicinski break;
16923f92a64eSJakub Kicinski
16933f92a64eSJakub Kicinski if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
16943f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb);
16953f92a64eSJakub Kicinski ++seq;
16963f92a64eSJakub Kicinski break;
16973f92a64eSJakub Kicinski }
16983f92a64eSJakub Kicinski tcp_eat_recv_skb(sk, skb);
16993f92a64eSJakub Kicinski }
17003f92a64eSJakub Kicinski WRITE_ONCE(tp->copied_seq, seq);
17013f92a64eSJakub Kicinski
17023f92a64eSJakub Kicinski tcp_rcv_space_adjust(sk);
17033f92a64eSJakub Kicinski
17043f92a64eSJakub Kicinski /* Clean up data we have read: This will do ACK frames. */
17053f92a64eSJakub Kicinski if (left != len)
17063f92a64eSJakub Kicinski tcp_cleanup_rbuf(sk, len - left);
17073f92a64eSJakub Kicinski }
17083f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_read_done);
17093f92a64eSJakub Kicinski
171032035585STom Herbert int tcp_peek_len(struct socket *sock)
171132035585STom Herbert {
171232035585STom Herbert return tcp_inq(sock->sk);
171332035585STom Herbert }
171432035585STom Herbert EXPORT_SYMBOL(tcp_peek_len);
171532035585STom Herbert
1716d1361840SEric Dumazet /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
1717d1361840SEric Dumazet int tcp_set_rcvlowat(struct sock *sk, int val)
1718d1361840SEric Dumazet {
1719dfa2f048SEric Dumazet int space, cap;
1720867f816bSSoheil Hassas Yeganeh
1721867f816bSSoheil Hassas Yeganeh if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1722867f816bSSoheil Hassas Yeganeh cap = sk->sk_rcvbuf >> 1;
1723867f816bSSoheil Hassas Yeganeh else
172402739545SKuniyuki Iwashima cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
1725867f816bSSoheil Hassas Yeganeh val = min(val, cap);
1726eac66402SEric Dumazet WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
172703f45c88SEric Dumazet
172803f45c88SEric Dumazet /* Check if we need to signal EPOLLIN right now */
172903f45c88SEric Dumazet tcp_data_ready(sk);
173003f45c88SEric Dumazet
1731d1361840SEric Dumazet if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1732d1361840SEric Dumazet return 0;
1733d1361840SEric Dumazet
1734dfa2f048SEric Dumazet space = tcp_space_from_win(sk, val);
1735dfa2f048SEric Dumazet if (space > sk->sk_rcvbuf) {
1736dfa2f048SEric Dumazet WRITE_ONCE(sk->sk_rcvbuf, space);
1737f410cbeaSEric Dumazet WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
1738d1361840SEric Dumazet }
1739d1361840SEric Dumazet return 0;
1740d1361840SEric Dumazet }
1741d1361840SEric Dumazet EXPORT_SYMBOL(tcp_set_rcvlowat);
1742d1361840SEric Dumazet
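/* Userspace view (illustrative): defer receive wakeups until at least
 * 'lowat' bytes are queued; tcp_set_rcvlowat() above may also grow
 * sk_rcvbuf so the hint is actually reachable within the receive window.
 *
 *	int lowat = 64 * 1024;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 */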
1743892bfd3dSFlorian Westphal void tcp_update_recv_tstamps(struct sk_buff *skb,
17447eeba170SArjun Roy struct scm_timestamping_internal *tss)
17457eeba170SArjun Roy {
17467eeba170SArjun Roy if (skb->tstamp)
17477eeba170SArjun Roy tss->ts[0] = ktime_to_timespec64(skb->tstamp);
17487eeba170SArjun Roy else
17497eeba170SArjun Roy tss->ts[0] = (struct timespec64) {0};
17507eeba170SArjun Roy
17517eeba170SArjun Roy if (skb_hwtstamps(skb)->hwtstamp)
17527eeba170SArjun Roy tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp);
17537eeba170SArjun Roy else
17547eeba170SArjun Roy tss->ts[2] = (struct timespec64) {0};
17557eeba170SArjun Roy }
17567eeba170SArjun Roy
175705255b82SEric Dumazet #ifdef CONFIG_MMU
1758350f6bbcSMatthew Wilcox (Oracle) static const struct vm_operations_struct tcp_vm_ops = {
175905255b82SEric Dumazet };
176005255b82SEric Dumazet
176193ab6cc6SEric Dumazet int tcp_mmap(struct file *file, struct socket *sock,
176293ab6cc6SEric Dumazet struct vm_area_struct *vma)
176393ab6cc6SEric Dumazet {
176405255b82SEric Dumazet if (vma->vm_flags & (VM_WRITE | VM_EXEC))
176505255b82SEric Dumazet return -EPERM;
17661c71222eSSuren Baghdasaryan vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC);
176705255b82SEric Dumazet
17683e4e28c5SMichel Lespinasse /* Instruct vm_insert_page() to not mmap_read_lock(mm) */
17691c71222eSSuren Baghdasaryan vm_flags_set(vma, VM_MIXEDMAP);
177005255b82SEric Dumazet
177105255b82SEric Dumazet vma->vm_ops = &tcp_vm_ops;
177205255b82SEric Dumazet return 0;
177305255b82SEric Dumazet }
177405255b82SEric Dumazet EXPORT_SYMBOL(tcp_mmap);
177505255b82SEric Dumazet
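/* Userspace view (illustrative): the mapping must be read-only to pass
 * the VM_WRITE/VM_EXEC check above; pages are inserted into it later by
 * getsockopt(TCP_ZEROCOPY_RECEIVE), implemented further down this file.
 *
 *	addr = mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0);
 *
 *	struct tcp_zerocopy_receive zc = {
 *		.address = (__u64)(unsigned long)addr,
 *		.length  = chunk,
 *	};
 *	socklen_t zc_len = sizeof(zc);
 *	getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len);
 *
 * On return, zc.length bytes are mapped at addr and zc.recv_skip_hint
 * bytes must still be read with recvmsg().
 */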
17767fba5309SArjun Roy static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
17777fba5309SArjun Roy u32 *offset_frag)
17787fba5309SArjun Roy {
17797fba5309SArjun Roy skb_frag_t *frag;
17807fba5309SArjun Roy
178170701b83SArjun Roy if (unlikely(offset_skb >= skb->len))
178270701b83SArjun Roy return NULL;
178370701b83SArjun Roy
17847fba5309SArjun Roy offset_skb -= skb_headlen(skb);
17857fba5309SArjun Roy if ((int)offset_skb < 0 || skb_has_frag_list(skb))
17867fba5309SArjun Roy return NULL;
17877fba5309SArjun Roy
17887fba5309SArjun Roy frag = skb_shinfo(skb)->frags;
17897fba5309SArjun Roy while (offset_skb) {
17907fba5309SArjun Roy if (skb_frag_size(frag) > offset_skb) {
17917fba5309SArjun Roy *offset_frag = offset_skb;
17927fba5309SArjun Roy return frag;
17937fba5309SArjun Roy }
17947fba5309SArjun Roy offset_skb -= skb_frag_size(frag);
17957fba5309SArjun Roy ++frag;
17967fba5309SArjun Roy }
17977fba5309SArjun Roy *offset_frag = 0;
17987fba5309SArjun Roy return frag;
17997fba5309SArjun Roy }
18007fba5309SArjun Roy
180198917cf0SArjun Roy static bool can_map_frag(const skb_frag_t *frag)
180298917cf0SArjun Roy {
1803577e4432SEric Dumazet struct page *page;
1804577e4432SEric Dumazet
1805577e4432SEric Dumazet if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag))
1806577e4432SEric Dumazet return false;
1807577e4432SEric Dumazet
1808577e4432SEric Dumazet page = skb_frag_page(frag);
1809577e4432SEric Dumazet
1810577e4432SEric Dumazet if (PageCompound(page) || page->mapping)
1811577e4432SEric Dumazet return false;
1812577e4432SEric Dumazet
1813577e4432SEric Dumazet return true;
181498917cf0SArjun Roy }
181598917cf0SArjun Roy
181698917cf0SArjun Roy static int find_next_mappable_frag(const skb_frag_t *frag,
181798917cf0SArjun Roy int remaining_in_skb)
181898917cf0SArjun Roy {
181998917cf0SArjun Roy int offset = 0;
182098917cf0SArjun Roy
182198917cf0SArjun Roy if (likely(can_map_frag(frag)))
182298917cf0SArjun Roy return 0;
182398917cf0SArjun Roy
182498917cf0SArjun Roy while (offset < remaining_in_skb && !can_map_frag(frag)) {
182598917cf0SArjun Roy offset += skb_frag_size(frag);
182698917cf0SArjun Roy ++frag;
182798917cf0SArjun Roy }
182898917cf0SArjun Roy return offset;
182998917cf0SArjun Roy }
183098917cf0SArjun Roy
18310c3936d3SArjun Roy static void tcp_zerocopy_set_hint_for_skb(struct sock *sk,
18320c3936d3SArjun Roy struct tcp_zerocopy_receive *zc,
18330c3936d3SArjun Roy struct sk_buff *skb, u32 offset)
18340c3936d3SArjun Roy {
18350c3936d3SArjun Roy u32 frag_offset, partial_frag_remainder = 0;
18360c3936d3SArjun Roy int mappable_offset;
18370c3936d3SArjun Roy skb_frag_t *frag;
18380c3936d3SArjun Roy
18390c3936d3SArjun Roy /* worst case: skip to next skb. try to improve on this case below */
18400c3936d3SArjun Roy zc->recv_skip_hint = skb->len - offset;
18410c3936d3SArjun Roy
18420c3936d3SArjun Roy /* Find the frag containing this offset (and how far into that frag) */
18430c3936d3SArjun Roy frag = skb_advance_to_frag(skb, offset, &frag_offset);
18440c3936d3SArjun Roy if (!frag)
18450c3936d3SArjun Roy return;
18460c3936d3SArjun Roy
18470c3936d3SArjun Roy if (frag_offset) {
18480c3936d3SArjun Roy struct skb_shared_info *info = skb_shinfo(skb);
18490c3936d3SArjun Roy
18500c3936d3SArjun Roy /* We read part of the last frag, must recvmsg() rest of skb. */
18510c3936d3SArjun Roy if (frag == &info->frags[info->nr_frags - 1])
18520c3936d3SArjun Roy return;
18530c3936d3SArjun Roy
18540c3936d3SArjun Roy /* Else, we must at least read the remainder in this frag. */
18550c3936d3SArjun Roy partial_frag_remainder = skb_frag_size(frag) - frag_offset;
18560c3936d3SArjun Roy zc->recv_skip_hint -= partial_frag_remainder;
18570c3936d3SArjun Roy ++frag;
18580c3936d3SArjun Roy }
18590c3936d3SArjun Roy
18600c3936d3SArjun Roy /* partial_frag_remainder: If part way through a frag, must read rest.
18610c3936d3SArjun Roy * mappable_offset: Bytes till next mappable frag, *not* counting bytes
18620c3936d3SArjun Roy * in partial_frag_remainder.
18630c3936d3SArjun Roy */
18640c3936d3SArjun Roy mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint);
18650c3936d3SArjun Roy zc->recv_skip_hint = mappable_offset + partial_frag_remainder;
18660c3936d3SArjun Roy }
18670c3936d3SArjun Roy
1868f21a3c48SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
1869ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss,
1870f21a3c48SArjun Roy int *cmsg_flags);
1871f21a3c48SArjun Roy static int receive_fallback_to_copy(struct sock *sk,
18727eeba170SArjun Roy struct tcp_zerocopy_receive *zc, int inq,
18737eeba170SArjun Roy struct scm_timestamping_internal *tss)
1874f21a3c48SArjun Roy {
1875f21a3c48SArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address;
1876f21a3c48SArjun Roy struct msghdr msg = {};
18777eeba170SArjun Roy int err;
1878f21a3c48SArjun Roy
1879f21a3c48SArjun Roy zc->length = 0;
1880f21a3c48SArjun Roy zc->recv_skip_hint = 0;
1881f21a3c48SArjun Roy
1882f21a3c48SArjun Roy if (copy_address != zc->copybuf_address)
1883f21a3c48SArjun Roy return -EINVAL;
1884f21a3c48SArjun Roy
18859fd7874cSJens Axboe err = import_ubuf(ITER_DEST, (void __user *)copy_address, inq,
18869fd7874cSJens Axboe &msg.msg_iter);
1887f21a3c48SArjun Roy if (err)
1888f21a3c48SArjun Roy return err;
1889f21a3c48SArjun Roy
1890ec095263SOliver Hartkopp err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT,
18917eeba170SArjun Roy tss, &zc->msg_flags);
1892f21a3c48SArjun Roy if (err < 0)
1893f21a3c48SArjun Roy return err;
1894f21a3c48SArjun Roy
1895f21a3c48SArjun Roy zc->copybuf_len = err;
18960c3936d3SArjun Roy if (likely(zc->copybuf_len)) {
18970c3936d3SArjun Roy struct sk_buff *skb;
18980c3936d3SArjun Roy u32 offset;
18990c3936d3SArjun Roy
19000c3936d3SArjun Roy skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset);
19010c3936d3SArjun Roy if (skb)
19020c3936d3SArjun Roy tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset);
19030c3936d3SArjun Roy }
1904f21a3c48SArjun Roy return 0;
1905f21a3c48SArjun Roy }
1906f21a3c48SArjun Roy
190718fb76edSArjun Roy static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
190818fb76edSArjun Roy struct sk_buff *skb, u32 copylen,
190918fb76edSArjun Roy u32 *offset, u32 *seq)
191018fb76edSArjun Roy {
191118fb76edSArjun Roy unsigned long copy_address = (unsigned long)zc->copybuf_address;
191218fb76edSArjun Roy struct msghdr msg = {};
191318fb76edSArjun Roy int err;
191418fb76edSArjun Roy
191518fb76edSArjun Roy if (copy_address != zc->copybuf_address)
191618fb76edSArjun Roy return -EINVAL;
191718fb76edSArjun Roy
19189fd7874cSJens Axboe err = import_ubuf(ITER_DEST, (void __user *)copy_address, copylen,
19199fd7874cSJens Axboe &msg.msg_iter);
192018fb76edSArjun Roy if (err)
192118fb76edSArjun Roy return err;
192218fb76edSArjun Roy err = skb_copy_datagram_msg(skb, *offset, &msg, copylen);
192318fb76edSArjun Roy if (err)
192418fb76edSArjun Roy return err;
192518fb76edSArjun Roy zc->recv_skip_hint -= copylen;
192618fb76edSArjun Roy *offset += copylen;
192718fb76edSArjun Roy *seq += copylen;
192818fb76edSArjun Roy return (__s32)copylen;
192918fb76edSArjun Roy }
193018fb76edSArjun Roy
19317eeba170SArjun Roy static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc,
193218fb76edSArjun Roy struct sock *sk,
193318fb76edSArjun Roy struct sk_buff *skb,
193418fb76edSArjun Roy u32 *seq,
19357eeba170SArjun Roy s32 copybuf_len,
19367eeba170SArjun Roy struct scm_timestamping_internal *tss)
193718fb76edSArjun Roy {
193818fb76edSArjun Roy u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint);
193918fb76edSArjun Roy
194018fb76edSArjun Roy if (!copylen)
194118fb76edSArjun Roy return 0;
194218fb76edSArjun Roy /* skb is null if inq < PAGE_SIZE. */
19437eeba170SArjun Roy if (skb) {
194418fb76edSArjun Roy offset = *seq - TCP_SKB_CB(skb)->seq;
19457eeba170SArjun Roy } else {
194618fb76edSArjun Roy skb = tcp_recv_skb(sk, *seq, &offset);
19477eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) {
19487eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss);
19497eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS;
19507eeba170SArjun Roy }
19517eeba170SArjun Roy }
195218fb76edSArjun Roy
195318fb76edSArjun Roy zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset,
195418fb76edSArjun Roy seq);
195518fb76edSArjun Roy return zc->copybuf_len < 0 ? 0 : copylen;
195618fb76edSArjun Roy }
195718fb76edSArjun Roy
195894ab9eb9SArjun Roy static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
195994ab9eb9SArjun Roy struct page **pending_pages,
196094ab9eb9SArjun Roy unsigned long pages_remaining,
196194ab9eb9SArjun Roy unsigned long *address,
196294ab9eb9SArjun Roy u32 *length,
196394ab9eb9SArjun Roy u32 *seq,
196494ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc,
196594ab9eb9SArjun Roy u32 total_bytes_to_map,
196694ab9eb9SArjun Roy int err)
196794ab9eb9SArjun Roy {
196894ab9eb9SArjun Roy /* At least one page did not map. Try zapping if we skipped earlier. */
196994ab9eb9SArjun Roy if (err == -EBUSY &&
197094ab9eb9SArjun Roy zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) {
197194ab9eb9SArjun Roy u32 maybe_zap_len;
197294ab9eb9SArjun Roy
197394ab9eb9SArjun Roy maybe_zap_len = total_bytes_to_map - /* All bytes to map */
197494ab9eb9SArjun Roy *length + /* Mapped or pending */
197594ab9eb9SArjun Roy (pages_remaining * PAGE_SIZE); /* Failed map. */
1976e9adcfecSMike Kravetz zap_page_range_single(vma, *address, maybe_zap_len, NULL);
197794ab9eb9SArjun Roy err = 0;
197894ab9eb9SArjun Roy }
197994ab9eb9SArjun Roy
198094ab9eb9SArjun Roy if (!err) {
198194ab9eb9SArjun Roy unsigned long leftover_pages = pages_remaining;
198294ab9eb9SArjun Roy int bytes_mapped;
198394ab9eb9SArjun Roy
1984e9adcfecSMike Kravetz /* We called zap_page_range_single, try to reinsert. */
198594ab9eb9SArjun Roy err = vm_insert_pages(vma, *address,
198694ab9eb9SArjun Roy pending_pages,
198794ab9eb9SArjun Roy &pages_remaining);
198894ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining);
198994ab9eb9SArjun Roy *seq += bytes_mapped;
199094ab9eb9SArjun Roy *address += bytes_mapped;
199194ab9eb9SArjun Roy }
199294ab9eb9SArjun Roy if (err) {
199394ab9eb9SArjun Roy /* Either we were unable to zap, OR we zapped, retried an
199494ab9eb9SArjun Roy * insert, and still had an issue. Either way, pages_remaining
199594ab9eb9SArjun Roy * is the number of pages we were unable to map, and we unroll
199694ab9eb9SArjun Roy * some state we speculatively touched before.
199794ab9eb9SArjun Roy */
199894ab9eb9SArjun Roy const int bytes_not_mapped = PAGE_SIZE * pages_remaining;
199994ab9eb9SArjun Roy
200094ab9eb9SArjun Roy *length -= bytes_not_mapped;
200194ab9eb9SArjun Roy zc->recv_skip_hint += bytes_not_mapped;
200294ab9eb9SArjun Roy }
200394ab9eb9SArjun Roy return err;
200494ab9eb9SArjun Roy }
200594ab9eb9SArjun Roy
20063763a24cSArjun Roy static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma,
20073763a24cSArjun Roy struct page **pages,
200894ab9eb9SArjun Roy unsigned int pages_to_map,
200994ab9eb9SArjun Roy unsigned long *address,
201094ab9eb9SArjun Roy u32 *length,
20113763a24cSArjun Roy u32 *seq,
201294ab9eb9SArjun Roy struct tcp_zerocopy_receive *zc,
201394ab9eb9SArjun Roy u32 total_bytes_to_map)
20143763a24cSArjun Roy {
20153763a24cSArjun Roy unsigned long pages_remaining = pages_to_map;
201694ab9eb9SArjun Roy unsigned int pages_mapped;
201794ab9eb9SArjun Roy unsigned int bytes_mapped;
201894ab9eb9SArjun Roy int err;
20193763a24cSArjun Roy
202094ab9eb9SArjun Roy err = vm_insert_pages(vma, *address, pages, &pages_remaining);
202194ab9eb9SArjun Roy pages_mapped = pages_to_map - (unsigned int)pages_remaining;
202294ab9eb9SArjun Roy bytes_mapped = PAGE_SIZE * pages_mapped;
20233763a24cSArjun Roy /* Even if vm_insert_pages fails, it may have partially succeeded in
20243763a24cSArjun Roy * mapping (some but not all of the pages).
20253763a24cSArjun Roy */
20263763a24cSArjun Roy *seq += bytes_mapped;
202794ab9eb9SArjun Roy *address += bytes_mapped;
202894ab9eb9SArjun Roy
202994ab9eb9SArjun Roy if (likely(!err))
203094ab9eb9SArjun Roy return 0;
203194ab9eb9SArjun Roy
203294ab9eb9SArjun Roy /* Error: maybe zap and retry + rollback state for failed inserts. */
203394ab9eb9SArjun Roy return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped,
203494ab9eb9SArjun Roy pages_remaining, address, length, seq, zc, total_bytes_to_map,
203594ab9eb9SArjun Roy err);
20363763a24cSArjun Roy }
20373763a24cSArjun Roy
20383c5a2fd0SArjun Roy #define TCP_VALID_ZC_MSG_FLAGS (TCP_CMSG_TS)
20397eeba170SArjun Roy static void tcp_zc_finalize_rx_tstamp(struct sock *sk,
20407eeba170SArjun Roy struct tcp_zerocopy_receive *zc,
20417eeba170SArjun Roy struct scm_timestamping_internal *tss)
20427eeba170SArjun Roy {
20437eeba170SArjun Roy unsigned long msg_control_addr;
20447eeba170SArjun Roy struct msghdr cmsg_dummy;
20457eeba170SArjun Roy
20467eeba170SArjun Roy msg_control_addr = (unsigned long)zc->msg_control;
2047c39ef213SKevin Brodsky cmsg_dummy.msg_control_user = (void __user *)msg_control_addr;
20487eeba170SArjun Roy cmsg_dummy.msg_controllen =
20497eeba170SArjun Roy (__kernel_size_t)zc->msg_controllen;
20507eeba170SArjun Roy cmsg_dummy.msg_flags = in_compat_syscall()
20517eeba170SArjun Roy ? MSG_CMSG_COMPAT : 0;
2052a6f8ee58SArjun Roy cmsg_dummy.msg_control_is_user = true;
20537eeba170SArjun Roy zc->msg_flags = 0;
20547eeba170SArjun Roy if (zc->msg_control == msg_control_addr &&
20557eeba170SArjun Roy zc->msg_controllen == cmsg_dummy.msg_controllen) {
20567eeba170SArjun Roy tcp_recv_timestamp(&cmsg_dummy, sk, tss);
20577eeba170SArjun Roy zc->msg_control = (__u64)
2058c39ef213SKevin Brodsky ((uintptr_t)cmsg_dummy.msg_control_user);
20597eeba170SArjun Roy zc->msg_controllen =
20607eeba170SArjun Roy (__u64)cmsg_dummy.msg_controllen;
20617eeba170SArjun Roy zc->msg_flags = (__u32)cmsg_dummy.msg_flags;
20627eeba170SArjun Roy }
20637eeba170SArjun Roy }
20647eeba170SArjun Roy
20657a7f0946SArjun Roy static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm,
20667a7f0946SArjun Roy unsigned long address,
20677a7f0946SArjun Roy bool *mmap_locked)
20687a7f0946SArjun Roy {
2069350f6bbcSMatthew Wilcox (Oracle) struct vm_area_struct *vma = lock_vma_under_rcu(mm, address);
20707a7f0946SArjun Roy
20717a7f0946SArjun Roy if (vma) {
2072350f6bbcSMatthew Wilcox (Oracle) if (vma->vm_ops != &tcp_vm_ops) {
20737a7f0946SArjun Roy vma_end_read(vma);
20747a7f0946SArjun Roy return NULL;
20757a7f0946SArjun Roy }
20767a7f0946SArjun Roy *mmap_locked = false;
20777a7f0946SArjun Roy return vma;
20787a7f0946SArjun Roy }
20797a7f0946SArjun Roy
20807a7f0946SArjun Roy mmap_read_lock(mm);
20817a7f0946SArjun Roy vma = vma_lookup(mm, address);
2082350f6bbcSMatthew Wilcox (Oracle) if (!vma || vma->vm_ops != &tcp_vm_ops) {
20837a7f0946SArjun Roy mmap_read_unlock(mm);
20847a7f0946SArjun Roy return NULL;
20857a7f0946SArjun Roy }
20867a7f0946SArjun Roy *mmap_locked = true;
20877a7f0946SArjun Roy return vma;
20887a7f0946SArjun Roy }
20897a7f0946SArjun Roy
209094ab9eb9SArjun Roy #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32
209105255b82SEric Dumazet static int tcp_zerocopy_receive(struct sock *sk,
20927eeba170SArjun Roy struct tcp_zerocopy_receive *zc,
20937eeba170SArjun Roy struct scm_timestamping_internal *tss)
209405255b82SEric Dumazet {
209594ab9eb9SArjun Roy u32 length = 0, offset, vma_len, avail_len, copylen = 0;
209605255b82SEric Dumazet unsigned long address = (unsigned long)zc->address;
209794ab9eb9SArjun Roy struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE];
209818fb76edSArjun Roy s32 copybuf_len = zc->copybuf_len;
209918fb76edSArjun Roy struct tcp_sock *tp = tcp_sk(sk);
210005255b82SEric Dumazet const skb_frag_t *frags = NULL;
210194ab9eb9SArjun Roy unsigned int pages_to_map = 0;
210205255b82SEric Dumazet struct vm_area_struct *vma;
210305255b82SEric Dumazet struct sk_buff *skb = NULL;
210418fb76edSArjun Roy u32 seq = tp->copied_seq;
210594ab9eb9SArjun Roy u32 total_bytes_to_map;
210618fb76edSArjun Roy int inq = tcp_inq(sk);
21077a7f0946SArjun Roy bool mmap_locked;
210893ab6cc6SEric Dumazet int ret;
210993ab6cc6SEric Dumazet
211018fb76edSArjun Roy zc->copybuf_len = 0;
21117eeba170SArjun Roy zc->msg_flags = 0;
211218fb76edSArjun Roy
211305255b82SEric Dumazet if (address & (PAGE_SIZE - 1) || address != zc->address)
211493ab6cc6SEric Dumazet return -EINVAL;
211593ab6cc6SEric Dumazet
211693ab6cc6SEric Dumazet if (sk->sk_state == TCP_LISTEN)
211705255b82SEric Dumazet return -ENOTCONN;
211893ab6cc6SEric Dumazet
211993ab6cc6SEric Dumazet sock_rps_record_flow(sk);
212093ab6cc6SEric Dumazet
2121f21a3c48SArjun Roy if (inq && inq <= copybuf_len)
21227eeba170SArjun Roy return receive_fallback_to_copy(sk, zc, inq, tss);
2123f21a3c48SArjun Roy
2124936ced41SArjun Roy if (inq < PAGE_SIZE) {
2125936ced41SArjun Roy zc->length = 0;
2126936ced41SArjun Roy zc->recv_skip_hint = inq;
2127936ced41SArjun Roy if (!inq && sock_flag(sk, SOCK_DONE))
2128936ced41SArjun Roy return -EIO;
2129936ced41SArjun Roy return 0;
2130936ced41SArjun Roy }
2131936ced41SArjun Roy
21327a7f0946SArjun Roy vma = find_tcp_vma(current->mm, address, &mmap_locked);
21337a7f0946SArjun Roy if (!vma)
2134e776af60SEric Dumazet return -EINVAL;
21357a7f0946SArjun Roy
213618fb76edSArjun Roy vma_len = min_t(unsigned long, zc->length, vma->vm_end - address);
213718fb76edSArjun Roy avail_len = min_t(u32, vma_len, inq);
213894ab9eb9SArjun Roy total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
213994ab9eb9SArjun Roy if (total_bytes_to_map) {
214094ab9eb9SArjun Roy if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
2141e9adcfecSMike Kravetz zap_page_range_single(vma, address, total_bytes_to_map,
2142e9adcfecSMike Kravetz NULL);
214394ab9eb9SArjun Roy zc->length = total_bytes_to_map;
214405255b82SEric Dumazet zc->recv_skip_hint = 0;
21458f2b0293SSoheil Hassas Yeganeh } else {
214618fb76edSArjun Roy zc->length = avail_len;
214718fb76edSArjun Roy zc->recv_skip_hint = avail_len;
21488f2b0293SSoheil Hassas Yeganeh }
214905255b82SEric Dumazet ret = 0;
215005255b82SEric Dumazet while (length + PAGE_SIZE <= zc->length) {
215198917cf0SArjun Roy int mappable_offset;
215294ab9eb9SArjun Roy struct page *page;
215398917cf0SArjun Roy
215405255b82SEric Dumazet if (zc->recv_skip_hint < PAGE_SIZE) {
21557fba5309SArjun Roy u32 offset_frag;
21567fba5309SArjun Roy
215705255b82SEric Dumazet if (skb) {
21580e627190SArjun Roy if (zc->recv_skip_hint > 0)
21590e627190SArjun Roy break;
216005255b82SEric Dumazet skb = skb->next;
216105255b82SEric Dumazet offset = seq - TCP_SKB_CB(skb)->seq;
216205255b82SEric Dumazet } else {
216393ab6cc6SEric Dumazet skb = tcp_recv_skb(sk, seq, &offset);
216405255b82SEric Dumazet }
21657eeba170SArjun Roy
216665249febSMina Almasry if (!skb_frags_readable(skb))
216765249febSMina Almasry break;
216865249febSMina Almasry
21697eeba170SArjun Roy if (TCP_SKB_CB(skb)->has_rxtstamp) {
21707eeba170SArjun Roy tcp_update_recv_tstamps(skb, tss);
21717eeba170SArjun Roy zc->msg_flags |= TCP_CMSG_TS;
21727eeba170SArjun Roy }
217305255b82SEric Dumazet zc->recv_skip_hint = skb->len - offset;
21747fba5309SArjun Roy frags = skb_advance_to_frag(skb, offset, &offset_frag);
21757fba5309SArjun Roy if (!frags || offset_frag)
217605255b82SEric Dumazet break;
217705255b82SEric Dumazet }
2178789762ceSSoheil Hassas Yeganeh
217998917cf0SArjun Roy mappable_offset = find_next_mappable_frag(frags,
218098917cf0SArjun Roy zc->recv_skip_hint);
218198917cf0SArjun Roy if (mappable_offset) {
218298917cf0SArjun Roy zc->recv_skip_hint = mappable_offset;
218305255b82SEric Dumazet break;
2184789762ceSSoheil Hassas Yeganeh }
218594ab9eb9SArjun Roy page = skb_frag_page(frags);
21869f6b619eSMina Almasry if (WARN_ON_ONCE(!page))
21879f6b619eSMina Almasry break;
21889f6b619eSMina Almasry
218994ab9eb9SArjun Roy prefetchw(page);
219094ab9eb9SArjun Roy pages[pages_to_map++] = page;
219105255b82SEric Dumazet length += PAGE_SIZE;
219205255b82SEric Dumazet zc->recv_skip_hint -= PAGE_SIZE;
219305255b82SEric Dumazet frags++;
219494ab9eb9SArjun Roy if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE ||
219594ab9eb9SArjun Roy zc->recv_skip_hint < PAGE_SIZE) {
219694ab9eb9SArjun Roy /* Either full batch, or we're about to go to next skb
219794ab9eb9SArjun Roy * (and we cannot unroll failed ops across skbs).
219894ab9eb9SArjun Roy */
219994ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages,
220094ab9eb9SArjun Roy pages_to_map,
220194ab9eb9SArjun Roy &address, &length,
220294ab9eb9SArjun Roy &seq, zc,
220394ab9eb9SArjun Roy total_bytes_to_map);
22043763a24cSArjun Roy if (ret)
22053763a24cSArjun Roy goto out;
220694ab9eb9SArjun Roy pages_to_map = 0;
22073763a24cSArjun Roy }
22083763a24cSArjun Roy }
220994ab9eb9SArjun Roy if (pages_to_map) {
221094ab9eb9SArjun Roy ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map,
221194ab9eb9SArjun Roy &address, &length, &seq,
221294ab9eb9SArjun Roy zc, total_bytes_to_map);
221393ab6cc6SEric Dumazet }
221405255b82SEric Dumazet out:
22157a7f0946SArjun Roy if (mmap_locked)
2216d8ed45c5SMichel Lespinasse mmap_read_unlock(current->mm);
22177a7f0946SArjun Roy else
22187a7f0946SArjun Roy vma_end_read(vma);
221918fb76edSArjun Roy /* Try to copy straggler data. */
222018fb76edSArjun Roy if (!ret)
22217eeba170SArjun Roy copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss);
222218fb76edSArjun Roy
222318fb76edSArjun Roy if (length + copylen) {
22247db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, seq);
222593ab6cc6SEric Dumazet tcp_rcv_space_adjust(sk);
222693ab6cc6SEric Dumazet
222793ab6cc6SEric Dumazet /* Clean up data we have read: This will do ACK frames. */
222893ab6cc6SEric Dumazet tcp_recv_skb(sk, seq, &offset);
222918fb76edSArjun Roy tcp_cleanup_rbuf(sk, length + copylen);
223093ab6cc6SEric Dumazet ret = 0;
223105255b82SEric Dumazet if (length == zc->length)
223205255b82SEric Dumazet zc->recv_skip_hint = 0;
223305255b82SEric Dumazet } else {
223405255b82SEric Dumazet if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE))
223505255b82SEric Dumazet ret = -EIO;
223605255b82SEric Dumazet }
223705255b82SEric Dumazet zc->length = length;
223893ab6cc6SEric Dumazet return ret;
223993ab6cc6SEric Dumazet }
224005255b82SEric Dumazet #endif
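/* Illustrative user-space sequence driving tcp_zerocopy_receive() (a
 * sketch with error handling trimmed; the tcp_mmap selftest under
 * tools/testing/selftests/net is a complete example). The receive area
 * is a page-aligned mapping of the TCP socket itself:
 *
 *	void *addr = mmap(NULL, chunk_size, PROT_READ, MAP_SHARED, fd, 0);
 *	struct tcp_zerocopy_receive zc = {
 *		.address = (__u64)(unsigned long)addr,
 *		.length  = chunk_size,		// multiple of PAGE_SIZE
 *	};
 *	socklen_t zc_len = sizeof(zc);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
 *		       &zc, &zc_len) == 0) {
 *		// zc.length bytes are now mapped at addr;
 *		// zc.recv_skip_hint bytes should be recv()ed normally.
 *	}
 */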
224193ab6cc6SEric Dumazet
224298aaa913SMike Maloney /* Similar to __sock_recv_timestamp, but does not require an skb */
2243892bfd3dSFlorian Westphal void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
22449718475eSDeepa Dinamani struct scm_timestamping_internal *tss)
224598aaa913SMike Maloney {
2246887feae3SDeepa Dinamani int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
2247be8e9eb3SJason Xing u32 tsflags = READ_ONCE(sk->sk_tsflags);
224898aaa913SMike Maloney bool has_timestamping = false;
224998aaa913SMike Maloney
225098aaa913SMike Maloney if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
225198aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMP)) {
225298aaa913SMike Maloney if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
2253887feae3SDeepa Dinamani if (new_tstamp) {
2254df1b4ba9SArnd Bergmann struct __kernel_timespec kts = {
2255df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec,
2256df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec,
2257df1b4ba9SArnd Bergmann };
2258887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
2259887feae3SDeepa Dinamani sizeof(kts), &kts);
2260887feae3SDeepa Dinamani } else {
2261df1b4ba9SArnd Bergmann struct __kernel_old_timespec ts_old = {
2262df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec,
2263df1b4ba9SArnd Bergmann .tv_nsec = tss->ts[0].tv_nsec,
2264df1b4ba9SArnd Bergmann };
22657f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
22669718475eSDeepa Dinamani sizeof(ts_old), &ts_old);
2267887feae3SDeepa Dinamani }
226898aaa913SMike Maloney } else {
2269887feae3SDeepa Dinamani if (new_tstamp) {
2270df1b4ba9SArnd Bergmann struct __kernel_sock_timeval stv = {
2271df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec,
2272df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000,
2273df1b4ba9SArnd Bergmann };
2274887feae3SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
2275887feae3SDeepa Dinamani sizeof(stv), &stv);
2276887feae3SDeepa Dinamani } else {
2277df1b4ba9SArnd Bergmann struct __kernel_old_timeval tv = {
2278df1b4ba9SArnd Bergmann .tv_sec = tss->ts[0].tv_sec,
2279df1b4ba9SArnd Bergmann .tv_usec = tss->ts[0].tv_nsec / 1000,
2280df1b4ba9SArnd Bergmann };
22817f1bc6e9SDeepa Dinamani put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
228298aaa913SMike Maloney sizeof(tv), &tv);
228398aaa913SMike Maloney }
228498aaa913SMike Maloney }
2285887feae3SDeepa Dinamani }
228698aaa913SMike Maloney
2287be8e9eb3SJason Xing if (tsflags & SOF_TIMESTAMPING_SOFTWARE &&
2288be8e9eb3SJason Xing (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE ||
2289be8e9eb3SJason Xing !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
229098aaa913SMike Maloney has_timestamping = true;
229198aaa913SMike Maloney else
22929718475eSDeepa Dinamani tss->ts[0] = (struct timespec64) {0};
229398aaa913SMike Maloney }
229498aaa913SMike Maloney
229598aaa913SMike Maloney if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
2296be8e9eb3SJason Xing if (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE &&
2297be8e9eb3SJason Xing (tsflags & SOF_TIMESTAMPING_RX_HARDWARE ||
2298be8e9eb3SJason Xing !(tsflags & SOF_TIMESTAMPING_OPT_RX_FILTER)))
229998aaa913SMike Maloney has_timestamping = true;
230098aaa913SMike Maloney else
23019718475eSDeepa Dinamani tss->ts[2] = (struct timespec64) {0};
230298aaa913SMike Maloney }
230398aaa913SMike Maloney
230498aaa913SMike Maloney if (has_timestamping) {
23059718475eSDeepa Dinamani tss->ts[1] = (struct timespec64) {0};
23069718475eSDeepa Dinamani if (sock_flag(sk, SOCK_TSTAMP_NEW))
23079718475eSDeepa Dinamani put_cmsg_scm_timestamping64(msg, tss);
23089718475eSDeepa Dinamani else
23099718475eSDeepa Dinamani put_cmsg_scm_timestamping(msg, tss);
231098aaa913SMike Maloney }
231198aaa913SMike Maloney }
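/* Consumer-side sketch (illustrative, not from the original source):
 * with SO_TIMESTAMPING enabled for SOF_TIMESTAMPING_SOFTWARE |
 * SOF_TIMESTAMPING_RX_SOFTWARE, the cmsg emitted above is parsed from
 * recvmsg() ancillary data:
 *
 *	struct cmsghdr *cm;
 *
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level == SOL_SOCKET &&
 *		    cm->cmsg_type == SCM_TIMESTAMPING) {
 *			struct scm_timestamping *tss = (void *)CMSG_DATA(cm);
 *			// tss->ts[0]: software, tss->ts[2]: raw hardware
 *		}
 *	}
 */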
231298aaa913SMike Maloney
2313b75eba76SSoheil Hassas Yeganeh static int tcp_inq_hint(struct sock *sk)
2314b75eba76SSoheil Hassas Yeganeh {
2315b75eba76SSoheil Hassas Yeganeh const struct tcp_sock *tp = tcp_sk(sk);
2316b75eba76SSoheil Hassas Yeganeh u32 copied_seq = READ_ONCE(tp->copied_seq);
2317b75eba76SSoheil Hassas Yeganeh u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);
2318b75eba76SSoheil Hassas Yeganeh int inq;
2319b75eba76SSoheil Hassas Yeganeh
2320b75eba76SSoheil Hassas Yeganeh inq = rcv_nxt - copied_seq;
2321b75eba76SSoheil Hassas Yeganeh if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) {
2322b75eba76SSoheil Hassas Yeganeh lock_sock(sk);
2323b75eba76SSoheil Hassas Yeganeh inq = tp->rcv_nxt - tp->copied_seq;
2324b75eba76SSoheil Hassas Yeganeh release_sock(sk);
2325b75eba76SSoheil Hassas Yeganeh }
23266466e715SSoheil Hassas Yeganeh /* After receiving a FIN, tell the user-space to continue reading
23276466e715SSoheil Hassas Yeganeh * by returning a non-zero inq.
23286466e715SSoheil Hassas Yeganeh */
23296466e715SSoheil Hassas Yeganeh if (inq == 0 && sock_flag(sk, SOCK_DONE))
23306466e715SSoheil Hassas Yeganeh inq = 1;
2331b75eba76SSoheil Hassas Yeganeh return inq;
2332b75eba76SSoheil Hassas Yeganeh }
2333b75eba76SSoheil Hassas Yeganeh
2334*8f0b3cc9SMina Almasry /* batch __xa_alloc() calls and reduce xa_lock()/xa_unlock() overhead. */
2335*8f0b3cc9SMina Almasry struct tcp_xa_pool {
2336*8f0b3cc9SMina Almasry u8 max; /* max <= MAX_SKB_FRAGS */
2337*8f0b3cc9SMina Almasry u8 idx; /* idx <= max */
2338*8f0b3cc9SMina Almasry __u32 tokens[MAX_SKB_FRAGS];
2339*8f0b3cc9SMina Almasry netmem_ref netmems[MAX_SKB_FRAGS];
2340*8f0b3cc9SMina Almasry };
2341*8f0b3cc9SMina Almasry
2342*8f0b3cc9SMina Almasry static void tcp_xa_pool_commit_locked(struct sock *sk, struct tcp_xa_pool *p)
2343*8f0b3cc9SMina Almasry {
2344*8f0b3cc9SMina Almasry int i;
2345*8f0b3cc9SMina Almasry
2346*8f0b3cc9SMina Almasry /* Commit part that has been copied to user space. */
2347*8f0b3cc9SMina Almasry for (i = 0; i < p->idx; i++)
2348*8f0b3cc9SMina Almasry __xa_cmpxchg(&sk->sk_user_frags, p->tokens[i], XA_ZERO_ENTRY,
2349*8f0b3cc9SMina Almasry (__force void *)p->netmems[i], GFP_KERNEL);
2350*8f0b3cc9SMina Almasry /* Rollback what has been pre-allocated and is no longer needed. */
2351*8f0b3cc9SMina Almasry for (; i < p->max; i++)
2352*8f0b3cc9SMina Almasry __xa_erase(&sk->sk_user_frags, p->tokens[i]);
2353*8f0b3cc9SMina Almasry
2354*8f0b3cc9SMina Almasry p->max = 0;
2355*8f0b3cc9SMina Almasry p->idx = 0;
2356*8f0b3cc9SMina Almasry }
2357*8f0b3cc9SMina Almasry
2358*8f0b3cc9SMina Almasry static void tcp_xa_pool_commit(struct sock *sk, struct tcp_xa_pool *p)
2359*8f0b3cc9SMina Almasry {
2360*8f0b3cc9SMina Almasry if (!p->max)
2361*8f0b3cc9SMina Almasry return;
2362*8f0b3cc9SMina Almasry
2363*8f0b3cc9SMina Almasry xa_lock_bh(&sk->sk_user_frags);
2364*8f0b3cc9SMina Almasry
2365*8f0b3cc9SMina Almasry tcp_xa_pool_commit_locked(sk, p);
2366*8f0b3cc9SMina Almasry
2367*8f0b3cc9SMina Almasry xa_unlock_bh(&sk->sk_user_frags);
2368*8f0b3cc9SMina Almasry }
2369*8f0b3cc9SMina Almasry
2370*8f0b3cc9SMina Almasry static int tcp_xa_pool_refill(struct sock *sk, struct tcp_xa_pool *p,
2371*8f0b3cc9SMina Almasry unsigned int max_frags)
2372*8f0b3cc9SMina Almasry {
2373*8f0b3cc9SMina Almasry int err, k;
2374*8f0b3cc9SMina Almasry
2375*8f0b3cc9SMina Almasry if (p->idx < p->max)
2376*8f0b3cc9SMina Almasry return 0;
2377*8f0b3cc9SMina Almasry
2378*8f0b3cc9SMina Almasry xa_lock_bh(&sk->sk_user_frags);
2379*8f0b3cc9SMina Almasry
2380*8f0b3cc9SMina Almasry tcp_xa_pool_commit_locked(sk, p);
2381*8f0b3cc9SMina Almasry
2382*8f0b3cc9SMina Almasry for (k = 0; k < max_frags; k++) {
2383*8f0b3cc9SMina Almasry err = __xa_alloc(&sk->sk_user_frags, &p->tokens[k],
2384*8f0b3cc9SMina Almasry XA_ZERO_ENTRY, xa_limit_31b, GFP_KERNEL);
2385*8f0b3cc9SMina Almasry if (err)
2386*8f0b3cc9SMina Almasry break;
2387*8f0b3cc9SMina Almasry }
2388*8f0b3cc9SMina Almasry
2389*8f0b3cc9SMina Almasry xa_unlock_bh(&sk->sk_user_frags);
2390*8f0b3cc9SMina Almasry
2391*8f0b3cc9SMina Almasry p->max = k;
2392*8f0b3cc9SMina Almasry p->idx = 0;
2393*8f0b3cc9SMina Almasry return k ? 0 : err;
2394*8f0b3cc9SMina Almasry }
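/* Typical pool lifecycle (an illustrative summary matching the use in
 * tcp_recvmsg_dmabuf() below): refill reserves up to max_frags tokens
 * as XA_ZERO_ENTRY placeholders, the caller hands tokens[idx] to user
 * space one frag at a time, and commit swaps the consumed reservations
 * for the frag netmems while erasing the unused ones.
 */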
2395*8f0b3cc9SMina Almasry
2396*8f0b3cc9SMina Almasry /* On error, returns the -errno. On success, returns number of bytes sent to the
2397*8f0b3cc9SMina Almasry * user. May not consume all of @remaining_len.
2398*8f0b3cc9SMina Almasry */
2399*8f0b3cc9SMina Almasry static int tcp_recvmsg_dmabuf(struct sock *sk, const struct sk_buff *skb,
2400*8f0b3cc9SMina Almasry unsigned int offset, struct msghdr *msg,
2401*8f0b3cc9SMina Almasry int remaining_len)
2402*8f0b3cc9SMina Almasry {
2403*8f0b3cc9SMina Almasry struct dmabuf_cmsg dmabuf_cmsg = { 0 };
2404*8f0b3cc9SMina Almasry struct tcp_xa_pool tcp_xa_pool;
2405*8f0b3cc9SMina Almasry unsigned int start;
2406*8f0b3cc9SMina Almasry int i, copy, n;
2407*8f0b3cc9SMina Almasry int sent = 0;
2408*8f0b3cc9SMina Almasry int err = 0;
2409*8f0b3cc9SMina Almasry
2410*8f0b3cc9SMina Almasry tcp_xa_pool.max = 0;
2411*8f0b3cc9SMina Almasry tcp_xa_pool.idx = 0;
2412*8f0b3cc9SMina Almasry do {
2413*8f0b3cc9SMina Almasry start = skb_headlen(skb);
2414*8f0b3cc9SMina Almasry
2415*8f0b3cc9SMina Almasry if (skb_frags_readable(skb)) {
2416*8f0b3cc9SMina Almasry err = -ENODEV;
2417*8f0b3cc9SMina Almasry goto out;
2418*8f0b3cc9SMina Almasry }
2419*8f0b3cc9SMina Almasry
2420*8f0b3cc9SMina Almasry /* Copy header. */
2421*8f0b3cc9SMina Almasry copy = start - offset;
2422*8f0b3cc9SMina Almasry if (copy > 0) {
2423*8f0b3cc9SMina Almasry copy = min(copy, remaining_len);
2424*8f0b3cc9SMina Almasry
2425*8f0b3cc9SMina Almasry n = copy_to_iter(skb->data + offset, copy,
2426*8f0b3cc9SMina Almasry &msg->msg_iter);
2427*8f0b3cc9SMina Almasry if (n != copy) {
2428*8f0b3cc9SMina Almasry err = -EFAULT;
2429*8f0b3cc9SMina Almasry goto out;
2430*8f0b3cc9SMina Almasry }
2431*8f0b3cc9SMina Almasry
2432*8f0b3cc9SMina Almasry offset += copy;
2433*8f0b3cc9SMina Almasry remaining_len -= copy;
2434*8f0b3cc9SMina Almasry
2435*8f0b3cc9SMina Almasry /* First, a dmabuf_cmsg for the number of bytes copied to
2436*8f0b3cc9SMina Almasry * the user buffer.
2437*8f0b3cc9SMina Almasry */
2438*8f0b3cc9SMina Almasry memset(&dmabuf_cmsg, 0, sizeof(dmabuf_cmsg));
2439*8f0b3cc9SMina Almasry dmabuf_cmsg.frag_size = copy;
2440*8f0b3cc9SMina Almasry err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_LINEAR,
2441*8f0b3cc9SMina Almasry sizeof(dmabuf_cmsg), &dmabuf_cmsg);
2442*8f0b3cc9SMina Almasry if (err || msg->msg_flags & MSG_CTRUNC) {
2443*8f0b3cc9SMina Almasry msg->msg_flags &= ~MSG_CTRUNC;
2444*8f0b3cc9SMina Almasry if (!err)
2445*8f0b3cc9SMina Almasry err = -ETOOSMALL;
2446*8f0b3cc9SMina Almasry goto out;
2447*8f0b3cc9SMina Almasry }
2448*8f0b3cc9SMina Almasry
2449*8f0b3cc9SMina Almasry sent += copy;
2450*8f0b3cc9SMina Almasry
2451*8f0b3cc9SMina Almasry if (remaining_len == 0)
2452*8f0b3cc9SMina Almasry goto out;
2453*8f0b3cc9SMina Almasry }
2454*8f0b3cc9SMina Almasry
2455*8f0b3cc9SMina Almasry /* After the linear part, send information about the dmabuf
2456*8f0b3cc9SMina Almasry * pages through a sequence of cmsgs.
2457*8f0b3cc9SMina Almasry */
2458*8f0b3cc9SMina Almasry for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2459*8f0b3cc9SMina Almasry skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2460*8f0b3cc9SMina Almasry struct net_iov *niov;
2461*8f0b3cc9SMina Almasry u64 frag_offset;
2462*8f0b3cc9SMina Almasry int end;
2463*8f0b3cc9SMina Almasry
2464*8f0b3cc9SMina Almasry /* !skb_frags_readable() should indicate that ALL the
2465*8f0b3cc9SMina Almasry * frags in this skb are dmabuf net_iovs. We're checking
2466*8f0b3cc9SMina Almasry * for that flag above, but also check individual frags
2467*8f0b3cc9SMina Almasry * here. If the tcp stack is not setting
2468*8f0b3cc9SMina Almasry * skb_frags_readable() correctly, we still don't want
2469*8f0b3cc9SMina Almasry * to crash here.
2470*8f0b3cc9SMina Almasry */
2471*8f0b3cc9SMina Almasry if (!skb_frag_net_iov(frag)) {
2472*8f0b3cc9SMina Almasry net_err_ratelimited("Found non-dmabuf skb with net_iov");
2473*8f0b3cc9SMina Almasry err = -ENODEV;
2474*8f0b3cc9SMina Almasry goto out;
2475*8f0b3cc9SMina Almasry }
2476*8f0b3cc9SMina Almasry
2477*8f0b3cc9SMina Almasry niov = skb_frag_net_iov(frag);
2478*8f0b3cc9SMina Almasry end = start + skb_frag_size(frag);
2479*8f0b3cc9SMina Almasry copy = end - offset;
2480*8f0b3cc9SMina Almasry
2481*8f0b3cc9SMina Almasry if (copy > 0) {
2482*8f0b3cc9SMina Almasry copy = min(copy, remaining_len);
2483*8f0b3cc9SMina Almasry
2484*8f0b3cc9SMina Almasry frag_offset = net_iov_virtual_addr(niov) +
2485*8f0b3cc9SMina Almasry skb_frag_off(frag) + offset -
2486*8f0b3cc9SMina Almasry start;
2487*8f0b3cc9SMina Almasry dmabuf_cmsg.frag_offset = frag_offset;
2488*8f0b3cc9SMina Almasry dmabuf_cmsg.frag_size = copy;
2489*8f0b3cc9SMina Almasry err = tcp_xa_pool_refill(sk, &tcp_xa_pool,
2490*8f0b3cc9SMina Almasry skb_shinfo(skb)->nr_frags - i);
2491*8f0b3cc9SMina Almasry if (err)
2492*8f0b3cc9SMina Almasry goto out;
2493*8f0b3cc9SMina Almasry
2494*8f0b3cc9SMina Almasry /* Will perform the exchange later */
2495*8f0b3cc9SMina Almasry dmabuf_cmsg.frag_token = tcp_xa_pool.tokens[tcp_xa_pool.idx];
2496*8f0b3cc9SMina Almasry dmabuf_cmsg.dmabuf_id = net_iov_binding_id(niov);
2497*8f0b3cc9SMina Almasry
2498*8f0b3cc9SMina Almasry offset += copy;
2499*8f0b3cc9SMina Almasry remaining_len -= copy;
2500*8f0b3cc9SMina Almasry
2501*8f0b3cc9SMina Almasry err = put_cmsg(msg, SOL_SOCKET,
2502*8f0b3cc9SMina Almasry SO_DEVMEM_DMABUF,
2503*8f0b3cc9SMina Almasry sizeof(dmabuf_cmsg),
2504*8f0b3cc9SMina Almasry &dmabuf_cmsg);
2505*8f0b3cc9SMina Almasry if (err || msg->msg_flags & MSG_CTRUNC) {
2506*8f0b3cc9SMina Almasry msg->msg_flags &= ~MSG_CTRUNC;
2507*8f0b3cc9SMina Almasry if (!err)
2508*8f0b3cc9SMina Almasry err = -ETOOSMALL;
2509*8f0b3cc9SMina Almasry goto out;
2510*8f0b3cc9SMina Almasry }
2511*8f0b3cc9SMina Almasry
2512*8f0b3cc9SMina Almasry atomic_long_inc(&niov->pp_ref_count);
2513*8f0b3cc9SMina Almasry tcp_xa_pool.netmems[tcp_xa_pool.idx++] = skb_frag_netmem(frag);
2514*8f0b3cc9SMina Almasry
2515*8f0b3cc9SMina Almasry sent += copy;
2516*8f0b3cc9SMina Almasry
2517*8f0b3cc9SMina Almasry if (remaining_len == 0)
2518*8f0b3cc9SMina Almasry goto out;
2519*8f0b3cc9SMina Almasry }
2520*8f0b3cc9SMina Almasry start = end;
2521*8f0b3cc9SMina Almasry }
2522*8f0b3cc9SMina Almasry
2523*8f0b3cc9SMina Almasry tcp_xa_pool_commit(sk, &tcp_xa_pool);
2524*8f0b3cc9SMina Almasry if (!remaining_len)
2525*8f0b3cc9SMina Almasry goto out;
2526*8f0b3cc9SMina Almasry
2527*8f0b3cc9SMina Almasry /* If remaining_len is not satisfied yet, move on to the next
2528*8f0b3cc9SMina Almasry * skb in the frag_list to satisfy it.
2529*8f0b3cc9SMina Almasry */
2530*8f0b3cc9SMina Almasry skb = skb_shinfo(skb)->frag_list ?: skb->next;
2531*8f0b3cc9SMina Almasry
2532*8f0b3cc9SMina Almasry offset = offset - start;
2533*8f0b3cc9SMina Almasry } while (skb);
2534*8f0b3cc9SMina Almasry
2535*8f0b3cc9SMina Almasry if (remaining_len) {
2536*8f0b3cc9SMina Almasry err = -EFAULT;
2537*8f0b3cc9SMina Almasry goto out;
2538*8f0b3cc9SMina Almasry }
2539*8f0b3cc9SMina Almasry
2540*8f0b3cc9SMina Almasry out:
2541*8f0b3cc9SMina Almasry tcp_xa_pool_commit(sk, &tcp_xa_pool);
2542*8f0b3cc9SMina Almasry if (!sent)
2543*8f0b3cc9SMina Almasry sent = err;
2544*8f0b3cc9SMina Almasry
2545*8f0b3cc9SMina Almasry return sent;
2546*8f0b3cc9SMina Almasry }
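/* User-space sketch for devmem receives (illustrative; see
 * Documentation/networking/devmem.rst for the full flow). Frags arrive
 * as SO_DEVMEM_* cmsgs, and dmabuf tokens must eventually be returned:
 *
 *	struct msghdr msg = { ... };
 *	struct cmsghdr *cm;
 *
 *	recvmsg(fd, &msg, MSG_SOCK_DEVMEM);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level != SOL_SOCKET ||
 *		    (cm->cmsg_type != SO_DEVMEM_DMABUF &&
 *		     cm->cmsg_type != SO_DEVMEM_LINEAR))
 *			continue;
 *		struct dmabuf_cmsg *dc = (void *)CMSG_DATA(cm);
 *		// SO_DEVMEM_LINEAR: dc->frag_size bytes went to msg_iov.
 *		// SO_DEVMEM_DMABUF: dc->frag_offset/frag_size locate the
 *		// payload in the dmabuf; keep dc->frag_token for later
 *		// release via setsockopt(SO_DEVMEM_DONTNEED).
 *	}
 */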
2547*8f0b3cc9SMina Almasry
25481da177e4SLinus Torvalds /*
25491da177e4SLinus Torvalds * This routine copies from a sock struct into the user buffer.
25501da177e4SLinus Torvalds *
25511da177e4SLinus Torvalds * Technical note: in 2.3 we work on _locked_ socket, so that
25521da177e4SLinus Torvalds * tricks with *seq access order and skb->users are not required.
25531da177e4SLinus Torvalds * Probably, code can be easily improved even more.
25541da177e4SLinus Torvalds */
25551da177e4SLinus Torvalds
25562cd81161SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
2557ec095263SOliver Hartkopp int flags, struct scm_timestamping_internal *tss,
25582cd81161SArjun Roy int *cmsg_flags)
25591da177e4SLinus Torvalds {
25601da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
2561*8f0b3cc9SMina Almasry int last_copied_dmabuf = -1; /* uninitialized */
25621da177e4SLinus Torvalds int copied = 0;
25631da177e4SLinus Torvalds u32 peek_seq;
25641da177e4SLinus Torvalds u32 *seq;
25651da177e4SLinus Torvalds unsigned long used;
25662cd81161SArjun Roy int err;
25671da177e4SLinus Torvalds int target; /* Read at least this many bytes */
25681da177e4SLinus Torvalds long timeo;
2569dfbafc99SSabrina Dubroca struct sk_buff *skb, *last;
257005ea4916SJon Maloy u32 peek_offset = 0;
257177527313SIlpo Järvinen u32 urg_hole = 0;
25721da177e4SLinus Torvalds
25731da177e4SLinus Torvalds err = -ENOTCONN;
25741da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN)
25751da177e4SLinus Torvalds goto out;
25761da177e4SLinus Torvalds
2577f94fd25cSJens Axboe if (tp->recvmsg_inq) {
2578925bba24SArjun Roy *cmsg_flags = TCP_CMSG_INQ;
2579f94fd25cSJens Axboe msg->msg_get_inq = 1;
2580f94fd25cSJens Axboe }
2581ec095263SOliver Hartkopp timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
25821da177e4SLinus Torvalds
25831da177e4SLinus Torvalds /* Urgent data needs to be handled specially. */
25841da177e4SLinus Torvalds if (flags & MSG_OOB)
25851da177e4SLinus Torvalds goto recv_urg;
25861da177e4SLinus Torvalds
2587c0e88ff0SPavel Emelyanov if (unlikely(tp->repair)) {
2588c0e88ff0SPavel Emelyanov err = -EPERM;
2589c0e88ff0SPavel Emelyanov if (!(flags & MSG_PEEK))
2590c0e88ff0SPavel Emelyanov goto out;
2591c0e88ff0SPavel Emelyanov
2592c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE)
2593c0e88ff0SPavel Emelyanov goto recv_sndq;
2594c0e88ff0SPavel Emelyanov
2595c0e88ff0SPavel Emelyanov err = -EINVAL;
2596c0e88ff0SPavel Emelyanov if (tp->repair_queue == TCP_NO_QUEUE)
2597c0e88ff0SPavel Emelyanov goto out;
2598c0e88ff0SPavel Emelyanov
2599c0e88ff0SPavel Emelyanov /* 'common' recv queue MSG_PEEK-ing */
2600c0e88ff0SPavel Emelyanov }
2601c0e88ff0SPavel Emelyanov
26021da177e4SLinus Torvalds seq = &tp->copied_seq;
26031da177e4SLinus Torvalds if (flags & MSG_PEEK) {
260405ea4916SJon Maloy peek_offset = max(sk_peek_offset(sk, flags), 0);
260505ea4916SJon Maloy peek_seq = tp->copied_seq + peek_offset;
26061da177e4SLinus Torvalds seq = &peek_seq;
26071da177e4SLinus Torvalds }
26081da177e4SLinus Torvalds
26091da177e4SLinus Torvalds target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
26101da177e4SLinus Torvalds
26111da177e4SLinus Torvalds do {
26121da177e4SLinus Torvalds u32 offset;
26131da177e4SLinus Torvalds
26141da177e4SLinus Torvalds /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
2615b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && tp->urg_seq == *seq) {
26161da177e4SLinus Torvalds if (copied)
26171da177e4SLinus Torvalds break;
26181da177e4SLinus Torvalds if (signal_pending(current)) {
26191da177e4SLinus Torvalds copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
26201da177e4SLinus Torvalds break;
26211da177e4SLinus Torvalds }
26221da177e4SLinus Torvalds }
26231da177e4SLinus Torvalds
26241da177e4SLinus Torvalds /* Next get a buffer. */
26251da177e4SLinus Torvalds
2626dfbafc99SSabrina Dubroca last = skb_peek_tail(&sk->sk_receive_queue);
262791521944SDavid S. Miller skb_queue_walk(&sk->sk_receive_queue, skb) {
2628dfbafc99SSabrina Dubroca last = skb;
26291da177e4SLinus Torvalds /* Now that we have two receive queues this
26301da177e4SLinus Torvalds * shouldn't happen.
26311da177e4SLinus Torvalds */
2632d792c100SIlpo Järvinen if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
2633e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
26342af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
2635d792c100SIlpo Järvinen flags))
26361da177e4SLinus Torvalds break;
2637d792c100SIlpo Järvinen
26381da177e4SLinus Torvalds offset = *seq - TCP_SKB_CB(skb)->seq;
26399d691539SEric Dumazet if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
26409d691539SEric Dumazet pr_err_once("%s: found a SYN, please report !\n", __func__);
26411da177e4SLinus Torvalds offset--;
26429d691539SEric Dumazet }
26431da177e4SLinus Torvalds if (offset < skb->len)
26441da177e4SLinus Torvalds goto found_ok_skb;
2645e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
26461da177e4SLinus Torvalds goto found_fin_ok;
26472af6fd8bSJoe Perches WARN(!(flags & MSG_PEEK),
2648e56b8ce3SRandy Dunlap "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
26492af6fd8bSJoe Perches *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
265091521944SDavid S. Miller }
26511da177e4SLinus Torvalds
26521da177e4SLinus Torvalds /* Well, if we have backlog, try to process it now. */
26531da177e4SLinus Torvalds
26549ed498c6SEric Dumazet if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
26551da177e4SLinus Torvalds break;
26561da177e4SLinus Torvalds
26571da177e4SLinus Torvalds if (copied) {
26588bd172b7SEric Dumazet if (!timeo ||
26598bd172b7SEric Dumazet sk->sk_err ||
26601da177e4SLinus Torvalds sk->sk_state == TCP_CLOSE ||
26611da177e4SLinus Torvalds (sk->sk_shutdown & RCV_SHUTDOWN) ||
2662518a09efSDavid S. Miller signal_pending(current))
26631da177e4SLinus Torvalds break;
26641da177e4SLinus Torvalds } else {
26651da177e4SLinus Torvalds if (sock_flag(sk, SOCK_DONE))
26661da177e4SLinus Torvalds break;
26671da177e4SLinus Torvalds
26681da177e4SLinus Torvalds if (sk->sk_err) {
26691da177e4SLinus Torvalds copied = sock_error(sk);
26701da177e4SLinus Torvalds break;
26711da177e4SLinus Torvalds }
26721da177e4SLinus Torvalds
26731da177e4SLinus Torvalds if (sk->sk_shutdown & RCV_SHUTDOWN)
26741da177e4SLinus Torvalds break;
26751da177e4SLinus Torvalds
26761da177e4SLinus Torvalds if (sk->sk_state == TCP_CLOSE) {
26771da177e4SLinus Torvalds /* This occurs when user tries to read
26781da177e4SLinus Torvalds * from a never-connected socket.
26791da177e4SLinus Torvalds */
26801da177e4SLinus Torvalds copied = -ENOTCONN;
26811da177e4SLinus Torvalds break;
26821da177e4SLinus Torvalds }
26831da177e4SLinus Torvalds
26841da177e4SLinus Torvalds if (!timeo) {
26851da177e4SLinus Torvalds copied = -EAGAIN;
26861da177e4SLinus Torvalds break;
26871da177e4SLinus Torvalds }
26881da177e4SLinus Torvalds
26891da177e4SLinus Torvalds if (signal_pending(current)) {
26901da177e4SLinus Torvalds copied = sock_intr_errno(timeo);
26911da177e4SLinus Torvalds break;
26921da177e4SLinus Torvalds }
26931da177e4SLinus Torvalds }
26941da177e4SLinus Torvalds
26951da177e4SLinus Torvalds if (copied >= target) {
26961da177e4SLinus Torvalds /* Do not sleep, just process backlog. */
269793afcfd1SEric Dumazet __sk_flush_backlog(sk);
2698dfbafc99SSabrina Dubroca } else {
269929fbc26eSEric Dumazet tcp_cleanup_rbuf(sk, copied);
2700419ce133SPaolo Abeni err = sk_wait_data(sk, &timeo, last);
2701419ce133SPaolo Abeni if (err < 0) {
2702419ce133SPaolo Abeni err = copied ? : err;
2703419ce133SPaolo Abeni goto out;
2704419ce133SPaolo Abeni }
2705dfbafc99SSabrina Dubroca }
27061da177e4SLinus Torvalds
270777527313SIlpo Järvinen if ((flags & MSG_PEEK) &&
270805ea4916SJon Maloy (peek_seq - peek_offset - copied - urg_hole != tp->copied_seq)) {
2709e87cc472SJoe Perches net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
2710e87cc472SJoe Perches current->comm,
2711e87cc472SJoe Perches task_pid_nr(current));
271205ea4916SJon Maloy peek_seq = tp->copied_seq + peek_offset;
27131da177e4SLinus Torvalds }
27141da177e4SLinus Torvalds continue;
27151da177e4SLinus Torvalds
27161da177e4SLinus Torvalds found_ok_skb:
27171da177e4SLinus Torvalds /* Ok so how much can we use? */
27181da177e4SLinus Torvalds used = skb->len - offset;
27191da177e4SLinus Torvalds if (len < used)
27201da177e4SLinus Torvalds used = len;
27211da177e4SLinus Torvalds
27221da177e4SLinus Torvalds /* Do we have urgent data here? */
2723b96c51bdSEric Dumazet if (unlikely(tp->urg_data)) {
27241da177e4SLinus Torvalds u32 urg_offset = tp->urg_seq - *seq;
27251da177e4SLinus Torvalds if (urg_offset < used) {
27261da177e4SLinus Torvalds if (!urg_offset) {
27271da177e4SLinus Torvalds if (!sock_flag(sk, SOCK_URGINLINE)) {
27287db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1);
272977527313SIlpo Järvinen urg_hole++;
27301da177e4SLinus Torvalds offset++;
27311da177e4SLinus Torvalds used--;
27321da177e4SLinus Torvalds if (!used)
27331da177e4SLinus Torvalds goto skip_copy;
27341da177e4SLinus Torvalds }
27351da177e4SLinus Torvalds } else
27361da177e4SLinus Torvalds used = urg_offset;
27371da177e4SLinus Torvalds }
27381da177e4SLinus Torvalds }
27391da177e4SLinus Torvalds
27401da177e4SLinus Torvalds if (!(flags & MSG_TRUNC)) {
2741*8f0b3cc9SMina Almasry if (last_copied_dmabuf != -1 &&
2742*8f0b3cc9SMina Almasry last_copied_dmabuf != !skb_frags_readable(skb))
2743*8f0b3cc9SMina Almasry break;
2744*8f0b3cc9SMina Almasry
2745*8f0b3cc9SMina Almasry if (skb_frags_readable(skb)) {
2746*8f0b3cc9SMina Almasry err = skb_copy_datagram_msg(skb, offset, msg,
2747*8f0b3cc9SMina Almasry used);
27481da177e4SLinus Torvalds if (err) {
27491da177e4SLinus Torvalds /* Exception. Bailout! */
27501da177e4SLinus Torvalds if (!copied)
27511da177e4SLinus Torvalds copied = -EFAULT;
27521da177e4SLinus Torvalds break;
27531da177e4SLinus Torvalds }
2754*8f0b3cc9SMina Almasry } else {
2755*8f0b3cc9SMina Almasry if (!(flags & MSG_SOCK_DEVMEM)) {
2756*8f0b3cc9SMina Almasry /* dmabuf skbs can only be received
2757*8f0b3cc9SMina Almasry * with the MSG_SOCK_DEVMEM flag.
2758*8f0b3cc9SMina Almasry */
2759*8f0b3cc9SMina Almasry if (!copied)
2760*8f0b3cc9SMina Almasry copied = -EFAULT;
2761*8f0b3cc9SMina Almasry
2762*8f0b3cc9SMina Almasry break;
27631da177e4SLinus Torvalds }
27641da177e4SLinus Torvalds
2765*8f0b3cc9SMina Almasry err = tcp_recvmsg_dmabuf(sk, skb, offset, msg,
2766*8f0b3cc9SMina Almasry used);
2767*8f0b3cc9SMina Almasry if (err <= 0) {
2768*8f0b3cc9SMina Almasry if (!copied)
2769*8f0b3cc9SMina Almasry copied = -EFAULT;
2770*8f0b3cc9SMina Almasry
2771*8f0b3cc9SMina Almasry break;
2772*8f0b3cc9SMina Almasry }
2773*8f0b3cc9SMina Almasry used = err;
2774*8f0b3cc9SMina Almasry }
2775*8f0b3cc9SMina Almasry }
2776*8f0b3cc9SMina Almasry
2777*8f0b3cc9SMina Almasry last_copied_dmabuf = !skb_frags_readable(skb);
2778*8f0b3cc9SMina Almasry
27797db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + used);
27801da177e4SLinus Torvalds copied += used;
27811da177e4SLinus Torvalds len -= used;
278205ea4916SJon Maloy if (flags & MSG_PEEK)
278305ea4916SJon Maloy sk_peek_offset_fwd(sk, used);
278405ea4916SJon Maloy else
278505ea4916SJon Maloy sk_peek_offset_bwd(sk, used);
27861da177e4SLinus Torvalds tcp_rcv_space_adjust(sk);
27871da177e4SLinus Torvalds
27881da177e4SLinus Torvalds skip_copy:
2789b96c51bdSEric Dumazet if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) {
27907b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0);
279131770e34SFlorian Westphal tcp_fast_path_check(sk);
279231770e34SFlorian Westphal }
27931da177e4SLinus Torvalds
279498aaa913SMike Maloney if (TCP_SKB_CB(skb)->has_rxtstamp) {
27952cd81161SArjun Roy tcp_update_recv_tstamps(skb, tss);
2796925bba24SArjun Roy *cmsg_flags |= TCP_CMSG_TS;
279798aaa913SMike Maloney }
2798cc4de047SKelly Littlepage
2799cc4de047SKelly Littlepage if (used + offset < skb->len)
2800cc4de047SKelly Littlepage continue;
2801cc4de047SKelly Littlepage
2802e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
28031da177e4SLinus Torvalds goto found_fin_ok;
28047bced397SDan Williams if (!(flags & MSG_PEEK))
28053df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb);
28061da177e4SLinus Torvalds continue;
28071da177e4SLinus Torvalds
28081da177e4SLinus Torvalds found_fin_ok:
28091da177e4SLinus Torvalds /* Process the FIN. */
28107db48e98SEric Dumazet WRITE_ONCE(*seq, *seq + 1);
28117bced397SDan Williams if (!(flags & MSG_PEEK))
28123df684c1SEric Dumazet tcp_eat_recv_skb(sk, skb);
28131da177e4SLinus Torvalds break;
28141da177e4SLinus Torvalds } while (len > 0);
28151da177e4SLinus Torvalds
28161da177e4SLinus Torvalds /* According to UNIX98, msg_name/msg_namelen are ignored
28171da177e4SLinus Torvalds * on a connected socket. I was just happy when I found this 8) --ANK
28181da177e4SLinus Torvalds */
28191da177e4SLinus Torvalds
28201da177e4SLinus Torvalds /* Clean up data we have read: This will do ACK frames. */
28210e4b4992SChris Leech tcp_cleanup_rbuf(sk, copied);
28221da177e4SLinus Torvalds return copied;
28231da177e4SLinus Torvalds
28241da177e4SLinus Torvalds out:
28251da177e4SLinus Torvalds return err;
28261da177e4SLinus Torvalds
28271da177e4SLinus Torvalds recv_urg:
2828377f0a08SRami Rosen err = tcp_recv_urg(sk, msg, len, flags);
28291da177e4SLinus Torvalds goto out;
2830c0e88ff0SPavel Emelyanov
2831c0e88ff0SPavel Emelyanov recv_sndq:
2832c0e88ff0SPavel Emelyanov err = tcp_peek_sndq(sk, msg, len);
2833c0e88ff0SPavel Emelyanov goto out;
28341da177e4SLinus Torvalds }
28352cd81161SArjun Roy
2836ec095263SOliver Hartkopp int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
2837ec095263SOliver Hartkopp int *addr_len)
28382cd81161SArjun Roy {
2839f94fd25cSJens Axboe int cmsg_flags = 0, ret;
28402cd81161SArjun Roy struct scm_timestamping_internal tss;
28412cd81161SArjun Roy
28422cd81161SArjun Roy if (unlikely(flags & MSG_ERRQUEUE))
28432cd81161SArjun Roy return inet_recv_error(sk, msg, len, addr_len);
28442cd81161SArjun Roy
28452cd81161SArjun Roy if (sk_can_busy_loop(sk) &&
28462cd81161SArjun Roy skb_queue_empty_lockless(&sk->sk_receive_queue) &&
28472cd81161SArjun Roy sk->sk_state == TCP_ESTABLISHED)
2848ec095263SOliver Hartkopp sk_busy_loop(sk, flags & MSG_DONTWAIT);
28492cd81161SArjun Roy
28502cd81161SArjun Roy lock_sock(sk);
2851ec095263SOliver Hartkopp ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags);
28522cd81161SArjun Roy release_sock(sk);
28532cd81161SArjun Roy
2854f94fd25cSJens Axboe if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) {
2855925bba24SArjun Roy if (cmsg_flags & TCP_CMSG_TS)
28562cd81161SArjun Roy tcp_recv_timestamp(msg, sk, &tss);
2857f94fd25cSJens Axboe if (msg->msg_get_inq) {
2858f94fd25cSJens Axboe msg->msg_inq = tcp_inq_hint(sk);
2859f94fd25cSJens Axboe if (cmsg_flags & TCP_CMSG_INQ)
2860f94fd25cSJens Axboe put_cmsg(msg, SOL_TCP, TCP_CM_INQ,
2861f94fd25cSJens Axboe sizeof(msg->msg_inq), &msg->msg_inq);
28622cd81161SArjun Roy }
28632cd81161SArjun Roy }
28642cd81161SArjun Roy return ret;
28652cd81161SArjun Roy }
28664bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_recvmsg);
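/* Illustrative TCP_INQ consumer (a sketch; TCP_CM_INQ is the cmsg type
 * carrying the hint computed by tcp_inq_hint() above):
 *
 *	int one = 1, inq;
 *	struct cmsghdr *cm;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one));
 *	...
 *	recvmsg(fd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *		if (cm->cmsg_level == SOL_TCP && cm->cmsg_type == TCP_CM_INQ)
 *			inq = *(int *)CMSG_DATA(cm);	// bytes still queued
 */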
28671da177e4SLinus Torvalds
2868490d5046SIlpo Järvinen void tcp_set_state(struct sock *sk, int state)
2869490d5046SIlpo Järvinen {
2870490d5046SIlpo Järvinen int oldstate = sk->sk_state;
2871490d5046SIlpo Järvinen
2872d4487491SLawrence Brakmo /* We defined a new enum for TCP states that are exported in BPF
2873d4487491SLawrence Brakmo * so as not to force the internal TCP states to be frozen. The
2874d4487491SLawrence Brakmo * following checks will detect if an internal state value ever
2875d4487491SLawrence Brakmo * differs from the BPF value. If this ever happens, then we will
2876d4487491SLawrence Brakmo * need to remap the internal value to the BPF value before calling
2877d4487491SLawrence Brakmo * tcp_call_bpf_2arg.
2878d4487491SLawrence Brakmo */
2879d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
2880d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
2881d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
2882d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
2883d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
2884d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
2885d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
2886d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
2887d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
2888d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
2889d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
2890d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
289191051f00SGuillaume Nault BUILD_BUG_ON((int)BPF_TCP_BOUND_INACTIVE != (int)TCP_BOUND_INACTIVE);
2892d4487491SLawrence Brakmo BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);
2893d4487491SLawrence Brakmo
289497a19cafSYonghong Song /* bpf uapi header bpf.h defines an anonymous enum with values
289597a19cafSYonghong Song * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux
289697a19cafSYonghong Song * is able to emit this enum in DWARF due to the above BUILD_BUG_ON.
289797a19cafSYonghong Song * But clang built vmlinux does not have this enum in DWARF
289897a19cafSYonghong Song * since clang removes the above code before generating IR/debuginfo.
289997a19cafSYonghong Song * Let us explicitly emit the type debuginfo to ensure the
290097a19cafSYonghong Song * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF
290197a19cafSYonghong Song * regardless of which compiler is used.
290297a19cafSYonghong Song */
290397a19cafSYonghong Song BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED);
290497a19cafSYonghong Song
2905d4487491SLawrence Brakmo if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
2906d4487491SLawrence Brakmo tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
2907e8fce239SSong Liu
2908490d5046SIlpo Järvinen switch (state) {
2909490d5046SIlpo Järvinen case TCP_ESTABLISHED:
2910490d5046SIlpo Järvinen if (oldstate != TCP_ESTABLISHED)
291181cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2912490d5046SIlpo Järvinen break;
2913a46d0ea5SJason Xing case TCP_CLOSE_WAIT:
2914a46d0ea5SJason Xing if (oldstate == TCP_SYN_RECV)
2915a46d0ea5SJason Xing TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2916a46d0ea5SJason Xing break;
2917490d5046SIlpo Järvinen
2918490d5046SIlpo Järvinen case TCP_CLOSE:
2919490d5046SIlpo Järvinen if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
292081cc8a75SPavel Emelyanov TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2921490d5046SIlpo Järvinen
2922490d5046SIlpo Järvinen sk->sk_prot->unhash(sk);
2923490d5046SIlpo Järvinen if (inet_csk(sk)->icsk_bind_hash &&
2924490d5046SIlpo Järvinen !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2925ab1e0a13SArnaldo Carvalho de Melo inet_put_port(sk);
2926a8eceea8SJoe Perches fallthrough;
2927490d5046SIlpo Järvinen default:
2928a46d0ea5SJason Xing if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
292974688e48SPavel Emelyanov TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2930490d5046SIlpo Järvinen }
2931490d5046SIlpo Järvinen
2932490d5046SIlpo Järvinen /* Change state AFTER socket is unhashed to avoid closed
2933490d5046SIlpo Järvinen * socket sitting in hash tables.
2934490d5046SIlpo Järvinen */
2935563e0bb0SYafang Shao inet_sk_state_store(sk, state);
2936490d5046SIlpo Järvinen }
2937490d5046SIlpo Järvinen EXPORT_SYMBOL_GPL(tcp_set_state);
2938490d5046SIlpo Järvinen
29391da177e4SLinus Torvalds /*
29401da177e4SLinus Torvalds * State processing on a close. This implements the state shift for
29411da177e4SLinus Torvalds * sending our FIN frame. Note that we only send a FIN for some
29421da177e4SLinus Torvalds * states. A shutdown() may have already sent the FIN, or we may be
29431da177e4SLinus Torvalds * closed.
29441da177e4SLinus Torvalds */
29451da177e4SLinus Torvalds
29469b5b5cffSArjan van de Ven static const unsigned char new_state[16] = {
29471da177e4SLinus Torvalds /* current state: new state: action: */
29480980c1e3SEric Dumazet [0 /* (Invalid) */] = TCP_CLOSE,
29490980c1e3SEric Dumazet [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
29500980c1e3SEric Dumazet [TCP_SYN_SENT] = TCP_CLOSE,
29510980c1e3SEric Dumazet [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN,
29520980c1e3SEric Dumazet [TCP_FIN_WAIT1] = TCP_FIN_WAIT1,
29530980c1e3SEric Dumazet [TCP_FIN_WAIT2] = TCP_FIN_WAIT2,
29540980c1e3SEric Dumazet [TCP_TIME_WAIT] = TCP_CLOSE,
29550980c1e3SEric Dumazet [TCP_CLOSE] = TCP_CLOSE,
29560980c1e3SEric Dumazet [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN,
29570980c1e3SEric Dumazet [TCP_LAST_ACK] = TCP_LAST_ACK,
29580980c1e3SEric Dumazet [TCP_LISTEN] = TCP_CLOSE,
29590980c1e3SEric Dumazet [TCP_CLOSING] = TCP_CLOSING,
29600980c1e3SEric Dumazet [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */
29611da177e4SLinus Torvalds };
29621da177e4SLinus Torvalds
29631da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk)
29641da177e4SLinus Torvalds {
29651da177e4SLinus Torvalds int next = (int)new_state[sk->sk_state];
29661da177e4SLinus Torvalds int ns = next & TCP_STATE_MASK;
29671da177e4SLinus Torvalds
29681da177e4SLinus Torvalds tcp_set_state(sk, ns);
29691da177e4SLinus Torvalds
29701da177e4SLinus Torvalds return next & TCP_ACTION_FIN;
29711da177e4SLinus Torvalds }
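/* Worked example (derived from the new_state[] table above): close() on
 * an ESTABLISHED socket reads TCP_FIN_WAIT1 | TCP_ACTION_FIN, so
 * tcp_close_state() moves the socket to FIN_WAIT1 and returns nonzero,
 * telling the caller to transmit a FIN. close() in SYN_SENT maps to
 * TCP_CLOSE with no action bit: the connection is simply dropped.
 */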
29721da177e4SLinus Torvalds
29731da177e4SLinus Torvalds /*
29741da177e4SLinus Torvalds * Shutdown the sending side of a connection. Much like close except
29751f29b058SSatoru SATOH * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
29761da177e4SLinus Torvalds */
29771da177e4SLinus Torvalds
29781da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how)
29791da177e4SLinus Torvalds {
29801da177e4SLinus Torvalds /* We need to grab some memory, and put together a FIN,
29811da177e4SLinus Torvalds * and then put it into the queue to be sent.
29821da177e4SLinus Torvalds * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
29831da177e4SLinus Torvalds */
29841da177e4SLinus Torvalds if (!(how & SEND_SHUTDOWN))
29851da177e4SLinus Torvalds return;
29861da177e4SLinus Torvalds
29871da177e4SLinus Torvalds /* If we've already sent a FIN, or it's a closed state, skip this. */
29881da177e4SLinus Torvalds if ((1 << sk->sk_state) &
29891da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_SYN_SENT |
299094062790SEric Dumazet TCPF_CLOSE_WAIT)) {
29911da177e4SLinus Torvalds /* Clear out any half completed packets. FIN if needed. */
29921da177e4SLinus Torvalds if (tcp_close_state(sk))
29931da177e4SLinus Torvalds tcp_send_fin(sk);
29941da177e4SLinus Torvalds }
29951da177e4SLinus Torvalds }
29964bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_shutdown);
29971da177e4SLinus Torvalds
299819757cebSEric Dumazet int tcp_orphan_count_sum(void)
299919757cebSEric Dumazet {
300019757cebSEric Dumazet int i, total = 0;
300119757cebSEric Dumazet
300219757cebSEric Dumazet for_each_possible_cpu(i)
300319757cebSEric Dumazet total += per_cpu(tcp_orphan_count, i);
300419757cebSEric Dumazet
300519757cebSEric Dumazet return max(total, 0);
300619757cebSEric Dumazet }
300719757cebSEric Dumazet
300819757cebSEric Dumazet static int tcp_orphan_cache;
300919757cebSEric Dumazet static struct timer_list tcp_orphan_timer;
301019757cebSEric Dumazet #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100)
301119757cebSEric Dumazet
301219757cebSEric Dumazet static void tcp_orphan_update(struct timer_list *unused)
301319757cebSEric Dumazet {
301419757cebSEric Dumazet WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
301519757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
301619757cebSEric Dumazet }
301719757cebSEric Dumazet
301819757cebSEric Dumazet static bool tcp_too_many_orphans(int shift)
301919757cebSEric Dumazet {
302047e6ab24SKuniyuki Iwashima return READ_ONCE(tcp_orphan_cache) << shift >
302147e6ab24SKuniyuki Iwashima READ_ONCE(sysctl_tcp_max_orphans);
302219757cebSEric Dumazet }
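/* Note on the shift argument (a worked example of the comparison above):
 * the cached orphan count is scaled up by 2^shift before being compared
 * with sysctl_tcp_max_orphans, so a caller passing shift == 1 effectively
 * halves the allowed number of orphans, while tcp_check_oom(sk, 0) below
 * uses the plain limit.
 */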
302319757cebSEric Dumazet
3024dda4d96aSEric Dumazet static bool tcp_out_of_memory(const struct sock *sk)
3025dda4d96aSEric Dumazet {
3026dda4d96aSEric Dumazet if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
3027dda4d96aSEric Dumazet sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
3028dda4d96aSEric Dumazet return true;
3029dda4d96aSEric Dumazet return false;
3030dda4d96aSEric Dumazet }
3031dda4d96aSEric Dumazet
3032dda4d96aSEric Dumazet bool tcp_check_oom(const struct sock *sk, int shift)
3033efcdbf24SArun Sharma {
3034efcdbf24SArun Sharma bool too_many_orphans, out_of_socket_memory;
3035efcdbf24SArun Sharma
303619757cebSEric Dumazet too_many_orphans = tcp_too_many_orphans(shift);
3037efcdbf24SArun Sharma out_of_socket_memory = tcp_out_of_memory(sk);
3038efcdbf24SArun Sharma
3039e87cc472SJoe Perches if (too_many_orphans)
3040e87cc472SJoe Perches net_info_ratelimited("too many orphaned sockets\n");
3041e87cc472SJoe Perches if (out_of_socket_memory)
3042e87cc472SJoe Perches net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
3043efcdbf24SArun Sharma return too_many_orphans || out_of_socket_memory;
3044efcdbf24SArun Sharma }
3045efcdbf24SArun Sharma
304677c3c956SPaolo Abeni void __tcp_close(struct sock *sk, long timeout)
30471da177e4SLinus Torvalds {
30481da177e4SLinus Torvalds struct sk_buff *skb;
30491da177e4SLinus Torvalds int data_was_unread = 0;
305075c2d907SHerbert Xu int state;
30511da177e4SLinus Torvalds
3052e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
30531da177e4SLinus Torvalds
30541da177e4SLinus Torvalds if (sk->sk_state == TCP_LISTEN) {
30551da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE);
30561da177e4SLinus Torvalds
30571da177e4SLinus Torvalds /* Special case. */
30580a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk);
30591da177e4SLinus Torvalds
30601da177e4SLinus Torvalds goto adjudge_to_death;
30611da177e4SLinus Torvalds }
30621da177e4SLinus Torvalds
30631da177e4SLinus Torvalds /* We need to flush the recv. buffs. We do this only on the
30641da177e4SLinus Torvalds * descriptor close, not protocol-sourced closes, because the
30651da177e4SLinus Torvalds * reader process may not have drained the data yet!
30661da177e4SLinus Torvalds */
30671da177e4SLinus Torvalds while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
3068e11ecddfSEric Dumazet u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
3069e11ecddfSEric Dumazet
3070e11ecddfSEric Dumazet if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
3071e11ecddfSEric Dumazet len--;
30721da177e4SLinus Torvalds data_was_unread += len;
30731da177e4SLinus Torvalds __kfree_skb(skb);
30741da177e4SLinus Torvalds }
30751da177e4SLinus Torvalds
3076565b7b2dSKonstantin Khorenko /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
3077565b7b2dSKonstantin Khorenko if (sk->sk_state == TCP_CLOSE)
3078565b7b2dSKonstantin Khorenko goto adjudge_to_death;
3079565b7b2dSKonstantin Khorenko
308065bb723cSGerrit Renker /* As outlined in RFC 2525, section 2.17, we send a RST here because
308165bb723cSGerrit Renker * data was lost. To witness the awful effects of the old behavior of
308265bb723cSGerrit Renker * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
308365bb723cSGerrit Renker * GET in an FTP client, suspend the process, wait for the client to
308465bb723cSGerrit Renker * advertise a zero window, then kill -9 the FTP client, wheee...
308565bb723cSGerrit Renker * Note: timeout is always zero in such a case.
30861da177e4SLinus Torvalds */
3087ee995283SPavel Emelyanov if (unlikely(tcp_sk(sk)->repair)) {
3088ee995283SPavel Emelyanov sk->sk_prot->disconnect(sk, 0);
3089ee995283SPavel Emelyanov } else if (data_was_unread) {
30901da177e4SLinus Torvalds /* Unread data was tossed, zap the connection. */
30916aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
30921da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE);
30935691276bSJason Xing tcp_send_active_reset(sk, sk->sk_allocation,
309490c36325SJason Xing SK_RST_REASON_TCP_ABORT_ON_CLOSE);
30951da177e4SLinus Torvalds } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
30961da177e4SLinus Torvalds /* Check zero linger _after_ checking for unread data. */
30971da177e4SLinus Torvalds sk->sk_prot->disconnect(sk, 0);
30986aef70a8SEric Dumazet NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
30991da177e4SLinus Torvalds } else if (tcp_close_state(sk)) {
31001da177e4SLinus Torvalds /* We FIN if the application ate all the data before
31011da177e4SLinus Torvalds * zapping the connection.
31021da177e4SLinus Torvalds */
31031da177e4SLinus Torvalds
31041da177e4SLinus Torvalds /* RED-PEN. Formally speaking, we have broken TCP state
31051da177e4SLinus Torvalds * machine. State transitions:
31061da177e4SLinus Torvalds *
31071da177e4SLinus Torvalds * TCP_ESTABLISHED -> TCP_FIN_WAIT1
310894062790SEric Dumazet * TCP_SYN_RECV -> TCP_FIN_WAIT1 (it is difficult)
31091da177e4SLinus Torvalds * TCP_CLOSE_WAIT -> TCP_LAST_ACK
31101da177e4SLinus Torvalds *
31111da177e4SLinus Torvalds * are legal only when FIN has been sent (i.e. in window),
31121da177e4SLinus Torvalds * rather than queued out of window. Purists blame.
31131da177e4SLinus Torvalds *
31141da177e4SLinus Torvalds * F.e. "RFC state" is ESTABLISHED,
31151da177e4SLinus Torvalds * if Linux state is FIN-WAIT-1, but FIN is still not sent.
31161da177e4SLinus Torvalds *
31171da177e4SLinus Torvalds * The visible declinations are that sometimes
31181da177e4SLinus Torvalds * we enter time-wait state, when it is not required really
31191da177e4SLinus Torvalds * (harmless), do not send active resets, when they are
31201da177e4SLinus Torvalds * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
31211da177e4SLinus Torvalds * they look as CLOSING or LAST_ACK for Linux)
31221da177e4SLinus Torvalds * Probably, I missed some more holelets.
31231da177e4SLinus Torvalds * --ANK
31248336886fSJerry Chu * XXX (TFO) - To start off we don't support SYN+ACK+FIN
31258336886fSJerry Chu * in a single packet! (May consider it later but will
31268336886fSJerry Chu * probably need API support or TCP_CORK SYN-ACK until
31278336886fSJerry Chu * data is written and socket is closed.)
31281da177e4SLinus Torvalds */
31291da177e4SLinus Torvalds tcp_send_fin(sk);
31301da177e4SLinus Torvalds }
31311da177e4SLinus Torvalds
31321da177e4SLinus Torvalds sk_stream_wait_close(sk, timeout);
31331da177e4SLinus Torvalds
31341da177e4SLinus Torvalds adjudge_to_death:
313575c2d907SHerbert Xu state = sk->sk_state;
313675c2d907SHerbert Xu sock_hold(sk);
313775c2d907SHerbert Xu sock_orphan(sk);
313875c2d907SHerbert Xu
31391da177e4SLinus Torvalds local_bh_disable();
31401da177e4SLinus Torvalds bh_lock_sock(sk);
31418873c064SEric Dumazet /* remove backlog if any, without releasing ownership. */
31428873c064SEric Dumazet __release_sock(sk);
31431da177e4SLinus Torvalds
314419757cebSEric Dumazet this_cpu_inc(tcp_orphan_count);
3145eb4dea58SHerbert Xu
314675c2d907SHerbert Xu /* Have we already been destroyed by a softirq or backlog? */
314775c2d907SHerbert Xu if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
314875c2d907SHerbert Xu goto out;
31491da177e4SLinus Torvalds
31501da177e4SLinus Torvalds /* This is a (useful) BSD violation of the RFC. There is a
31511da177e4SLinus Torvalds * problem with TCP as specified in that the other end could
31521da177e4SLinus Torvalds * keep a socket open forever with no application left at this end.
3153b10bd54cSJesper Juhl * We use a 1 minute timeout (about the same as BSD) then kill
31541da177e4SLinus Torvalds * our end. If they send after that then tough - BUT: long enough
31551da177e4SLinus Torvalds * that we won't make the old 4*rto = almost no time - whoops
31561da177e4SLinus Torvalds * reset mistake.
31571da177e4SLinus Torvalds *
31581da177e4SLinus Torvalds * Nope, it was not mistake. It is really desired behaviour
31591da177e4SLinus Torvalds * f.e. on http servers, when such sockets are useless, but
31601da177e4SLinus Torvalds * consume significant resources. Let's do it with special
31611da177e4SLinus Torvalds * linger2 option. --ANK
31621da177e4SLinus Torvalds */
31631da177e4SLinus Torvalds
31641da177e4SLinus Torvalds if (sk->sk_state == TCP_FIN_WAIT2) {
31651da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
3166a81722ddSEric Dumazet if (READ_ONCE(tp->linger2) < 0) {
31671da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE);
31685691276bSJason Xing tcp_send_active_reset(sk, GFP_ATOMIC,
3169edc92b48SJason Xing SK_RST_REASON_TCP_ABORT_ON_LINGER);
317002a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk),
3171de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONLINGER);
31721da177e4SLinus Torvalds } else {
3173463c84b9SArnaldo Carvalho de Melo const int tmo = tcp_fin_time(sk);
31741da177e4SLinus Torvalds
31751da177e4SLinus Torvalds if (tmo > TCP_TIMEWAIT_LEN) {
317652499afeSDavid S. Miller inet_csk_reset_keepalive_timer(sk,
317752499afeSDavid S. Miller tmo - TCP_TIMEWAIT_LEN);
31781da177e4SLinus Torvalds } else {
31791da177e4SLinus Torvalds tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
31801da177e4SLinus Torvalds goto out;
31811da177e4SLinus Torvalds }
31821da177e4SLinus Torvalds }
31831da177e4SLinus Torvalds }
31841da177e4SLinus Torvalds if (sk->sk_state != TCP_CLOSE) {
3185efcdbf24SArun Sharma if (tcp_check_oom(sk, 0)) {
31861da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE);
31875691276bSJason Xing tcp_send_active_reset(sk, GFP_ATOMIC,
31888407994fSJason Xing SK_RST_REASON_TCP_ABORT_ON_MEMORY);
318902a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk),
3190de0744afSPavel Emelyanov LINUX_MIB_TCPABORTONMEMORY);
31914ee806d5SDan Streetman } else if (!check_net(sock_net(sk))) {
31924ee806d5SDan Streetman /* Not possible to send reset; just close */
31934ee806d5SDan Streetman tcp_set_state(sk, TCP_CLOSE);
31941da177e4SLinus Torvalds }
31951da177e4SLinus Torvalds }
31961da177e4SLinus Torvalds
31978336886fSJerry Chu if (sk->sk_state == TCP_CLOSE) {
3198d983ea6fSEric Dumazet struct request_sock *req;
3199d983ea6fSEric Dumazet
3200d983ea6fSEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
3201d983ea6fSEric Dumazet lockdep_sock_is_held(sk));
32028336886fSJerry Chu /* We could get here with a non-NULL req if the socket is
32038336886fSJerry Chu * aborted (e.g., closed with unread data) before 3WHS
32048336886fSJerry Chu * finishes.
32058336886fSJerry Chu */
320600db4124SIan Morris if (req)
32078336886fSJerry Chu reqsk_fastopen_remove(sk, req, false);
32080a5578cfSArnaldo Carvalho de Melo inet_csk_destroy_sock(sk);
32098336886fSJerry Chu }
32101da177e4SLinus Torvalds /* Otherwise, socket is reprieved until protocol close. */
32111da177e4SLinus Torvalds
32121da177e4SLinus Torvalds out:
32131da177e4SLinus Torvalds bh_unlock_sock(sk);
32141da177e4SLinus Torvalds local_bh_enable();
321577c3c956SPaolo Abeni }
321677c3c956SPaolo Abeni
321777c3c956SPaolo Abeni void tcp_close(struct sock *sk, long timeout)
321877c3c956SPaolo Abeni {
321977c3c956SPaolo Abeni lock_sock(sk);
322077c3c956SPaolo Abeni __tcp_close(sk, timeout);
32218873c064SEric Dumazet release_sock(sk);
3222151c9c72SEric Dumazet if (!sk->sk_net_refcnt)
3223151c9c72SEric Dumazet inet_csk_clear_xmit_timers_sync(sk);
32241da177e4SLinus Torvalds sock_put(sk);
32251da177e4SLinus Torvalds }
32264bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_close);
32271da177e4SLinus Torvalds
32281da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */
32291da177e4SLinus Torvalds
3230a2a385d6SEric Dumazet static inline bool tcp_need_reset(int state)
32311da177e4SLinus Torvalds {
32321da177e4SLinus Torvalds return (1 << state) &
32331da177e4SLinus Torvalds (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
3234a7150e38SEric Dumazet TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
32351da177e4SLinus Torvalds }
32361da177e4SLinus Torvalds
323775c119afSEric Dumazet static void tcp_rtx_queue_purge(struct sock *sk)
323875c119afSEric Dumazet {
323975c119afSEric Dumazet struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
324075c119afSEric Dumazet
32412bec445fSEric Dumazet tcp_sk(sk)->highest_sack = NULL;
324275c119afSEric Dumazet while (p) {
324375c119afSEric Dumazet struct sk_buff *skb = rb_to_skb(p);
324475c119afSEric Dumazet
324575c119afSEric Dumazet p = rb_next(p);
324675c119afSEric Dumazet /* Since we are deleting the whole queue, there is no need to
324775c119afSEric Dumazet * list_del(&skb->tcp_tsorted_anchor)
324875c119afSEric Dumazet */
324975c119afSEric Dumazet tcp_rtx_queue_unlink(skb, sk);
325003271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb);
325175c119afSEric Dumazet }
325275c119afSEric Dumazet }
325375c119afSEric Dumazet
3254ac3f09baSEric Dumazet void tcp_write_queue_purge(struct sock *sk)
3255ac3f09baSEric Dumazet {
3256ac3f09baSEric Dumazet struct sk_buff *skb;
3257ac3f09baSEric Dumazet
3258ac3f09baSEric Dumazet tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
3259ac3f09baSEric Dumazet while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
3260ac3f09baSEric Dumazet tcp_skb_tsorted_anchor_cleanup(skb);
326103271f3aSTalal Ahmad tcp_wmem_free_skb(sk, skb);
3262ac3f09baSEric Dumazet }
326375c119afSEric Dumazet tcp_rtx_queue_purge(sk);
3264ac3f09baSEric Dumazet INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
3265ac3f09baSEric Dumazet tcp_clear_all_retrans_hints(tcp_sk(sk));
3266bffd168cSSoheil Hassas Yeganeh tcp_sk(sk)->packets_out = 0;
326704c03114SEric Dumazet inet_csk(sk)->icsk_backoff = 0;
3268ac3f09baSEric Dumazet }
3269ac3f09baSEric Dumazet
32701da177e4SLinus Torvalds int tcp_disconnect(struct sock *sk, int flags)
32711da177e4SLinus Torvalds {
32721da177e4SLinus Torvalds struct inet_sock *inet = inet_sk(sk);
3273463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk);
32741da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
32751da177e4SLinus Torvalds int old_state = sk->sk_state;
32760f317464SEric Dumazet u32 seq;
32771da177e4SLinus Torvalds
32781da177e4SLinus Torvalds if (old_state != TCP_CLOSE)
32791da177e4SLinus Torvalds tcp_set_state(sk, TCP_CLOSE);
32801da177e4SLinus Torvalds
32811da177e4SLinus Torvalds /* ABORT function of RFC793 */
32821da177e4SLinus Torvalds if (old_state == TCP_LISTEN) {
32830a5578cfSArnaldo Carvalho de Melo inet_csk_listen_stop(sk);
3284ee995283SPavel Emelyanov } else if (unlikely(tp->repair)) {
3285e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNABORTED);
3286edefba66SJason Xing } else if (tcp_need_reset(old_state)) {
3287edefba66SJason Xing tcp_send_active_reset(sk, gfp_any(), SK_RST_REASON_TCP_STATE);
3288edefba66SJason Xing WRITE_ONCE(sk->sk_err, ECONNRESET);
3289edefba66SJason Xing } else if (tp->snd_nxt != tp->write_seq &&
3290edefba66SJason Xing (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) {
3291caa20d9aSStephen Hemminger /* The last check adjusts for the discrepancy between Linux and
32921da177e4SLinus Torvalds * the RFC states.
32931da177e4SLinus Torvalds */
3294c026c656SJason Xing tcp_send_active_reset(sk, gfp_any(),
3295c026c656SJason Xing SK_RST_REASON_TCP_DISCONNECT_WITH_DATA);
3296e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET);
3297a7150e38SEric Dumazet } else if (old_state == TCP_SYN_SENT)
3298e13ec3daSEric Dumazet WRITE_ONCE(sk->sk_err, ECONNRESET);
32991da177e4SLinus Torvalds
33001da177e4SLinus Torvalds tcp_clear_xmit_timers(sk);
33011da177e4SLinus Torvalds __skb_queue_purge(&sk->sk_receive_queue);
33027db48e98SEric Dumazet WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
33037b6a893aSEric Dumazet WRITE_ONCE(tp->urg_data, 0);
330405ea4916SJon Maloy sk_set_peek_off(sk, -1);
3305fe067e8aSDavid S. Miller tcp_write_queue_purge(sk);
3306cf1ef3f0SWei Wang tcp_fastopen_active_disable_ofo_check(sk);
33079f5afeaeSYaogong Wang skb_rbtree_purge(&tp->out_of_order_queue);
33081da177e4SLinus Torvalds
3309c720c7e8SEric Dumazet inet->inet_dport = 0;
33101da177e4SLinus Torvalds
3311e0833d1fSKuniyuki Iwashima inet_bhash2_reset_saddr(sk);
33121da177e4SLinus Torvalds
3313e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, 0);
33141da177e4SLinus Torvalds sock_reset_flag(sk, SOCK_DONE);
3315740b0f18SEric Dumazet tp->srtt_us = 0;
3316b9e2e689SEric Dumazet tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
33173f6c65d6SWei Wang tp->rcv_rtt_last_tsecr = 0;
33180f317464SEric Dumazet
33190f317464SEric Dumazet seq = tp->write_seq + tp->max_window + 2;
33200f317464SEric Dumazet if (!seq)
33210f317464SEric Dumazet seq = 1;
33220f317464SEric Dumazet WRITE_ONCE(tp->write_seq, seq);
33230f317464SEric Dumazet
3324463c84b9SArnaldo Carvalho de Melo icsk->icsk_backoff = 0;
33256687e988SArnaldo Carvalho de Melo icsk->icsk_probes_out = 0;
33269d9b1ee0SEnke Chen icsk->icsk_probes_tstamp = 0;
33276a408147SEric Dumazet icsk->icsk_rto = TCP_TIMEOUT_INIT;
3328ca584ba0SMartin KaFai Lau icsk->icsk_rto_min = TCP_RTO_MIN;
33292b8ee4f0SMartin KaFai Lau icsk->icsk_delack_max = TCP_DELACK_MAX;
33300b6a05c1SIlpo Järvinen tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
333140570375SEric Dumazet tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
33321da177e4SLinus Torvalds tp->snd_cwnd_cnt = 0;
3333f4ce91ceSNeal Cardwell tp->is_cwnd_limited = 0;
3334f4ce91ceSNeal Cardwell tp->max_packets_out = 0;
33351fdf475aSEric Dumazet tp->window_clamp = 0;
33362fbdd562SEric Dumazet tp->delivered = 0;
3337e21db6f6SYuchung Cheng tp->delivered_ce = 0;
3338ce69e563SChristoph Paasch if (icsk->icsk_ca_ops->release)
3339ce69e563SChristoph Paasch icsk->icsk_ca_ops->release(sk);
3340ce69e563SChristoph Paasch memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
33418919a9b3SNeal Cardwell icsk->icsk_ca_initialized = 0;
33426687e988SArnaldo Carvalho de Melo tcp_set_ca_state(sk, TCP_CA_Open);
3343d4761754SYousuk Seung tp->is_sack_reneg = 0;
33441da177e4SLinus Torvalds tcp_clear_retrans(tp);
3345c13c48c0SEric Dumazet tp->total_retrans = 0;
3346463c84b9SArnaldo Carvalho de Melo inet_csk_delack_init(sk);
3347499350a5SWei Wang /* Initialize rcv_mss to TCP_MIN_MSS to avoid a division-by-0
3348499350a5SWei Wang * issue in __tcp_select_window()
3349499350a5SWei Wang */
3350499350a5SWei Wang icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
3351b40b4f79SSrinivas Aji memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
33521da177e4SLinus Torvalds __sk_dst_reset(sk);
3353b4cb4a13SEric Dumazet dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL)));
335417c3060bSEric Dumazet tcp_saved_syn_free(tp);
33555d9f4262SEric Dumazet tp->compressed_ack = 0;
3356784f8344SEric Dumazet tp->segs_in = 0;
3357784f8344SEric Dumazet tp->segs_out = 0;
3358ba113c3aSWei Wang tp->bytes_sent = 0;
3359e858faf5SChristoph Paasch tp->bytes_acked = 0;
3360e858faf5SChristoph Paasch tp->bytes_received = 0;
3361fb31c9b9SWei Wang tp->bytes_retrans = 0;
3362db7ffee6SEric Dumazet tp->data_segs_in = 0;
3363db7ffee6SEric Dumazet tp->data_segs_out = 0;
33647788174eSYuchung Cheng tp->duplicate_sack[0].start_seq = 0;
33657788174eSYuchung Cheng tp->duplicate_sack[0].end_seq = 0;
33667e10b655SWei Wang tp->dsack_dups = 0;
33677ec65372SWei Wang tp->reord_seen = 0;
33685c701549SEric Dumazet tp->retrans_out = 0;
33695c701549SEric Dumazet tp->sacked_out = 0;
33705c701549SEric Dumazet tp->tlp_high_seq = 0;
33715c701549SEric Dumazet tp->last_oow_ack_time = 0;
337229c1c446SMubashir Adnan Qureshi tp->plb_rehash = 0;
33736cda8b74SEric Dumazet /* There's a bubble in the pipe until at least the first ACK. */
33746cda8b74SEric Dumazet tp->app_limited = ~0U;
3375300b655dSDavid Morley tp->rate_app_limited = 1;
3376792c4354SEric Dumazet tp->rack.mstamp = 0;
3377792c4354SEric Dumazet tp->rack.advanced = 0;
3378792c4354SEric Dumazet tp->rack.reo_wnd_steps = 1;
3379792c4354SEric Dumazet tp->rack.last_delivered = 0;
3380792c4354SEric Dumazet tp->rack.reo_wnd_persist = 0;
3381792c4354SEric Dumazet tp->rack.dsack_seen = 0;
33826bcdc40dSEric Dumazet tp->syn_data_acked = 0;
33836bcdc40dSEric Dumazet tp->rx_opt.saw_tstamp = 0;
33846bcdc40dSEric Dumazet tp->rx_opt.dsack = 0;
33856bcdc40dSEric Dumazet tp->rx_opt.num_sacks = 0;
3386f9af2dbbSThomas Higdon tp->rcv_ooopack = 0;
33876cda8b74SEric Dumazet
33881da177e4SLinus Torvalds
33897db92362SWei Wang /* Clean up fastopen related fields */
33907db92362SWei Wang tcp_free_fastopen_req(tp);
339108e39c0dSEric Dumazet inet_clear_bit(DEFER_CONNECT, sk);
339248027478SJason Baron tp->fastopen_client_fail = 0;
33937db92362SWei Wang
3394c720c7e8SEric Dumazet WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
33951da177e4SLinus Torvalds
33969b42d55aSLi RongQing if (sk->sk_frag.page) {
33979b42d55aSLi RongQing put_page(sk->sk_frag.page);
33989b42d55aSLi RongQing sk->sk_frag.page = NULL;
33999b42d55aSLi RongQing sk->sk_frag.offset = 0;
34009b42d55aSLi RongQing }
3401e3ae2365SAlexander Aring sk_error_report(sk);
3402a01512b1SYueHaibing return 0;
34031da177e4SLinus Torvalds }
34044bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_disconnect);
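
/* Illustrative sketch (editorial addition, not kernel source):
 * userspace typically reaches tcp_disconnect() by calling connect()
 * with an AF_UNSPEC address, which dissolves the association and
 * returns the socket to an unconnected, reusable state:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *	if (connect(fd, &sa, sizeof(sa)) < 0)
 *		perror("disconnect");
 */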
34051da177e4SLinus Torvalds
3406a2a385d6SEric Dumazet static inline bool tcp_can_repair_sock(const struct sock *sk)
3407ee995283SPavel Emelyanov {
3408cb388e7eSMartin KaFai Lau return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
3409319b0534SAndrey Vagin (sk->sk_state != TCP_LISTEN);
3410ee995283SPavel Emelyanov }
3411ee995283SPavel Emelyanov
3412d38d2b00SChristoph Hellwig static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len)
3413b1ed4c4fSAndrey Vagin {
3414b1ed4c4fSAndrey Vagin struct tcp_repair_window opt;
3415b1ed4c4fSAndrey Vagin
3416b1ed4c4fSAndrey Vagin if (!tp->repair)
3417b1ed4c4fSAndrey Vagin return -EPERM;
3418b1ed4c4fSAndrey Vagin
3419b1ed4c4fSAndrey Vagin if (len != sizeof(opt))
3420b1ed4c4fSAndrey Vagin return -EINVAL;
3421b1ed4c4fSAndrey Vagin
3422d38d2b00SChristoph Hellwig if (copy_from_sockptr(&opt, optbuf, sizeof(opt)))
3423b1ed4c4fSAndrey Vagin return -EFAULT;
3424b1ed4c4fSAndrey Vagin
3425b1ed4c4fSAndrey Vagin if (opt.max_window < opt.snd_wnd)
3426b1ed4c4fSAndrey Vagin return -EINVAL;
3427b1ed4c4fSAndrey Vagin
3428b1ed4c4fSAndrey Vagin if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
3429b1ed4c4fSAndrey Vagin return -EINVAL;
3430b1ed4c4fSAndrey Vagin
3431b1ed4c4fSAndrey Vagin if (after(opt.rcv_wup, tp->rcv_nxt))
3432b1ed4c4fSAndrey Vagin return -EINVAL;
3433b1ed4c4fSAndrey Vagin
3434b1ed4c4fSAndrey Vagin tp->snd_wl1 = opt.snd_wl1;
3435b1ed4c4fSAndrey Vagin tp->snd_wnd = opt.snd_wnd;
3436b1ed4c4fSAndrey Vagin tp->max_window = opt.max_window;
3437b1ed4c4fSAndrey Vagin
3438b1ed4c4fSAndrey Vagin tp->rcv_wnd = opt.rcv_wnd;
3439b1ed4c4fSAndrey Vagin tp->rcv_wup = opt.rcv_wup;
3440b1ed4c4fSAndrey Vagin
3441b1ed4c4fSAndrey Vagin return 0;
3442b1ed4c4fSAndrey Vagin }
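
/* Illustrative sketch (editorial addition, not kernel source): a
 * checkpoint/restore tool (e.g. CRIU) feeds this helper through the
 * TCP_REPAIR_WINDOW option on a socket already in repair mode; the
 * saved_* values stand in for state captured at checkpoint time:
 *
 *	struct tcp_repair_window w = {
 *		.snd_wl1    = saved_snd_wl1,
 *		.snd_wnd    = saved_snd_wnd,
 *		.max_window = saved_max_window,
 *		.rcv_wnd    = saved_rcv_wnd,
 *		.rcv_wup    = saved_rcv_wup,
 *	};
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, &w, sizeof(w));
 */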
3443b1ed4c4fSAndrey Vagin
3444d38d2b00SChristoph Hellwig static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf,
3445d38d2b00SChristoph Hellwig unsigned int len)
3446b139ba4eSPavel Emelyanov {
344715e56515SDouglas Caetano dos Santos struct tcp_sock *tp = tcp_sk(sk);
3448de248a75SPavel Emelyanov struct tcp_repair_opt opt;
3449d3c48151SChristoph Hellwig size_t offset = 0;
3450b139ba4eSPavel Emelyanov
3451de248a75SPavel Emelyanov while (len >= sizeof(opt)) {
3452d3c48151SChristoph Hellwig if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt)))
3453b139ba4eSPavel Emelyanov return -EFAULT;
3454b139ba4eSPavel Emelyanov
3455d3c48151SChristoph Hellwig offset += sizeof(opt);
3456de248a75SPavel Emelyanov len -= sizeof(opt);
3457b139ba4eSPavel Emelyanov
3458de248a75SPavel Emelyanov switch (opt.opt_code) {
3459de248a75SPavel Emelyanov case TCPOPT_MSS:
3460de248a75SPavel Emelyanov tp->rx_opt.mss_clamp = opt.opt_val;
346115e56515SDouglas Caetano dos Santos tcp_mtup_init(sk);
3462b139ba4eSPavel Emelyanov break;
3463de248a75SPavel Emelyanov case TCPOPT_WINDOW:
3464bc26ccd8SAndrey Vagin {
3465bc26ccd8SAndrey Vagin u16 snd_wscale = opt.opt_val & 0xFFFF;
3466bc26ccd8SAndrey Vagin u16 rcv_wscale = opt.opt_val >> 16;
3467bc26ccd8SAndrey Vagin
3468589c49cbSGao Feng if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE)
3469b139ba4eSPavel Emelyanov return -EFBIG;
3470b139ba4eSPavel Emelyanov
3471bc26ccd8SAndrey Vagin tp->rx_opt.snd_wscale = snd_wscale;
3472bc26ccd8SAndrey Vagin tp->rx_opt.rcv_wscale = rcv_wscale;
3473bc26ccd8SAndrey Vagin tp->rx_opt.wscale_ok = 1;
3474bc26ccd8SAndrey Vagin }
3475b139ba4eSPavel Emelyanov break;
3476b139ba4eSPavel Emelyanov case TCPOPT_SACK_PERM:
3477de248a75SPavel Emelyanov if (opt.opt_val != 0)
3478de248a75SPavel Emelyanov return -EINVAL;
3479de248a75SPavel Emelyanov
3480b139ba4eSPavel Emelyanov tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
3481b139ba4eSPavel Emelyanov break;
3482b139ba4eSPavel Emelyanov case TCPOPT_TIMESTAMP:
3483de248a75SPavel Emelyanov if (opt.opt_val != 0)
3484de248a75SPavel Emelyanov return -EINVAL;
3485de248a75SPavel Emelyanov
3486b139ba4eSPavel Emelyanov tp->rx_opt.tstamp_ok = 1;
3487b139ba4eSPavel Emelyanov break;
3488b139ba4eSPavel Emelyanov }
3489b139ba4eSPavel Emelyanov }
3490b139ba4eSPavel Emelyanov
3491b139ba4eSPavel Emelyanov return 0;
3492b139ba4eSPavel Emelyanov }
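
/* Illustrative sketch (editorial addition, not kernel source): the
 * loop above consumes an array of struct tcp_repair_opt supplied via
 * TCP_REPAIR_OPTIONS while the socket is in repair mode (userspace
 * defines TCPOPT_* constants matching the ones used here):
 *
 *	struct tcp_repair_opt opts[] = {
 *		{ TCPOPT_MSS, 1460 },
 *		{ TCPOPT_WINDOW, snd_wscale | (rcv_wscale << 16) },
 *		{ TCPOPT_SACK_PERM, 0 },
 *	};
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS,
 *		   opts, sizeof(opts));
 */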
3493b139ba4eSPavel Emelyanov
3494a842fe14SEric Dumazet DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
3495a842fe14SEric Dumazet EXPORT_SYMBOL(tcp_tx_delay_enabled);
3496a842fe14SEric Dumazet
3497a842fe14SEric Dumazet static void tcp_enable_tx_delay(void)
3498a842fe14SEric Dumazet {
3499a842fe14SEric Dumazet if (!static_branch_unlikely(&tcp_tx_delay_enabled)) {
3500a842fe14SEric Dumazet static int __tcp_tx_delay_enabled = 0;
3501a842fe14SEric Dumazet
3502a842fe14SEric Dumazet if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) {
3503a842fe14SEric Dumazet static_branch_enable(&tcp_tx_delay_enabled);
3504a842fe14SEric Dumazet pr_info("TCP_TX_DELAY enabled\n");
3505a842fe14SEric Dumazet }
3506a842fe14SEric Dumazet }
3507a842fe14SEric Dumazet }
3508a842fe14SEric Dumazet
3509db10538aSChristoph Hellwig /* When set, we always queue non-full frames. Later the user clears
3510db10538aSChristoph Hellwig * this option and we transmit any pending partial frames in the queue. This is
3511db10538aSChristoph Hellwig * meant to be used alongside sendfile() to get properly filled frames when the
3512db10538aSChristoph Hellwig * user (for example) must write out headers with a write() call first and then
3513db10538aSChristoph Hellwig * use sendfile to send out the data parts.
3514db10538aSChristoph Hellwig *
3515db10538aSChristoph Hellwig * TCP_CORK can be set together with TCP_NODELAY and it is stronger than
3516db10538aSChristoph Hellwig * TCP_NODELAY.
3517db10538aSChristoph Hellwig */
35186fadaa56SMaxim Galaganov void __tcp_sock_set_cork(struct sock *sk, bool on)
3519db10538aSChristoph Hellwig {
3520db10538aSChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk);
3521db10538aSChristoph Hellwig
3522db10538aSChristoph Hellwig if (on) {
3523db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_CORK;
3524db10538aSChristoph Hellwig } else {
3525db10538aSChristoph Hellwig tp->nonagle &= ~TCP_NAGLE_CORK;
3526db10538aSChristoph Hellwig if (tp->nonagle & TCP_NAGLE_OFF)
3527db10538aSChristoph Hellwig tp->nonagle |= TCP_NAGLE_PUSH;
3528db10538aSChristoph Hellwig tcp_push_pending_frames(sk);
3529db10538aSChristoph Hellwig }
3530db10538aSChristoph Hellwig }
3531db10538aSChristoph Hellwig
3532db10538aSChristoph Hellwig void tcp_sock_set_cork(struct sock *sk, bool on)
3533db10538aSChristoph Hellwig {
3534db10538aSChristoph Hellwig lock_sock(sk);
3535db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, on);
3536db10538aSChristoph Hellwig release_sock(sk);
3537db10538aSChristoph Hellwig }
3538db10538aSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_cork);
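
/* Illustrative sketch (editorial addition, not kernel source): the
 * header-plus-sendfile() pattern described above, as seen from
 * userspace:
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);		// queued, not yet sent
 *	sendfile(fd, file_fd, NULL, file_len);	// coalesced with the header
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *						// uncork flushes partial frames
 */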
3539db10538aSChristoph Hellwig
354012abc5eeSChristoph Hellwig /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is
354112abc5eeSChristoph Hellwig * remembered, but it is not activated until cork is cleared.
354212abc5eeSChristoph Hellwig *
354312abc5eeSChristoph Hellwig * However, when TCP_NODELAY is set we make an explicit push, which overrides
354412abc5eeSChristoph Hellwig * even TCP_CORK for currently queued segments.
354512abc5eeSChristoph Hellwig */
35466fadaa56SMaxim Galaganov void __tcp_sock_set_nodelay(struct sock *sk, bool on)
354712abc5eeSChristoph Hellwig {
354812abc5eeSChristoph Hellwig if (on) {
354912abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
355012abc5eeSChristoph Hellwig tcp_push_pending_frames(sk);
355112abc5eeSChristoph Hellwig } else {
355212abc5eeSChristoph Hellwig tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF;
355312abc5eeSChristoph Hellwig }
355412abc5eeSChristoph Hellwig }
355512abc5eeSChristoph Hellwig
355612abc5eeSChristoph Hellwig void tcp_sock_set_nodelay(struct sock *sk)
355712abc5eeSChristoph Hellwig {
355812abc5eeSChristoph Hellwig lock_sock(sk);
355912abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, true);
356012abc5eeSChristoph Hellwig release_sock(sk);
356112abc5eeSChristoph Hellwig }
356212abc5eeSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_nodelay);
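
/* Illustrative sketch (editorial addition, not kernel source):
 * latency-sensitive callers disable Nagle so small writes are pushed
 * immediately instead of waiting for outstanding ACKs:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 *	write(fd, req, req_len);	// sent right away, even if small
 */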
356312abc5eeSChristoph Hellwig
3564ddd061b8SChristoph Hellwig static void __tcp_sock_set_quickack(struct sock *sk, int val)
3565ddd061b8SChristoph Hellwig {
3566ddd061b8SChristoph Hellwig if (!val) {
3567ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk);
3568ddd061b8SChristoph Hellwig return;
3569ddd061b8SChristoph Hellwig }
3570ddd061b8SChristoph Hellwig
3571ddd061b8SChristoph Hellwig inet_csk_exit_pingpong_mode(sk);
3572ddd061b8SChristoph Hellwig if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
3573ddd061b8SChristoph Hellwig inet_csk_ack_scheduled(sk)) {
3574ddd061b8SChristoph Hellwig inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED;
3575ddd061b8SChristoph Hellwig tcp_cleanup_rbuf(sk, 1);
3576ddd061b8SChristoph Hellwig if (!(val & 1))
3577ddd061b8SChristoph Hellwig inet_csk_enter_pingpong_mode(sk);
3578ddd061b8SChristoph Hellwig }
3579ddd061b8SChristoph Hellwig }
3580ddd061b8SChristoph Hellwig
3581ddd061b8SChristoph Hellwig void tcp_sock_set_quickack(struct sock *sk, int val)
3582ddd061b8SChristoph Hellwig {
3583ddd061b8SChristoph Hellwig lock_sock(sk);
3584ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val);
3585ddd061b8SChristoph Hellwig release_sock(sk);
3586ddd061b8SChristoph Hellwig }
3587ddd061b8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_quickack);
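
/* Illustrative sketch (editorial addition, not kernel source):
 * TCP_QUICKACK is not permanent, so request/response loops typically
 * re-arm it around each receive:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
 *	read(fd, buf, sizeof(buf));	// incoming data is ACKed without delay
 */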
3588ddd061b8SChristoph Hellwig
3589557eadfcSChristoph Hellwig int tcp_sock_set_syncnt(struct sock *sk, int val)
3590557eadfcSChristoph Hellwig {
3591557eadfcSChristoph Hellwig if (val < 1 || val > MAX_TCP_SYNCNT)
3592557eadfcSChristoph Hellwig return -EINVAL;
3593557eadfcSChristoph Hellwig
35943a037f0fSEric Dumazet WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val);
3595557eadfcSChristoph Hellwig return 0;
3596557eadfcSChristoph Hellwig }
3597557eadfcSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_syncnt);
3598557eadfcSChristoph Hellwig
3599d58f2e15SEric Dumazet int tcp_sock_set_user_timeout(struct sock *sk, int val)
3600c488aeadSChristoph Hellwig {
3601d58f2e15SEric Dumazet /* Cap the max time in ms TCP will retry or probe the window
3602d58f2e15SEric Dumazet * before giving up and aborting (ETIMEDOUT) a connection.
3603d58f2e15SEric Dumazet */
3604d58f2e15SEric Dumazet if (val < 0)
3605d58f2e15SEric Dumazet return -EINVAL;
3606d58f2e15SEric Dumazet
360726023e91SEric Dumazet WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val);
3608d58f2e15SEric Dumazet return 0;
3609c488aeadSChristoph Hellwig }
3610c488aeadSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_user_timeout);
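
/* Illustrative sketch (editorial addition, not kernel source): the
 * value is in milliseconds; 0 restores the system default:
 *
 *	unsigned int ms = 30 * 1000;	// abort after 30s of unacked data
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms, sizeof(ms));
 */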
3611c488aeadSChristoph Hellwig
3612aad4a0a9SDmitry Yakunin int tcp_sock_set_keepidle_locked(struct sock *sk, int val)
361371c48eb8SChristoph Hellwig {
361471c48eb8SChristoph Hellwig struct tcp_sock *tp = tcp_sk(sk);
361571c48eb8SChristoph Hellwig
361671c48eb8SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPIDLE)
361771c48eb8SChristoph Hellwig return -EINVAL;
361871c48eb8SChristoph Hellwig
36194164245cSEric Dumazet /* Paired with WRITE_ONCE() in keepalive_time_when() */
36204164245cSEric Dumazet WRITE_ONCE(tp->keepalive_time, val * HZ);
362171c48eb8SChristoph Hellwig if (sock_flag(sk, SOCK_KEEPOPEN) &&
362271c48eb8SChristoph Hellwig !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
362371c48eb8SChristoph Hellwig u32 elapsed = keepalive_time_elapsed(tp);
362471c48eb8SChristoph Hellwig
362571c48eb8SChristoph Hellwig if (tp->keepalive_time > elapsed)
362671c48eb8SChristoph Hellwig elapsed = tp->keepalive_time - elapsed;
362771c48eb8SChristoph Hellwig else
362871c48eb8SChristoph Hellwig elapsed = 0;
362971c48eb8SChristoph Hellwig inet_csk_reset_keepalive_timer(sk, elapsed);
363071c48eb8SChristoph Hellwig }
363171c48eb8SChristoph Hellwig
363271c48eb8SChristoph Hellwig return 0;
363371c48eb8SChristoph Hellwig }
363471c48eb8SChristoph Hellwig
363571c48eb8SChristoph Hellwig int tcp_sock_set_keepidle(struct sock *sk, int val)
363671c48eb8SChristoph Hellwig {
363771c48eb8SChristoph Hellwig int err;
363871c48eb8SChristoph Hellwig
363971c48eb8SChristoph Hellwig lock_sock(sk);
3640aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val);
364171c48eb8SChristoph Hellwig release_sock(sk);
364271c48eb8SChristoph Hellwig return err;
364371c48eb8SChristoph Hellwig }
364471c48eb8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepidle);
364571c48eb8SChristoph Hellwig
3646d41ecaacSChristoph Hellwig int tcp_sock_set_keepintvl(struct sock *sk, int val)
3647d41ecaacSChristoph Hellwig {
3648d41ecaacSChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPINTVL)
3649d41ecaacSChristoph Hellwig return -EINVAL;
3650d41ecaacSChristoph Hellwig
36515ecf9d4fSEric Dumazet WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ);
3652d41ecaacSChristoph Hellwig return 0;
3653d41ecaacSChristoph Hellwig }
3654d41ecaacSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepintvl);
3655d41ecaacSChristoph Hellwig
3656480aeb96SChristoph Hellwig int tcp_sock_set_keepcnt(struct sock *sk, int val)
3657480aeb96SChristoph Hellwig {
3658480aeb96SChristoph Hellwig if (val < 1 || val > MAX_TCP_KEEPCNT)
3659480aeb96SChristoph Hellwig return -EINVAL;
3660480aeb96SChristoph Hellwig
36616e5e1de6SEric Dumazet /* Paired with READ_ONCE() in keepalive_probes() */
36626e5e1de6SEric Dumazet WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val);
3663480aeb96SChristoph Hellwig return 0;
3664480aeb96SChristoph Hellwig }
3665480aeb96SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepcnt);
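
/* Illustrative sketch (editorial addition, not kernel source): the
 * three keepalive knobs above are only consulted once SO_KEEPALIVE is
 * enabled on the socket:
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 *	// probe after 60s idle, every 10s, drop after 5 failed probes
 */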
3666480aeb96SChristoph Hellwig
3667cb811109SPrankur gupta int tcp_set_window_clamp(struct sock *sk, int val)
3668cb811109SPrankur gupta {
3669cb811109SPrankur gupta struct tcp_sock *tp = tcp_sk(sk);
3670cb811109SPrankur gupta
3671cb811109SPrankur gupta if (!val) {
3672cb811109SPrankur gupta if (sk->sk_state != TCP_CLOSE)
3673cb811109SPrankur gupta return -EINVAL;
3674f410cbeaSEric Dumazet WRITE_ONCE(tp->window_clamp, 0);
3675cb811109SPrankur gupta } else {
367658d3aadeSPaolo Abeni u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
367758d3aadeSPaolo Abeni u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
3678cb811109SPrankur gupta SOCK_MIN_RCVBUF / 2 : val;
367958d3aadeSPaolo Abeni
368058d3aadeSPaolo Abeni if (new_window_clamp == old_window_clamp)
368158d3aadeSPaolo Abeni return 0;
368258d3aadeSPaolo Abeni
3683f410cbeaSEric Dumazet WRITE_ONCE(tp->window_clamp, new_window_clamp);
368458d3aadeSPaolo Abeni if (new_window_clamp < old_window_clamp) {
368558d3aadeSPaolo Abeni /* need to apply the reserved mem provisioning only
368658d3aadeSPaolo Abeni * when shrinking the window clamp
368758d3aadeSPaolo Abeni */
368858d3aadeSPaolo Abeni __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
368958d3aadeSPaolo Abeni
369058d3aadeSPaolo Abeni } else {
369158d3aadeSPaolo Abeni new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
369258d3aadeSPaolo Abeni tp->rcv_ssthresh = max(new_rcv_ssthresh,
369358d3aadeSPaolo Abeni tp->rcv_ssthresh);
369458d3aadeSPaolo Abeni }
3695cb811109SPrankur gupta }
3696cb811109SPrankur gupta return 0;
3697cb811109SPrankur gupta }
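
/* Illustrative sketch (editorial addition, not kernel source):
 * TCP_WINDOW_CLAMP bounds the advertised receive window; values below
 * SOCK_MIN_RCVBUF / 2 are raised to that floor by the helper above:
 *
 *	int clamp = 65535;	// cap the advertised window
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_WINDOW_CLAMP, &clamp, sizeof(clamp));
 */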
3698cb811109SPrankur gupta
36991da177e4SLinus Torvalds /*
37001da177e4SLinus Torvalds * Socket option code for TCP.
37011da177e4SLinus Torvalds */
37020c751f70SMartin KaFai Lau int do_tcp_setsockopt(struct sock *sk, int level, int optname,
3703d38d2b00SChristoph Hellwig sockptr_t optval, unsigned int optlen)
37041da177e4SLinus Torvalds {
37051da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
3706463c84b9SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk);
37071e579caaSNikolay Borisov struct net *net = sock_net(sk);
37081da177e4SLinus Torvalds int val;
37091da177e4SLinus Torvalds int err = 0;
37101da177e4SLinus Torvalds
3711e56fb50fSWilliam Allen Simpson /* These are data/string values, all the others are ints */
3712e56fb50fSWilliam Allen Simpson switch (optname) {
3713e56fb50fSWilliam Allen Simpson case TCP_CONGESTION: {
37145f8ef48dSStephen Hemminger char name[TCP_CA_NAME_MAX];
37155f8ef48dSStephen Hemminger
37165f8ef48dSStephen Hemminger if (optlen < 1)
37175f8ef48dSStephen Hemminger return -EINVAL;
37185f8ef48dSStephen Hemminger
3719d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval,
37204fdb78d3SAndrew Morton min_t(long, TCP_CA_NAME_MAX-1, optlen));
37215f8ef48dSStephen Hemminger if (val < 0)
37225f8ef48dSStephen Hemminger return -EFAULT;
37235f8ef48dSStephen Hemminger name[val] = 0;
37245f8ef48dSStephen Hemminger
3725cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk);
372684e5a0f2SMartin KaFai Lau err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(),
3727cb388e7eSMartin KaFai Lau sockopt_ns_capable(sock_net(sk)->user_ns,
37288d650cdeSEric Dumazet CAP_NET_ADMIN));
3729cb388e7eSMartin KaFai Lau sockopt_release_sock(sk);
37305f8ef48dSStephen Hemminger return err;
37315f8ef48dSStephen Hemminger }
3732734942ccSDave Watson case TCP_ULP: {
3733734942ccSDave Watson char name[TCP_ULP_NAME_MAX];
3734734942ccSDave Watson
3735734942ccSDave Watson if (optlen < 1)
3736734942ccSDave Watson return -EINVAL;
3737734942ccSDave Watson
3738d38d2b00SChristoph Hellwig val = strncpy_from_sockptr(name, optval,
3739734942ccSDave Watson min_t(long, TCP_ULP_NAME_MAX - 1,
3740734942ccSDave Watson optlen));
3741734942ccSDave Watson if (val < 0)
3742734942ccSDave Watson return -EFAULT;
3743734942ccSDave Watson name[val] = 0;
3744734942ccSDave Watson
3745cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk);
3746734942ccSDave Watson err = tcp_set_ulp(sk, name);
3747cb388e7eSMartin KaFai Lau sockopt_release_sock(sk);
3748734942ccSDave Watson return err;
3749734942ccSDave Watson }
37501fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: {
37510f1ce023SJason Baron __u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
37520f1ce023SJason Baron __u8 *backup_key = NULL;
37531fba70e5SYuchung Cheng
37540f1ce023SJason Baron /* Allow a backup key as well to facilitate key rotation;
37550f1ce023SJason Baron * the first key is the active one.
37560f1ce023SJason Baron */
37570f1ce023SJason Baron if (optlen != TCP_FASTOPEN_KEY_LENGTH &&
37580f1ce023SJason Baron optlen != TCP_FASTOPEN_KEY_BUF_LENGTH)
37591fba70e5SYuchung Cheng return -EINVAL;
37601fba70e5SYuchung Cheng
3761d38d2b00SChristoph Hellwig if (copy_from_sockptr(key, optval, optlen))
37621fba70e5SYuchung Cheng return -EFAULT;
37631fba70e5SYuchung Cheng
37640f1ce023SJason Baron if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH)
37650f1ce023SJason Baron backup_key = key + TCP_FASTOPEN_KEY_LENGTH;
37660f1ce023SJason Baron
3767438ac880SArd Biesheuvel return tcp_fastopen_reset_cipher(net, sk, key, backup_key);
37681fba70e5SYuchung Cheng }
3769e56fb50fSWilliam Allen Simpson default:
3770e56fb50fSWilliam Allen Simpson /* fallthru */
3771e56fb50fSWilliam Allen Simpson break;
3772ccbd6a5aSJoe Perches }
37735f8ef48dSStephen Hemminger
37741da177e4SLinus Torvalds if (optlen < sizeof(int))
37751da177e4SLinus Torvalds return -EINVAL;
37761da177e4SLinus Torvalds
3777d38d2b00SChristoph Hellwig if (copy_from_sockptr(&val, optval, sizeof(val)))
37781da177e4SLinus Torvalds return -EFAULT;
37791da177e4SLinus Torvalds
3780d44fd4a7SEric Dumazet /* Handle options that can be set without locking the socket. */
3781d44fd4a7SEric Dumazet switch (optname) {
3782d44fd4a7SEric Dumazet case TCP_SYNCNT:
3783d44fd4a7SEric Dumazet return tcp_sock_set_syncnt(sk, val);
3784d58f2e15SEric Dumazet case TCP_USER_TIMEOUT:
3785d58f2e15SEric Dumazet return tcp_sock_set_user_timeout(sk, val);
37866fd70a6bSEric Dumazet case TCP_KEEPINTVL:
37876fd70a6bSEric Dumazet return tcp_sock_set_keepintvl(sk, val);
378884485080SEric Dumazet case TCP_KEEPCNT:
378984485080SEric Dumazet return tcp_sock_set_keepcnt(sk, val);
3790a81722ddSEric Dumazet case TCP_LINGER2:
3791a81722ddSEric Dumazet if (val < 0)
3792a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, -1);
3793a81722ddSEric Dumazet else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
3794a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);
3795a81722ddSEric Dumazet else
3796a81722ddSEric Dumazet WRITE_ONCE(tp->linger2, val * HZ);
3797a81722ddSEric Dumazet return 0;
37986e97ba55SEric Dumazet case TCP_DEFER_ACCEPT:
37996e97ba55SEric Dumazet /* Translate value in seconds to number of retransmits */
38006e97ba55SEric Dumazet WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
38016e97ba55SEric Dumazet secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
38026e97ba55SEric Dumazet TCP_RTO_MAX / HZ));
38036e97ba55SEric Dumazet return 0;
3804d44fd4a7SEric Dumazet }
3805d44fd4a7SEric Dumazet
3806cb388e7eSMartin KaFai Lau sockopt_lock_sock(sk);
38071da177e4SLinus Torvalds
38081da177e4SLinus Torvalds switch (optname) {
38091da177e4SLinus Torvalds case TCP_MAXSEG:
38101da177e4SLinus Torvalds /* Values greater than the interface MTU won't take effect. However,
38111da177e4SLinus Torvalds * at the point when this call is done we typically don't yet
3812a777f715SRohit Chavan * know which interface is going to be used.
3813a777f715SRohit Chavan */
3814cfc62d87SGao Feng if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
38151da177e4SLinus Torvalds err = -EINVAL;
38161da177e4SLinus Torvalds break;
38171da177e4SLinus Torvalds }
38181da177e4SLinus Torvalds tp->rx_opt.user_mss = val;
38191da177e4SLinus Torvalds break;
38201da177e4SLinus Torvalds
38211da177e4SLinus Torvalds case TCP_NODELAY:
382212abc5eeSChristoph Hellwig __tcp_sock_set_nodelay(sk, val);
38231da177e4SLinus Torvalds break;
38241da177e4SLinus Torvalds
382536e31b0aSAndreas Petlund case TCP_THIN_LINEAR_TIMEOUTS:
382636e31b0aSAndreas Petlund if (val < 0 || val > 1)
382736e31b0aSAndreas Petlund err = -EINVAL;
382836e31b0aSAndreas Petlund else
382936e31b0aSAndreas Petlund tp->thin_lto = val;
383036e31b0aSAndreas Petlund break;
383136e31b0aSAndreas Petlund
38327e380175SAndreas Petlund case TCP_THIN_DUPACK:
38337e380175SAndreas Petlund if (val < 0 || val > 1)
38347e380175SAndreas Petlund err = -EINVAL;
38357e380175SAndreas Petlund break;
38367e380175SAndreas Petlund
3837ee995283SPavel Emelyanov case TCP_REPAIR:
3838ee995283SPavel Emelyanov if (!tcp_can_repair_sock(sk))
3839ee995283SPavel Emelyanov err = -EPERM;
384031048d7aSStefan Baranoff else if (val == TCP_REPAIR_ON) {
3841ee995283SPavel Emelyanov tp->repair = 1;
3842ee995283SPavel Emelyanov sk->sk_reuse = SK_FORCE_REUSE;
3843ee995283SPavel Emelyanov tp->repair_queue = TCP_NO_QUEUE;
384431048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF) {
3845ee995283SPavel Emelyanov tp->repair = 0;
3846ee995283SPavel Emelyanov sk->sk_reuse = SK_NO_REUSE;
3847ee995283SPavel Emelyanov tcp_send_window_probe(sk);
384831048d7aSStefan Baranoff } else if (val == TCP_REPAIR_OFF_NO_WP) {
384931048d7aSStefan Baranoff tp->repair = 0;
385031048d7aSStefan Baranoff sk->sk_reuse = SK_NO_REUSE;
3851ee995283SPavel Emelyanov } else
3852ee995283SPavel Emelyanov err = -EINVAL;
3853ee995283SPavel Emelyanov
3854ee995283SPavel Emelyanov break;
3855ee995283SPavel Emelyanov
3856ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE:
3857ee995283SPavel Emelyanov if (!tp->repair)
3858ee995283SPavel Emelyanov err = -EPERM;
3859bf2acc94SEric Dumazet else if ((unsigned int)val < TCP_QUEUES_NR)
3860ee995283SPavel Emelyanov tp->repair_queue = val;
3861ee995283SPavel Emelyanov else
3862ee995283SPavel Emelyanov err = -EINVAL;
3863ee995283SPavel Emelyanov break;
3864ee995283SPavel Emelyanov
3865ee995283SPavel Emelyanov case TCP_QUEUE_SEQ:
38668811f4a9SEric Dumazet if (sk->sk_state != TCP_CLOSE) {
3867ee995283SPavel Emelyanov err = -EPERM;
38688811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_SEND_QUEUE) {
38698811f4a9SEric Dumazet if (!tcp_rtx_queue_empty(sk))
38708811f4a9SEric Dumazet err = -EPERM;
38718811f4a9SEric Dumazet else
38720f317464SEric Dumazet WRITE_ONCE(tp->write_seq, val);
38738811f4a9SEric Dumazet } else if (tp->repair_queue == TCP_RECV_QUEUE) {
38748811f4a9SEric Dumazet if (tp->rcv_nxt != tp->copied_seq) {
38758811f4a9SEric Dumazet err = -EPERM;
38768811f4a9SEric Dumazet } else {
3877dba7d9b8SEric Dumazet WRITE_ONCE(tp->rcv_nxt, val);
38786cd6cbf5SEric Dumazet WRITE_ONCE(tp->copied_seq, val);
38796cd6cbf5SEric Dumazet }
38808811f4a9SEric Dumazet } else {
3881ee995283SPavel Emelyanov err = -EINVAL;
38828811f4a9SEric Dumazet }
3883ee995283SPavel Emelyanov break;
3884ee995283SPavel Emelyanov
3885b139ba4eSPavel Emelyanov case TCP_REPAIR_OPTIONS:
3886b139ba4eSPavel Emelyanov if (!tp->repair)
3887b139ba4eSPavel Emelyanov err = -EINVAL;
38880c175da7SLu Wei else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent)
3889d38d2b00SChristoph Hellwig err = tcp_repair_options_est(sk, optval, optlen);
3890b139ba4eSPavel Emelyanov else
3891b139ba4eSPavel Emelyanov err = -EPERM;
3892b139ba4eSPavel Emelyanov break;
3893b139ba4eSPavel Emelyanov
38941da177e4SLinus Torvalds case TCP_CORK:
3895db10538aSChristoph Hellwig __tcp_sock_set_cork(sk, val);
38961da177e4SLinus Torvalds break;
38971da177e4SLinus Torvalds
38981da177e4SLinus Torvalds case TCP_KEEPIDLE:
3899aad4a0a9SDmitry Yakunin err = tcp_sock_set_keepidle_locked(sk, val);
39001da177e4SLinus Torvalds break;
3901cd8ae852SEric Dumazet case TCP_SAVE_SYN:
3902267cf9faSMartin KaFai Lau /* 0: disable, 1: enable, 2: start from ether_header */
3903267cf9faSMartin KaFai Lau if (val < 0 || val > 2)
3904cd8ae852SEric Dumazet err = -EINVAL;
3905cd8ae852SEric Dumazet else
3906cd8ae852SEric Dumazet tp->save_syn = val;
3907cd8ae852SEric Dumazet break;
3908cd8ae852SEric Dumazet
39091da177e4SLinus Torvalds case TCP_WINDOW_CLAMP:
3910cb811109SPrankur gupta err = tcp_set_window_clamp(sk, val);
39111da177e4SLinus Torvalds break;
39121da177e4SLinus Torvalds
39131da177e4SLinus Torvalds case TCP_QUICKACK:
3914ddd061b8SChristoph Hellwig __tcp_sock_set_quickack(sk, val);
39151da177e4SLinus Torvalds break;
39161da177e4SLinus Torvalds
3917faadfabaSDmitry Safonov case TCP_AO_REPAIR:
3918965c00e4SDmitry Safonov if (!tcp_can_repair_sock(sk)) {
3919965c00e4SDmitry Safonov err = -EPERM;
3920965c00e4SDmitry Safonov break;
3921965c00e4SDmitry Safonov }
3922faadfabaSDmitry Safonov err = tcp_ao_set_repair(sk, optval, optlen);
3923faadfabaSDmitry Safonov break;
39244954f17dSDmitry Safonov #ifdef CONFIG_TCP_AO
39254954f17dSDmitry Safonov case TCP_AO_ADD_KEY:
39264954f17dSDmitry Safonov case TCP_AO_DEL_KEY:
39274954f17dSDmitry Safonov case TCP_AO_INFO: {
39284954f17dSDmitry Safonov /* If this is the first TCP-AO setsockopt() on the socket,
3929faadfabaSDmitry Safonov * sk_state has to be LISTEN or CLOSE. Allow TCP_REPAIR
3930faadfabaSDmitry Safonov * in any state.
39314954f17dSDmitry Safonov */
3932faadfabaSDmitry Safonov if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
3933faadfabaSDmitry Safonov goto ao_parse;
3934faadfabaSDmitry Safonov if (rcu_dereference_protected(tcp_sk(sk)->ao_info,
39354954f17dSDmitry Safonov lockdep_sock_is_held(sk)))
3936faadfabaSDmitry Safonov goto ao_parse;
3937faadfabaSDmitry Safonov if (tp->repair)
3938faadfabaSDmitry Safonov goto ao_parse;
39394954f17dSDmitry Safonov err = -EISCONN;
39404954f17dSDmitry Safonov break;
3941faadfabaSDmitry Safonov ao_parse:
3942faadfabaSDmitry Safonov err = tp->af_specific->ao_parse(sk, optname, optval, optlen);
3943faadfabaSDmitry Safonov break;
39444954f17dSDmitry Safonov }
39454954f17dSDmitry Safonov #endif
3946cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3947cfb6eeb4SYOSHIFUJI Hideaki case TCP_MD5SIG:
39488917a777SIvan Delalande case TCP_MD5SIG_EXT:
3949d38d2b00SChristoph Hellwig err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
3950cfb6eeb4SYOSHIFUJI Hideaki break;
3951cfb6eeb4SYOSHIFUJI Hideaki #endif
39528336886fSJerry Chu case TCP_FASTOPEN:
39538336886fSJerry Chu if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
3954dfea2aa6SChristoph Paasch TCPF_LISTEN))) {
395543713848SHaishuang Yan tcp_fastopen_init_key_once(net);
3956dfea2aa6SChristoph Paasch
39570536fcc0SEric Dumazet fastopen_queue_tune(sk, val);
3958dfea2aa6SChristoph Paasch } else {
39598336886fSJerry Chu err = -EINVAL;
3960dfea2aa6SChristoph Paasch }
39618336886fSJerry Chu break;
396219f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT:
396319f6d3f3SWei Wang if (val > 1 || val < 0) {
396419f6d3f3SWei Wang err = -EINVAL;
39655a542133SKuniyuki Iwashima } else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) &
39665a542133SKuniyuki Iwashima TFO_CLIENT_ENABLE) {
396719f6d3f3SWei Wang if (sk->sk_state == TCP_CLOSE)
396819f6d3f3SWei Wang tp->fastopen_connect = val;
396919f6d3f3SWei Wang else
397019f6d3f3SWei Wang err = -EINVAL;
397119f6d3f3SWei Wang } else {
397219f6d3f3SWei Wang err = -EOPNOTSUPP;
397319f6d3f3SWei Wang }
397419f6d3f3SWei Wang break;
397571c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE:
397671c02379SChristoph Paasch if (val > 1 || val < 0)
397771c02379SChristoph Paasch err = -EINVAL;
397871c02379SChristoph Paasch else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
397971c02379SChristoph Paasch err = -EINVAL;
398071c02379SChristoph Paasch else
398171c02379SChristoph Paasch tp->fastopen_no_cookie = val;
398271c02379SChristoph Paasch break;
398393be6ce0SAndrey Vagin case TCP_TIMESTAMP:
3984614e8316SEric Dumazet if (!tp->repair) {
398593be6ce0SAndrey Vagin err = -EPERM;
3986614e8316SEric Dumazet break;
3987614e8316SEric Dumazet }
3988614e8316SEric Dumazet /* val is an opaque field,
3989614e8316SEric Dumazet * and its low-order bit contains the usec_ts enable bit.
3990614e8316SEric Dumazet * It's a best effort, and we do not care if the user makes an error.
3991614e8316SEric Dumazet */
3992614e8316SEric Dumazet tp->tcp_usec_ts = val & 1;
3993614e8316SEric Dumazet WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts));
399493be6ce0SAndrey Vagin break;
3995b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW:
3996b1ed4c4fSAndrey Vagin err = tcp_repair_set_window(tp, optval, optlen);
3997b1ed4c4fSAndrey Vagin break;
3998c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT:
39991aeb87bcSEric Dumazet WRITE_ONCE(tp->notsent_lowat, val);
4000c9bee3b7SEric Dumazet sk->sk_write_space(sk);
4001c9bee3b7SEric Dumazet break;
4002b75eba76SSoheil Hassas Yeganeh case TCP_INQ:
4003b75eba76SSoheil Hassas Yeganeh if (val > 1 || val < 0)
4004b75eba76SSoheil Hassas Yeganeh err = -EINVAL;
4005b75eba76SSoheil Hassas Yeganeh else
4006b75eba76SSoheil Hassas Yeganeh tp->recvmsg_inq = val;
4007b75eba76SSoheil Hassas Yeganeh break;
4008a842fe14SEric Dumazet case TCP_TX_DELAY:
4009a842fe14SEric Dumazet if (val)
4010a842fe14SEric Dumazet tcp_enable_tx_delay();
4011348b81b6SEric Dumazet WRITE_ONCE(tp->tcp_tx_delay, val);
4012a842fe14SEric Dumazet break;
40131da177e4SLinus Torvalds default:
40141da177e4SLinus Torvalds err = -ENOPROTOOPT;
40151da177e4SLinus Torvalds break;
40163ff50b79SStephen Hemminger }
40173ff50b79SStephen Hemminger
4018cb388e7eSMartin KaFai Lau sockopt_release_sock(sk);
40191da177e4SLinus Torvalds return err;
40201da177e4SLinus Torvalds }
40211da177e4SLinus Torvalds
4022a7b75c5aSChristoph Hellwig int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
4023b7058842SDavid S. Miller unsigned int optlen)
40243fdadf7dSDmitry Mishin {
4025cf533ea5SEric Dumazet const struct inet_connection_sock *icsk = inet_csk(sk);
40263fdadf7dSDmitry Mishin
40273fdadf7dSDmitry Mishin if (level != SOL_TCP)
4028f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
4029f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname,
40303fdadf7dSDmitry Mishin optval, optlen);
4031a7b75c5aSChristoph Hellwig return do_tcp_setsockopt(sk, level, optname, optval, optlen);
40323fdadf7dSDmitry Mishin }
40334bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_setsockopt);
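
/* Illustrative sketch (editorial addition, not kernel source): the
 * string-valued options handled early in do_tcp_setsockopt(), such as
 * picking a congestion control module per socket:
 *
 *	const char *ca = "cubic";	// any module the kernel has loaded
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, strlen(ca));
 */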
40343fdadf7dSDmitry Mishin
4035efd90174SFrancis Yan static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
4036efd90174SFrancis Yan struct tcp_info *info)
4037efd90174SFrancis Yan {
4038efd90174SFrancis Yan u64 stats[__TCP_CHRONO_MAX], total = 0;
4039efd90174SFrancis Yan enum tcp_chrono i;
4040efd90174SFrancis Yan
4041efd90174SFrancis Yan for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
4042efd90174SFrancis Yan stats[i] = tp->chrono_stat[i - 1];
4043efd90174SFrancis Yan if (i == tp->chrono_type)
4044628174ccSEric Dumazet stats[i] += tcp_jiffies32 - tp->chrono_start;
4045efd90174SFrancis Yan stats[i] *= USEC_PER_SEC / HZ;
4046efd90174SFrancis Yan total += stats[i];
4047efd90174SFrancis Yan }
4048efd90174SFrancis Yan
4049efd90174SFrancis Yan info->tcpi_busy_time = total;
4050efd90174SFrancis Yan info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
4051efd90174SFrancis Yan info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
4052efd90174SFrancis Yan }
4053efd90174SFrancis Yan
40541da177e4SLinus Torvalds /* Return information about the state of a TCP endpoint in API format. */
40550df48c26SEric Dumazet void tcp_get_info(struct sock *sk, struct tcp_info *info)
40561da177e4SLinus Torvalds {
405735ac838aSCraig Gallek const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
4058463c84b9SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk);
405976a9ebe8SEric Dumazet unsigned long rate;
40600263598cSWei Wang u32 now;
4061ff5d7497SEric Dumazet u64 rate64;
406267db3e4bSEric Dumazet bool slow;
40631da177e4SLinus Torvalds
40641da177e4SLinus Torvalds memset(info, 0, sizeof(*info));
406535ac838aSCraig Gallek if (sk->sk_type != SOCK_STREAM)
406635ac838aSCraig Gallek return;
40671da177e4SLinus Torvalds
4068986ffdfdSYafang Shao info->tcpi_state = inet_sk_state_load(sk);
406900fd38d9SEric Dumazet
4070ccbf3bfaSEric Dumazet /* Report meaningful fields for all TCP states, including listeners */
4071ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_pacing_rate);
407276a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL;
4073f522a5fcSEric Dumazet info->tcpi_pacing_rate = rate64;
4074ccbf3bfaSEric Dumazet
4075ccbf3bfaSEric Dumazet rate = READ_ONCE(sk->sk_max_pacing_rate);
407676a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL;
4077f522a5fcSEric Dumazet info->tcpi_max_pacing_rate = rate64;
4078ccbf3bfaSEric Dumazet
4079ccbf3bfaSEric Dumazet info->tcpi_reordering = tp->reordering;
408040570375SEric Dumazet info->tcpi_snd_cwnd = tcp_snd_cwnd(tp);
4081ccbf3bfaSEric Dumazet
4082ccbf3bfaSEric Dumazet if (info->tcpi_state == TCP_LISTEN) {
4083ccbf3bfaSEric Dumazet /* listeners' aliased fields:
4084ccbf3bfaSEric Dumazet * tcpi_unacked -> Number of children ready for accept()
4085ccbf3bfaSEric Dumazet * tcpi_sacked -> max backlog
4086ccbf3bfaSEric Dumazet */
4087288efe86SEric Dumazet info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
4088099ecf59SEric Dumazet info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);
4089ccbf3bfaSEric Dumazet return;
4090ccbf3bfaSEric Dumazet }
4091b369e7fdSEric Dumazet
4092b369e7fdSEric Dumazet slow = lock_sock_fast(sk);
4093b369e7fdSEric Dumazet
40946687e988SArnaldo Carvalho de Melo info->tcpi_ca_state = icsk->icsk_ca_state;
4095463c84b9SArnaldo Carvalho de Melo info->tcpi_retransmits = icsk->icsk_retransmits;
40966687e988SArnaldo Carvalho de Melo info->tcpi_probes = icsk->icsk_probes_out;
4097463c84b9SArnaldo Carvalho de Melo info->tcpi_backoff = icsk->icsk_backoff;
40981da177e4SLinus Torvalds
40991da177e4SLinus Torvalds if (tp->rx_opt.tstamp_ok)
41001da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
4101e60402d0SIlpo Järvinen if (tcp_is_sack(tp))
41021da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_SACK;
41031da177e4SLinus Torvalds if (tp->rx_opt.wscale_ok) {
41041da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_WSCALE;
41051da177e4SLinus Torvalds info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
41061da177e4SLinus Torvalds info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
41071da177e4SLinus Torvalds }
41081da177e4SLinus Torvalds
41091da177e4SLinus Torvalds if (tp->ecn_flags & TCP_ECN_OK)
41101da177e4SLinus Torvalds info->tcpi_options |= TCPI_OPT_ECN;
4111b5c5693bSEric Dumazet if (tp->ecn_flags & TCP_ECN_SEEN)
4112b5c5693bSEric Dumazet info->tcpi_options |= TCPI_OPT_ECN_SEEN;
41136f73601eSYuchung Cheng if (tp->syn_data_acked)
41146f73601eSYuchung Cheng info->tcpi_options |= TCPI_OPT_SYN_DATA;
4115a77a0f5cSEric Dumazet if (tp->tcp_usec_ts)
4116a77a0f5cSEric Dumazet info->tcpi_options |= TCPI_OPT_USEC_TS;
41171da177e4SLinus Torvalds
4118463c84b9SArnaldo Carvalho de Melo info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
411995b9a87cSDavid Morley info->tcpi_ato = jiffies_to_usecs(min_t(u32, icsk->icsk_ack.ato,
4120bbf80d71SEric Dumazet tcp_delack_max(sk)));
4121c1b4a7e6SDavid S. Miller info->tcpi_snd_mss = tp->mss_cache;
4122463c84b9SArnaldo Carvalho de Melo info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
41231da177e4SLinus Torvalds
41241da177e4SLinus Torvalds info->tcpi_unacked = tp->packets_out;
41251da177e4SLinus Torvalds info->tcpi_sacked = tp->sacked_out;
4126ccbf3bfaSEric Dumazet
41271da177e4SLinus Torvalds info->tcpi_lost = tp->lost_out;
41281da177e4SLinus Torvalds info->tcpi_retrans = tp->retrans_out;
41291da177e4SLinus Torvalds
4130d635fbe2SEric Dumazet now = tcp_jiffies32;
41311da177e4SLinus Torvalds info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
4132463c84b9SArnaldo Carvalho de Melo info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
41331da177e4SLinus Torvalds info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
41341da177e4SLinus Torvalds
4135d83d8461SArnaldo Carvalho de Melo info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
41361da177e4SLinus Torvalds info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
4137740b0f18SEric Dumazet info->tcpi_rtt = tp->srtt_us >> 3;
4138740b0f18SEric Dumazet info->tcpi_rttvar = tp->mdev_us >> 2;
41391da177e4SLinus Torvalds info->tcpi_snd_ssthresh = tp->snd_ssthresh;
41401da177e4SLinus Torvalds info->tcpi_advmss = tp->advmss;
41411da177e4SLinus Torvalds
4142645f4c6fSEric Dumazet info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
41431da177e4SLinus Torvalds info->tcpi_rcv_space = tp->rcvq_space.space;
41441da177e4SLinus Torvalds
41451da177e4SLinus Torvalds info->tcpi_total_retrans = tp->total_retrans;
4146977cb0ecSEric Dumazet
4147f522a5fcSEric Dumazet info->tcpi_bytes_acked = tp->bytes_acked;
4148f522a5fcSEric Dumazet info->tcpi_bytes_received = tp->bytes_received;
414967db3e4bSEric Dumazet info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
4150efd90174SFrancis Yan tcp_get_info_chrono_stats(tp, info);
415167db3e4bSEric Dumazet
41522efd055cSMarcelo Ricardo Leitner info->tcpi_segs_out = tp->segs_out;
41530307a0b7SEric Dumazet
41540307a0b7SEric Dumazet /* segs_in and data_segs_in can be updated from tcp_segs_in() in BH context */
41550307a0b7SEric Dumazet info->tcpi_segs_in = READ_ONCE(tp->segs_in);
41560307a0b7SEric Dumazet info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in);
4157cd9b2660SEric Dumazet
4158cd9b2660SEric Dumazet info->tcpi_min_rtt = tcp_min_rtt(tp);
4159a44d6eacSMartin KaFai Lau info->tcpi_data_segs_out = tp->data_segs_out;
4160eb8329e0SYuchung Cheng
4161eb8329e0SYuchung Cheng info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
41620263598cSWei Wang rate64 = tcp_compute_delivery_rate(tp);
41630263598cSWei Wang if (rate64)
4164f522a5fcSEric Dumazet info->tcpi_delivery_rate = rate64;
4165feb5f2ecSYuchung Cheng info->tcpi_delivered = tp->delivered;
4166feb5f2ecSYuchung Cheng info->tcpi_delivered_ce = tp->delivered_ce;
4167ba113c3aSWei Wang info->tcpi_bytes_sent = tp->bytes_sent;
4168fb31c9b9SWei Wang info->tcpi_bytes_retrans = tp->bytes_retrans;
41697e10b655SWei Wang info->tcpi_dsack_dups = tp->dsack_dups;
41707ec65372SWei Wang info->tcpi_reord_seen = tp->reord_seen;
4171f9af2dbbSThomas Higdon info->tcpi_rcv_ooopack = tp->rcv_ooopack;
41728f7baad7SThomas Higdon info->tcpi_snd_wnd = tp->snd_wnd;
417371fc7047SMubashir Adnan Qureshi info->tcpi_rcv_wnd = tp->rcv_wnd;
417471fc7047SMubashir Adnan Qureshi info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash;
417548027478SJason Baron info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
41763868ab0fSAananth V
41773868ab0fSAananth V info->tcpi_total_rto = tp->total_rto;
41783868ab0fSAananth V info->tcpi_total_rto_recoveries = tp->total_rto_recoveries;
41793868ab0fSAananth V info->tcpi_total_rto_time = tp->total_rto_time;
41802a7c8d29SEric Dumazet if (tp->rto_stamp)
41812a7c8d29SEric Dumazet info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp;
41823868ab0fSAananth V
4183b369e7fdSEric Dumazet unlock_sock_fast(sk, slow);
41841da177e4SLinus Torvalds }
41851da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info);
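
/* Illustrative sketch (editorial addition, not kernel source):
 * userspace reads the structure filled in above via getsockopt();
 * checking the returned length matters because older kernels fill a
 * shorter struct tcp_info:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt=%uus cwnd=%u retrans=%u\n",
 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 */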
41861da177e4SLinus Torvalds
4187984988aaSWei Wang static size_t tcp_opt_stats_get_size(void)
4188984988aaSWei Wang {
4189984988aaSWei Wang return
4190984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */
4191984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */
4192984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */
4193984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */
4194984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */
4195984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */
4196984988aaSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */
4197984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */
4198984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */
4199984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */
4200984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */
4201984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */
4202984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */
4203984988aaSWei Wang nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */
4204984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */
4205984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */
4206984988aaSWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */
4207ba113c3aSWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
4208fb31c9b9SWei Wang nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
42097e10b655SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
42107ec65372SWei Wang nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
4211e8bd8fcaSYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
421232efcc06SAbdul Kabbani nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */
4213e08ab0b3SYousuk Seung nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */
421448040793SYousuk Seung nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */
4215e7ed11eeSYousuk Seung nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */
421629c1c446SMubashir Adnan Qureshi nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */
4217984988aaSWei Wang 0;
4218984988aaSWei Wang }
4219984988aaSWei Wang
4220e7ed11eeSYousuk Seung /* Returns TTL or hop limit of an incoming packet from skb. */
4221e7ed11eeSYousuk Seung static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb)
4222e7ed11eeSYousuk Seung {
4223e7ed11eeSYousuk Seung if (skb->protocol == htons(ETH_P_IP))
4224e7ed11eeSYousuk Seung return ip_hdr(skb)->ttl;
4225e7ed11eeSYousuk Seung else if (skb->protocol == htons(ETH_P_IPV6))
4226e7ed11eeSYousuk Seung return ipv6_hdr(skb)->hop_limit;
4227e7ed11eeSYousuk Seung else
4228e7ed11eeSYousuk Seung return 0;
4229e7ed11eeSYousuk Seung }
4230e7ed11eeSYousuk Seung
423148040793SYousuk Seung struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
4232e7ed11eeSYousuk Seung const struct sk_buff *orig_skb,
4233e7ed11eeSYousuk Seung const struct sk_buff *ack_skb)
42341c885808SFrancis Yan {
42351c885808SFrancis Yan const struct tcp_sock *tp = tcp_sk(sk);
42361c885808SFrancis Yan struct sk_buff *stats;
42371c885808SFrancis Yan struct tcp_info info;
423876a9ebe8SEric Dumazet unsigned long rate;
4239bb7c19f9SWei Wang u64 rate64;
42401c885808SFrancis Yan
4241984988aaSWei Wang stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
42421c885808SFrancis Yan if (!stats)
42431c885808SFrancis Yan return NULL;
42441c885808SFrancis Yan
42451c885808SFrancis Yan tcp_get_info_chrono_stats(tp, &info);
42461c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_BUSY,
42471c885808SFrancis Yan info.tcpi_busy_time, TCP_NLA_PAD);
42481c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
42491c885808SFrancis Yan info.tcpi_rwnd_limited, TCP_NLA_PAD);
42501c885808SFrancis Yan nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
42511c885808SFrancis Yan info.tcpi_sndbuf_limited, TCP_NLA_PAD);
42527e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
42537e98102fSYuchung Cheng tp->data_segs_out, TCP_NLA_PAD);
42547e98102fSYuchung Cheng nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
42557e98102fSYuchung Cheng tp->total_retrans, TCP_NLA_PAD);
4256bb7c19f9SWei Wang
4257bb7c19f9SWei Wang rate = READ_ONCE(sk->sk_pacing_rate);
425876a9ebe8SEric Dumazet rate64 = (rate != ~0UL) ? rate : ~0ULL;
4259bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
4260bb7c19f9SWei Wang
4261bb7c19f9SWei Wang rate64 = tcp_compute_delivery_rate(tp);
4262bb7c19f9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
4263bb7c19f9SWei Wang
426440570375SEric Dumazet nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
4265bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
4266bb7c19f9SWei Wang nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
4267bb7c19f9SWei Wang
4268bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
4269bb7c19f9SWei Wang nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
42707156d194SYousuk Seung nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
4271feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
4272feb5f2ecSYuchung Cheng nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);
427387ecc95dSPriyaranjan Jha
427487ecc95dSPriyaranjan Jha nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
4275be631892SPriyaranjan Jha nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
4276feb5f2ecSYuchung Cheng
4277ba113c3aSWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
4278ba113c3aSWei Wang TCP_NLA_PAD);
4279fb31c9b9SWei Wang nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
4280fb31c9b9SWei Wang TCP_NLA_PAD);
42817e10b655SWei Wang nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
42827ec65372SWei Wang nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
4283e8bd8fcaSYousuk Seung nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
428432efcc06SAbdul Kabbani nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
4285e08ab0b3SYousuk Seung nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT,
4286e08ab0b3SYousuk Seung max_t(int, 0, tp->write_seq - tp->snd_nxt));
428748040793SYousuk Seung nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns,
428848040793SYousuk Seung TCP_NLA_PAD);
4289e7ed11eeSYousuk Seung if (ack_skb)
4290e7ed11eeSYousuk Seung nla_put_u8(stats, TCP_NLA_TTL,
4291e7ed11eeSYousuk Seung tcp_skb_ttl_or_hop_limit(ack_skb));
4292ba113c3aSWei Wang
429329c1c446SMubashir Adnan Qureshi nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash);
42941c885808SFrancis Yan return stats;
42951c885808SFrancis Yan }
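/* A minimal userspace sketch of consuming the attributes built above;
 * illustrative only, and it assumes SOF_TIMESTAMPING_OPT_STATS was enabled
 * via SO_TIMESTAMPING so that the stats arrive on the socket error queue
 * as a control message of type SCM_TIMESTAMPING_OPT_STATS carrying a flat
 * run of netlink attributes:
 *
 *	#include <linux/netlink.h>
 *	#include <linux/tcp.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static void parse_tcp_opt_stats(struct cmsghdr *cm)
 *	{
 *		struct nlattr *a = (struct nlattr *)CMSG_DATA(cm);
 *		int rem = cm->cmsg_len - CMSG_LEN(0);
 *
 *		while (rem >= (int)sizeof(*a) && a->nla_len >= sizeof(*a) &&
 *		       a->nla_len <= rem) {
 *			if (a->nla_type == TCP_NLA_DELIVERY_RATE) {
 *				__u64 rate;
 *
 *				memcpy(&rate, a + 1, sizeof(rate));
 *			}
 *			rem -= NLA_ALIGN(a->nla_len);
 *			a = (struct nlattr *)((char *)a + NLA_ALIGN(a->nla_len));
 *		}
 *	}
 */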
42961c885808SFrancis Yan
4297273b7f0fSMartin KaFai Lau int do_tcp_getsockopt(struct sock *sk, int level,
429834704ef0SMartin KaFai Lau int optname, sockptr_t optval, sockptr_t optlen)
42991da177e4SLinus Torvalds {
4300295f7324SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk);
43011da177e4SLinus Torvalds struct tcp_sock *tp = tcp_sk(sk);
43026fa25166SNikolay Borisov struct net *net = sock_net(sk);
43031da177e4SLinus Torvalds int val, len;
43041da177e4SLinus Torvalds
430534704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
43061da177e4SLinus Torvalds return -EFAULT;
43071da177e4SLinus Torvalds
43081da177e4SLinus Torvalds if (len < 0)
43091da177e4SLinus Torvalds return -EINVAL;
43101da177e4SLinus Torvalds
4311716edc97SGavrilov Ilia len = min_t(unsigned int, len, sizeof(int));
4312716edc97SGavrilov Ilia
43131da177e4SLinus Torvalds switch (optname) {
43141da177e4SLinus Torvalds case TCP_MAXSEG:
4315c1b4a7e6SDavid S. Miller val = tp->mss_cache;
431634dfde4aSCambda Zhu if (tp->rx_opt.user_mss &&
431734dfde4aSCambda Zhu ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
43181da177e4SLinus Torvalds val = tp->rx_opt.user_mss;
43195e6a3ce6SPavel Emelyanov if (tp->repair)
43205e6a3ce6SPavel Emelyanov val = tp->rx_opt.mss_clamp;
43211da177e4SLinus Torvalds break;
43221da177e4SLinus Torvalds case TCP_NODELAY:
43231da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_OFF);
43241da177e4SLinus Torvalds break;
43251da177e4SLinus Torvalds case TCP_CORK:
43261da177e4SLinus Torvalds val = !!(tp->nonagle&TCP_NAGLE_CORK);
43271da177e4SLinus Torvalds break;
43281da177e4SLinus Torvalds case TCP_KEEPIDLE:
4329df19a626SEric Dumazet val = keepalive_time_when(tp) / HZ;
43301da177e4SLinus Torvalds break;
43311da177e4SLinus Torvalds case TCP_KEEPINTVL:
4332df19a626SEric Dumazet val = keepalive_intvl_when(tp) / HZ;
43331da177e4SLinus Torvalds break;
43341da177e4SLinus Torvalds case TCP_KEEPCNT:
4335df19a626SEric Dumazet val = keepalive_probes(tp);
43361da177e4SLinus Torvalds break;
43371da177e4SLinus Torvalds case TCP_SYNCNT:
43383a037f0fSEric Dumazet val = READ_ONCE(icsk->icsk_syn_retries) ? :
433920a3b1c0SKuniyuki Iwashima READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
43401da177e4SLinus Torvalds break;
43411da177e4SLinus Torvalds case TCP_LINGER2:
43429df5335cSEric Dumazet val = READ_ONCE(tp->linger2);
43431da177e4SLinus Torvalds if (val >= 0)
434439e24435SKuniyuki Iwashima val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
43451da177e4SLinus Torvalds break;
43461da177e4SLinus Torvalds case TCP_DEFER_ACCEPT:
4347ae488c74SEric Dumazet val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept);
4348ae488c74SEric Dumazet val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ,
4349ae488c74SEric Dumazet TCP_RTO_MAX / HZ);
43501da177e4SLinus Torvalds break;
43511da177e4SLinus Torvalds case TCP_WINDOW_CLAMP:
4352f410cbeaSEric Dumazet val = READ_ONCE(tp->window_clamp);
43531da177e4SLinus Torvalds break;
43541da177e4SLinus Torvalds case TCP_INFO: {
43551da177e4SLinus Torvalds struct tcp_info info;
43561da177e4SLinus Torvalds
435734704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
43581da177e4SLinus Torvalds return -EFAULT;
43591da177e4SLinus Torvalds
43601da177e4SLinus Torvalds tcp_get_info(sk, &info);
43611da177e4SLinus Torvalds
43621da177e4SLinus Torvalds len = min_t(unsigned int, len, sizeof(info));
436334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
43641da177e4SLinus Torvalds return -EFAULT;
436534704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len))
43661da177e4SLinus Torvalds return -EFAULT;
43671da177e4SLinus Torvalds return 0;
43681da177e4SLinus Torvalds }
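/* Example (userspace, illustrative): TCP_INFO copies out at most
 * min(*optlen, sizeof(struct tcp_info)) bytes and writes the actual
 * length back, so older binaries keep working as the struct grows:
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("srtt %u us, total retrans %u\n",
 *		       ti.tcpi_rtt, ti.tcpi_total_retrans);
 */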
43696e9250f5SEric Dumazet case TCP_CC_INFO: {
43706e9250f5SEric Dumazet const struct tcp_congestion_ops *ca_ops;
43716e9250f5SEric Dumazet union tcp_cc_info info;
43726e9250f5SEric Dumazet size_t sz = 0;
43736e9250f5SEric Dumazet int attr;
43746e9250f5SEric Dumazet
437534704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
43766e9250f5SEric Dumazet return -EFAULT;
43776e9250f5SEric Dumazet
43786e9250f5SEric Dumazet ca_ops = icsk->icsk_ca_ops;
43796e9250f5SEric Dumazet if (ca_ops && ca_ops->get_info)
43806e9250f5SEric Dumazet sz = ca_ops->get_info(sk, ~0U, &attr, &info);
43816e9250f5SEric Dumazet
43826e9250f5SEric Dumazet len = min_t(unsigned int, len, sz);
438334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
43846e9250f5SEric Dumazet return -EFAULT;
438534704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &info, len))
43866e9250f5SEric Dumazet return -EFAULT;
43876e9250f5SEric Dumazet return 0;
43886e9250f5SEric Dumazet }
43891da177e4SLinus Torvalds case TCP_QUICKACK:
439031954cd8SWei Wang val = !inet_csk_in_pingpong_mode(sk);
43911da177e4SLinus Torvalds break;
43925f8ef48dSStephen Hemminger
43935f8ef48dSStephen Hemminger case TCP_CONGESTION:
439434704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
43955f8ef48dSStephen Hemminger return -EFAULT;
43965f8ef48dSStephen Hemminger len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
439734704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
43985f8ef48dSStephen Hemminger return -EFAULT;
439934704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len))
44005f8ef48dSStephen Hemminger return -EFAULT;
44015f8ef48dSStephen Hemminger return 0;
4402e56fb50fSWilliam Allen Simpson
4403734942ccSDave Watson case TCP_ULP:
440434704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
4405734942ccSDave Watson return -EFAULT;
4406734942ccSDave Watson len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
4407d97af30fSDave Watson if (!icsk->icsk_ulp_ops) {
440834704ef0SMartin KaFai Lau len = 0;
440934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
4410d97af30fSDave Watson return -EFAULT;
4411d97af30fSDave Watson return 0;
4412d97af30fSDave Watson }
441334704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
4414734942ccSDave Watson return -EFAULT;
441534704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len))
4416734942ccSDave Watson return -EFAULT;
4417734942ccSDave Watson return 0;
4418734942ccSDave Watson
44191fba70e5SYuchung Cheng case TCP_FASTOPEN_KEY: {
4420f19008e6SJason Baron u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
4421f19008e6SJason Baron unsigned int key_len;
44221fba70e5SYuchung Cheng
442334704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
44241fba70e5SYuchung Cheng return -EFAULT;
44251fba70e5SYuchung Cheng
4426f19008e6SJason Baron key_len = tcp_fastopen_get_cipher(net, icsk, key) *
44270f1ce023SJason Baron TCP_FASTOPEN_KEY_LENGTH;
44280f1ce023SJason Baron len = min_t(unsigned int, len, key_len);
442934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
44301fba70e5SYuchung Cheng return -EFAULT;
443134704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, key, len))
44321fba70e5SYuchung Cheng return -EFAULT;
44331fba70e5SYuchung Cheng return 0;
44341fba70e5SYuchung Cheng }
44353c0fef0bSJosh Hunt case TCP_THIN_LINEAR_TIMEOUTS:
44363c0fef0bSJosh Hunt val = tp->thin_lto;
44373c0fef0bSJosh Hunt break;
44384a7f6009SYuchung Cheng
44393c0fef0bSJosh Hunt case TCP_THIN_DUPACK:
44404a7f6009SYuchung Cheng val = 0;
44413c0fef0bSJosh Hunt break;
4442dca43c75SJerry Chu
4443ee995283SPavel Emelyanov case TCP_REPAIR:
4444ee995283SPavel Emelyanov val = tp->repair;
4445ee995283SPavel Emelyanov break;
4446ee995283SPavel Emelyanov
4447ee995283SPavel Emelyanov case TCP_REPAIR_QUEUE:
4448ee995283SPavel Emelyanov if (tp->repair)
4449ee995283SPavel Emelyanov val = tp->repair_queue;
4450ee995283SPavel Emelyanov else
4451ee995283SPavel Emelyanov return -EINVAL;
4452ee995283SPavel Emelyanov break;
4453ee995283SPavel Emelyanov
4454b1ed4c4fSAndrey Vagin case TCP_REPAIR_WINDOW: {
4455b1ed4c4fSAndrey Vagin struct tcp_repair_window opt;
4456b1ed4c4fSAndrey Vagin
445734704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
4458b1ed4c4fSAndrey Vagin return -EFAULT;
4459b1ed4c4fSAndrey Vagin
4460b1ed4c4fSAndrey Vagin if (len != sizeof(opt))
4461b1ed4c4fSAndrey Vagin return -EINVAL;
4462b1ed4c4fSAndrey Vagin
4463b1ed4c4fSAndrey Vagin if (!tp->repair)
4464b1ed4c4fSAndrey Vagin return -EPERM;
4465b1ed4c4fSAndrey Vagin
4466b1ed4c4fSAndrey Vagin opt.snd_wl1 = tp->snd_wl1;
4467b1ed4c4fSAndrey Vagin opt.snd_wnd = tp->snd_wnd;
4468b1ed4c4fSAndrey Vagin opt.max_window = tp->max_window;
4469b1ed4c4fSAndrey Vagin opt.rcv_wnd = tp->rcv_wnd;
4470b1ed4c4fSAndrey Vagin opt.rcv_wup = tp->rcv_wup;
4471b1ed4c4fSAndrey Vagin
447234704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &opt, len))
4473b1ed4c4fSAndrey Vagin return -EFAULT;
4474b1ed4c4fSAndrey Vagin return 0;
4475b1ed4c4fSAndrey Vagin }
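/* Example (userspace, illustrative): the window state can only be read
 * while the socket is in repair mode, so a checkpoint tool such as CRIU
 * does roughly the following under CAP_NET_ADMIN; note that
 * TCP_REPAIR_WINDOW demands an exact optlen:
 *
 *	struct tcp_repair_window w;
 *	socklen_t len = sizeof(w);
 *	int on = 1;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));
 *	getsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, &w, &len);
 */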
4476ee995283SPavel Emelyanov case TCP_QUEUE_SEQ:
4477ee995283SPavel Emelyanov if (tp->repair_queue == TCP_SEND_QUEUE)
4478ee995283SPavel Emelyanov val = tp->write_seq;
4479ee995283SPavel Emelyanov else if (tp->repair_queue == TCP_RECV_QUEUE)
4480ee995283SPavel Emelyanov val = tp->rcv_nxt;
4481ee995283SPavel Emelyanov else
4482ee995283SPavel Emelyanov return -EINVAL;
4483ee995283SPavel Emelyanov break;
4484ee995283SPavel Emelyanov
4485dca43c75SJerry Chu case TCP_USER_TIMEOUT:
448626023e91SEric Dumazet val = READ_ONCE(icsk->icsk_user_timeout);
4487dca43c75SJerry Chu break;
44881536e285SKenjiro Nakayama
44891536e285SKenjiro Nakayama case TCP_FASTOPEN:
449070f360ddSEric Dumazet val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen);
44911536e285SKenjiro Nakayama break;
44921536e285SKenjiro Nakayama
449319f6d3f3SWei Wang case TCP_FASTOPEN_CONNECT:
449419f6d3f3SWei Wang val = tp->fastopen_connect;
449519f6d3f3SWei Wang break;
449619f6d3f3SWei Wang
449771c02379SChristoph Paasch case TCP_FASTOPEN_NO_COOKIE:
449871c02379SChristoph Paasch val = tp->fastopen_no_cookie;
449971c02379SChristoph Paasch break;
450071c02379SChristoph Paasch
4501a842fe14SEric Dumazet case TCP_TX_DELAY:
4502348b81b6SEric Dumazet val = READ_ONCE(tp->tcp_tx_delay);
4503a842fe14SEric Dumazet break;
4504a842fe14SEric Dumazet
450593be6ce0SAndrey Vagin case TCP_TIMESTAMP:
4506614e8316SEric Dumazet val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset);
4507614e8316SEric Dumazet if (tp->tcp_usec_ts)
4508614e8316SEric Dumazet val |= 1;
4509614e8316SEric Dumazet else
4510614e8316SEric Dumazet val &= ~1;
451193be6ce0SAndrey Vagin break;
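/* Example (illustrative): bit 0 of the TCP_TIMESTAMP value reports
 * whether the socket uses 1-usec TS values; the remaining bits carry
 * the current timestamp (including tsoffset):
 *
 *	int v, usec_ts, tsval;
 *	socklen_t len = sizeof(v);
 *
 *	getsockopt(fd, IPPROTO_TCP, TCP_TIMESTAMP, &v, &len);
 *	usec_ts = v & 1;
 *	tsval = v & ~1;
 */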
4512c9bee3b7SEric Dumazet case TCP_NOTSENT_LOWAT:
45131aeb87bcSEric Dumazet val = READ_ONCE(tp->notsent_lowat);
4514c9bee3b7SEric Dumazet break;
4515b75eba76SSoheil Hassas Yeganeh case TCP_INQ:
4516b75eba76SSoheil Hassas Yeganeh val = tp->recvmsg_inq;
4517b75eba76SSoheil Hassas Yeganeh break;
4518cd8ae852SEric Dumazet case TCP_SAVE_SYN:
4519cd8ae852SEric Dumazet val = tp->save_syn;
4520cd8ae852SEric Dumazet break;
4521cd8ae852SEric Dumazet case TCP_SAVED_SYN: {
452234704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
4523cd8ae852SEric Dumazet return -EFAULT;
4524cd8ae852SEric Dumazet
4525d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk);
4526cd8ae852SEric Dumazet if (tp->saved_syn) {
452770a217f1SMartin KaFai Lau if (len < tcp_saved_syn_len(tp->saved_syn)) {
452834704ef0SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn);
452934704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) {
4530d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4531aea0929eSEric B Munson return -EFAULT;
4532aea0929eSEric B Munson }
4533d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4534aea0929eSEric B Munson return -EINVAL;
4535aea0929eSEric B Munson }
453670a217f1SMartin KaFai Lau len = tcp_saved_syn_len(tp->saved_syn);
453734704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int))) {
4538d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4539cd8ae852SEric Dumazet return -EFAULT;
4540cd8ae852SEric Dumazet }
454134704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, tp->saved_syn->data, len)) {
4542d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4543cd8ae852SEric Dumazet return -EFAULT;
4544cd8ae852SEric Dumazet }
4545cd8ae852SEric Dumazet tcp_saved_syn_free(tp);
4546d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4547cd8ae852SEric Dumazet } else {
4548d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
4549cd8ae852SEric Dumazet len = 0;
455034704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
4551cd8ae852SEric Dumazet return -EFAULT;
4552cd8ae852SEric Dumazet }
4553cd8ae852SEric Dumazet return 0;
4554cd8ae852SEric Dumazet }
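/* Example (userspace, illustrative): when the buffer is too small the
 * kernel writes the required length to *optlen and fails with EINVAL,
 * so callers size the buffer in two steps; TCP_SAVE_SYN must have been
 * set before the handshake, and the saved SYN is freed once read:
 *
 *	#include <errno.h>
 *
 *	unsigned char buf[512];
 *	socklen_t len = 0;
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_SAVED_SYN, buf, &len) < 0 &&
 *	    errno == EINVAL && len <= sizeof(buf))
 *		getsockopt(fd, IPPROTO_TCP, TCP_SAVED_SYN, buf, &len);
 */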
455505255b82SEric Dumazet #ifdef CONFIG_MMU
455605255b82SEric Dumazet case TCP_ZEROCOPY_RECEIVE: {
45577eeba170SArjun Roy struct scm_timestamping_internal tss;
4558e0fecb28SArjun Roy struct tcp_zerocopy_receive zc = {};
455905255b82SEric Dumazet int err;
456005255b82SEric Dumazet
456134704ef0SMartin KaFai Lau if (copy_from_sockptr(&len, optlen, sizeof(int)))
456205255b82SEric Dumazet return -EFAULT;
45632107d45fSArjun Roy if (len < 0 ||
45642107d45fSArjun Roy len < offsetofend(struct tcp_zerocopy_receive, length))
456505255b82SEric Dumazet return -EINVAL;
45663c5a2fd0SArjun Roy if (unlikely(len > sizeof(zc))) {
456734704ef0SMartin KaFai Lau err = check_zeroed_sockptr(optval, sizeof(zc),
45683c5a2fd0SArjun Roy len - sizeof(zc));
45693c5a2fd0SArjun Roy if (err < 1)
45703c5a2fd0SArjun Roy return err == 0 ? -EINVAL : err;
4571c8856c05SArjun Roy len = sizeof(zc);
457234704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
45730b7f41f6SArjun Roy return -EFAULT;
45740b7f41f6SArjun Roy }
457534704ef0SMartin KaFai Lau if (copy_from_sockptr(&zc, optval, len))
457605255b82SEric Dumazet return -EFAULT;
45773c5a2fd0SArjun Roy if (zc.reserved)
45783c5a2fd0SArjun Roy return -EINVAL;
45793c5a2fd0SArjun Roy if (zc.msg_flags & ~(TCP_VALID_ZC_MSG_FLAGS))
45803c5a2fd0SArjun Roy return -EINVAL;
4581d51bbff2SMartin KaFai Lau sockopt_lock_sock(sk);
45827eeba170SArjun Roy err = tcp_zerocopy_receive(sk, &zc, &tss);
45839cacf81fSStanislav Fomichev err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
45849cacf81fSStanislav Fomichev &zc, &len, err);
4585d51bbff2SMartin KaFai Lau sockopt_release_sock(sk);
45867eeba170SArjun Roy if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags))
45877eeba170SArjun Roy goto zerocopy_rcv_cmsg;
4588c8856c05SArjun Roy switch (len) {
45897eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_flags):
45907eeba170SArjun Roy goto zerocopy_rcv_cmsg;
45917eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_controllen):
45927eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, msg_control):
45937eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, flags):
45947eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_len):
45957eeba170SArjun Roy case offsetofend(struct tcp_zerocopy_receive, copybuf_address):
459633946518SArjun Roy case offsetofend(struct tcp_zerocopy_receive, err):
459733946518SArjun Roy goto zerocopy_rcv_sk_err;
4598c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, inq):
4599c8856c05SArjun Roy goto zerocopy_rcv_inq;
4600c8856c05SArjun Roy case offsetofend(struct tcp_zerocopy_receive, length):
4601c8856c05SArjun Roy default:
4602c8856c05SArjun Roy goto zerocopy_rcv_out;
4603c8856c05SArjun Roy }
46047eeba170SArjun Roy zerocopy_rcv_cmsg:
46057eeba170SArjun Roy if (zc.msg_flags & TCP_CMSG_TS)
46067eeba170SArjun Roy tcp_zc_finalize_rx_tstamp(sk, &zc, &tss);
46077eeba170SArjun Roy else
46087eeba170SArjun Roy zc.msg_flags = 0;
460933946518SArjun Roy zerocopy_rcv_sk_err:
461033946518SArjun Roy if (!err)
461133946518SArjun Roy zc.err = sock_error(sk);
4612c8856c05SArjun Roy zerocopy_rcv_inq:
4613c8856c05SArjun Roy zc.inq = tcp_inq_hint(sk);
4614c8856c05SArjun Roy zerocopy_rcv_out:
461534704ef0SMartin KaFai Lau if (!err && copy_to_sockptr(optval, &zc, len))
461605255b82SEric Dumazet err = -EFAULT;
461705255b82SEric Dumazet return err;
461805255b82SEric Dumazet }
461905255b82SEric Dumazet #endif
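/* Example (userspace, illustrative sketch): the path above maps payload
 * pages into a VMA obtained by mmap()ing the TCP socket itself; bytes
 * that cannot be mapped (zc.recv_skip_hint) must be drained with a
 * regular recv(). consume() and skip_buf stand in for the application's
 * reader, and error handling is omitted:
 *
 *	#include <linux/tcp.h>
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *
 *	size_t chunk = 1 << 20;
 *	void *addr = mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0);
 *	struct tcp_zerocopy_receive zc;
 *	socklen_t zc_len = sizeof(zc);
 *
 *	memset(&zc, 0, sizeof(zc));
 *	zc.address = (__u64)(unsigned long)addr;
 *	zc.length = chunk;
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE,
 *		       &zc, &zc_len) == 0) {
 *		consume(addr, zc.length);
 *		if (zc.recv_skip_hint)
 *			recv(fd, skip_buf, zc.recv_skip_hint, 0);
 *	}
 */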
4620faadfabaSDmitry Safonov case TCP_AO_REPAIR:
4621965c00e4SDmitry Safonov if (!tcp_can_repair_sock(sk))
4622965c00e4SDmitry Safonov return -EPERM;
4623faadfabaSDmitry Safonov return tcp_ao_get_repair(sk, optval, optlen);
4624ef84703aSDmitry Safonov case TCP_AO_GET_KEYS:
4625ef84703aSDmitry Safonov case TCP_AO_INFO: {
4626ef84703aSDmitry Safonov int err;
4627ef84703aSDmitry Safonov
4628ef84703aSDmitry Safonov sockopt_lock_sock(sk);
4629ef84703aSDmitry Safonov if (optname == TCP_AO_GET_KEYS)
4630ef84703aSDmitry Safonov err = tcp_ao_get_mkts(sk, optval, optlen);
4631ef84703aSDmitry Safonov else
4632ef84703aSDmitry Safonov err = tcp_ao_get_sock_info(sk, optval, optlen);
4633ef84703aSDmitry Safonov sockopt_release_sock(sk);
4634ef84703aSDmitry Safonov
4635ef84703aSDmitry Safonov return err;
4636ef84703aSDmitry Safonov }
4637c084ebd7SMatthieu Baerts (NGI0) case TCP_IS_MPTCP:
4638c084ebd7SMatthieu Baerts (NGI0) val = 0;
4639c084ebd7SMatthieu Baerts (NGI0) break;
46401da177e4SLinus Torvalds default:
46411da177e4SLinus Torvalds return -ENOPROTOOPT;
46423ff50b79SStephen Hemminger }
46431da177e4SLinus Torvalds
464434704ef0SMartin KaFai Lau if (copy_to_sockptr(optlen, &len, sizeof(int)))
46451da177e4SLinus Torvalds return -EFAULT;
464634704ef0SMartin KaFai Lau if (copy_to_sockptr(optval, &val, len))
46471da177e4SLinus Torvalds return -EFAULT;
46481da177e4SLinus Torvalds return 0;
46491da177e4SLinus Torvalds }
46501da177e4SLinus Torvalds
46519cacf81fSStanislav Fomichev bool tcp_bpf_bypass_getsockopt(int level, int optname)
46529cacf81fSStanislav Fomichev {
46539cacf81fSStanislav Fomichev /* TCP's do_tcp_getsockopt has an optimized getsockopt implementation
46549cacf81fSStanislav Fomichev * that avoids the extra socket lock for TCP_ZEROCOPY_RECEIVE.
46559cacf81fSStanislav Fomichev */
46569cacf81fSStanislav Fomichev if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
46579cacf81fSStanislav Fomichev return true;
46589cacf81fSStanislav Fomichev
46599cacf81fSStanislav Fomichev return false;
46609cacf81fSStanislav Fomichev }
46619cacf81fSStanislav Fomichev EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt);
46629cacf81fSStanislav Fomichev
46633fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
46643fdadf7dSDmitry Mishin int __user *optlen)
46653fdadf7dSDmitry Mishin {
46663fdadf7dSDmitry Mishin struct inet_connection_sock *icsk = inet_csk(sk);
46673fdadf7dSDmitry Mishin
46683fdadf7dSDmitry Mishin if (level != SOL_TCP)
4669f49cd2f4SKuniyuki Iwashima /* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
4670f49cd2f4SKuniyuki Iwashima return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname,
46713fdadf7dSDmitry Mishin optval, optlen);
467234704ef0SMartin KaFai Lau return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval),
467334704ef0SMartin KaFai Lau USER_SOCKPTR(optlen));
46743fdadf7dSDmitry Mishin }
46754bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_getsockopt);
46763fdadf7dSDmitry Mishin
4677cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
46788c73b263SDmitry Safonov int tcp_md5_sigpool_id = -1;
46798c73b263SDmitry Safonov EXPORT_SYMBOL_GPL(tcp_md5_sigpool_id);
4680cfb6eeb4SYOSHIFUJI Hideaki
46818c73b263SDmitry Safonov int tcp_md5_alloc_sigpool(void)
4682cfb6eeb4SYOSHIFUJI Hideaki {
46838c73b263SDmitry Safonov size_t scratch_size;
46848c73b263SDmitry Safonov int ret;
4685cfb6eeb4SYOSHIFUJI Hideaki
46868c73b263SDmitry Safonov scratch_size = sizeof(union tcp_md5sum_block) + sizeof(struct tcphdr);
46878c73b263SDmitry Safonov ret = tcp_sigpool_alloc_ahash("md5", scratch_size);
46888c73b263SDmitry Safonov if (ret >= 0) {
46898c73b263SDmitry Safonov /* As long as any MD5 sigpool is allocated, the returned
46908c73b263SDmitry Safonov * id stays the same. It is re-written only when all MD5
46918c73b263SDmitry Safonov * keys were previously deleted and this call allocates
46928c73b263SDmitry Safonov * the first MD5 key again, which may yield a different
46938c73b263SDmitry Safonov * sigpool id than was used previously.
469471cea17eSEric Dumazet */
46958c73b263SDmitry Safonov WRITE_ONCE(tcp_md5_sigpool_id, ret); /* Pairs with the READ_ONCE() readers */
469649a72dfbSAdam Langley return 0;
469749a72dfbSAdam Langley }
46988c73b263SDmitry Safonov return ret;
46998c73b263SDmitry Safonov }
470049a72dfbSAdam Langley
47018c73b263SDmitry Safonov void tcp_md5_release_sigpool(void)
47028c73b263SDmitry Safonov {
47038c73b263SDmitry Safonov tcp_sigpool_release(READ_ONCE(tcp_md5_sigpool_id));
47048c73b263SDmitry Safonov }
47058c73b263SDmitry Safonov
47068c73b263SDmitry Safonov void tcp_md5_add_sigpool(void)
47078c73b263SDmitry Safonov {
47088c73b263SDmitry Safonov tcp_sigpool_get(READ_ONCE(tcp_md5_sigpool_id));
47098c73b263SDmitry Safonov }
47108c73b263SDmitry Safonov
47118c73b263SDmitry Safonov int tcp_md5_hash_key(struct tcp_sigpool *hp,
47128c73b263SDmitry Safonov const struct tcp_md5sig_key *key)
471349a72dfbSAdam Langley {
4714e6ced831SEric Dumazet u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
471549a72dfbSAdam Langley struct scatterlist sg;
471649a72dfbSAdam Langley
47176a2febecSEric Dumazet sg_init_one(&sg, key->key, keylen);
47188c73b263SDmitry Safonov ahash_request_set_crypt(hp->req, &sg, NULL, keylen);
4719e6ced831SEric Dumazet
47208c73b263SDmitry Safonov /* We use data_race() because tcp_md5_do_add() might change
47218c73b263SDmitry Safonov * key->key under us
47228c73b263SDmitry Safonov */
47238c73b263SDmitry Safonov return data_race(crypto_ahash_update(hp->req));
472449a72dfbSAdam Langley }
472549a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_key);
472649a72dfbSAdam Langley
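/* Example (userspace, illustrative): the keys hashed above are installed
 * per-peer with the TCP_MD5SIG socket option (RFC 2385 segment
 * protection); 192.0.2.1 and "secret" are placeholder values:
 *
 *	#include <arpa/inet.h>
 *	#include <linux/tcp.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	peer->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	md5.tcpm_keylen = strlen("secret");
 *	memcpy(md5.tcpm_key, "secret", md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */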
47277bbb765bSDmitry Safonov /* Called with rcu_read_lock() */
4728811efc06SDmitry Safonov static enum skb_drop_reason
47291330b6efSJakub Kicinski tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
47307bbb765bSDmitry Safonov const void *saddr, const void *daddr,
47310a3a8090SDmitry Safonov int family, int l3index, const __u8 *hash_location)
47327bbb765bSDmitry Safonov {
47330a3a8090SDmitry Safonov /* This gets called for each TCP segment that has the TCP-MD5 option.
47347bbb765bSDmitry Safonov * We have 3 drop cases:
47357bbb765bSDmitry Safonov * o No MD5 hash and one expected.
47367bbb765bSDmitry Safonov * o MD5 hash and we're not expecting one.
47377bbb765bSDmitry Safonov * o MD5 hash and it's wrong.
47387bbb765bSDmitry Safonov */
4739e9d9da91SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk);
47400a3a8090SDmitry Safonov struct tcp_md5sig_key *key;
47417bbb765bSDmitry Safonov u8 newhash[16];
47420a3a8090SDmitry Safonov int genhash;
47437bbb765bSDmitry Safonov
47440a3a8090SDmitry Safonov key = tcp_md5_do_lookup(sk, l3index, saddr, family);
47457bbb765bSDmitry Safonov
47460a3a8090SDmitry Safonov if (!key && hash_location) {
47477bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
474896be3dcdSDmitry Safonov trace_tcp_hash_md5_unexpected(sk, skb);
47491330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
47507bbb765bSDmitry Safonov }
47517bbb765bSDmitry Safonov
4752e62d2e11SEric Dumazet /* Check the signature.
4753e62d2e11SEric Dumazet * To support dual stack listeners, we need to handle
4754e62d2e11SEric Dumazet * IPv4-mapped case.
4755e62d2e11SEric Dumazet */
4756e62d2e11SEric Dumazet if (family == AF_INET)
47570a3a8090SDmitry Safonov genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
4758e62d2e11SEric Dumazet else
47590a3a8090SDmitry Safonov genhash = tp->af_specific->calc_md5_hash(newhash, key,
47607bbb765bSDmitry Safonov NULL, skb);
47617bbb765bSDmitry Safonov if (genhash || memcmp(hash_location, newhash, 16) != 0) {
47627bbb765bSDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
476396be3dcdSDmitry Safonov trace_tcp_hash_md5_mismatch(sk, skb);
47641330b6efSJakub Kicinski return SKB_DROP_REASON_TCP_MD5FAILURE;
47657bbb765bSDmitry Safonov }
47661330b6efSJakub Kicinski return SKB_NOT_DROPPED_YET;
47677bbb765bSDmitry Safonov }
4768811efc06SDmitry Safonov #else
4769811efc06SDmitry Safonov static inline enum skb_drop_reason
4770811efc06SDmitry Safonov tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
4771811efc06SDmitry Safonov const void *saddr, const void *daddr,
4772811efc06SDmitry Safonov int family, int l3index, const __u8 *hash_location)
4773811efc06SDmitry Safonov {
4774811efc06SDmitry Safonov return SKB_NOT_DROPPED_YET;
4775811efc06SDmitry Safonov }
47767bbb765bSDmitry Safonov
4777cfb6eeb4SYOSHIFUJI Hideaki #endif
4778cfb6eeb4SYOSHIFUJI Hideaki
4779811efc06SDmitry Safonov /* Called with rcu_read_lock() */
4780811efc06SDmitry Safonov enum skb_drop_reason
4781811efc06SDmitry Safonov tcp_inbound_hash(struct sock *sk, const struct request_sock *req,
4782811efc06SDmitry Safonov const struct sk_buff *skb,
4783811efc06SDmitry Safonov const void *saddr, const void *daddr,
4784811efc06SDmitry Safonov int family, int dif, int sdif)
4785811efc06SDmitry Safonov {
4786811efc06SDmitry Safonov const struct tcphdr *th = tcp_hdr(skb);
4787811efc06SDmitry Safonov const struct tcp_ao_hdr *aoh;
4788811efc06SDmitry Safonov const __u8 *md5_location;
4789811efc06SDmitry Safonov int l3index;
4790811efc06SDmitry Safonov
4791811efc06SDmitry Safonov /* Invalid option, or any of the auth options seen twice */
4792811efc06SDmitry Safonov if (tcp_parse_auth_options(th, &md5_location, &aoh)) {
479396be3dcdSDmitry Safonov trace_tcp_hash_bad_header(sk, skb);
4794811efc06SDmitry Safonov return SKB_DROP_REASON_TCP_AUTH_HDR;
4795811efc06SDmitry Safonov }
4796811efc06SDmitry Safonov
4797811efc06SDmitry Safonov if (req) {
4798811efc06SDmitry Safonov if (tcp_rsk_used_ao(req) != !!aoh) {
479996be3dcdSDmitry Safonov u8 keyid, rnext, maclen;
480096be3dcdSDmitry Safonov
480196be3dcdSDmitry Safonov if (aoh) {
480296be3dcdSDmitry Safonov keyid = aoh->keyid;
480396be3dcdSDmitry Safonov rnext = aoh->rnext_keyid;
480496be3dcdSDmitry Safonov maclen = tcp_ao_hdr_maclen(aoh);
480596be3dcdSDmitry Safonov } else {
480696be3dcdSDmitry Safonov keyid = rnext = maclen = 0;
480796be3dcdSDmitry Safonov }
480896be3dcdSDmitry Safonov
4809811efc06SDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD);
481096be3dcdSDmitry Safonov trace_tcp_ao_handshake_failure(sk, skb, keyid, rnext, maclen);
4811811efc06SDmitry Safonov return SKB_DROP_REASON_TCP_AOFAILURE;
4812811efc06SDmitry Safonov }
4813811efc06SDmitry Safonov }
4814811efc06SDmitry Safonov
4815811efc06SDmitry Safonov /* If sdif is set, the packet ingressed via a device
4816811efc06SDmitry Safonov * in an L3 domain, and dif is set to the l3mdev
4817811efc06SDmitry Safonov */
4818811efc06SDmitry Safonov l3index = sdif ? dif : 0;
4819811efc06SDmitry Safonov
4820811efc06SDmitry Safonov /* Fast path: unsigned segments */
4821811efc06SDmitry Safonov if (likely(!md5_location && !aoh)) {
4822811efc06SDmitry Safonov /* Drop if there's a TCP-MD5 or TCP-AO key with any rcvid/sndid
4823811efc06SDmitry Safonov * for the remote peer. On an established TCP-AO connection
4824811efc06SDmitry Safonov * the last key cannot be removed, so there's
4825811efc06SDmitry Safonov * always at least one current_key.
4826811efc06SDmitry Safonov */
4827811efc06SDmitry Safonov if (tcp_ao_required(sk, saddr, family, l3index, true)) {
482896be3dcdSDmitry Safonov trace_tcp_hash_ao_required(sk, skb);
4829811efc06SDmitry Safonov return SKB_DROP_REASON_TCP_AONOTFOUND;
4830811efc06SDmitry Safonov }
4831811efc06SDmitry Safonov if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) {
4832811efc06SDmitry Safonov NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
483396be3dcdSDmitry Safonov trace_tcp_hash_md5_required(sk, skb);
4834811efc06SDmitry Safonov return SKB_DROP_REASON_TCP_MD5NOTFOUND;
4835811efc06SDmitry Safonov }
4836811efc06SDmitry Safonov return SKB_NOT_DROPPED_YET;
4837811efc06SDmitry Safonov }
4838811efc06SDmitry Safonov
4839811efc06SDmitry Safonov if (aoh)
4840811efc06SDmitry Safonov return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh);
4841811efc06SDmitry Safonov
4842811efc06SDmitry Safonov return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family,
4843811efc06SDmitry Safonov l3index, md5_location);
4844811efc06SDmitry Safonov }
4845811efc06SDmitry Safonov EXPORT_SYMBOL_GPL(tcp_inbound_hash);
4846811efc06SDmitry Safonov
48474ac02babSAndi Kleen void tcp_done(struct sock *sk)
48484ac02babSAndi Kleen {
4849d983ea6fSEric Dumazet struct request_sock *req;
48508336886fSJerry Chu
4851cab209e5SEric Dumazet /* We might be called with a new socket, after
4852cab209e5SEric Dumazet * inet_csk_prepare_forced_close() has been called,
4853cab209e5SEric Dumazet * so we cannot use lockdep_sock_is_held(sk)
4854cab209e5SEric Dumazet */
4855cab209e5SEric Dumazet req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
48564ac02babSAndi Kleen
48574ac02babSAndi Kleen if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
4858c10d9310SEric Dumazet TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
48594ac02babSAndi Kleen
48604ac02babSAndi Kleen tcp_set_state(sk, TCP_CLOSE);
48614ac02babSAndi Kleen tcp_clear_xmit_timers(sk);
486200db4124SIan Morris if (req)
48638336886fSJerry Chu reqsk_fastopen_remove(sk, req, false);
48644ac02babSAndi Kleen
4865e14cadfdSEric Dumazet WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
48664ac02babSAndi Kleen
48674ac02babSAndi Kleen if (!sock_flag(sk, SOCK_DEAD))
48684ac02babSAndi Kleen sk->sk_state_change(sk);
48694ac02babSAndi Kleen else
48704ac02babSAndi Kleen inet_csk_destroy_sock(sk);
48714ac02babSAndi Kleen }
48724ac02babSAndi Kleen EXPORT_SYMBOL_GPL(tcp_done);
48734ac02babSAndi Kleen
4874c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err)
4875c1e64e29SLorenzo Colitti {
4876af9784d0SEric Dumazet int state = inet_sk_state_load(sk);
4877af9784d0SEric Dumazet
4878af9784d0SEric Dumazet if (state == TCP_NEW_SYN_RECV) {
487907f6f4a3SEric Dumazet struct request_sock *req = inet_reqsk(sk);
488007f6f4a3SEric Dumazet
488107f6f4a3SEric Dumazet local_bh_disable();
4882acc2cf4eSLorenzo Colitti inet_csk_reqsk_queue_drop(req->rsk_listener, req);
488307f6f4a3SEric Dumazet local_bh_enable();
488407f6f4a3SEric Dumazet return 0;
488507f6f4a3SEric Dumazet }
4886af9784d0SEric Dumazet if (state == TCP_TIME_WAIT) {
4887af9784d0SEric Dumazet struct inet_timewait_sock *tw = inet_twsk(sk);
4888af9784d0SEric Dumazet
4889af9784d0SEric Dumazet refcount_inc(&tw->tw_refcnt);
4890af9784d0SEric Dumazet local_bh_disable();
4891af9784d0SEric Dumazet inet_twsk_deschedule_put(tw);
4892af9784d0SEric Dumazet local_bh_enable();
4893af9784d0SEric Dumazet return 0;
4894c1e64e29SLorenzo Colitti }
4895c1e64e29SLorenzo Colitti
48964ddbcb88SAditi Ghag /* BPF context ensures sock locking. */
48974ddbcb88SAditi Ghag if (!has_current_bpf_ctx())
4898c1e64e29SLorenzo Colitti /* Don't race with userspace socket closes such as tcp_close. */
4899c1e64e29SLorenzo Colitti lock_sock(sk);
4900c1e64e29SLorenzo Colitti
4901bac76cf8SXueming Feng /* Avoid closing the same socket twice. */
4902bac76cf8SXueming Feng if (sk->sk_state == TCP_CLOSE) {
4903bac76cf8SXueming Feng if (!has_current_bpf_ctx())
4904bac76cf8SXueming Feng release_sock(sk);
4905bac76cf8SXueming Feng return -ENOENT;
4906bac76cf8SXueming Feng }
4907bac76cf8SXueming Feng
49082010b93eSLorenzo Colitti if (sk->sk_state == TCP_LISTEN) {
49092010b93eSLorenzo Colitti tcp_set_state(sk, TCP_CLOSE);
49102010b93eSLorenzo Colitti inet_csk_listen_stop(sk);
49112010b93eSLorenzo Colitti }
49122010b93eSLorenzo Colitti
4913c1e64e29SLorenzo Colitti /* Don't race with BH socket closes such as inet_csk_listen_stop. */
4914c1e64e29SLorenzo Colitti local_bh_disable();
4915c1e64e29SLorenzo Colitti bh_lock_sock(sk);
4916c1e64e29SLorenzo Colitti
4917c1e64e29SLorenzo Colitti if (tcp_need_reset(sk->sk_state))
49185691276bSJason Xing tcp_send_active_reset(sk, GFP_ATOMIC,
4919edefba66SJason Xing SK_RST_REASON_TCP_STATE);
49205ce4645cSEric Dumazet tcp_done_with_error(sk, err);
4921c1e64e29SLorenzo Colitti
4922c1e64e29SLorenzo Colitti bh_unlock_sock(sk);
4923c1e64e29SLorenzo Colitti local_bh_enable();
49244ddbcb88SAditi Ghag if (!has_current_bpf_ctx())
4925c1e64e29SLorenzo Colitti release_sock(sk);
4926c1e64e29SLorenzo Colitti return 0;
4927c1e64e29SLorenzo Colitti }
4928c1e64e29SLorenzo Colitti EXPORT_SYMBOL_GPL(tcp_abort);
4929c1e64e29SLorenzo Colitti
49305f8ef48dSStephen Hemminger extern struct tcp_congestion_ops tcp_reno;
49311da177e4SLinus Torvalds
49321da177e4SLinus Torvalds static __initdata unsigned long thash_entries;
49331da177e4SLinus Torvalds static int __init set_thash_entries(char *str)
49341da177e4SLinus Torvalds {
4935413c27d8SEldad Zack ssize_t ret;
4936413c27d8SEldad Zack
49371da177e4SLinus Torvalds if (!str)
49381da177e4SLinus Torvalds return 0;
4939413c27d8SEldad Zack
4940413c27d8SEldad Zack ret = kstrtoul(str, 0, &thash_entries);
4941413c27d8SEldad Zack if (ret)
4942413c27d8SEldad Zack return 0;
4943413c27d8SEldad Zack
49441da177e4SLinus Torvalds return 1;
49451da177e4SLinus Torvalds }
49461da177e4SLinus Torvalds __setup("thash_entries=", set_thash_entries);
49471da177e4SLinus Torvalds
494847d7a88cSFabian Frederick static void __init tcp_init_mem(void)
49494acb4190SGlauber Costa {
4950b66e91ccSEric Dumazet unsigned long limit = nr_free_buffer_pages() / 16;
4951b66e91ccSEric Dumazet
49524acb4190SGlauber Costa limit = max(limit, 128UL);
4953b66e91ccSEric Dumazet sysctl_tcp_mem[0] = limit / 4 * 3; /* 4.68 % */
4954b66e91ccSEric Dumazet sysctl_tcp_mem[1] = limit; /* 6.25 % */
4955b66e91ccSEric Dumazet sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; /* 9.37 % */
49564acb4190SGlauber Costa }
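/* Worked example of the thresholds above (illustrative): with 64 GiB of
 * RAM in 4 KiB pages, nr_free_buffer_pages() is roughly 16M pages, so:
 *
 *	limit             = 16M / 16       = 1M pages   (6.25 %)
 *	sysctl_tcp_mem[0] = limit / 4 * 3  = 768K pages (4.68 %)
 *	sysctl_tcp_mem[1] = limit          = 1M pages   (6.25 %)
 *	sysctl_tcp_mem[2] = tcp_mem[0] * 2 = 1.5M pages (9.37 %)
 */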
49574acb4190SGlauber Costa
4958d5fed5adSCoco Li static void __init tcp_struct_check(void)
4959d5fed5adSCoco Li {
4960d5fed5adSCoco Li /* TX read-mostly hotpath cache lines */
4961d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, max_window);
4962d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, rcv_ssthresh);
4963d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering);
4964d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat);
4965d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs);
4966d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, lost_skb_hint);
4967d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint);
4968d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 40);
4969d5fed5adSCoco Li
4970d5fed5adSCoco Li /* TXRX read-mostly hotpath cache lines */
4971d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset);
4972d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_wnd);
4973d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, mss_cache);
4974d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_cwnd);
4975d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, prr_out);
4976d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out);
4977d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out);
4978119ff048SEric Dumazet CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio);
4979119ff048SEric Dumazet CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 32);
4980d5fed5adSCoco Li
4981d5fed5adSCoco Li /* RX read-mostly hotpath cache lines */
4982d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq);
4983d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rcv_tstamp);
4984d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1);
4985d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq);
4986d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us);
4987d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, retrans_out);
4988d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, advmss);
4989d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, urg_data);
4990d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, lost);
4991d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min);
4992d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue);
4993d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh);
4994d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69);
4995d5fed5adSCoco Li
4996d5fed5adSCoco Li /* TX read-write hotpath cache lines */
4997d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out);
4998d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, data_segs_out);
4999d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, bytes_sent);
5000d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, snd_sml);
5001d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_start);
5002d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_stat);
5003d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, write_seq);
5004d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, pushed_seq);
5005d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime);
5006d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us);
5007d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns);
5008d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq);
5009d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue);
5010d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack);
5011d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags);
5012d2c3a7ebSEric Dumazet CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 89);
5013d5fed5adSCoco Li
5014d5fed5adSCoco Li /* TXRX read-write hotpath cache lines */
5015d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags);
5016d2c3a7ebSEric Dumazet CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_clock_cache);
5017d2c3a7ebSEric Dumazet CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_mstamp);
5018d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt);
5019d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt);
5020d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una);
5021d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, window_clamp);
5022d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, srtt_us);
5023d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, packets_out);
5024d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_up);
5025d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered);
5026d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered_ce);
5027d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited);
5028d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd);
5029d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt);
50309b9fd458SEric Dumazet
50319b9fd458SEric Dumazet /* 32-bit arches with 8-byte alignment on u64 fields might need padding
50329b9fd458SEric Dumazet * before tcp_clock_cache.
50339b9fd458SEric Dumazet */
50349b9fd458SEric Dumazet CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92 + 4);
5035d5fed5adSCoco Li
5036d5fed5adSCoco Li /* RX read-write hotpath cache lines */
5037d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received);
5038d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, segs_in);
5039d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, data_segs_in);
5040d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_wup);
5041d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, max_packets_out);
5042d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, cwnd_usage_seq);
5043d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_delivered);
5044d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_interval_us);
5045d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_last_tsecr);
5046d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, first_tx_mstamp);
5047d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_mstamp);
5048d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_acked);
5049d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_est);
5050d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcvq_space);
5051d5fed5adSCoco Li CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_rx, 99);
5052d5fed5adSCoco Li }
5053d5fed5adSCoco Li
50541da177e4SLinus Torvalds void __init tcp_init(void)
50551da177e4SLinus Torvalds {
5056b49960a0SEric Dumazet int max_rshare, max_wshare, cnt;
5057b2d3ea4aSEric Dumazet unsigned long limit;
5058074b8517SDimitri Sivanich unsigned int i;
50591da177e4SLinus Torvalds
50603b4929f6SEric Dumazet BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
5061b2d3ea4aSEric Dumazet BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
5062c593642cSPankaj Bharadiya sizeof_field(struct sk_buff, cb));
50631da177e4SLinus Torvalds
5064d5fed5adSCoco Li tcp_struct_check();
5065d5fed5adSCoco Li
5066908c7f19STejun Heo percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
506719757cebSEric Dumazet
506819757cebSEric Dumazet timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
506919757cebSEric Dumazet mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
507019757cebSEric Dumazet
507127da6d37SMartin KaFai Lau inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
507227da6d37SMartin KaFai Lau thash_entries, 21, /* one slot per 2 MB */
507327da6d37SMartin KaFai Lau 0, 64 * 1024);
50746e04e021SArnaldo Carvalho de Melo tcp_hashinfo.bind_bucket_cachep =
50756e04e021SArnaldo Carvalho de Melo kmem_cache_create("tcp_bind_bucket",
50766e04e021SArnaldo Carvalho de Melo sizeof(struct inet_bind_bucket), 0,
5077990c74e3SVasily Averin SLAB_HWCACHE_ALIGN | SLAB_PANIC |
5078990c74e3SVasily Averin SLAB_ACCOUNT,
5079990c74e3SVasily Averin NULL);
508028044fc1SJoanne Koong tcp_hashinfo.bind2_bucket_cachep =
508128044fc1SJoanne Koong kmem_cache_create("tcp_bind2_bucket",
508228044fc1SJoanne Koong sizeof(struct inet_bind2_bucket), 0,
508328044fc1SJoanne Koong SLAB_HWCACHE_ALIGN | SLAB_PANIC |
508428044fc1SJoanne Koong SLAB_ACCOUNT,
508528044fc1SJoanne Koong NULL);
50861da177e4SLinus Torvalds
50871da177e4SLinus Torvalds /* Size and allocate the main established and bind bucket
50881da177e4SLinus Torvalds * hash tables.
50891da177e4SLinus Torvalds *
50901da177e4SLinus Torvalds * The methodology is similar to that of the buffer cache.
50911da177e4SLinus Torvalds */
50926e04e021SArnaldo Carvalho de Melo tcp_hashinfo.ehash =
50931da177e4SLinus Torvalds alloc_large_system_hash("TCP established",
50940f7ff927SArnaldo Carvalho de Melo sizeof(struct inet_ehash_bucket),
50951da177e4SLinus Torvalds thash_entries,
5096fd90b29dSEric Dumazet 17, /* one slot per 128 KB of memory */
50979e950efaSJohn Heffner 0,
50981da177e4SLinus Torvalds NULL,
5099f373b53bSEric Dumazet &tcp_hashinfo.ehash_mask,
510031fe62b9STim Bird 0,
51010ccfe618SJean Delvare thash_entries ? 0 : 512 * 1024);
510205dbc7b5SEric Dumazet for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
51033ab5aee7SEric Dumazet INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
510405dbc7b5SEric Dumazet
5105230140cfSEric Dumazet if (inet_ehash_locks_alloc(&tcp_hashinfo))
5106230140cfSEric Dumazet panic("TCP: failed to alloc ehash_locks");
51076e04e021SArnaldo Carvalho de Melo tcp_hashinfo.bhash =
5108593d1ebeSJoanne Koong alloc_large_system_hash("TCP bind",
510928044fc1SJoanne Koong 2 * sizeof(struct inet_bind_hashbucket),
5110f373b53bSEric Dumazet tcp_hashinfo.ehash_mask + 1,
5111fd90b29dSEric Dumazet 17, /* one slot per 128 KB of memory */
51129e950efaSJohn Heffner 0,
51136e04e021SArnaldo Carvalho de Melo &tcp_hashinfo.bhash_size,
51141da177e4SLinus Torvalds NULL,
511531fe62b9STim Bird 0,
51161da177e4SLinus Torvalds 64 * 1024);
5117074b8517SDimitri Sivanich tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
511828044fc1SJoanne Koong tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size;
51196e04e021SArnaldo Carvalho de Melo for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
51206e04e021SArnaldo Carvalho de Melo spin_lock_init(&tcp_hashinfo.bhash[i].lock);
51216e04e021SArnaldo Carvalho de Melo INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
512228044fc1SJoanne Koong spin_lock_init(&tcp_hashinfo.bhash2[i].lock);
512328044fc1SJoanne Koong INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain);
51241da177e4SLinus Torvalds }
51251da177e4SLinus Torvalds
5126d1e5e640SKuniyuki Iwashima tcp_hashinfo.pernet = false;
5127c5ed63d6SEric Dumazet
5128c5ed63d6SEric Dumazet cnt = tcp_hashinfo.ehash_mask + 1;
5129c5ed63d6SEric Dumazet sysctl_tcp_max_orphans = cnt / 2;
51301da177e4SLinus Torvalds
5131a4fe34bfSEric W. Biederman tcp_init_mem();
5132c43b874dSJason Wang /* Set per-socket limits to no more than 1/128 the pressure threshold */
51335fb84b14SEric Dumazet limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
5134b49960a0SEric Dumazet max_wshare = min(4UL*1024*1024, limit);
5135b49960a0SEric Dumazet max_rshare = min(6UL*1024*1024, limit);
51367b4f4b5eSJohn Heffner
5137100fdd1fSEric Dumazet init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE;
5138356d1833SEric Dumazet init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
5139356d1833SEric Dumazet init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
51407b4f4b5eSJohn Heffner
5141100fdd1fSEric Dumazet init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE;
5142a337531bSYuchung Cheng init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
5143a337531bSYuchung Cheng init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
51441da177e4SLinus Torvalds
5145afd46503SJoe Perches pr_info("Hash tables configured (established %u bind %u)\n",
5146f373b53bSEric Dumazet tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
5147317a76f9SStephen Hemminger
51481946e672SHaishuang Yan tcp_v4_init();
514951c5d0c4SDavid S. Miller tcp_metrics_init();
515055d8694fSFlorian Westphal BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
515146d3ceabSEric Dumazet tcp_tasklet_init();
5152f870fa0bSMat Martineau mptcp_init();
51531da177e4SLinus Torvalds }