xref: /linux/net/ipv4/tcp.c (revision dda4d96acb20c02920f6d9a20fdc3f4846192aeb)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
21da177e4SLinus Torvalds /*
31da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
41da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
51da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
61da177e4SLinus Torvalds  *
71da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
81da177e4SLinus Torvalds  *
902c30a84SJesper Juhl  * Authors:	Ross Biro
101da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
111da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
121da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
131da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
141da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
151da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
161da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
171da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
181da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
191da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
201da177e4SLinus Torvalds  *
211da177e4SLinus Torvalds  * Fixes:
221da177e4SLinus Torvalds  *		Alan Cox	:	Numerous verify_area() calls
231da177e4SLinus Torvalds  *		Alan Cox	:	Set the ACK bit on a reset
241da177e4SLinus Torvalds  *		Alan Cox	:	Stopped it crashing if it closed while
251da177e4SLinus Torvalds  *					sk->inuse=1 and was trying to connect
261da177e4SLinus Torvalds  *					(tcp_err()).
271da177e4SLinus Torvalds  *		Alan Cox	:	All icmp error handling was broken
281da177e4SLinus Torvalds  *					pointers passed where wrong and the
291da177e4SLinus Torvalds  *					socket was looked up backwards. Nobody
301da177e4SLinus Torvalds  *					tested any icmp error code obviously.
311da177e4SLinus Torvalds  *		Alan Cox	:	tcp_err() now handled properly. It
321da177e4SLinus Torvalds  *					wakes people on errors. poll
331da177e4SLinus Torvalds  *					behaves and the icmp error race
341da177e4SLinus Torvalds  *					has gone by moving it into sock.c
351da177e4SLinus Torvalds  *		Alan Cox	:	tcp_send_reset() fixed to work for
361da177e4SLinus Torvalds  *					everything not just packets for
371da177e4SLinus Torvalds  *					unknown sockets.
381da177e4SLinus Torvalds  *		Alan Cox	:	tcp option processing.
391da177e4SLinus Torvalds  *		Alan Cox	:	Reset tweaked (still not 100%) [Had
401da177e4SLinus Torvalds  *					syn rule wrong]
411da177e4SLinus Torvalds  *		Herp Rosmanith  :	More reset fixes
421da177e4SLinus Torvalds  *		Alan Cox	:	No longer acks invalid rst frames.
431da177e4SLinus Torvalds  *					Acking any kind of RST is right out.
441da177e4SLinus Torvalds  *		Alan Cox	:	Sets an ignore me flag on an rst
451da177e4SLinus Torvalds  *					receive otherwise odd bits of prattle
461da177e4SLinus Torvalds  *					escape still
471da177e4SLinus Torvalds  *		Alan Cox	:	Fixed another acking RST frame bug.
481da177e4SLinus Torvalds  *					Should stop LAN workplace lockups.
491da177e4SLinus Torvalds  *		Alan Cox	: 	Some tidyups using the new skb list
501da177e4SLinus Torvalds  *					facilities
511da177e4SLinus Torvalds  *		Alan Cox	:	sk->keepopen now seems to work
521da177e4SLinus Torvalds  *		Alan Cox	:	Pulls options out correctly on accepts
531da177e4SLinus Torvalds  *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
541da177e4SLinus Torvalds  *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
551da177e4SLinus Torvalds  *					bit to skb ops.
561da177e4SLinus Torvalds  *		Alan Cox	:	Tidied tcp_data to avoid a potential
571da177e4SLinus Torvalds  *					nasty.
581da177e4SLinus Torvalds  *		Alan Cox	:	Added some better commenting, as the
591da177e4SLinus Torvalds  *					tcp is hard to follow
601da177e4SLinus Torvalds  *		Alan Cox	:	Removed incorrect check for 20 * psh
611da177e4SLinus Torvalds  *	Michael O'Reilly	:	ack < copied bug fix.
621da177e4SLinus Torvalds  *	Johannes Stille		:	Misc tcp fixes (not all in yet).
631da177e4SLinus Torvalds  *		Alan Cox	:	FIN with no memory -> CRASH
641da177e4SLinus Torvalds  *		Alan Cox	:	Added socket option proto entries.
651da177e4SLinus Torvalds  *					Also added awareness of them to accept.
661da177e4SLinus Torvalds  *		Alan Cox	:	Added TCP options (SOL_TCP)
671da177e4SLinus Torvalds  *		Alan Cox	:	Switched wakeup calls to callbacks,
681da177e4SLinus Torvalds  *					so the kernel can layer network
691da177e4SLinus Torvalds  *					sockets.
701da177e4SLinus Torvalds  *		Alan Cox	:	Use ip_tos/ip_ttl settings.
711da177e4SLinus Torvalds  *		Alan Cox	:	Handle FIN (more) properly (we hope).
721da177e4SLinus Torvalds  *		Alan Cox	:	RST frames sent on unsynchronised
731da177e4SLinus Torvalds  *					state ack error.
741da177e4SLinus Torvalds  *		Alan Cox	:	Put in missing check for SYN bit.
751da177e4SLinus Torvalds  *		Alan Cox	:	Added tcp_select_window() aka NET2E
761da177e4SLinus Torvalds  *					window non shrink trick.
771da177e4SLinus Torvalds  *		Alan Cox	:	Added a couple of small NET2E timer
781da177e4SLinus Torvalds  *					fixes
791da177e4SLinus Torvalds  *		Charles Hedrick :	TCP fixes
801da177e4SLinus Torvalds  *		Toomas Tamm	:	TCP window fixes
811da177e4SLinus Torvalds  *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
821da177e4SLinus Torvalds  *		Charles Hedrick	:	Rewrote most of it to actually work
831da177e4SLinus Torvalds  *		Linus		:	Rewrote tcp_read() and URG handling
841da177e4SLinus Torvalds  *					completely
851da177e4SLinus Torvalds  *		Gerhard Koerting:	Fixed some missing timer handling
861da177e4SLinus Torvalds  *		Matthew Dillon  :	Reworked TCP machine states as per RFC
871da177e4SLinus Torvalds  *		Gerhard Koerting:	PC/TCP workarounds
881da177e4SLinus Torvalds  *		Adam Caldwell	:	Assorted timer/timing errors
891da177e4SLinus Torvalds  *		Matthew Dillon	:	Fixed another RST bug
901da177e4SLinus Torvalds  *		Alan Cox	:	Move to kernel side addressing changes.
911da177e4SLinus Torvalds  *		Alan Cox	:	Beginning work on TCP fastpathing
921da177e4SLinus Torvalds  *					(not yet usable)
931da177e4SLinus Torvalds  *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
941da177e4SLinus Torvalds  *		Alan Cox	:	TCP fast path debugging
951da177e4SLinus Torvalds  *		Alan Cox	:	Window clamping
961da177e4SLinus Torvalds  *		Michael Riepe	:	Bug in tcp_check()
971da177e4SLinus Torvalds  *		Matt Dillon	:	More TCP improvements and RST bug fixes
981da177e4SLinus Torvalds  *		Matt Dillon	:	Yet more small nasties remove from the
991da177e4SLinus Torvalds  *					TCP code (Be very nice to this man if
1001da177e4SLinus Torvalds  *					tcp finally works 100%) 8)
1011da177e4SLinus Torvalds  *		Alan Cox	:	BSD accept semantics.
1021da177e4SLinus Torvalds  *		Alan Cox	:	Reset on closedown bug.
1031da177e4SLinus Torvalds  *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
1041da177e4SLinus Torvalds  *		Michael Pall	:	Handle poll() after URG properly in
1051da177e4SLinus Torvalds  *					all cases.
1061da177e4SLinus Torvalds  *		Michael Pall	:	Undo the last fix in tcp_read_urg()
1071da177e4SLinus Torvalds  *					(multi URG PUSH broke rlogin).
1081da177e4SLinus Torvalds  *		Michael Pall	:	Fix the multi URG PUSH problem in
1091da177e4SLinus Torvalds  *					tcp_readable(), poll() after URG
1101da177e4SLinus Torvalds  *					works now.
1111da177e4SLinus Torvalds  *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
1121da177e4SLinus Torvalds  *					BSD api.
1131da177e4SLinus Torvalds  *		Alan Cox	:	Changed the semantics of sk->socket to
1141da177e4SLinus Torvalds  *					fix a race and a signal problem with
1151da177e4SLinus Torvalds  *					accept() and async I/O.
1161da177e4SLinus Torvalds  *		Alan Cox	:	Relaxed the rules on tcp_sendto().
1171da177e4SLinus Torvalds  *		Yury Shevchuk	:	Really fixed accept() blocking problem.
1181da177e4SLinus Torvalds  *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
1191da177e4SLinus Torvalds  *					clients/servers which listen in on
1201da177e4SLinus Torvalds  *					fixed ports.
1211da177e4SLinus Torvalds  *		Alan Cox	:	Cleaned the above up and shrank it to
1221da177e4SLinus Torvalds  *					a sensible code size.
1231da177e4SLinus Torvalds  *		Alan Cox	:	Self connect lockup fix.
1241da177e4SLinus Torvalds  *		Alan Cox	:	No connect to multicast.
1251da177e4SLinus Torvalds  *		Ross Biro	:	Close unaccepted children on master
1261da177e4SLinus Torvalds  *					socket close.
1271da177e4SLinus Torvalds  *		Alan Cox	:	Reset tracing code.
1281da177e4SLinus Torvalds  *		Alan Cox	:	Spurious resets on shutdown.
1291da177e4SLinus Torvalds  *		Alan Cox	:	Giant 15 minute/60 second timer error
1301da177e4SLinus Torvalds  *		Alan Cox	:	Small whoops in polling before an
1311da177e4SLinus Torvalds  *					accept.
1321da177e4SLinus Torvalds  *		Alan Cox	:	Kept the state trace facility since
1331da177e4SLinus Torvalds  *					it's handy for debugging.
1341da177e4SLinus Torvalds  *		Alan Cox	:	More reset handler fixes.
1351da177e4SLinus Torvalds  *		Alan Cox	:	Started rewriting the code based on
1361da177e4SLinus Torvalds  *					the RFC's for other useful protocol
1371da177e4SLinus Torvalds  *					references see: Comer, KA9Q NOS, and
1381da177e4SLinus Torvalds  *					for a reference on the difference
1391da177e4SLinus Torvalds  *					between specifications and how BSD
1401da177e4SLinus Torvalds  *					works see the 4.4lite source.
1411da177e4SLinus Torvalds  *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
1421da177e4SLinus Torvalds  *					close.
1431da177e4SLinus Torvalds  *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
1441da177e4SLinus Torvalds  *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
1451da177e4SLinus Torvalds  *		Alan Cox	:	Reimplemented timers as per the RFC
1461da177e4SLinus Torvalds  *					and using multiple timers for sanity.
1471da177e4SLinus Torvalds  *		Alan Cox	:	Small bug fixes, and a lot of new
1481da177e4SLinus Torvalds  *					comments.
1491da177e4SLinus Torvalds  *		Alan Cox	:	Fixed dual reader crash by locking
1501da177e4SLinus Torvalds  *					the buffers (much like datagram.c)
1511da177e4SLinus Torvalds  *		Alan Cox	:	Fixed stuck sockets in probe. A probe
1521da177e4SLinus Torvalds  *					now gets fed up of retrying without
1531da177e4SLinus Torvalds  *					(even a no space) answer.
1541da177e4SLinus Torvalds  *		Alan Cox	:	Extracted closing code better
1551da177e4SLinus Torvalds  *		Alan Cox	:	Fixed the closing state machine to
1561da177e4SLinus Torvalds  *					resemble the RFC.
1571da177e4SLinus Torvalds  *		Alan Cox	:	More 'per spec' fixes.
1581da177e4SLinus Torvalds  *		Jorge Cwik	:	Even faster checksumming.
1591da177e4SLinus Torvalds  *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
1601da177e4SLinus Torvalds  *					only frames. At least one pc tcp stack
1611da177e4SLinus Torvalds  *					generates them.
1621da177e4SLinus Torvalds  *		Alan Cox	:	Cache last socket.
1631da177e4SLinus Torvalds  *		Alan Cox	:	Per route irtt.
1641da177e4SLinus Torvalds  *		Matt Day	:	poll()->select() match BSD precisely on error
1651da177e4SLinus Torvalds  *		Alan Cox	:	New buffers
1661da177e4SLinus Torvalds  *		Marc Tamsky	:	Various sk->prot->retransmits and
1671da177e4SLinus Torvalds  *					sk->retransmits misupdating fixed.
1681da177e4SLinus Torvalds  *					Fixed tcp_write_timeout: stuck close,
1691da177e4SLinus Torvalds  *					and TCP syn retries gets used now.
1701da177e4SLinus Torvalds  *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
1711da177e4SLinus Torvalds  *					ack if state is TCP_CLOSED.
1721da177e4SLinus Torvalds  *		Alan Cox	:	Look up device on a retransmit - routes may
1731da177e4SLinus Torvalds  *					change. Doesn't yet cope with MSS shrink right
1741da177e4SLinus Torvalds  *					but it's a start!
1751da177e4SLinus Torvalds  *		Marc Tamsky	:	Closing in closing fixes.
1761da177e4SLinus Torvalds  *		Mike Shaver	:	RFC1122 verifications.
1771da177e4SLinus Torvalds  *		Alan Cox	:	rcv_saddr errors.
1781da177e4SLinus Torvalds  *		Alan Cox	:	Block double connect().
1791da177e4SLinus Torvalds  *		Alan Cox	:	Small hooks for enSKIP.
1801da177e4SLinus Torvalds  *		Alexey Kuznetsov:	Path MTU discovery.
1811da177e4SLinus Torvalds  *		Alan Cox	:	Support soft errors.
1821da177e4SLinus Torvalds  *		Alan Cox	:	Fix MTU discovery pathological case
1831da177e4SLinus Torvalds  *					when the remote claims no mtu!
1841da177e4SLinus Torvalds  *		Marc Tamsky	:	TCP_CLOSE fix.
1851da177e4SLinus Torvalds  *		Colin (G3TNE)	:	Send a reset on syn ack replies in
1861da177e4SLinus Torvalds  *					window but wrong (fixes NT lpd problems)
1871da177e4SLinus Torvalds  *		Pedro Roque	:	Better TCP window handling, delayed ack.
1881da177e4SLinus Torvalds  *		Joerg Reuter	:	No modification of locked buffers in
1891da177e4SLinus Torvalds  *					tcp_do_retransmit()
1901da177e4SLinus Torvalds  *		Eric Schenk	:	Changed receiver side silly window
1911da177e4SLinus Torvalds  *					avoidance algorithm to BSD style
1921da177e4SLinus Torvalds  *					algorithm. This doubles throughput
1931da177e4SLinus Torvalds  *					against machines running Solaris,
1941da177e4SLinus Torvalds  *					and seems to result in general
1951da177e4SLinus Torvalds  *					improvement.
1961da177e4SLinus Torvalds  *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
1971da177e4SLinus Torvalds  *	Willy Konynenberg	:	Transparent proxying support.
1981da177e4SLinus Torvalds  *	Mike McLagan		:	Routing by source
1991da177e4SLinus Torvalds  *		Keith Owens	:	Do proper merging with partial SKB's in
2001da177e4SLinus Torvalds  *					tcp_do_sendmsg to avoid burstiness.
2011da177e4SLinus Torvalds  *		Eric Schenk	:	Fix fast close down bug with
2021da177e4SLinus Torvalds  *					shutdown() followed by close().
2031da177e4SLinus Torvalds  *		Andi Kleen 	:	Make poll agree with SIGIO
2041da177e4SLinus Torvalds  *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
2051da177e4SLinus Torvalds  *					lingertime == 0 (RFC 793 ABORT Call)
2061da177e4SLinus Torvalds  *	Hirokazu Takahashi	:	Use copy_from_user() instead of
2071da177e4SLinus Torvalds  *					csum_and_copy_from_user() if possible.
2081da177e4SLinus Torvalds  *
2091da177e4SLinus Torvalds  * Description of States:
2101da177e4SLinus Torvalds  *
2111da177e4SLinus Torvalds  *	TCP_SYN_SENT		sent a connection request, waiting for ack
2121da177e4SLinus Torvalds  *
2131da177e4SLinus Torvalds  *	TCP_SYN_RECV		received a connection request, sent ack,
2141da177e4SLinus Torvalds  *				waiting for final ack in three-way handshake.
2151da177e4SLinus Torvalds  *
2161da177e4SLinus Torvalds  *	TCP_ESTABLISHED		connection established
2171da177e4SLinus Torvalds  *
2181da177e4SLinus Torvalds  *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
2191da177e4SLinus Torvalds  *				transmission of remaining buffered data
2201da177e4SLinus Torvalds  *
2211da177e4SLinus Torvalds  *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
2221da177e4SLinus Torvalds  *				to shutdown
2231da177e4SLinus Torvalds  *
2241da177e4SLinus Torvalds  *	TCP_CLOSING		both sides have shutdown but we still have
2251da177e4SLinus Torvalds  *				data we have to finish sending
2261da177e4SLinus Torvalds  *
2271da177e4SLinus Torvalds  *	TCP_TIME_WAIT		timeout to catch resent junk before entering
2281da177e4SLinus Torvalds  *				closed, can only be entered from FIN_WAIT2
2291da177e4SLinus Torvalds  *				or CLOSING.  Required because the other end
2301da177e4SLinus Torvalds  *				may not have gotten our last ACK causing it
2311da177e4SLinus Torvalds  *				to retransmit the data packet (which we ignore)
2321da177e4SLinus Torvalds  *
2331da177e4SLinus Torvalds  *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
2341da177e4SLinus Torvalds  *				us to finish writing our data and to shutdown
2351da177e4SLinus Torvalds  *				(we have to close() to move on to LAST_ACK)
2361da177e4SLinus Torvalds  *
2371da177e4SLinus Torvalds  *	TCP_LAST_ACK		out side has shutdown after remote has
2381da177e4SLinus Torvalds  *				shutdown.  There may still be data in our
2391da177e4SLinus Torvalds  *				buffer that we have to finish sending
2401da177e4SLinus Torvalds  *
2411da177e4SLinus Torvalds  *	TCP_CLOSE		socket is finished
2421da177e4SLinus Torvalds  */
2431da177e4SLinus Torvalds 
244afd46503SJoe Perches #define pr_fmt(fmt) "TCP: " fmt
245afd46503SJoe Perches 
246cf80e0e4SHerbert Xu #include <crypto/hash.h>
247172589ccSIlpo Järvinen #include <linux/kernel.h>
2481da177e4SLinus Torvalds #include <linux/module.h>
2491da177e4SLinus Torvalds #include <linux/types.h>
2501da177e4SLinus Torvalds #include <linux/fcntl.h>
2511da177e4SLinus Torvalds #include <linux/poll.h>
2526e9250f5SEric Dumazet #include <linux/inet_diag.h>
2531da177e4SLinus Torvalds #include <linux/init.h>
2541da177e4SLinus Torvalds #include <linux/fs.h>
2559c55e01cSJens Axboe #include <linux/skbuff.h>
25681b23b4aSAndrew Morton #include <linux/scatterlist.h>
2579c55e01cSJens Axboe #include <linux/splice.h>
2589c55e01cSJens Axboe #include <linux/net.h>
2599c55e01cSJens Axboe #include <linux/socket.h>
2601da177e4SLinus Torvalds #include <linux/random.h>
26157c8a661SMike Rapoport #include <linux/memblock.h>
26257413ebcSMiquel van Smoorenburg #include <linux/highmem.h>
263b8059eadSDavid S. Miller #include <linux/cache.h>
264f4c50d99SHerbert Xu #include <linux/err.h>
265da5c78c8SWilliam Allen Simpson #include <linux/time.h>
2665a0e3ad6STejun Heo #include <linux/slab.h>
26798aaa913SMike Maloney #include <linux/errqueue.h>
26860e2a778SUrsula Braun #include <linux/static_key.h>
26997a19cafSYonghong Song #include <linux/btf.h>
2701da177e4SLinus Torvalds 
2711da177e4SLinus Torvalds #include <net/icmp.h>
272cf60af03SYuchung Cheng #include <net/inet_common.h>
2731da177e4SLinus Torvalds #include <net/tcp.h>
274f870fa0bSMat Martineau #include <net/mptcp.h>
2751da177e4SLinus Torvalds #include <net/xfrm.h>
2761da177e4SLinus Torvalds #include <net/ip.h>
2779c55e01cSJens Axboe #include <net/sock.h>
2785691276bSJason Xing #include <net/rstreason.h>
2791da177e4SLinus Torvalds 
2807c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
2811da177e4SLinus Torvalds #include <asm/ioctls.h>
282076bb0c8SEliezer Tamir #include <net/busy_poll.h>
283a86a0661SEric Dumazet #include <net/hotdata.h>
284490a79faSEric Dumazet #include <net/rps.h>
2851da177e4SLinus Torvalds 
286925bba24SArjun Roy /* Track pending CMSGs. */
287925bba24SArjun Roy enum {
288925bba24SArjun Roy 	TCP_CMSG_INQ = 1,
289925bba24SArjun Roy 	TCP_CMSG_TS = 2
290925bba24SArjun Roy };
291925bba24SArjun Roy 
29219757cebSEric Dumazet DEFINE_PER_CPU(unsigned int, tcp_orphan_count);
29319757cebSEric Dumazet EXPORT_PER_CPU_SYMBOL_GPL(tcp_orphan_count);
2940a5578cfSArnaldo Carvalho de Melo 
29541eecbd7SEric Dumazet DEFINE_PER_CPU(u32, tcp_tw_isn);
29641eecbd7SEric Dumazet EXPORT_PER_CPU_SYMBOL_GPL(tcp_tw_isn);
29741eecbd7SEric Dumazet 
298a4fe34bfSEric W. Biederman long sysctl_tcp_mem[3] __read_mostly;
299a4fe34bfSEric W. Biederman EXPORT_SYMBOL(sysctl_tcp_mem);
3001da177e4SLinus Torvalds 
30191b6d325SEric Dumazet atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;	/* Current allocated memory. */
3021da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_memory_allocated);
3030defbb0aSEric Dumazet DEFINE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
3040defbb0aSEric Dumazet EXPORT_PER_CPU_SYMBOL_GPL(tcp_memory_per_cpu_fw_alloc);
3051748376bSEric Dumazet 
30660e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
30760e2a778SUrsula Braun DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
30860e2a778SUrsula Braun EXPORT_SYMBOL(tcp_have_smc);
30960e2a778SUrsula Braun #endif
31060e2a778SUrsula Braun 
3111748376bSEric Dumazet /*
3121748376bSEric Dumazet  * Current number of TCP sockets.
3131748376bSEric Dumazet  */
31491b6d325SEric Dumazet struct percpu_counter tcp_sockets_allocated ____cacheline_aligned_in_smp;
3151da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sockets_allocated);
3161da177e4SLinus Torvalds 
3171da177e4SLinus Torvalds /*
3189c55e01cSJens Axboe  * TCP splice context
3199c55e01cSJens Axboe  */
3209c55e01cSJens Axboe struct tcp_splice_state {
3219c55e01cSJens Axboe 	struct pipe_inode_info *pipe;
3229c55e01cSJens Axboe 	size_t len;
3239c55e01cSJens Axboe 	unsigned int flags;
3249c55e01cSJens Axboe };
3259c55e01cSJens Axboe 
3269c55e01cSJens Axboe /*
3271da177e4SLinus Torvalds  * Pressure flag: try to collapse.
3281da177e4SLinus Torvalds  * Technical note: it is used by multiple contexts non atomically.
3293ab224beSHideo Aoki  * All the __sk_mem_schedule() is of this nature: accounting
3301da177e4SLinus Torvalds  * is strict, actions are advisory and have some latency.
3311da177e4SLinus Torvalds  */
33206044751SEric Dumazet unsigned long tcp_memory_pressure __read_mostly;
33306044751SEric Dumazet EXPORT_SYMBOL_GPL(tcp_memory_pressure);
3341da177e4SLinus Torvalds 
3355c52ba17SPavel Emelyanov void tcp_enter_memory_pressure(struct sock *sk)
3361da177e4SLinus Torvalds {
33706044751SEric Dumazet 	unsigned long val;
33806044751SEric Dumazet 
3391f142c17SEric Dumazet 	if (READ_ONCE(tcp_memory_pressure))
34006044751SEric Dumazet 		return;
34106044751SEric Dumazet 	val = jiffies;
34206044751SEric Dumazet 
34306044751SEric Dumazet 	if (!val)
34406044751SEric Dumazet 		val--;
34506044751SEric Dumazet 	if (!cmpxchg(&tcp_memory_pressure, 0, val))
3464e673444SPavel Emelyanov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
3471da177e4SLinus Torvalds }
34806044751SEric Dumazet EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);
34906044751SEric Dumazet 
35006044751SEric Dumazet void tcp_leave_memory_pressure(struct sock *sk)
35106044751SEric Dumazet {
35206044751SEric Dumazet 	unsigned long val;
35306044751SEric Dumazet 
3541f142c17SEric Dumazet 	if (!READ_ONCE(tcp_memory_pressure))
35506044751SEric Dumazet 		return;
35606044751SEric Dumazet 	val = xchg(&tcp_memory_pressure, 0);
35706044751SEric Dumazet 	if (val)
35806044751SEric Dumazet 		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
35906044751SEric Dumazet 			      jiffies_to_msecs(jiffies - val));
3601da177e4SLinus Torvalds }
36106044751SEric Dumazet EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);
3621da177e4SLinus Torvalds 
363b103cf34SJulian Anastasov /* Convert seconds to retransmits based on initial and max timeout */
364b103cf34SJulian Anastasov static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
365b103cf34SJulian Anastasov {
366b103cf34SJulian Anastasov 	u8 res = 0;
367b103cf34SJulian Anastasov 
368b103cf34SJulian Anastasov 	if (seconds > 0) {
369b103cf34SJulian Anastasov 		int period = timeout;
370b103cf34SJulian Anastasov 
371b103cf34SJulian Anastasov 		res = 1;
372b103cf34SJulian Anastasov 		while (seconds > period && res < 255) {
373b103cf34SJulian Anastasov 			res++;
374b103cf34SJulian Anastasov 			timeout <<= 1;
375b103cf34SJulian Anastasov 			if (timeout > rto_max)
376b103cf34SJulian Anastasov 				timeout = rto_max;
377b103cf34SJulian Anastasov 			period += timeout;
378b103cf34SJulian Anastasov 		}
379b103cf34SJulian Anastasov 	}
380b103cf34SJulian Anastasov 	return res;
381b103cf34SJulian Anastasov }
382b103cf34SJulian Anastasov 
383b103cf34SJulian Anastasov /* Convert retransmits to seconds based on initial and max timeout */
384b103cf34SJulian Anastasov static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
385b103cf34SJulian Anastasov {
386b103cf34SJulian Anastasov 	int period = 0;
387b103cf34SJulian Anastasov 
388b103cf34SJulian Anastasov 	if (retrans > 0) {
389b103cf34SJulian Anastasov 		period = timeout;
390b103cf34SJulian Anastasov 		while (--retrans) {
391b103cf34SJulian Anastasov 			timeout <<= 1;
392b103cf34SJulian Anastasov 			if (timeout > rto_max)
393b103cf34SJulian Anastasov 				timeout = rto_max;
394b103cf34SJulian Anastasov 			period += timeout;
395b103cf34SJulian Anastasov 		}
396b103cf34SJulian Anastasov 	}
397b103cf34SJulian Anastasov 	return period;
398b103cf34SJulian Anastasov }
399b103cf34SJulian Anastasov 
4000263598cSWei Wang static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
4010263598cSWei Wang {
4020263598cSWei Wang 	u32 rate = READ_ONCE(tp->rate_delivered);
4030263598cSWei Wang 	u32 intv = READ_ONCE(tp->rate_interval_us);
4040263598cSWei Wang 	u64 rate64 = 0;
4050263598cSWei Wang 
4060263598cSWei Wang 	if (rate && intv) {
4070263598cSWei Wang 		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
4080263598cSWei Wang 		do_div(rate64, intv);
4090263598cSWei Wang 	}
4100263598cSWei Wang 	return rate64;
4110263598cSWei Wang }
4120263598cSWei Wang 
413900f65d3SNeal Cardwell /* Address-family independent initialization for a tcp_sock.
414900f65d3SNeal Cardwell  *
415900f65d3SNeal Cardwell  * NOTE: A lot of things set to zero explicitly by call to
416900f65d3SNeal Cardwell  *       sk_alloc() so need not be done here.
417900f65d3SNeal Cardwell  */
418900f65d3SNeal Cardwell void tcp_init_sock(struct sock *sk)
419900f65d3SNeal Cardwell {
420900f65d3SNeal Cardwell 	struct inet_connection_sock *icsk = inet_csk(sk);
421900f65d3SNeal Cardwell 	struct tcp_sock *tp = tcp_sk(sk);
422900f65d3SNeal Cardwell 
4239f5afeaeSYaogong Wang 	tp->out_of_order_queue = RB_ROOT;
42475c119afSEric Dumazet 	sk->tcp_rtx_queue = RB_ROOT;
425900f65d3SNeal Cardwell 	tcp_init_xmit_timers(sk);
42646d3ceabSEric Dumazet 	INIT_LIST_HEAD(&tp->tsq_node);
427e2080072SEric Dumazet 	INIT_LIST_HEAD(&tp->tsorted_sent_queue);
428900f65d3SNeal Cardwell 
429900f65d3SNeal Cardwell 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
430ca584ba0SMartin KaFai Lau 	icsk->icsk_rto_min = TCP_RTO_MIN;
4312b8ee4f0SMartin KaFai Lau 	icsk->icsk_delack_max = TCP_DELACK_MAX;
432740b0f18SEric Dumazet 	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
433ac9517fcSEric Dumazet 	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
434900f65d3SNeal Cardwell 
435900f65d3SNeal Cardwell 	/* So many TCP implementations out there (incorrectly) count the
436900f65d3SNeal Cardwell 	 * initial SYN frame in their delayed-ACK and congestion control
437900f65d3SNeal Cardwell 	 * algorithms that we must have the following bandaid to talk
438900f65d3SNeal Cardwell 	 * efficiently to them.  -DaveM
439900f65d3SNeal Cardwell 	 */
44040570375SEric Dumazet 	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
441900f65d3SNeal Cardwell 
442d7722e85SSoheil Hassas Yeganeh 	/* There's a bubble in the pipe until at least the first ACK. */
443d7722e85SSoheil Hassas Yeganeh 	tp->app_limited = ~0U;
444300b655dSDavid Morley 	tp->rate_app_limited = 1;
445d7722e85SSoheil Hassas Yeganeh 
446900f65d3SNeal Cardwell 	/* See draft-stevens-tcpca-spec-01 for discussion of the
447900f65d3SNeal Cardwell 	 * initialization of these values.
448900f65d3SNeal Cardwell 	 */
449900f65d3SNeal Cardwell 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
450900f65d3SNeal Cardwell 	tp->snd_cwnd_clamp = ~0;
451900f65d3SNeal Cardwell 	tp->mss_cache = TCP_MSS_DEFAULT;
452900f65d3SNeal Cardwell 
45346778cd1SKuniyuki Iwashima 	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
45455d8694fSFlorian Westphal 	tcp_assign_congestion_control(sk);
455900f65d3SNeal Cardwell 
456ceaa1fefSAndrey Vagin 	tp->tsoffset = 0;
4571f255691SPriyaranjan Jha 	tp->rack.reo_wnd_steps = 1;
458ceaa1fefSAndrey Vagin 
459900f65d3SNeal Cardwell 	sk->sk_write_space = sk_stream_write_space;
460900f65d3SNeal Cardwell 	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
461900f65d3SNeal Cardwell 
462900f65d3SNeal Cardwell 	icsk->icsk_sync_mss = tcp_sync_mss;
463900f65d3SNeal Cardwell 
46402739545SKuniyuki Iwashima 	WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[1]));
46502739545SKuniyuki Iwashima 	WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[1]));
466dfa2f048SEric Dumazet 	tcp_scaling_ratio_init(sk);
467900f65d3SNeal Cardwell 
468e993ffe3SPavel Begunkov 	set_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
469900f65d3SNeal Cardwell 	sk_sockets_allocated_inc(sk);
470900f65d3SNeal Cardwell }
471900f65d3SNeal Cardwell EXPORT_SYMBOL(tcp_init_sock);
472900f65d3SNeal Cardwell 
4734e8cc228SEric Dumazet static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
4744ed2d765SWillem de Bruijn {
4754e8cc228SEric Dumazet 	struct sk_buff *skb = tcp_write_queue_tail(sk);
4764e8cc228SEric Dumazet 
477ad02c4f5SSoheil Hassas Yeganeh 	if (tsflags && skb) {
4784ed2d765SWillem de Bruijn 		struct skb_shared_info *shinfo = skb_shinfo(skb);
4796b084928SSoheil Hassas Yeganeh 		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
4804ed2d765SWillem de Bruijn 
481c14ac945SSoheil Hassas Yeganeh 		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
4820a2cf20cSSoheil Hassas Yeganeh 		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
4830a2cf20cSSoheil Hassas Yeganeh 			tcb->txstamp_ack = 1;
4840a2cf20cSSoheil Hassas Yeganeh 		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
4854ed2d765SWillem de Bruijn 			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
4864ed2d765SWillem de Bruijn 	}
487f066e2b0SWillem de Bruijn }
4884ed2d765SWillem de Bruijn 
48905dc72abSEric Dumazet static bool tcp_stream_is_readable(struct sock *sk, int target)
4908934ce2fSJohn Fastabend {
49105dc72abSEric Dumazet 	if (tcp_epollin_ready(sk, target))
49205dc72abSEric Dumazet 		return true;
4937b50ecfcSCong Wang 	return sk_is_readable(sk);
4948934ce2fSJohn Fastabend }
4958934ce2fSJohn Fastabend 
4961da177e4SLinus Torvalds /*
497a11e1d43SLinus Torvalds  *	Wait for a TCP event.
498a11e1d43SLinus Torvalds  *
499a11e1d43SLinus Torvalds  *	Note that we don't need to lock the socket, as the upper poll layers
500a11e1d43SLinus Torvalds  *	take care of normal races (between the test and the event) and we don't
501a11e1d43SLinus Torvalds  *	go look at any of the socket buffers directly.
5021da177e4SLinus Torvalds  */
503a11e1d43SLinus Torvalds __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
5041da177e4SLinus Torvalds {
505a11e1d43SLinus Torvalds 	__poll_t mask;
5061da177e4SLinus Torvalds 	struct sock *sk = sock->sk;
507cf533ea5SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
508e14cadfdSEric Dumazet 	u8 shutdown;
50900fd38d9SEric Dumazet 	int state;
5101da177e4SLinus Torvalds 
51189ab066dSKarsten Graul 	sock_poll_wait(file, sock, wait);
512a11e1d43SLinus Torvalds 
513986ffdfdSYafang Shao 	state = inet_sk_state_load(sk);
51400fd38d9SEric Dumazet 	if (state == TCP_LISTEN)
515dc40c7bcSArnaldo Carvalho de Melo 		return inet_csk_listen_poll(sk);
5161da177e4SLinus Torvalds 
517a11e1d43SLinus Torvalds 	/* Socket is not locked. We are protected from async events
518a11e1d43SLinus Torvalds 	 * by poll logic and correct handling of state changes
519a11e1d43SLinus Torvalds 	 * made by other threads is impossible in any case.
520a11e1d43SLinus Torvalds 	 */
521a11e1d43SLinus Torvalds 
522a11e1d43SLinus Torvalds 	mask = 0;
523a11e1d43SLinus Torvalds 
5241da177e4SLinus Torvalds 	/*
525a9a08845SLinus Torvalds 	 * EPOLLHUP is certainly not done right. But poll() doesn't
5261da177e4SLinus Torvalds 	 * have a notion of HUP in just one direction, and for a
5271da177e4SLinus Torvalds 	 * socket the read side is more interesting.
5281da177e4SLinus Torvalds 	 *
529a9a08845SLinus Torvalds 	 * Some poll() documentation says that EPOLLHUP is incompatible
530a9a08845SLinus Torvalds 	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
5311da177e4SLinus Torvalds 	 * all. But careful, it tends to be safer to return too many
5321da177e4SLinus Torvalds 	 * bits than too few, and you can easily break real applications
5331da177e4SLinus Torvalds 	 * if you don't tell them that something has hung up!
5341da177e4SLinus Torvalds 	 *
5351da177e4SLinus Torvalds 	 * Check-me.
5361da177e4SLinus Torvalds 	 *
537a9a08845SLinus Torvalds 	 * Check number 1. EPOLLHUP is _UNMASKABLE_ event (see UNIX98 and
5381da177e4SLinus Torvalds 	 * our fs/select.c). It means that after we received EOF,
5391da177e4SLinus Torvalds 	 * poll always returns immediately, making impossible poll() on write()
540a9a08845SLinus Torvalds 	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
5411da177e4SLinus Torvalds 	 * if and only if shutdown has been made in both directions.
5421da177e4SLinus Torvalds 	 * Actually, it is interesting to look how Solaris and DUX
543a9a08845SLinus Torvalds 	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
5441da177e4SLinus Torvalds 	 * then we could set it on SND_SHUTDOWN. BTW examples given
5451da177e4SLinus Torvalds 	 * in Stevens' books assume exactly this behaviour, it explains
546a9a08845SLinus Torvalds 	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
5471da177e4SLinus Torvalds 	 *
5481da177e4SLinus Torvalds 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
5491da177e4SLinus Torvalds 	 * blocking on fresh not-connected or disconnected socket. --ANK
5501da177e4SLinus Torvalds 	 */
551e14cadfdSEric Dumazet 	shutdown = READ_ONCE(sk->sk_shutdown);
552e14cadfdSEric Dumazet 	if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
553a9a08845SLinus Torvalds 		mask |= EPOLLHUP;
554e14cadfdSEric Dumazet 	if (shutdown & RCV_SHUTDOWN)
555a9a08845SLinus Torvalds 		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
5561da177e4SLinus Torvalds 
5578336886fSJerry Chu 	/* Connected or passive Fast Open socket? */
55800fd38d9SEric Dumazet 	if (state != TCP_SYN_SENT &&
559d983ea6fSEric Dumazet 	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
560c7004482SDavid S. Miller 		int target = sock_rcvlowat(sk, 0, INT_MAX);
5617b6a893aSEric Dumazet 		u16 urg_data = READ_ONCE(tp->urg_data);
562c7004482SDavid S. Miller 
563b96c51bdSEric Dumazet 		if (unlikely(urg_data) &&
5647b6a893aSEric Dumazet 		    READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
5657b6a893aSEric Dumazet 		    !sock_flag(sk, SOCK_URGINLINE))
566b634f875SAlexandra Kossovsky 			target++;
567c7004482SDavid S. Miller 
56805dc72abSEric Dumazet 		if (tcp_stream_is_readable(sk, target))
569a9a08845SLinus Torvalds 			mask |= EPOLLIN | EPOLLRDNORM;
5701da177e4SLinus Torvalds 
571e14cadfdSEric Dumazet 		if (!(shutdown & SEND_SHUTDOWN)) {
5728ba3c9d1SSoheil Hassas Yeganeh 			if (__sk_stream_is_writeable(sk, 1)) {
573a9a08845SLinus Torvalds 				mask |= EPOLLOUT | EPOLLWRNORM;
5741da177e4SLinus Torvalds 			} else {  /* send SIGIO later */
5759cd3e072SEric Dumazet 				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
5761da177e4SLinus Torvalds 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
5771da177e4SLinus Torvalds 
5781da177e4SLinus Torvalds 				/* Race breaker. If space is freed after
5791da177e4SLinus Torvalds 				 * wspace test but before the flags are set,
5803c715127Sjbaron@akamai.com 				 * IO signal will be lost. Memory barrier
5813c715127Sjbaron@akamai.com 				 * pairs with the input side.
5821da177e4SLinus Torvalds 				 */
5833c715127Sjbaron@akamai.com 				smp_mb__after_atomic();
5848ba3c9d1SSoheil Hassas Yeganeh 				if (__sk_stream_is_writeable(sk, 1))
585a9a08845SLinus Torvalds 					mask |= EPOLLOUT | EPOLLWRNORM;
5861da177e4SLinus Torvalds 			}
587d84ba638SKOSAKI Motohiro 		} else
588a9a08845SLinus Torvalds 			mask |= EPOLLOUT | EPOLLWRNORM;
5891da177e4SLinus Torvalds 
5907b6a893aSEric Dumazet 		if (urg_data & TCP_URG_VALID)
591a9a08845SLinus Torvalds 			mask |= EPOLLPRI;
59208e39c0dSEric Dumazet 	} else if (state == TCP_SYN_SENT &&
59308e39c0dSEric Dumazet 		   inet_test_bit(DEFER_CONNECT, sk)) {
59419f6d3f3SWei Wang 		/* Active TCP fastopen socket with defer_connect
595a9a08845SLinus Torvalds 		 * Return EPOLLOUT so application can call write()
59619f6d3f3SWei Wang 		 * in order for kernel to generate SYN+data
59719f6d3f3SWei Wang 		 */
598a9a08845SLinus Torvalds 		mask |= EPOLLOUT | EPOLLWRNORM;
5991da177e4SLinus Torvalds 	}
600a4d25803STom Marshall 	/* This barrier is coupled with smp_wmb() in tcp_reset() */
601a4d25803STom Marshall 	smp_rmb();
602e13ec3daSEric Dumazet 	if (READ_ONCE(sk->sk_err) ||
603e13ec3daSEric Dumazet 	    !skb_queue_empty_lockless(&sk->sk_error_queue))
604a9a08845SLinus Torvalds 		mask |= EPOLLERR;
605a4d25803STom Marshall 
6061da177e4SLinus Torvalds 	return mask;
6071da177e4SLinus Torvalds }
608a11e1d43SLinus Torvalds EXPORT_SYMBOL(tcp_poll);
6091da177e4SLinus Torvalds 
610e1d001faSBreno Leitao int tcp_ioctl(struct sock *sk, int cmd, int *karg)
6111da177e4SLinus Torvalds {
6121da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
6131da177e4SLinus Torvalds 	int answ;
6140e71c55cSEric Dumazet 	bool slow;
6151da177e4SLinus Torvalds 
6161da177e4SLinus Torvalds 	switch (cmd) {
6171da177e4SLinus Torvalds 	case SIOCINQ:
6181da177e4SLinus Torvalds 		if (sk->sk_state == TCP_LISTEN)
6191da177e4SLinus Torvalds 			return -EINVAL;
6201da177e4SLinus Torvalds 
6210e71c55cSEric Dumazet 		slow = lock_sock_fast(sk);
622473bd239STom Herbert 		answ = tcp_inq(sk);
6230e71c55cSEric Dumazet 		unlock_sock_fast(sk, slow);
6241da177e4SLinus Torvalds 		break;
6251da177e4SLinus Torvalds 	case SIOCATMARK:
6267b6a893aSEric Dumazet 		answ = READ_ONCE(tp->urg_data) &&
627d9b55bf7SEric Dumazet 		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
6281da177e4SLinus Torvalds 		break;
6291da177e4SLinus Torvalds 	case SIOCOUTQ:
6301da177e4SLinus Torvalds 		if (sk->sk_state == TCP_LISTEN)
6311da177e4SLinus Torvalds 			return -EINVAL;
6321da177e4SLinus Torvalds 
6331da177e4SLinus Torvalds 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
6341da177e4SLinus Torvalds 			answ = 0;
6351da177e4SLinus Torvalds 		else
6360f317464SEric Dumazet 			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
6371da177e4SLinus Torvalds 		break;
6382f4e1b39SMario Schuknecht 	case SIOCOUTQNSD:
6392f4e1b39SMario Schuknecht 		if (sk->sk_state == TCP_LISTEN)
6402f4e1b39SMario Schuknecht 			return -EINVAL;
6412f4e1b39SMario Schuknecht 
6422f4e1b39SMario Schuknecht 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
6432f4e1b39SMario Schuknecht 			answ = 0;
6442f4e1b39SMario Schuknecht 		else
645e0d694d6SEric Dumazet 			answ = READ_ONCE(tp->write_seq) -
646e0d694d6SEric Dumazet 			       READ_ONCE(tp->snd_nxt);
6472f4e1b39SMario Schuknecht 		break;
6481da177e4SLinus Torvalds 	default:
6491da177e4SLinus Torvalds 		return -ENOIOCTLCMD;
6503ff50b79SStephen Hemminger 	}
6511da177e4SLinus Torvalds 
652e1d001faSBreno Leitao 	*karg = answ;
653e1d001faSBreno Leitao 	return 0;
6541da177e4SLinus Torvalds }
6554bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_ioctl);
6561da177e4SLinus Torvalds 
65704d8825cSPaolo Abeni void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
6581da177e4SLinus Torvalds {
6594de075e0SEric Dumazet 	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
6601da177e4SLinus Torvalds 	tp->pushed_seq = tp->write_seq;
6611da177e4SLinus Torvalds }
6621da177e4SLinus Torvalds 
663a2a385d6SEric Dumazet static inline bool forced_push(const struct tcp_sock *tp)
6641da177e4SLinus Torvalds {
6651da177e4SLinus Torvalds 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
6661da177e4SLinus Torvalds }
6671da177e4SLinus Torvalds 
66804d8825cSPaolo Abeni void tcp_skb_entail(struct sock *sk, struct sk_buff *skb)
6691da177e4SLinus Torvalds {
6709e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
671352d4800SArnaldo Carvalho de Melo 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
672352d4800SArnaldo Carvalho de Melo 
673352d4800SArnaldo Carvalho de Melo 	tcb->seq     = tcb->end_seq = tp->write_seq;
6744de075e0SEric Dumazet 	tcb->tcp_flags = TCPHDR_ACK;
675f4a775d1SEric Dumazet 	__skb_header_release(skb);
676fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
677ab4e846aSEric Dumazet 	sk_wmem_queued_add(sk, skb->truesize);
6783ab224beSHideo Aoki 	sk_mem_charge(sk, skb->truesize);
67989ebd197SDavid S. Miller 	if (tp->nonagle & TCP_NAGLE_PUSH)
6801da177e4SLinus Torvalds 		tp->nonagle &= ~TCP_NAGLE_PUSH;
6816f021c62SEric Dumazet 
6826f021c62SEric Dumazet 	tcp_slow_start_after_idle_check(sk);
6831da177e4SLinus Torvalds }
6841da177e4SLinus Torvalds 
685afeca340SKrishna Kumar static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
6861da177e4SLinus Torvalds {
68733f5f57eSIlpo Järvinen 	if (flags & MSG_OOB)
6881da177e4SLinus Torvalds 		tp->snd_up = tp->write_seq;
6891da177e4SLinus Torvalds }
6901da177e4SLinus Torvalds 
691f54b3111SEric Dumazet /* If a not yet filled skb is pushed, do not send it if
692a181ceb5SEric Dumazet  * we have data packets in Qdisc or NIC queues :
693f54b3111SEric Dumazet  * Because TX completion will happen shortly, it gives a chance
694f54b3111SEric Dumazet  * to coalesce future sendmsg() payload into this skb, without
695f54b3111SEric Dumazet  * need for a timer, and with no latency trade off.
696f54b3111SEric Dumazet  * As packets containing data payload have a bigger truesize
697a181ceb5SEric Dumazet  * than pure acks (dataless) packets, the last checks prevent
698a181ceb5SEric Dumazet  * autocorking if we only have an ACK in Qdisc/NIC queues,
699a181ceb5SEric Dumazet  * or if TX completion was delayed after we processed ACK packet.
700f54b3111SEric Dumazet  */
701f54b3111SEric Dumazet static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
702f54b3111SEric Dumazet 				int size_goal)
7031da177e4SLinus Torvalds {
704f54b3111SEric Dumazet 	return skb->len < size_goal &&
70585225e6fSKuniyuki Iwashima 	       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
706114f39feSEric Dumazet 	       !tcp_rtx_queue_empty(sk) &&
707b0de0cf4SEric Dumazet 	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize &&
708b0de0cf4SEric Dumazet 	       tcp_skb_can_collapse_to(skb);
709f54b3111SEric Dumazet }
7109e412ba7SIlpo Järvinen 
71135b2c321SMat Martineau void tcp_push(struct sock *sk, int flags, int mss_now,
712f54b3111SEric Dumazet 	      int nonagle, int size_goal)
713f54b3111SEric Dumazet {
714f54b3111SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
715f54b3111SEric Dumazet 	struct sk_buff *skb;
716f54b3111SEric Dumazet 
717f54b3111SEric Dumazet 	skb = tcp_write_queue_tail(sk);
71875c119afSEric Dumazet 	if (!skb)
71975c119afSEric Dumazet 		return;
7201da177e4SLinus Torvalds 	if (!(flags & MSG_MORE) || forced_push(tp))
721f54b3111SEric Dumazet 		tcp_mark_push(tp, skb);
722afeca340SKrishna Kumar 
723afeca340SKrishna Kumar 	tcp_mark_urg(tp, flags);
724f54b3111SEric Dumazet 
725f54b3111SEric Dumazet 	if (tcp_should_autocork(sk, skb, size_goal)) {
726f54b3111SEric Dumazet 
727f54b3111SEric Dumazet 		/* avoid atomic op if TSQ_THROTTLED bit is already set */
7287aa5470cSEric Dumazet 		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
729f54b3111SEric Dumazet 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
7307aa5470cSEric Dumazet 			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
7317267e8dcSSalvatore Dipietro 			smp_mb__after_atomic();
7321da177e4SLinus Torvalds 		}
733a181ceb5SEric Dumazet 		/* It is possible TX completion already happened
734a181ceb5SEric Dumazet 		 * before we set TSQ_THROTTLED.
735a181ceb5SEric Dumazet 		 */
73614afee4bSReshetova, Elena 		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
737f54b3111SEric Dumazet 			return;
738f54b3111SEric Dumazet 	}
739f54b3111SEric Dumazet 
740f54b3111SEric Dumazet 	if (flags & MSG_MORE)
741f54b3111SEric Dumazet 		nonagle = TCP_NAGLE_CORK;
742f54b3111SEric Dumazet 
743f54b3111SEric Dumazet 	__tcp_push_pending_frames(sk, mss_now, nonagle);
7441da177e4SLinus Torvalds }
7451da177e4SLinus Torvalds 
7466ff7751dSAdrian Bunk static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
7479c55e01cSJens Axboe 				unsigned int offset, size_t len)
7489c55e01cSJens Axboe {
7499c55e01cSJens Axboe 	struct tcp_splice_state *tss = rd_desc->arg.data;
75033966dd0SWilly Tarreau 	int ret;
7519c55e01cSJens Axboe 
752a60e3cc7SHannes Frederic Sowa 	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
75325869262SAl Viro 			      min(rd_desc->count, len), tss->flags);
75433966dd0SWilly Tarreau 	if (ret > 0)
75533966dd0SWilly Tarreau 		rd_desc->count -= ret;
75633966dd0SWilly Tarreau 	return ret;
7579c55e01cSJens Axboe }
7589c55e01cSJens Axboe 
7599c55e01cSJens Axboe static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
7609c55e01cSJens Axboe {
7619c55e01cSJens Axboe 	/* Store TCP splice context information in read_descriptor_t. */
7629c55e01cSJens Axboe 	read_descriptor_t rd_desc = {
7639c55e01cSJens Axboe 		.arg.data = tss,
76433966dd0SWilly Tarreau 		.count	  = tss->len,
7659c55e01cSJens Axboe 	};
7669c55e01cSJens Axboe 
7679c55e01cSJens Axboe 	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
7689c55e01cSJens Axboe }
7699c55e01cSJens Axboe 
7709c55e01cSJens Axboe /**
7719c55e01cSJens Axboe  *  tcp_splice_read - splice data from TCP socket to a pipe
7729c55e01cSJens Axboe  * @sock:	socket to splice from
7739c55e01cSJens Axboe  * @ppos:	position (not valid)
7749c55e01cSJens Axboe  * @pipe:	pipe to splice to
7759c55e01cSJens Axboe  * @len:	number of bytes to splice
7769c55e01cSJens Axboe  * @flags:	splice modifier flags
7779c55e01cSJens Axboe  *
7789c55e01cSJens Axboe  * Description:
7799c55e01cSJens Axboe  *    Will read pages from given socket and fill them into a pipe.
7809c55e01cSJens Axboe  *
7819c55e01cSJens Axboe  **/
7829c55e01cSJens Axboe ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
7839c55e01cSJens Axboe 			struct pipe_inode_info *pipe, size_t len,
7849c55e01cSJens Axboe 			unsigned int flags)
7859c55e01cSJens Axboe {
7869c55e01cSJens Axboe 	struct sock *sk = sock->sk;
7879c55e01cSJens Axboe 	struct tcp_splice_state tss = {
7889c55e01cSJens Axboe 		.pipe = pipe,
7899c55e01cSJens Axboe 		.len = len,
7909c55e01cSJens Axboe 		.flags = flags,
7919c55e01cSJens Axboe 	};
7929c55e01cSJens Axboe 	long timeo;
7939c55e01cSJens Axboe 	ssize_t spliced;
7949c55e01cSJens Axboe 	int ret;
7959c55e01cSJens Axboe 
7963a047bf8SChangli Gao 	sock_rps_record_flow(sk);
7979c55e01cSJens Axboe 	/*
7989c55e01cSJens Axboe 	 * We can't seek on a socket input
7999c55e01cSJens Axboe 	 */
8009c55e01cSJens Axboe 	if (unlikely(*ppos))
8019c55e01cSJens Axboe 		return -ESPIPE;
8029c55e01cSJens Axboe 
8039c55e01cSJens Axboe 	ret = spliced = 0;
8049c55e01cSJens Axboe 
8059c55e01cSJens Axboe 	lock_sock(sk);
8069c55e01cSJens Axboe 
80742324c62SEric Dumazet 	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
8089c55e01cSJens Axboe 	while (tss.len) {
8099c55e01cSJens Axboe 		ret = __tcp_splice_read(sk, &tss);
8109c55e01cSJens Axboe 		if (ret < 0)
8119c55e01cSJens Axboe 			break;
8129c55e01cSJens Axboe 		else if (!ret) {
8139c55e01cSJens Axboe 			if (spliced)
8149c55e01cSJens Axboe 				break;
8159c55e01cSJens Axboe 			if (sock_flag(sk, SOCK_DONE))
8169c55e01cSJens Axboe 				break;
8179c55e01cSJens Axboe 			if (sk->sk_err) {
8189c55e01cSJens Axboe 				ret = sock_error(sk);
8199c55e01cSJens Axboe 				break;
8209c55e01cSJens Axboe 			}
8219c55e01cSJens Axboe 			if (sk->sk_shutdown & RCV_SHUTDOWN)
8229c55e01cSJens Axboe 				break;
8239c55e01cSJens Axboe 			if (sk->sk_state == TCP_CLOSE) {
8249c55e01cSJens Axboe 				/*
8259c55e01cSJens Axboe 				 * This occurs when user tries to read
8269c55e01cSJens Axboe 				 * from never connected socket.
8279c55e01cSJens Axboe 				 */
8289c55e01cSJens Axboe 				ret = -ENOTCONN;
8299c55e01cSJens Axboe 				break;
8309c55e01cSJens Axboe 			}
8319c55e01cSJens Axboe 			if (!timeo) {
8329c55e01cSJens Axboe 				ret = -EAGAIN;
8339c55e01cSJens Axboe 				break;
8349c55e01cSJens Axboe 			}
835ccf7abb9SEric Dumazet 			/* if __tcp_splice_read() got nothing while we have
836ccf7abb9SEric Dumazet 			 * an skb in receive queue, we do not want to loop.
837ccf7abb9SEric Dumazet 			 * This might happen with URG data.
838ccf7abb9SEric Dumazet 			 */
839ccf7abb9SEric Dumazet 			if (!skb_queue_empty(&sk->sk_receive_queue))
840ccf7abb9SEric Dumazet 				break;
841419ce133SPaolo Abeni 			ret = sk_wait_data(sk, &timeo, NULL);
842419ce133SPaolo Abeni 			if (ret < 0)
843419ce133SPaolo Abeni 				break;
8449c55e01cSJens Axboe 			if (signal_pending(current)) {
8459c55e01cSJens Axboe 				ret = sock_intr_errno(timeo);
8469c55e01cSJens Axboe 				break;
8479c55e01cSJens Axboe 			}
8489c55e01cSJens Axboe 			continue;
8499c55e01cSJens Axboe 		}
8509c55e01cSJens Axboe 		tss.len -= ret;
8519c55e01cSJens Axboe 		spliced += ret;
8529c55e01cSJens Axboe 
8532fe11c9dSPavel Begunkov 		if (!tss.len || !timeo)
85433966dd0SWilly Tarreau 			break;
8559c55e01cSJens Axboe 		release_sock(sk);
8569c55e01cSJens Axboe 		lock_sock(sk);
8579c55e01cSJens Axboe 
8589c55e01cSJens Axboe 		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
85933966dd0SWilly Tarreau 		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
8609c55e01cSJens Axboe 		    signal_pending(current))
8619c55e01cSJens Axboe 			break;
8629c55e01cSJens Axboe 	}
8639c55e01cSJens Axboe 
8649c55e01cSJens Axboe 	release_sock(sk);
8659c55e01cSJens Axboe 
8669c55e01cSJens Axboe 	if (spliced)
8679c55e01cSJens Axboe 		return spliced;
8689c55e01cSJens Axboe 
8699c55e01cSJens Axboe 	return ret;
8709c55e01cSJens Axboe }
8714bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_splice_read);
8729c55e01cSJens Axboe 
8735882efffSEric Dumazet struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
874eb934478SEric Dumazet 				     bool force_schedule)
875f561d0f2SPavel Emelyanov {
876f561d0f2SPavel Emelyanov 	struct sk_buff *skb;
877f561d0f2SPavel Emelyanov 
8785882efffSEric Dumazet 	skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
8798e4d980aSEric Dumazet 	if (likely(skb)) {
880eb934478SEric Dumazet 		bool mem_scheduled;
8818e4d980aSEric Dumazet 
8829b65b17dSTalal Ahmad 		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
883eb934478SEric Dumazet 		if (force_schedule) {
884eb934478SEric Dumazet 			mem_scheduled = true;
8858e4d980aSEric Dumazet 			sk_forced_mem_schedule(sk, skb->truesize);
8868e4d980aSEric Dumazet 		} else {
887eb934478SEric Dumazet 			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
8888e4d980aSEric Dumazet 		}
889eb934478SEric Dumazet 		if (likely(mem_scheduled)) {
8908a794df6SEric Dumazet 			skb_reserve(skb, MAX_TCP_HEADER);
891a52fe46eSEric Dumazet 			skb->ip_summed = CHECKSUM_PARTIAL;
892e2080072SEric Dumazet 			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
893f561d0f2SPavel Emelyanov 			return skb;
894f561d0f2SPavel Emelyanov 		}
895f561d0f2SPavel Emelyanov 		__kfree_skb(skb);
896f561d0f2SPavel Emelyanov 	} else {
8975c52ba17SPavel Emelyanov 		sk->sk_prot->enter_memory_pressure(sk);
898f561d0f2SPavel Emelyanov 		sk_stream_moderate_sndbuf(sk);
899f561d0f2SPavel Emelyanov 	}
900f561d0f2SPavel Emelyanov 	return NULL;
901f561d0f2SPavel Emelyanov }
902f561d0f2SPavel Emelyanov 
9030c54b85fSIlpo Järvinen static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
9040c54b85fSIlpo Järvinen 				       int large_allowed)
9050c54b85fSIlpo Järvinen {
9060c54b85fSIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
9076c09fa09SEric Dumazet 	u32 new_size_goal, size_goal;
9080c54b85fSIlpo Järvinen 
90974d4a8f8SEric Dumazet 	if (!large_allowed)
910605ad7f1SEric Dumazet 		return mss_now;
9110c54b85fSIlpo Järvinen 
9126c09fa09SEric Dumazet 	/* Note : tcp_tso_autosize() will eventually split this later */
913ab14f180SDavid Ahern 	new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);
9142a3a041cSIlpo Järvinen 
9152a3a041cSIlpo Järvinen 	/* We try hard to avoid divides here */
916605ad7f1SEric Dumazet 	size_goal = tp->gso_segs * mss_now;
917605ad7f1SEric Dumazet 	if (unlikely(new_size_goal < size_goal ||
918605ad7f1SEric Dumazet 		     new_size_goal >= size_goal + mss_now)) {
919605ad7f1SEric Dumazet 		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
9201485348dSBen Hutchings 				     sk->sk_gso_max_segs);
921605ad7f1SEric Dumazet 		size_goal = tp->gso_segs * mss_now;
9220c54b85fSIlpo Järvinen 	}
9230c54b85fSIlpo Järvinen 
924605ad7f1SEric Dumazet 	return max(size_goal, mss_now);
9250c54b85fSIlpo Järvinen }
9260c54b85fSIlpo Järvinen 
92735b2c321SMat Martineau int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
9280c54b85fSIlpo Järvinen {
9290c54b85fSIlpo Järvinen 	int mss_now;
9300c54b85fSIlpo Järvinen 
9310c54b85fSIlpo Järvinen 	mss_now = tcp_current_mss(sk);
9320c54b85fSIlpo Järvinen 	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
9330c54b85fSIlpo Järvinen 
9340c54b85fSIlpo Järvinen 	return mss_now;
9350c54b85fSIlpo Järvinen }
9360c54b85fSIlpo Järvinen 
93772bf4f17SEric Dumazet /* In some cases, sendmsg() could have added an skb to the write queue,
938dc97391eSDavid Howells  * but failed adding payload on it. We need to remove it to consume less
939dc97391eSDavid Howells  * memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
94072bf4f17SEric Dumazet  * epoll() users. Another reason is that tcp_write_xmit() does not like
94172bf4f17SEric Dumazet  * finding an empty skb in the write queue.
942fdfc5c85SEric Dumazet  */
94327728ba8SEric Dumazet void tcp_remove_empty_skb(struct sock *sk)
944fdfc5c85SEric Dumazet {
94527728ba8SEric Dumazet 	struct sk_buff *skb = tcp_write_queue_tail(sk);
94627728ba8SEric Dumazet 
947cf12e6f9SJon Maxwell 	if (skb && TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
948fdfc5c85SEric Dumazet 		tcp_unlink_write_queue(skb, sk);
949fdfc5c85SEric Dumazet 		if (tcp_write_queue_empty(sk))
950fdfc5c85SEric Dumazet 			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
95103271f3aSTalal Ahmad 		tcp_wmem_free_skb(sk, skb);
952fdfc5c85SEric Dumazet 	}
953fdfc5c85SEric Dumazet }
954fdfc5c85SEric Dumazet 
955f8d9d938SEric Dumazet /* skb changing from pure zc to mixed, must charge zc */
956f8d9d938SEric Dumazet static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
957f8d9d938SEric Dumazet {
958f8d9d938SEric Dumazet 	if (unlikely(skb_zcopy_pure(skb))) {
959f8d9d938SEric Dumazet 		u32 extra = skb->truesize -
960f8d9d938SEric Dumazet 			    SKB_TRUESIZE(skb_end_offset(skb));
961f8d9d938SEric Dumazet 
962f8d9d938SEric Dumazet 		if (!sk_wmem_schedule(sk, extra))
963f8d9d938SEric Dumazet 			return -ENOMEM;
964f8d9d938SEric Dumazet 
965f8d9d938SEric Dumazet 		sk_mem_charge(sk, extra);
966f8d9d938SEric Dumazet 		skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
967f8d9d938SEric Dumazet 	}
968f8d9d938SEric Dumazet 	return 0;
969f8d9d938SEric Dumazet }
970f8d9d938SEric Dumazet 
971849b425cSEric Dumazet 
972fbf93406SEric Dumazet int tcp_wmem_schedule(struct sock *sk, int copy)
973f54755f6SEric Dumazet {
974f54755f6SEric Dumazet 	int left;
975f54755f6SEric Dumazet 
976f54755f6SEric Dumazet 	if (likely(sk_wmem_schedule(sk, copy)))
977f54755f6SEric Dumazet 		return copy;
978f54755f6SEric Dumazet 
979f54755f6SEric Dumazet 	/* We could be in trouble if we have nothing queued.
980f54755f6SEric Dumazet 	 * Use whatever is left in sk->sk_forward_alloc and tcp_wmem[0]
981f54755f6SEric Dumazet 	 * to guarantee some progress.
982f54755f6SEric Dumazet 	 */
983683a67daSJason Xing 	left = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[0]) - sk->sk_wmem_queued;
984f54755f6SEric Dumazet 	if (left > 0)
985f54755f6SEric Dumazet 		sk_forced_mem_schedule(sk, min(left, copy));
986f54755f6SEric Dumazet 	return min(copy, sk->sk_forward_alloc);
987f54755f6SEric Dumazet }
988f54755f6SEric Dumazet 
989cf60af03SYuchung Cheng void tcp_free_fastopen_req(struct tcp_sock *tp)
990cf60af03SYuchung Cheng {
99100db4124SIan Morris 	if (tp->fastopen_req) {
992cf60af03SYuchung Cheng 		kfree(tp->fastopen_req);
993cf60af03SYuchung Cheng 		tp->fastopen_req = NULL;
994cf60af03SYuchung Cheng 	}
995cf60af03SYuchung Cheng }
996cf60af03SYuchung Cheng 
9973242abebSBenjamin Hesmans int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
9983242abebSBenjamin Hesmans 			 size_t size, struct ubuf_info *uarg)
999cf60af03SYuchung Cheng {
1000cf60af03SYuchung Cheng 	struct tcp_sock *tp = tcp_sk(sk);
100119f6d3f3SWei Wang 	struct inet_sock *inet = inet_sk(sk);
1002ba615f67SWei Wang 	struct sockaddr *uaddr = msg->msg_name;
1003cf60af03SYuchung Cheng 	int err, flags;
1004cf60af03SYuchung Cheng 
10055a542133SKuniyuki Iwashima 	if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
10065a542133SKuniyuki Iwashima 	      TFO_CLIENT_ENABLE) ||
1007ba615f67SWei Wang 	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
1008ba615f67SWei Wang 	     uaddr->sa_family == AF_UNSPEC))
1009cf60af03SYuchung Cheng 		return -EOPNOTSUPP;
101000db4124SIan Morris 	if (tp->fastopen_req)
1011cf60af03SYuchung Cheng 		return -EALREADY; /* Another Fast Open is in progress */
1012cf60af03SYuchung Cheng 
1013cf60af03SYuchung Cheng 	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1014cf60af03SYuchung Cheng 				   sk->sk_allocation);
101551456b29SIan Morris 	if (unlikely(!tp->fastopen_req))
1016cf60af03SYuchung Cheng 		return -ENOBUFS;
1017cf60af03SYuchung Cheng 	tp->fastopen_req->data = msg;
1018f5ddcbbbSEric Dumazet 	tp->fastopen_req->size = size;
1019f859a448SWillem de Bruijn 	tp->fastopen_req->uarg = uarg;
1020cf60af03SYuchung Cheng 
102108e39c0dSEric Dumazet 	if (inet_test_bit(DEFER_CONNECT, sk)) {
102219f6d3f3SWei Wang 		err = tcp_connect(sk);
102319f6d3f3SWei Wang 		/* Same failure procedure as in tcp_v4/6_connect */
102419f6d3f3SWei Wang 		if (err) {
102519f6d3f3SWei Wang 			tcp_set_state(sk, TCP_CLOSE);
102619f6d3f3SWei Wang 			inet->inet_dport = 0;
102719f6d3f3SWei Wang 			sk->sk_route_caps = 0;
102819f6d3f3SWei Wang 		}
102919f6d3f3SWei Wang 	}
1030cf60af03SYuchung Cheng 	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1031ba615f67SWei Wang 	err = __inet_stream_connect(sk->sk_socket, uaddr,
10323979ad7eSWilly Tarreau 				    msg->msg_namelen, flags, 1);
10337db92362SWei Wang 	/* fastopen_req could already be freed in __inet_stream_connect
10347db92362SWei Wang 	 * if the connection times out or gets rst
10357db92362SWei Wang 	 */
10367db92362SWei Wang 	if (tp->fastopen_req) {
1037f5ddcbbbSEric Dumazet 		*copied = tp->fastopen_req->copied;
1038cf60af03SYuchung Cheng 		tcp_free_fastopen_req(tp);
103908e39c0dSEric Dumazet 		inet_clear_bit(DEFER_CONNECT, sk);
10407db92362SWei Wang 	}
1041cf60af03SYuchung Cheng 	return err;
1042cf60af03SYuchung Cheng }
1043cf60af03SYuchung Cheng 
1044306b13ebSTom Herbert int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
10451da177e4SLinus Torvalds {
10461da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1047f214f915SWillem de Bruijn 	struct ubuf_info *uarg = NULL;
10481da177e4SLinus Torvalds 	struct sk_buff *skb;
1049c14ac945SSoheil Hassas Yeganeh 	struct sockcm_cookie sockc;
105057be5bdaSAl Viro 	int flags, err, copied = 0;
105157be5bdaSAl Viro 	int mss_now = 0, size_goal, copied_syn = 0;
10521a991488SEric Dumazet 	int process_backlog = 0;
1053270a1c3dSDavid Howells 	int zc = 0;
10541da177e4SLinus Torvalds 	long timeo;
10551da177e4SLinus Torvalds 
10561da177e4SLinus Torvalds 	flags = msg->msg_flags;
1057f214f915SWillem de Bruijn 
1058eb315a7dSPavel Begunkov 	if ((flags & MSG_ZEROCOPY) && size) {
1059eb315a7dSPavel Begunkov 		if (msg->msg_ubuf) {
1060eb315a7dSPavel Begunkov 			uarg = msg->msg_ubuf;
1061270a1c3dSDavid Howells 			if (sk->sk_route_caps & NETIF_F_SG)
1062270a1c3dSDavid Howells 				zc = MSG_ZEROCOPY;
1063eb315a7dSPavel Begunkov 		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
1064eea96a3eSPavel Begunkov 			skb = tcp_write_queue_tail(sk);
10658c793822SJonathan Lemon 			uarg = msg_zerocopy_realloc(sk, size, skb_zcopy(skb));
1066f214f915SWillem de Bruijn 			if (!uarg) {
1067f214f915SWillem de Bruijn 				err = -ENOBUFS;
1068f214f915SWillem de Bruijn 				goto out_err;
1069f214f915SWillem de Bruijn 			}
1070270a1c3dSDavid Howells 			if (sk->sk_route_caps & NETIF_F_SG)
1071270a1c3dSDavid Howells 				zc = MSG_ZEROCOPY;
1072270a1c3dSDavid Howells 			else
1073e7d2b510SPavel Begunkov 				uarg_to_msgzc(uarg)->zerocopy = 0;
1074f214f915SWillem de Bruijn 		}
1075270a1c3dSDavid Howells 	} else if (unlikely(msg->msg_flags & MSG_SPLICE_PAGES) && size) {
1076270a1c3dSDavid Howells 		if (sk->sk_route_caps & NETIF_F_SG)
1077270a1c3dSDavid Howells 			zc = MSG_SPLICE_PAGES;
1078eb315a7dSPavel Begunkov 	}
1079f214f915SWillem de Bruijn 
108008e39c0dSEric Dumazet 	if (unlikely(flags & MSG_FASTOPEN ||
108108e39c0dSEric Dumazet 		     inet_test_bit(DEFER_CONNECT, sk)) &&
108216ae6aa1SYuchung Cheng 	    !tp->repair) {
1083f859a448SWillem de Bruijn 		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
1084cf60af03SYuchung Cheng 		if (err == -EINPROGRESS && copied_syn > 0)
1085cf60af03SYuchung Cheng 			goto out;
1086cf60af03SYuchung Cheng 		else if (err)
1087cf60af03SYuchung Cheng 			goto out_err;
1088cf60af03SYuchung Cheng 	}
1089cf60af03SYuchung Cheng 
10901da177e4SLinus Torvalds 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
10911da177e4SLinus Torvalds 
1092d7722e85SSoheil Hassas Yeganeh 	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
1093d7722e85SSoheil Hassas Yeganeh 
10948336886fSJerry Chu 	/* Wait for a connection to finish. One exception is TCP Fast Open
10958336886fSJerry Chu 	 * (passive side) where data is allowed to be sent before a connection
10968336886fSJerry Chu 	 * is fully established.
10978336886fSJerry Chu 	 */
10988336886fSJerry Chu 	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
10998336886fSJerry Chu 	    !tcp_passive_fastopen(sk)) {
1100686a5624SYuvaraja Mariappan 		err = sk_stream_wait_connect(sk, &timeo);
1101686a5624SYuvaraja Mariappan 		if (err != 0)
1102cf60af03SYuchung Cheng 			goto do_error;
11038336886fSJerry Chu 	}
11041da177e4SLinus Torvalds 
1105c0e88ff0SPavel Emelyanov 	if (unlikely(tp->repair)) {
1106c0e88ff0SPavel Emelyanov 		if (tp->repair_queue == TCP_RECV_QUEUE) {
1107c0e88ff0SPavel Emelyanov 			copied = tcp_send_rcvq(sk, msg, size);
11085924f17aSChristoph Paasch 			goto out_nopush;
1109c0e88ff0SPavel Emelyanov 		}
1110c0e88ff0SPavel Emelyanov 
1111c0e88ff0SPavel Emelyanov 		err = -EINVAL;
1112c0e88ff0SPavel Emelyanov 		if (tp->repair_queue == TCP_NO_QUEUE)
1113c0e88ff0SPavel Emelyanov 			goto out_err;
1114c0e88ff0SPavel Emelyanov 
1115c0e88ff0SPavel Emelyanov 		/* 'common' sending to sendq */
1116c0e88ff0SPavel Emelyanov 	}
1117c0e88ff0SPavel Emelyanov 
1118657a0667SWillem de Bruijn 	sockcm_init(&sockc, sk);
1119c14ac945SSoheil Hassas Yeganeh 	if (msg->msg_controllen) {
1120c14ac945SSoheil Hassas Yeganeh 		err = sock_cmsg_send(sk, msg, &sockc);
1121c14ac945SSoheil Hassas Yeganeh 		if (unlikely(err)) {
1122c14ac945SSoheil Hassas Yeganeh 			err = -EINVAL;
1123c14ac945SSoheil Hassas Yeganeh 			goto out_err;
1124c14ac945SSoheil Hassas Yeganeh 		}
1125c14ac945SSoheil Hassas Yeganeh 	}
1126c14ac945SSoheil Hassas Yeganeh 
11271da177e4SLinus Torvalds 	/* This should be in poll */
11289cd3e072SEric Dumazet 	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
11291da177e4SLinus Torvalds 
11301da177e4SLinus Torvalds 	/* Ok commence sending. */
11311da177e4SLinus Torvalds 	copied = 0;
11321da177e4SLinus Torvalds 
1133d41a69f1SEric Dumazet restart:
1134d41a69f1SEric Dumazet 	mss_now = tcp_send_mss(sk, &size_goal, flags);
1135d41a69f1SEric Dumazet 
11361da177e4SLinus Torvalds 	err = -EPIPE;
11371da177e4SLinus Torvalds 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
113879d8665bSEric Dumazet 		goto do_error;
11391da177e4SLinus Torvalds 
114001e97e65SAl Viro 	while (msg_data_left(msg)) {
1141270a1c3dSDavid Howells 		ssize_t copy = 0;
11421da177e4SLinus Torvalds 
1143fe067e8aSDavid S. Miller 		skb = tcp_write_queue_tail(sk);
114465ec6097SEric Dumazet 		if (skb)
114565ec6097SEric Dumazet 			copy = size_goal - skb->len;
11461da177e4SLinus Torvalds 
1147c134ecb8SMartin KaFai Lau 		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
11483613b3dbSEric Dumazet 			bool first_skb;
11493613b3dbSEric Dumazet 
11501da177e4SLinus Torvalds new_segment:
11511da177e4SLinus Torvalds 			if (!sk_stream_memory_free(sk))
1152afb83012SSoheil Hassas Yeganeh 				goto wait_for_space;
11531da177e4SLinus Torvalds 
11541a991488SEric Dumazet 			if (unlikely(process_backlog >= 16)) {
11551a991488SEric Dumazet 				process_backlog = 0;
11561a991488SEric Dumazet 				if (sk_flush_backlog(sk))
1157d41a69f1SEric Dumazet 					goto restart;
1158d4011239SEric Dumazet 			}
115975c119afSEric Dumazet 			first_skb = tcp_rtx_and_write_queues_empty(sk);
11605882efffSEric Dumazet 			skb = tcp_stream_alloc_skb(sk, sk->sk_allocation,
11613613b3dbSEric Dumazet 						   first_skb);
11621da177e4SLinus Torvalds 			if (!skb)
1163afb83012SSoheil Hassas Yeganeh 				goto wait_for_space;
11641da177e4SLinus Torvalds 
11651a991488SEric Dumazet 			process_backlog++;
11661da177e4SLinus Torvalds 
116704d8825cSPaolo Abeni 			tcp_skb_entail(sk, skb);
1168c1b4a7e6SDavid S. Miller 			copy = size_goal;
11699d186cacSAndrey Vagin 
11709d186cacSAndrey Vagin 			/* All packets are restored as if they have
1171d3edd06eSEric Dumazet 			 * already been sent. skb_mstamp_ns isn't set to
11729d186cacSAndrey Vagin 			 * avoid wrong rtt estimation.
11739d186cacSAndrey Vagin 			 */
11749d186cacSAndrey Vagin 			if (tp->repair)
11759d186cacSAndrey Vagin 				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
11761da177e4SLinus Torvalds 		}
11771da177e4SLinus Torvalds 
11781da177e4SLinus Torvalds 		/* Try to append data to the end of skb. */
117901e97e65SAl Viro 		if (copy > msg_data_left(msg))
118001e97e65SAl Viro 			copy = msg_data_left(msg);
11811da177e4SLinus Torvalds 
1182270a1c3dSDavid Howells 		if (zc == 0) {
11835640f768SEric Dumazet 			bool merge = true;
11841da177e4SLinus Torvalds 			int i = skb_shinfo(skb)->nr_frags;
11855640f768SEric Dumazet 			struct page_frag *pfrag = sk_page_frag(sk);
1186761965eaSEric Dumazet 
11875640f768SEric Dumazet 			if (!sk_page_frag_refill(sk, pfrag))
1188afb83012SSoheil Hassas Yeganeh 				goto wait_for_space;
1189761965eaSEric Dumazet 
11905640f768SEric Dumazet 			if (!skb_can_coalesce(skb, i, pfrag->page,
11915640f768SEric Dumazet 					      pfrag->offset)) {
1192a86a0661SEric Dumazet 				if (i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) {
11931da177e4SLinus Torvalds 					tcp_mark_push(tp, skb);
11941da177e4SLinus Torvalds 					goto new_segment;
11951da177e4SLinus Torvalds 				}
11965640f768SEric Dumazet 				merge = false;
11975640f768SEric Dumazet 			}
1198ef015786SHerbert Xu 
11995640f768SEric Dumazet 			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1200ef015786SHerbert Xu 
1201eb315a7dSPavel Begunkov 			if (unlikely(skb_zcopy_pure(skb) || skb_zcopy_managed(skb))) {
1202849b425cSEric Dumazet 				if (tcp_downgrade_zcopy_pure(sk, skb))
1203849b425cSEric Dumazet 					goto wait_for_space;
1204eb315a7dSPavel Begunkov 				skb_zcopy_downgrade_managed(skb);
1205eb315a7dSPavel Begunkov 			}
1206849b425cSEric Dumazet 
1207849b425cSEric Dumazet 			copy = tcp_wmem_schedule(sk, copy);
1208849b425cSEric Dumazet 			if (!copy)
1209afb83012SSoheil Hassas Yeganeh 				goto wait_for_space;
12101da177e4SLinus Torvalds 
121157be5bdaSAl Viro 			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
12125640f768SEric Dumazet 						       pfrag->page,
12135640f768SEric Dumazet 						       pfrag->offset,
12145640f768SEric Dumazet 						       copy);
12155640f768SEric Dumazet 			if (err)
12161da177e4SLinus Torvalds 				goto do_error;
12171da177e4SLinus Torvalds 
12181da177e4SLinus Torvalds 			/* Update the skb. */
12191da177e4SLinus Torvalds 			if (merge) {
12209e903e08SEric Dumazet 				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
12211da177e4SLinus Torvalds 			} else {
12225640f768SEric Dumazet 				skb_fill_page_desc(skb, i, pfrag->page,
12235640f768SEric Dumazet 						   pfrag->offset, copy);
12244e33e346SEric Dumazet 				page_ref_inc(pfrag->page);
12251da177e4SLinus Torvalds 			}
12265640f768SEric Dumazet 			pfrag->offset += copy;
1227270a1c3dSDavid Howells 		} else if (zc == MSG_ZEROCOPY)  {
12289b65b17dSTalal Ahmad 			/* First append to a fragless skb builds initial
12299b65b17dSTalal Ahmad 			 * pure zerocopy skb
12309b65b17dSTalal Ahmad 			 */
12319b65b17dSTalal Ahmad 			if (!skb->len)
12329b65b17dSTalal Ahmad 				skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
12339b65b17dSTalal Ahmad 
12349b65b17dSTalal Ahmad 			if (!skb_zcopy_pure(skb)) {
1235849b425cSEric Dumazet 				copy = tcp_wmem_schedule(sk, copy);
1236849b425cSEric Dumazet 				if (!copy)
1237358ed624STalal Ahmad 					goto wait_for_space;
12389b65b17dSTalal Ahmad 			}
1239358ed624STalal Ahmad 
1240f214f915SWillem de Bruijn 			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
1241111856c7SWillem de Bruijn 			if (err == -EMSGSIZE || err == -EEXIST) {
1242111856c7SWillem de Bruijn 				tcp_mark_push(tp, skb);
1243f214f915SWillem de Bruijn 				goto new_segment;
1244111856c7SWillem de Bruijn 			}
1245f214f915SWillem de Bruijn 			if (err < 0)
1246f214f915SWillem de Bruijn 				goto do_error;
1247f214f915SWillem de Bruijn 			copy = err;
1248270a1c3dSDavid Howells 		} else if (zc == MSG_SPLICE_PAGES) {
1249270a1c3dSDavid Howells 			/* Splice in data if we can; copy if we can't. */
1250270a1c3dSDavid Howells 			if (tcp_downgrade_zcopy_pure(sk, skb))
1251270a1c3dSDavid Howells 				goto wait_for_space;
1252270a1c3dSDavid Howells 			copy = tcp_wmem_schedule(sk, copy);
1253270a1c3dSDavid Howells 			if (!copy)
1254270a1c3dSDavid Howells 				goto wait_for_space;
1255270a1c3dSDavid Howells 
1256270a1c3dSDavid Howells 			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
1257270a1c3dSDavid Howells 						   sk->sk_allocation);
1258270a1c3dSDavid Howells 			if (err < 0) {
1259270a1c3dSDavid Howells 				if (err == -EMSGSIZE) {
1260270a1c3dSDavid Howells 					tcp_mark_push(tp, skb);
1261270a1c3dSDavid Howells 					goto new_segment;
1262270a1c3dSDavid Howells 				}
1263270a1c3dSDavid Howells 				goto do_error;
1264270a1c3dSDavid Howells 			}
1265270a1c3dSDavid Howells 			copy = err;
1266270a1c3dSDavid Howells 
1267270a1c3dSDavid Howells 			if (!(flags & MSG_NO_SHARED_FRAGS))
1268270a1c3dSDavid Howells 				skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
1269270a1c3dSDavid Howells 
1270270a1c3dSDavid Howells 			sk_wmem_queued_add(sk, copy);
1271270a1c3dSDavid Howells 			sk_mem_charge(sk, copy);
12721da177e4SLinus Torvalds 		}
12731da177e4SLinus Torvalds 
12741da177e4SLinus Torvalds 		if (!copied)
12754de075e0SEric Dumazet 			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
12761da177e4SLinus Torvalds 
12770f317464SEric Dumazet 		WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
12781da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq += copy;
1279cd7d8498SEric Dumazet 		tcp_skb_pcount_set(skb, 0);
12801da177e4SLinus Torvalds 
12811da177e4SLinus Torvalds 		copied += copy;
128201e97e65SAl Viro 		if (!msg_data_left(msg)) {
1283c134ecb8SMartin KaFai Lau 			if (unlikely(flags & MSG_EOR))
1284c134ecb8SMartin KaFai Lau 				TCP_SKB_CB(skb)->eor = 1;
12851da177e4SLinus Torvalds 			goto out;
12864ed2d765SWillem de Bruijn 		}
12871da177e4SLinus Torvalds 
128865ec6097SEric Dumazet 		if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
12891da177e4SLinus Torvalds 			continue;
12901da177e4SLinus Torvalds 
12911da177e4SLinus Torvalds 		if (forced_push(tp)) {
12921da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
12939e412ba7SIlpo Järvinen 			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1294fe067e8aSDavid S. Miller 		} else if (skb == tcp_send_head(sk))
12951da177e4SLinus Torvalds 			tcp_push_one(sk, mss_now);
12961da177e4SLinus Torvalds 		continue;
12971da177e4SLinus Torvalds 
1298afb83012SSoheil Hassas Yeganeh wait_for_space:
12991da177e4SLinus Torvalds 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
130072bf4f17SEric Dumazet 		tcp_remove_empty_skb(sk);
1301ec342325SAndrew Vagin 		if (copied)
1302f54b3111SEric Dumazet 			tcp_push(sk, flags & ~MSG_MORE, mss_now,
1303f54b3111SEric Dumazet 				 TCP_NAGLE_PUSH, size_goal);
13041da177e4SLinus Torvalds 
1305686a5624SYuvaraja Mariappan 		err = sk_stream_wait_memory(sk, &timeo);
1306686a5624SYuvaraja Mariappan 		if (err != 0)
13071da177e4SLinus Torvalds 			goto do_error;
13081da177e4SLinus Torvalds 
13090c54b85fSIlpo Järvinen 		mss_now = tcp_send_mss(sk, &size_goal, flags);
13101da177e4SLinus Torvalds 	}
13111da177e4SLinus Torvalds 
13121da177e4SLinus Torvalds out:
1313ad02c4f5SSoheil Hassas Yeganeh 	if (copied) {
13144e8cc228SEric Dumazet 		tcp_tx_timestamp(sk, sockc.tsflags);
1315f54b3111SEric Dumazet 		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1316ad02c4f5SSoheil Hassas Yeganeh 	}
13175924f17aSChristoph Paasch out_nopush:
1318a7533584SPavel Begunkov 	/* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
1319a7533584SPavel Begunkov 	if (uarg && !msg->msg_ubuf)
13208e044917SJonathan Lemon 		net_zcopy_put(uarg);
1321cf60af03SYuchung Cheng 	return copied + copied_syn;
13221da177e4SLinus Torvalds 
13231da177e4SLinus Torvalds do_error:
132427728ba8SEric Dumazet 	tcp_remove_empty_skb(sk);
1325fdfc5c85SEric Dumazet 
1326cf60af03SYuchung Cheng 	if (copied + copied_syn)
13271da177e4SLinus Torvalds 		goto out;
13281da177e4SLinus Torvalds out_err:
1329a7533584SPavel Begunkov 	/* msg->msg_ubuf is pinned by the caller so we don't take extra refs */
1330a7533584SPavel Begunkov 	if (uarg && !msg->msg_ubuf)
13318e044917SJonathan Lemon 		net_zcopy_put_abort(uarg, true);
13321da177e4SLinus Torvalds 	err = sk_stream_error(sk, flags, err);
1333ce5ec440SJason Baron 	/* make sure we wake any epoll edge trigger waiter */
1334216808c6SEric Dumazet 	if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
1335ce5ec440SJason Baron 		sk->sk_write_space(sk);
1336b0f71bd3SFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1337b0f71bd3SFrancis Yan 	}
13381da177e4SLinus Torvalds 	return err;
13391da177e4SLinus Torvalds }
1340774c4673SDavid S. Miller EXPORT_SYMBOL_GPL(tcp_sendmsg_locked);
1341306b13ebSTom Herbert 
1342306b13ebSTom Herbert int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1343306b13ebSTom Herbert {
1344306b13ebSTom Herbert 	int ret;
1345306b13ebSTom Herbert 
1346306b13ebSTom Herbert 	lock_sock(sk);
1347306b13ebSTom Herbert 	ret = tcp_sendmsg_locked(sk, msg, size);
1348306b13ebSTom Herbert 	release_sock(sk);
1349306b13ebSTom Herbert 
1350306b13ebSTom Herbert 	return ret;
1351306b13ebSTom Herbert }
13524bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_sendmsg);
13531da177e4SLinus Torvalds 
13541d7e4538SDavid Howells void tcp_splice_eof(struct socket *sock)
13551d7e4538SDavid Howells {
13561d7e4538SDavid Howells 	struct sock *sk = sock->sk;
13571d7e4538SDavid Howells 	struct tcp_sock *tp = tcp_sk(sk);
13581d7e4538SDavid Howells 	int mss_now, size_goal;
13591d7e4538SDavid Howells 
13601d7e4538SDavid Howells 	if (!tcp_write_queue_tail(sk))
13611d7e4538SDavid Howells 		return;
13621d7e4538SDavid Howells 
13631d7e4538SDavid Howells 	lock_sock(sk);
13641d7e4538SDavid Howells 	mss_now = tcp_send_mss(sk, &size_goal, 0);
13651d7e4538SDavid Howells 	tcp_push(sk, 0, mss_now, tp->nonagle, size_goal);
13661d7e4538SDavid Howells 	release_sock(sk);
13671d7e4538SDavid Howells }
13681d7e4538SDavid Howells EXPORT_SYMBOL_GPL(tcp_splice_eof);
13691d7e4538SDavid Howells 
13701da177e4SLinus Torvalds /*
13711da177e4SLinus Torvalds  *	Handle reading urgent data. BSD has very simple semantics for
13721da177e4SLinus Torvalds  *	this, no blocking and very strange errors 8)
13731da177e4SLinus Torvalds  */
13741da177e4SLinus Torvalds 
1375377f0a08SRami Rosen static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
13761da177e4SLinus Torvalds {
13771da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
13781da177e4SLinus Torvalds 
13791da177e4SLinus Torvalds 	/* No URG data to read. */
13801da177e4SLinus Torvalds 	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
13811da177e4SLinus Torvalds 	    tp->urg_data == TCP_URG_READ)
13821da177e4SLinus Torvalds 		return -EINVAL;	/* Yes this is right ! */
13831da177e4SLinus Torvalds 
13841da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
13851da177e4SLinus Torvalds 		return -ENOTCONN;
13861da177e4SLinus Torvalds 
13871da177e4SLinus Torvalds 	if (tp->urg_data & TCP_URG_VALID) {
13881da177e4SLinus Torvalds 		int err = 0;
13891da177e4SLinus Torvalds 		char c = tp->urg_data;
13901da177e4SLinus Torvalds 
13911da177e4SLinus Torvalds 		if (!(flags & MSG_PEEK))
13927b6a893aSEric Dumazet 			WRITE_ONCE(tp->urg_data, TCP_URG_READ);
13931da177e4SLinus Torvalds 
13941da177e4SLinus Torvalds 		/* Read urgent data. */
13951da177e4SLinus Torvalds 		msg->msg_flags |= MSG_OOB;
13961da177e4SLinus Torvalds 
13971da177e4SLinus Torvalds 		if (len > 0) {
13981da177e4SLinus Torvalds 			if (!(flags & MSG_TRUNC))
13997eab8d9eSAl Viro 				err = memcpy_to_msg(msg, &c, 1);
14001da177e4SLinus Torvalds 			len = 1;
14011da177e4SLinus Torvalds 		} else
14021da177e4SLinus Torvalds 			msg->msg_flags |= MSG_TRUNC;
14031da177e4SLinus Torvalds 
14041da177e4SLinus Torvalds 		return err ? -EFAULT : len;
14051da177e4SLinus Torvalds 	}
14061da177e4SLinus Torvalds 
14071da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
14081da177e4SLinus Torvalds 		return 0;
14091da177e4SLinus Torvalds 
14101da177e4SLinus Torvalds 	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
14111da177e4SLinus Torvalds 	 * the available implementations agree in this case:
14121da177e4SLinus Torvalds 	 * this call should never block, independent of the
14131da177e4SLinus Torvalds 	 * blocking state of the socket.
14141da177e4SLinus Torvalds 	 * Mike <pall@rz.uni-karlsruhe.de>
14151da177e4SLinus Torvalds 	 */
14161da177e4SLinus Torvalds 	return -EAGAIN;
14171da177e4SLinus Torvalds }
14181da177e4SLinus Torvalds 
1419c0e88ff0SPavel Emelyanov static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1420c0e88ff0SPavel Emelyanov {
1421c0e88ff0SPavel Emelyanov 	struct sk_buff *skb;
1422c0e88ff0SPavel Emelyanov 	int copied = 0, err = 0;
1423c0e88ff0SPavel Emelyanov 
142475c119afSEric Dumazet 	skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
142575c119afSEric Dumazet 		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
142675c119afSEric Dumazet 		if (err)
142775c119afSEric Dumazet 			return err;
142875c119afSEric Dumazet 		copied += skb->len;
142975c119afSEric Dumazet 	}
143075c119afSEric Dumazet 
1431c0e88ff0SPavel Emelyanov 	skb_queue_walk(&sk->sk_write_queue, skb) {
143251f3d02bSDavid S. Miller 		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1433c0e88ff0SPavel Emelyanov 		if (err)
1434c0e88ff0SPavel Emelyanov 			break;
1435c0e88ff0SPavel Emelyanov 
1436c0e88ff0SPavel Emelyanov 		copied += skb->len;
1437c0e88ff0SPavel Emelyanov 	}
1438c0e88ff0SPavel Emelyanov 
1439c0e88ff0SPavel Emelyanov 	return err ?: copied;
1440c0e88ff0SPavel Emelyanov }
1441c0e88ff0SPavel Emelyanov 
14421da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user,
14431da177e4SLinus Torvalds  * then send an ACK if necessary.  COPIED is the number of bytes
14441da177e4SLinus Torvalds  * tcp_recvmsg has given to the user so far, it speeds up the
14451da177e4SLinus Torvalds  * calculation of whether or not we must ACK for the sake of
14461da177e4SLinus Torvalds  * a window update.
14471da177e4SLinus Torvalds  */
1448e5c6de5fSJohn Fastabend void __tcp_cleanup_rbuf(struct sock *sk, int copied)
14491da177e4SLinus Torvalds {
14501da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1451a2a385d6SEric Dumazet 	bool time_to_ack = false;
14521da177e4SLinus Torvalds 
1453463c84b9SArnaldo Carvalho de Melo 	if (inet_csk_ack_scheduled(sk)) {
1454463c84b9SArnaldo Carvalho de Melo 		const struct inet_connection_sock *icsk = inet_csk(sk);
1455b6b6d653SEric Dumazet 
1456b6b6d653SEric Dumazet 		if (/* Once-per-two-segments ACK was not sent by tcp_input.c */
1457463c84b9SArnaldo Carvalho de Melo 		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
14581da177e4SLinus Torvalds 		    /*
14591da177e4SLinus Torvalds 		     * If this read emptied read buffer, we send ACK, if
14601da177e4SLinus Torvalds 		     * connection is not bidirectional, user drained
14611da177e4SLinus Torvalds 		     * receive buffer and there was a small segment
14621da177e4SLinus Torvalds 		     * in queue.
14631da177e4SLinus Torvalds 		     */
14641ef9696cSAlexey Kuznetsov 		    (copied > 0 &&
14651ef9696cSAlexey Kuznetsov 		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
14661ef9696cSAlexey Kuznetsov 		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
146731954cd8SWei Wang 		       !inet_csk_in_pingpong_mode(sk))) &&
14681ef9696cSAlexey Kuznetsov 		      !atomic_read(&sk->sk_rmem_alloc)))
1469a2a385d6SEric Dumazet 			time_to_ack = true;
14701da177e4SLinus Torvalds 	}
14711da177e4SLinus Torvalds 
14721da177e4SLinus Torvalds 	/* We send an ACK if we can now advertise a non-zero window
14731da177e4SLinus Torvalds 	 * which has been raised "significantly".
14741da177e4SLinus Torvalds 	 *
14751da177e4SLinus Torvalds 	 * Even if window raised up to infinity, do not send window open ACK
14761da177e4SLinus Torvalds 	 * in states, where we will not receive more. It is useless.
14771da177e4SLinus Torvalds 	 */
14781da177e4SLinus Torvalds 	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
14791da177e4SLinus Torvalds 		__u32 rcv_window_now = tcp_receive_window(tp);
14801da177e4SLinus Torvalds 
14811da177e4SLinus Torvalds 		/* Optimize, __tcp_select_window() is not cheap. */
14821da177e4SLinus Torvalds 		if (2*rcv_window_now <= tp->window_clamp) {
14831da177e4SLinus Torvalds 			__u32 new_window = __tcp_select_window(sk);
14841da177e4SLinus Torvalds 
14851da177e4SLinus Torvalds 			/* Send ACK now, if this read freed lots of space
14861da177e4SLinus Torvalds 			 * in our buffer. Certainly, new_window is new window.
14871da177e4SLinus Torvalds 			 * We can advertise it now, if it is not less than current one.
14881da177e4SLinus Torvalds 			 * "Lots" means "at least twice" here.
14891da177e4SLinus Torvalds 			 */
14901da177e4SLinus Torvalds 			if (new_window && new_window >= 2 * rcv_window_now)
1491a2a385d6SEric Dumazet 				time_to_ack = true;
14921da177e4SLinus Torvalds 		}
14931da177e4SLinus Torvalds 	}
14941da177e4SLinus Torvalds 	if (time_to_ack)
14951da177e4SLinus Torvalds 		tcp_send_ack(sk);
14961da177e4SLinus Torvalds }
14971da177e4SLinus Torvalds 
1498c457985aSCong Wang void tcp_cleanup_rbuf(struct sock *sk, int copied)
1499c457985aSCong Wang {
1500c457985aSCong Wang 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1501c457985aSCong Wang 	struct tcp_sock *tp = tcp_sk(sk);
1502c457985aSCong Wang 
1503c457985aSCong Wang 	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1504c457985aSCong Wang 	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1505c457985aSCong Wang 	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1506c457985aSCong Wang 	__tcp_cleanup_rbuf(sk, copied);
1507c457985aSCong Wang }
1508c457985aSCong Wang 
15093df684c1SEric Dumazet static void tcp_eat_recv_skb(struct sock *sk, struct sk_buff *skb)
15103df684c1SEric Dumazet {
1511f35f8219SEric Dumazet 	__skb_unlink(skb, &sk->sk_receive_queue);
15123df684c1SEric Dumazet 	if (likely(skb->destructor == sock_rfree)) {
15133df684c1SEric Dumazet 		sock_rfree(skb);
15143df684c1SEric Dumazet 		skb->destructor = NULL;
15153df684c1SEric Dumazet 		skb->sk = NULL;
151668822bdfSEric Dumazet 		return skb_attempt_defer_free(skb);
1517f35f8219SEric Dumazet 	}
1518f35f8219SEric Dumazet 	__kfree_skb(skb);
15193df684c1SEric Dumazet }
15203df684c1SEric Dumazet 
15213f92a64eSJakub Kicinski struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
15221da177e4SLinus Torvalds {
15231da177e4SLinus Torvalds 	struct sk_buff *skb;
15241da177e4SLinus Torvalds 	u32 offset;
15251da177e4SLinus Torvalds 
1526f26845b4SEric Dumazet 	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
15271da177e4SLinus Torvalds 		offset = seq - TCP_SKB_CB(skb)->seq;
15289d691539SEric Dumazet 		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
15299d691539SEric Dumazet 			pr_err_once("%s: found a SYN, please report !\n", __func__);
15301da177e4SLinus Torvalds 			offset--;
15319d691539SEric Dumazet 		}
1532e11ecddfSEric Dumazet 		if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
15331da177e4SLinus Torvalds 			*off = offset;
15341da177e4SLinus Torvalds 			return skb;
15351da177e4SLinus Torvalds 		}
1536f26845b4SEric Dumazet 		/* This looks weird, but this can happen if TCP collapsing
1537f26845b4SEric Dumazet 		 * splitted a fat GRO packet, while we released socket lock
1538f26845b4SEric Dumazet 		 * in skb_splice_bits()
1539f26845b4SEric Dumazet 		 */
15403df684c1SEric Dumazet 		tcp_eat_recv_skb(sk, skb);
15411da177e4SLinus Torvalds 	}
15421da177e4SLinus Torvalds 	return NULL;
15431da177e4SLinus Torvalds }
15443f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_recv_skb);
15451da177e4SLinus Torvalds 
15461da177e4SLinus Torvalds /*
15471da177e4SLinus Torvalds  * This routine provides an alternative to tcp_recvmsg() for routines
15481da177e4SLinus Torvalds  * that would like to handle copying from skbuffs directly in 'sendfile'
15491da177e4SLinus Torvalds  * fashion.
15501da177e4SLinus Torvalds  * Note:
15511da177e4SLinus Torvalds  *	- It is assumed that the socket was locked by the caller.
15521da177e4SLinus Torvalds  *	- The routine does not block.
15531da177e4SLinus Torvalds  *	- At present, there is no support for reading OOB data
15541da177e4SLinus Torvalds  *	  or for 'peeking' the socket using this routine
15551da177e4SLinus Torvalds  *	  (although both would be easy to implement).
15561da177e4SLinus Torvalds  */
15571da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
15581da177e4SLinus Torvalds 		  sk_read_actor_t recv_actor)
15591da177e4SLinus Torvalds {
15601da177e4SLinus Torvalds 	struct sk_buff *skb;
15611da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
15621da177e4SLinus Torvalds 	u32 seq = tp->copied_seq;
15631da177e4SLinus Torvalds 	u32 offset;
15641da177e4SLinus Torvalds 	int copied = 0;
15651da177e4SLinus Torvalds 
15661da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
15671da177e4SLinus Torvalds 		return -ENOTCONN;
15681da177e4SLinus Torvalds 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
15691da177e4SLinus Torvalds 		if (offset < skb->len) {
1570374e7b59SOctavian Purdila 			int used;
1571374e7b59SOctavian Purdila 			size_t len;
15721da177e4SLinus Torvalds 
15731da177e4SLinus Torvalds 			len = skb->len - offset;
15741da177e4SLinus Torvalds 			/* Stop reading if we hit a patch of urgent data */
1575b96c51bdSEric Dumazet 			if (unlikely(tp->urg_data)) {
15761da177e4SLinus Torvalds 				u32 urg_offset = tp->urg_seq - seq;
15771da177e4SLinus Torvalds 				if (urg_offset < len)
15781da177e4SLinus Torvalds 					len = urg_offset;
15791da177e4SLinus Torvalds 				if (!len)
15801da177e4SLinus Torvalds 					break;
15811da177e4SLinus Torvalds 			}
15821da177e4SLinus Torvalds 			used = recv_actor(desc, skb, offset, len);
1583ff905b1eSEric Dumazet 			if (used <= 0) {
1584ddb61a57SJens Axboe 				if (!copied)
1585ddb61a57SJens Axboe 					copied = used;
1586ddb61a57SJens Axboe 				break;
1587e3d5ea2cSEric Dumazet 			}
1588e3d5ea2cSEric Dumazet 			if (WARN_ON_ONCE(used > len))
1589e3d5ea2cSEric Dumazet 				used = len;
15901da177e4SLinus Torvalds 			seq += used;
15911da177e4SLinus Torvalds 			copied += used;
15921da177e4SLinus Torvalds 			offset += used;
1593e3d5ea2cSEric Dumazet 
159402275a2eSWilly Tarreau 			/* If recv_actor drops the lock (e.g. TCP splice
1595293ad604SOctavian Purdila 			 * receive) the skb pointer might be invalid when
1596293ad604SOctavian Purdila 			 * getting here: tcp_collapse might have deleted it
1597293ad604SOctavian Purdila 			 * while aggregating skbs from the socket queue.
1598293ad604SOctavian Purdila 			 */
1599293ad604SOctavian Purdila 			skb = tcp_recv_skb(sk, seq - 1, &offset);
160002275a2eSWilly Tarreau 			if (!skb)
16011da177e4SLinus Torvalds 				break;
160202275a2eSWilly Tarreau 			/* TCP coalescing might have appended data to the skb.
160302275a2eSWilly Tarreau 			 * Try to splice more frags
160402275a2eSWilly Tarreau 			 */
160502275a2eSWilly Tarreau 			if (offset + 1 != skb->len)
160602275a2eSWilly Tarreau 				continue;
16071da177e4SLinus Torvalds 		}
1608e11ecddfSEric Dumazet 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
16093df684c1SEric Dumazet 			tcp_eat_recv_skb(sk, skb);
16101da177e4SLinus Torvalds 			++seq;
16111da177e4SLinus Torvalds 			break;
16121da177e4SLinus Torvalds 		}
16133df684c1SEric Dumazet 		tcp_eat_recv_skb(sk, skb);
16141da177e4SLinus Torvalds 		if (!desc->count)
16151da177e4SLinus Torvalds 			break;
16167db48e98SEric Dumazet 		WRITE_ONCE(tp->copied_seq, seq);
16171da177e4SLinus Torvalds 	}
16187db48e98SEric Dumazet 	WRITE_ONCE(tp->copied_seq, seq);
16191da177e4SLinus Torvalds 
16201da177e4SLinus Torvalds 	tcp_rcv_space_adjust(sk);
16211da177e4SLinus Torvalds 
16221da177e4SLinus Torvalds 	/* Clean up data we have read: This will do ACK frames. */
1623f26845b4SEric Dumazet 	if (copied > 0) {
1624f26845b4SEric Dumazet 		tcp_recv_skb(sk, seq, &offset);
16250e4b4992SChris Leech 		tcp_cleanup_rbuf(sk, copied);
1626f26845b4SEric Dumazet 	}
16271da177e4SLinus Torvalds 	return copied;
16281da177e4SLinus Torvalds }
16294bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_read_sock);
16301da177e4SLinus Torvalds 
1631965b57b4SCong Wang int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
163204919bedSCong Wang {
163304919bedSCong Wang 	struct sk_buff *skb;
163404919bedSCong Wang 	int copied = 0;
163504919bedSCong Wang 
163604919bedSCong Wang 	if (sk->sk_state == TCP_LISTEN)
163704919bedSCong Wang 		return -ENOTCONN;
163804919bedSCong Wang 
16399b7177b1SJohn Fastabend 	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1640db4192a7SCong Wang 		u8 tcp_flags;
1641db4192a7SCong Wang 		int used;
164204919bedSCong Wang 
164304919bedSCong Wang 		__skb_unlink(skb, &sk->sk_receive_queue);
164496628951SPeilin Ye 		WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
1645db4192a7SCong Wang 		tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
1646db4192a7SCong Wang 		used = recv_actor(sk, skb);
1647db4192a7SCong Wang 		if (used < 0) {
1648db4192a7SCong Wang 			if (!copied)
1649db4192a7SCong Wang 				copied = used;
1650db4192a7SCong Wang 			break;
1651db4192a7SCong Wang 		}
1652db4192a7SCong Wang 		copied += used;
1653db4192a7SCong Wang 
16549b7177b1SJohn Fastabend 		if (tcp_flags & TCPHDR_FIN)
1655db4192a7SCong Wang 			break;
1656db4192a7SCong Wang 	}
165704919bedSCong Wang 	return copied;
165804919bedSCong Wang }
165904919bedSCong Wang EXPORT_SYMBOL(tcp_read_skb);
166004919bedSCong Wang 
16613f92a64eSJakub Kicinski void tcp_read_done(struct sock *sk, size_t len)
16623f92a64eSJakub Kicinski {
16633f92a64eSJakub Kicinski 	struct tcp_sock *tp = tcp_sk(sk);
16643f92a64eSJakub Kicinski 	u32 seq = tp->copied_seq;
16653f92a64eSJakub Kicinski 	struct sk_buff *skb;
16663f92a64eSJakub Kicinski 	size_t left;
16673f92a64eSJakub Kicinski 	u32 offset;
16683f92a64eSJakub Kicinski 
16693f92a64eSJakub Kicinski 	if (sk->sk_state == TCP_LISTEN)
16703f92a64eSJakub Kicinski 		return;
16713f92a64eSJakub Kicinski 
16723f92a64eSJakub Kicinski 	left = len;
16733f92a64eSJakub Kicinski 	while (left && (skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
16743f92a64eSJakub Kicinski 		int used;
16753f92a64eSJakub Kicinski 
16763f92a64eSJakub Kicinski 		used = min_t(size_t, skb->len - offset, left);
16773f92a64eSJakub Kicinski 		seq += used;
16783f92a64eSJakub Kicinski 		left -= used;
16793f92a64eSJakub Kicinski 
16803f92a64eSJakub Kicinski 		if (skb->len > offset + used)
16813f92a64eSJakub Kicinski 			break;
16823f92a64eSJakub Kicinski 
16833f92a64eSJakub Kicinski 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
16843f92a64eSJakub Kicinski 			tcp_eat_recv_skb(sk, skb);
16853f92a64eSJakub Kicinski 			++seq;
16863f92a64eSJakub Kicinski 			break;
16873f92a64eSJakub Kicinski 		}
16883f92a64eSJakub Kicinski 		tcp_eat_recv_skb(sk, skb);
16893f92a64eSJakub Kicinski 	}
16903f92a64eSJakub Kicinski 	WRITE_ONCE(tp->copied_seq, seq);
16913f92a64eSJakub Kicinski 
16923f92a64eSJakub Kicinski 	tcp_rcv_space_adjust(sk);
16933f92a64eSJakub Kicinski 
16943f92a64eSJakub Kicinski 	/* Clean up data we have read: This will do ACK frames. */
16953f92a64eSJakub Kicinski 	if (left != len)
16963f92a64eSJakub Kicinski 		tcp_cleanup_rbuf(sk, len - left);
16973f92a64eSJakub Kicinski }
16983f92a64eSJakub Kicinski EXPORT_SYMBOL(tcp_read_done);
16993f92a64eSJakub Kicinski 
170032035585STom Herbert int tcp_peek_len(struct socket *sock)
170132035585STom Herbert {
170232035585STom Herbert 	return tcp_inq(sock->sk);
170332035585STom Herbert }
170432035585STom Herbert EXPORT_SYMBOL(tcp_peek_len);
170532035585STom Herbert 
1706d1361840SEric Dumazet /* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
1707d1361840SEric Dumazet int tcp_set_rcvlowat(struct sock *sk, int val)
1708d1361840SEric Dumazet {
1709dfa2f048SEric Dumazet 	int space, cap;
1710867f816bSSoheil Hassas Yeganeh 
1711867f816bSSoheil Hassas Yeganeh 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1712867f816bSSoheil Hassas Yeganeh 		cap = sk->sk_rcvbuf >> 1;
1713867f816bSSoheil Hassas Yeganeh 	else
171402739545SKuniyuki Iwashima 		cap = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]) >> 1;
1715867f816bSSoheil Hassas Yeganeh 	val = min(val, cap);
1716eac66402SEric Dumazet 	WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
171703f45c88SEric Dumazet 
171803f45c88SEric Dumazet 	/* Check if we need to signal EPOLLIN right now */
171903f45c88SEric Dumazet 	tcp_data_ready(sk);
172003f45c88SEric Dumazet 
1721d1361840SEric Dumazet 	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1722d1361840SEric Dumazet 		return 0;
1723d1361840SEric Dumazet 
1724dfa2f048SEric Dumazet 	space = tcp_space_from_win(sk, val);
1725dfa2f048SEric Dumazet 	if (space > sk->sk_rcvbuf) {
1726dfa2f048SEric Dumazet 		WRITE_ONCE(sk->sk_rcvbuf, space);
1727f410cbeaSEric Dumazet 		WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
1728d1361840SEric Dumazet 	}
1729d1361840SEric Dumazet 	return 0;
1730d1361840SEric Dumazet }
1731d1361840SEric Dumazet EXPORT_SYMBOL(tcp_set_rcvlowat);
1732d1361840SEric Dumazet 
1733892bfd3dSFlorian Westphal void tcp_update_recv_tstamps(struct sk_buff *skb,
17347eeba170SArjun Roy 			     struct scm_timestamping_internal *tss)
17357eeba170SArjun Roy {
17367eeba170SArjun Roy 	if (skb->tstamp)
17377eeba170SArjun Roy 		tss->ts[0] = ktime_to_timespec64(skb->tstamp);
17387eeba170SArjun Roy 	else
17397eeba170SArjun Roy 		tss->ts[0] = (struct timespec64) {0};
17407eeba170SArjun Roy 
17417eeba170SArjun Roy 	if (skb_hwtstamps(skb)->hwtstamp)
17427eeba170SArjun Roy 		tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp);
17437eeba170SArjun Roy 	else
17447eeba170SArjun Roy 		tss->ts[2] = (struct timespec64) {0};
17457eeba170SArjun Roy }
17467eeba170SArjun Roy 
174705255b82SEric Dumazet #ifdef CONFIG_MMU
1748350f6bbcSMatthew Wilcox (Oracle) static const struct vm_operations_struct tcp_vm_ops = {
174905255b82SEric Dumazet };
175005255b82SEric Dumazet 
175193ab6cc6SEric Dumazet int tcp_mmap(struct file *file, struct socket *sock,
175293ab6cc6SEric Dumazet 	     struct vm_area_struct *vma)
175393ab6cc6SEric Dumazet {
175405255b82SEric Dumazet 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
175505255b82SEric Dumazet 		return -EPERM;
17561c71222eSSuren Baghdasaryan 	vm_flags_clear(vma, VM_MAYWRITE | VM_MAYEXEC);
175705255b82SEric Dumazet 
17583e4e28c5SMichel Lespinasse 	/* Instruct vm_insert_page() to not mmap_read_lock(mm) */
17591c71222eSSuren Baghdasaryan 	vm_flags_set(vma, VM_MIXEDMAP);
176005255b82SEric Dumazet 
176105255b82SEric Dumazet 	vma->vm_ops = &tcp_vm_ops;
176205255b82SEric Dumazet 	return 0;
176305255b82SEric Dumazet }
176405255b82SEric Dumazet EXPORT_SYMBOL(tcp_mmap);
176505255b82SEric Dumazet 
17667fba5309SArjun Roy static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
17677fba5309SArjun Roy 				       u32 *offset_frag)
17687fba5309SArjun Roy {
17697fba5309SArjun Roy 	skb_frag_t *frag;
17707fba5309SArjun Roy 
177170701b83SArjun Roy 	if (unlikely(offset_skb >= skb->len))
177270701b83SArjun Roy 		return NULL;
177370701b83SArjun Roy 
17747fba5309SArjun Roy 	offset_skb -= skb_headlen(skb);
17757fba5309SArjun Roy 	if ((int)offset_skb < 0 || skb_has_frag_list(skb))
17767fba5309SArjun Roy 		return NULL;
17777fba5309SArjun Roy 
17787fba5309SArjun Roy 	frag = skb_shinfo(skb)->frags;
17797fba5309SArjun Roy 	while (offset_skb) {
17807fba5309SArjun Roy 		if (skb_frag_size(frag) > offset_skb) {
17817fba5309SArjun Roy 			*offset_frag = offset_skb;
17827fba5309SArjun Roy 			return frag;
17837fba5309SArjun Roy 		}
17847fba5309SArjun Roy 		offset_skb -= skb_frag_size(frag);
17857fba5309SArjun Roy 		++frag;
17867fba5309SArjun Roy 	}
17877fba5309SArjun Roy 	*offset_frag = 0;
17887fba5309SArjun Roy 	return frag;
17897fba5309SArjun Roy }
17907fba5309SArjun Roy 
179198917cf0SArjun Roy static bool can_map_frag(const skb_frag_t *frag)
179298917cf0SArjun Roy {
1793577e4432SEric Dumazet 	struct page *page;
1794577e4432SEric Dumazet 
1795577e4432SEric Dumazet 	if (skb_frag_size(frag) != PAGE_SIZE || skb_frag_off(frag))
1796577e4432SEric Dumazet 		return false;
1797577e4432SEric Dumazet 
1798577e4432SEric Dumazet 	page = skb_frag_page(frag);
1799577e4432SEric Dumazet 
1800577e4432SEric Dumazet 	if (PageCompound(page) || page->mapping)
1801577e4432SEric Dumazet 		return false;
1802577e4432SEric Dumazet 
1803577e4432SEric Dumazet 	return true;
180498917cf0SArjun Roy }
180598917cf0SArjun Roy 
180698917cf0SArjun Roy static int find_next_mappable_frag(const skb_frag_t *frag,
180798917cf0SArjun Roy 				   int remaining_in_skb)
180898917cf0SArjun Roy {
180998917cf0SArjun Roy 	int offset = 0;
181098917cf0SArjun Roy 
181198917cf0SArjun Roy 	if (likely(can_map_frag(frag)))
181298917cf0SArjun Roy 		return 0;
181398917cf0SArjun Roy 
181498917cf0SArjun Roy 	while (offset < remaining_in_skb && !can_map_frag(frag)) {
181598917cf0SArjun Roy 		offset += skb_frag_size(frag);
181698917cf0SArjun Roy 		++frag;
181798917cf0SArjun Roy 	}
181898917cf0SArjun Roy 	return offset;
181998917cf0SArjun Roy }
182098917cf0SArjun Roy 
18210c3936d3SArjun Roy static void tcp_zerocopy_set_hint_for_skb(struct sock *sk,
18220c3936d3SArjun Roy 					  struct tcp_zerocopy_receive *zc,
18230c3936d3SArjun Roy 					  struct sk_buff *skb, u32 offset)
18240c3936d3SArjun Roy {
18250c3936d3SArjun Roy 	u32 frag_offset, partial_frag_remainder = 0;
18260c3936d3SArjun Roy 	int mappable_offset;
18270c3936d3SArjun Roy 	skb_frag_t *frag;
18280c3936d3SArjun Roy 
18290c3936d3SArjun Roy 	/* worst case: skip to next skb. try to improve on this case below */
18300c3936d3SArjun Roy 	zc->recv_skip_hint = skb->len - offset;
18310c3936d3SArjun Roy 
18320c3936d3SArjun Roy 	/* Find the frag containing this offset (and how far into that frag) */
18330c3936d3SArjun Roy 	frag = skb_advance_to_frag(skb, offset, &frag_offset);
18340c3936d3SArjun Roy 	if (!frag)
18350c3936d3SArjun Roy 		return;
18360c3936d3SArjun Roy 
18370c3936d3SArjun Roy 	if (frag_offset) {
18380c3936d3SArjun Roy 		struct skb_shared_info *info = skb_shinfo(skb);
18390c3936d3SArjun Roy 
18400c3936d3SArjun Roy 		/* We read part of the last frag, must recvmsg() rest of skb. */
18410c3936d3SArjun Roy 		if (frag == &info->frags[info->nr_frags - 1])
18420c3936d3SArjun Roy 			return;
18430c3936d3SArjun Roy 
18440c3936d3SArjun Roy 		/* Else, we must at least read the remainder in this frag. */
18450c3936d3SArjun Roy 		partial_frag_remainder = skb_frag_size(frag) - frag_offset;
18460c3936d3SArjun Roy 		zc->recv_skip_hint -= partial_frag_remainder;
18470c3936d3SArjun Roy 		++frag;
18480c3936d3SArjun Roy 	}
18490c3936d3SArjun Roy 
18500c3936d3SArjun Roy 	/* partial_frag_remainder: If part way through a frag, must read rest.
18510c3936d3SArjun Roy 	 * mappable_offset: Bytes till next mappable frag, *not* counting bytes
18520c3936d3SArjun Roy 	 * in partial_frag_remainder.
18530c3936d3SArjun Roy 	 */
18540c3936d3SArjun Roy 	mappable_offset = find_next_mappable_frag(frag, zc->recv_skip_hint);
18550c3936d3SArjun Roy 	zc->recv_skip_hint = mappable_offset + partial_frag_remainder;
18560c3936d3SArjun Roy }
18570c3936d3SArjun Roy 
1858f21a3c48SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
1859ec095263SOliver Hartkopp 			      int flags, struct scm_timestamping_internal *tss,
1860f21a3c48SArjun Roy 			      int *cmsg_flags);
1861f21a3c48SArjun Roy static int receive_fallback_to_copy(struct sock *sk,
18627eeba170SArjun Roy 				    struct tcp_zerocopy_receive *zc, int inq,
18637eeba170SArjun Roy 				    struct scm_timestamping_internal *tss)
1864f21a3c48SArjun Roy {
1865f21a3c48SArjun Roy 	unsigned long copy_address = (unsigned long)zc->copybuf_address;
1866f21a3c48SArjun Roy 	struct msghdr msg = {};
18677eeba170SArjun Roy 	int err;
1868f21a3c48SArjun Roy 
1869f21a3c48SArjun Roy 	zc->length = 0;
1870f21a3c48SArjun Roy 	zc->recv_skip_hint = 0;
1871f21a3c48SArjun Roy 
1872f21a3c48SArjun Roy 	if (copy_address != zc->copybuf_address)
1873f21a3c48SArjun Roy 		return -EINVAL;
1874f21a3c48SArjun Roy 
18759fd7874cSJens Axboe 	err = import_ubuf(ITER_DEST, (void __user *)copy_address, inq,
18769fd7874cSJens Axboe 			  &msg.msg_iter);
1877f21a3c48SArjun Roy 	if (err)
1878f21a3c48SArjun Roy 		return err;
1879f21a3c48SArjun Roy 
1880ec095263SOliver Hartkopp 	err = tcp_recvmsg_locked(sk, &msg, inq, MSG_DONTWAIT,
18817eeba170SArjun Roy 				 tss, &zc->msg_flags);
1882f21a3c48SArjun Roy 	if (err < 0)
1883f21a3c48SArjun Roy 		return err;
1884f21a3c48SArjun Roy 
1885f21a3c48SArjun Roy 	zc->copybuf_len = err;
18860c3936d3SArjun Roy 	if (likely(zc->copybuf_len)) {
18870c3936d3SArjun Roy 		struct sk_buff *skb;
18880c3936d3SArjun Roy 		u32 offset;
18890c3936d3SArjun Roy 
18900c3936d3SArjun Roy 		skb = tcp_recv_skb(sk, tcp_sk(sk)->copied_seq, &offset);
18910c3936d3SArjun Roy 		if (skb)
18920c3936d3SArjun Roy 			tcp_zerocopy_set_hint_for_skb(sk, zc, skb, offset);
18930c3936d3SArjun Roy 	}
1894f21a3c48SArjun Roy 	return 0;
1895f21a3c48SArjun Roy }
1896f21a3c48SArjun Roy 
189718fb76edSArjun Roy static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
189818fb76edSArjun Roy 				   struct sk_buff *skb, u32 copylen,
189918fb76edSArjun Roy 				   u32 *offset, u32 *seq)
190018fb76edSArjun Roy {
190118fb76edSArjun Roy 	unsigned long copy_address = (unsigned long)zc->copybuf_address;
190218fb76edSArjun Roy 	struct msghdr msg = {};
190318fb76edSArjun Roy 	int err;
190418fb76edSArjun Roy 
190518fb76edSArjun Roy 	if (copy_address != zc->copybuf_address)
190618fb76edSArjun Roy 		return -EINVAL;
190718fb76edSArjun Roy 
19089fd7874cSJens Axboe 	err = import_ubuf(ITER_DEST, (void __user *)copy_address, copylen,
19099fd7874cSJens Axboe 			  &msg.msg_iter);
191018fb76edSArjun Roy 	if (err)
191118fb76edSArjun Roy 		return err;
191218fb76edSArjun Roy 	err = skb_copy_datagram_msg(skb, *offset, &msg, copylen);
191318fb76edSArjun Roy 	if (err)
191418fb76edSArjun Roy 		return err;
191518fb76edSArjun Roy 	zc->recv_skip_hint -= copylen;
191618fb76edSArjun Roy 	*offset += copylen;
191718fb76edSArjun Roy 	*seq += copylen;
191818fb76edSArjun Roy 	return (__s32)copylen;
191918fb76edSArjun Roy }
192018fb76edSArjun Roy 
19217eeba170SArjun Roy static int tcp_zc_handle_leftover(struct tcp_zerocopy_receive *zc,
192218fb76edSArjun Roy 				  struct sock *sk,
192318fb76edSArjun Roy 				  struct sk_buff *skb,
192418fb76edSArjun Roy 				  u32 *seq,
19257eeba170SArjun Roy 				  s32 copybuf_len,
19267eeba170SArjun Roy 				  struct scm_timestamping_internal *tss)
192718fb76edSArjun Roy {
192818fb76edSArjun Roy 	u32 offset, copylen = min_t(u32, copybuf_len, zc->recv_skip_hint);
192918fb76edSArjun Roy 
193018fb76edSArjun Roy 	if (!copylen)
193118fb76edSArjun Roy 		return 0;
193218fb76edSArjun Roy 	/* skb is null if inq < PAGE_SIZE. */
19337eeba170SArjun Roy 	if (skb) {
193418fb76edSArjun Roy 		offset = *seq - TCP_SKB_CB(skb)->seq;
19357eeba170SArjun Roy 	} else {
193618fb76edSArjun Roy 		skb = tcp_recv_skb(sk, *seq, &offset);
19377eeba170SArjun Roy 		if (TCP_SKB_CB(skb)->has_rxtstamp) {
19387eeba170SArjun Roy 			tcp_update_recv_tstamps(skb, tss);
19397eeba170SArjun Roy 			zc->msg_flags |= TCP_CMSG_TS;
19407eeba170SArjun Roy 		}
19417eeba170SArjun Roy 	}
194218fb76edSArjun Roy 
194318fb76edSArjun Roy 	zc->copybuf_len = tcp_copy_straggler_data(zc, skb, copylen, &offset,
194418fb76edSArjun Roy 						  seq);
194518fb76edSArjun Roy 	return zc->copybuf_len < 0 ? 0 : copylen;
194618fb76edSArjun Roy }
194718fb76edSArjun Roy 
194894ab9eb9SArjun Roy static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
194994ab9eb9SArjun Roy 					      struct page **pending_pages,
195094ab9eb9SArjun Roy 					      unsigned long pages_remaining,
195194ab9eb9SArjun Roy 					      unsigned long *address,
195294ab9eb9SArjun Roy 					      u32 *length,
195394ab9eb9SArjun Roy 					      u32 *seq,
195494ab9eb9SArjun Roy 					      struct tcp_zerocopy_receive *zc,
195594ab9eb9SArjun Roy 					      u32 total_bytes_to_map,
195694ab9eb9SArjun Roy 					      int err)
195794ab9eb9SArjun Roy {
195894ab9eb9SArjun Roy 	/* At least one page did not map. Try zapping if we skipped earlier. */
195994ab9eb9SArjun Roy 	if (err == -EBUSY &&
196094ab9eb9SArjun Roy 	    zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT) {
196194ab9eb9SArjun Roy 		u32 maybe_zap_len;
196294ab9eb9SArjun Roy 
196394ab9eb9SArjun Roy 		maybe_zap_len = total_bytes_to_map -  /* All bytes to map */
196494ab9eb9SArjun Roy 				*length + /* Mapped or pending */
196594ab9eb9SArjun Roy 				(pages_remaining * PAGE_SIZE); /* Failed map. */
1966e9adcfecSMike Kravetz 		zap_page_range_single(vma, *address, maybe_zap_len, NULL);
196794ab9eb9SArjun Roy 		err = 0;
196894ab9eb9SArjun Roy 	}
196994ab9eb9SArjun Roy 
197094ab9eb9SArjun Roy 	if (!err) {
197194ab9eb9SArjun Roy 		unsigned long leftover_pages = pages_remaining;
197294ab9eb9SArjun Roy 		int bytes_mapped;
197394ab9eb9SArjun Roy 
1974e9adcfecSMike Kravetz 		/* We called zap_page_range_single, try to reinsert. */
197594ab9eb9SArjun Roy 		err = vm_insert_pages(vma, *address,
197694ab9eb9SArjun Roy 				      pending_pages,
197794ab9eb9SArjun Roy 				      &pages_remaining);
197894ab9eb9SArjun Roy 		bytes_mapped = PAGE_SIZE * (leftover_pages - pages_remaining);
197994ab9eb9SArjun Roy 		*seq += bytes_mapped;
198094ab9eb9SArjun Roy 		*address += bytes_mapped;
198194ab9eb9SArjun Roy 	}
198294ab9eb9SArjun Roy 	if (err) {
198394ab9eb9SArjun Roy 		/* Either we were unable to zap, OR we zapped, retried an
198494ab9eb9SArjun Roy 		 * insert, and still had an issue. Either ways, pages_remaining
198594ab9eb9SArjun Roy 		 * is the number of pages we were unable to map, and we unroll
198694ab9eb9SArjun Roy 		 * some state we speculatively touched before.
198794ab9eb9SArjun Roy 		 */
198894ab9eb9SArjun Roy 		const int bytes_not_mapped = PAGE_SIZE * pages_remaining;
198994ab9eb9SArjun Roy 
199094ab9eb9SArjun Roy 		*length -= bytes_not_mapped;
199194ab9eb9SArjun Roy 		zc->recv_skip_hint += bytes_not_mapped;
199294ab9eb9SArjun Roy 	}
199394ab9eb9SArjun Roy 	return err;
199494ab9eb9SArjun Roy }
199594ab9eb9SArjun Roy 
19963763a24cSArjun Roy static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma,
19973763a24cSArjun Roy 					struct page **pages,
199894ab9eb9SArjun Roy 					unsigned int pages_to_map,
199994ab9eb9SArjun Roy 					unsigned long *address,
200094ab9eb9SArjun Roy 					u32 *length,
20013763a24cSArjun Roy 					u32 *seq,
200294ab9eb9SArjun Roy 					struct tcp_zerocopy_receive *zc,
200394ab9eb9SArjun Roy 					u32 total_bytes_to_map)
20043763a24cSArjun Roy {
20053763a24cSArjun Roy 	unsigned long pages_remaining = pages_to_map;
200694ab9eb9SArjun Roy 	unsigned int pages_mapped;
200794ab9eb9SArjun Roy 	unsigned int bytes_mapped;
200894ab9eb9SArjun Roy 	int err;
20093763a24cSArjun Roy 
201094ab9eb9SArjun Roy 	err = vm_insert_pages(vma, *address, pages, &pages_remaining);
201194ab9eb9SArjun Roy 	pages_mapped = pages_to_map - (unsigned int)pages_remaining;
201294ab9eb9SArjun Roy 	bytes_mapped = PAGE_SIZE * pages_mapped;
20133763a24cSArjun Roy 	/* Even if vm_insert_pages fails, it may have partially succeeded in
20143763a24cSArjun Roy 	 * mapping (some but not all of the pages).
20153763a24cSArjun Roy 	 */
20163763a24cSArjun Roy 	*seq += bytes_mapped;
201794ab9eb9SArjun Roy 	*address += bytes_mapped;
201894ab9eb9SArjun Roy 
201994ab9eb9SArjun Roy 	if (likely(!err))
202094ab9eb9SArjun Roy 		return 0;
202194ab9eb9SArjun Roy 
202294ab9eb9SArjun Roy 	/* Error: maybe zap and retry + rollback state for failed inserts. */
202394ab9eb9SArjun Roy 	return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped,
202494ab9eb9SArjun Roy 		pages_remaining, address, length, seq, zc, total_bytes_to_map,
202594ab9eb9SArjun Roy 		err);
20263763a24cSArjun Roy }
20273763a24cSArjun Roy 
20283c5a2fd0SArjun Roy #define TCP_VALID_ZC_MSG_FLAGS   (TCP_CMSG_TS)
20297eeba170SArjun Roy static void tcp_zc_finalize_rx_tstamp(struct sock *sk,
20307eeba170SArjun Roy 				      struct tcp_zerocopy_receive *zc,
20317eeba170SArjun Roy 				      struct scm_timestamping_internal *tss)
20327eeba170SArjun Roy {
20337eeba170SArjun Roy 	unsigned long msg_control_addr;
20347eeba170SArjun Roy 	struct msghdr cmsg_dummy;
20357eeba170SArjun Roy 
20367eeba170SArjun Roy 	msg_control_addr = (unsigned long)zc->msg_control;
2037c39ef213SKevin Brodsky 	cmsg_dummy.msg_control_user = (void __user *)msg_control_addr;
20387eeba170SArjun Roy 	cmsg_dummy.msg_controllen =
20397eeba170SArjun Roy 		(__kernel_size_t)zc->msg_controllen;
20407eeba170SArjun Roy 	cmsg_dummy.msg_flags = in_compat_syscall()
20417eeba170SArjun Roy 		? MSG_CMSG_COMPAT : 0;
2042a6f8ee58SArjun Roy 	cmsg_dummy.msg_control_is_user = true;
20437eeba170SArjun Roy 	zc->msg_flags = 0;
20447eeba170SArjun Roy 	if (zc->msg_control == msg_control_addr &&
20457eeba170SArjun Roy 	    zc->msg_controllen == cmsg_dummy.msg_controllen) {
20467eeba170SArjun Roy 		tcp_recv_timestamp(&cmsg_dummy, sk, tss);
20477eeba170SArjun Roy 		zc->msg_control = (__u64)
2048c39ef213SKevin Brodsky 			((uintptr_t)cmsg_dummy.msg_control_user);
20497eeba170SArjun Roy 		zc->msg_controllen =
20507eeba170SArjun Roy 			(__u64)cmsg_dummy.msg_controllen;
20517eeba170SArjun Roy 		zc->msg_flags = (__u32)cmsg_dummy.msg_flags;
20527eeba170SArjun Roy 	}
20537eeba170SArjun Roy }
20547eeba170SArjun Roy 
20557a7f0946SArjun Roy static struct vm_area_struct *find_tcp_vma(struct mm_struct *mm,
20567a7f0946SArjun Roy 					   unsigned long address,
20577a7f0946SArjun Roy 					   bool *mmap_locked)
20587a7f0946SArjun Roy {
2059350f6bbcSMatthew Wilcox (Oracle) 	struct vm_area_struct *vma = lock_vma_under_rcu(mm, address);
20607a7f0946SArjun Roy 
20617a7f0946SArjun Roy 	if (vma) {
2062350f6bbcSMatthew Wilcox (Oracle) 		if (vma->vm_ops != &tcp_vm_ops) {
20637a7f0946SArjun Roy 			vma_end_read(vma);
20647a7f0946SArjun Roy 			return NULL;
20657a7f0946SArjun Roy 		}
20667a7f0946SArjun Roy 		*mmap_locked = false;
20677a7f0946SArjun Roy 		return vma;
20687a7f0946SArjun Roy 	}
20697a7f0946SArjun Roy 
20707a7f0946SArjun Roy 	mmap_read_lock(mm);
20717a7f0946SArjun Roy 	vma = vma_lookup(mm, address);
2072350f6bbcSMatthew Wilcox (Oracle) 	if (!vma || vma->vm_ops != &tcp_vm_ops) {
20737a7f0946SArjun Roy 		mmap_read_unlock(mm);
20747a7f0946SArjun Roy 		return NULL;
20757a7f0946SArjun Roy 	}
20767a7f0946SArjun Roy 	*mmap_locked = true;
20777a7f0946SArjun Roy 	return vma;
20787a7f0946SArjun Roy }
20797a7f0946SArjun Roy 
208094ab9eb9SArjun Roy #define TCP_ZEROCOPY_PAGE_BATCH_SIZE 32
208105255b82SEric Dumazet static int tcp_zerocopy_receive(struct sock *sk,
20827eeba170SArjun Roy 				struct tcp_zerocopy_receive *zc,
20837eeba170SArjun Roy 				struct scm_timestamping_internal *tss)
208405255b82SEric Dumazet {
208594ab9eb9SArjun Roy 	u32 length = 0, offset, vma_len, avail_len, copylen = 0;
208605255b82SEric Dumazet 	unsigned long address = (unsigned long)zc->address;
208794ab9eb9SArjun Roy 	struct page *pages[TCP_ZEROCOPY_PAGE_BATCH_SIZE];
208818fb76edSArjun Roy 	s32 copybuf_len = zc->copybuf_len;
208918fb76edSArjun Roy 	struct tcp_sock *tp = tcp_sk(sk);
209005255b82SEric Dumazet 	const skb_frag_t *frags = NULL;
209194ab9eb9SArjun Roy 	unsigned int pages_to_map = 0;
209205255b82SEric Dumazet 	struct vm_area_struct *vma;
209305255b82SEric Dumazet 	struct sk_buff *skb = NULL;
209418fb76edSArjun Roy 	u32 seq = tp->copied_seq;
209594ab9eb9SArjun Roy 	u32 total_bytes_to_map;
209618fb76edSArjun Roy 	int inq = tcp_inq(sk);
20977a7f0946SArjun Roy 	bool mmap_locked;
209893ab6cc6SEric Dumazet 	int ret;
209993ab6cc6SEric Dumazet 
210018fb76edSArjun Roy 	zc->copybuf_len = 0;
21017eeba170SArjun Roy 	zc->msg_flags = 0;
210218fb76edSArjun Roy 
210305255b82SEric Dumazet 	if (address & (PAGE_SIZE - 1) || address != zc->address)
210493ab6cc6SEric Dumazet 		return -EINVAL;
210593ab6cc6SEric Dumazet 
210693ab6cc6SEric Dumazet 	if (sk->sk_state == TCP_LISTEN)
210705255b82SEric Dumazet 		return -ENOTCONN;
210893ab6cc6SEric Dumazet 
210993ab6cc6SEric Dumazet 	sock_rps_record_flow(sk);
211093ab6cc6SEric Dumazet 
2111f21a3c48SArjun Roy 	if (inq && inq <= copybuf_len)
21127eeba170SArjun Roy 		return receive_fallback_to_copy(sk, zc, inq, tss);
2113f21a3c48SArjun Roy 
2114936ced41SArjun Roy 	if (inq < PAGE_SIZE) {
2115936ced41SArjun Roy 		zc->length = 0;
2116936ced41SArjun Roy 		zc->recv_skip_hint = inq;
2117936ced41SArjun Roy 		if (!inq && sock_flag(sk, SOCK_DONE))
2118936ced41SArjun Roy 			return -EIO;
2119936ced41SArjun Roy 		return 0;
2120936ced41SArjun Roy 	}
2121936ced41SArjun Roy 
21227a7f0946SArjun Roy 	vma = find_tcp_vma(current->mm, address, &mmap_locked);
21237a7f0946SArjun Roy 	if (!vma)
2124e776af60SEric Dumazet 		return -EINVAL;
21257a7f0946SArjun Roy 
212618fb76edSArjun Roy 	vma_len = min_t(unsigned long, zc->length, vma->vm_end - address);
212718fb76edSArjun Roy 	avail_len = min_t(u32, vma_len, inq);
212894ab9eb9SArjun Roy 	total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
212994ab9eb9SArjun Roy 	if (total_bytes_to_map) {
213094ab9eb9SArjun Roy 		if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
2131e9adcfecSMike Kravetz 			zap_page_range_single(vma, address, total_bytes_to_map,
2132e9adcfecSMike Kravetz 					      NULL);
213394ab9eb9SArjun Roy 		zc->length = total_bytes_to_map;
213405255b82SEric Dumazet 		zc->recv_skip_hint = 0;
21358f2b0293SSoheil Hassas Yeganeh 	} else {
213618fb76edSArjun Roy 		zc->length = avail_len;
213718fb76edSArjun Roy 		zc->recv_skip_hint = avail_len;
21388f2b0293SSoheil Hassas Yeganeh 	}
213905255b82SEric Dumazet 	ret = 0;
214005255b82SEric Dumazet 	while (length + PAGE_SIZE <= zc->length) {
214198917cf0SArjun Roy 		int mappable_offset;
214294ab9eb9SArjun Roy 		struct page *page;
214398917cf0SArjun Roy 
214405255b82SEric Dumazet 		if (zc->recv_skip_hint < PAGE_SIZE) {
21457fba5309SArjun Roy 			u32 offset_frag;
21467fba5309SArjun Roy 
214705255b82SEric Dumazet 			if (skb) {
21480e627190SArjun Roy 				if (zc->recv_skip_hint > 0)
21490e627190SArjun Roy 					break;
215005255b82SEric Dumazet 				skb = skb->next;
215105255b82SEric Dumazet 				offset = seq - TCP_SKB_CB(skb)->seq;
215205255b82SEric Dumazet 			} else {
215393ab6cc6SEric Dumazet 				skb = tcp_recv_skb(sk, seq, &offset);
215405255b82SEric Dumazet 			}
21557eeba170SArjun Roy 
21567eeba170SArjun Roy 			if (TCP_SKB_CB(skb)->has_rxtstamp) {
21577eeba170SArjun Roy 				tcp_update_recv_tstamps(skb, tss);
21587eeba170SArjun Roy 				zc->msg_flags |= TCP_CMSG_TS;
21597eeba170SArjun Roy 			}
216005255b82SEric Dumazet 			zc->recv_skip_hint = skb->len - offset;
21617fba5309SArjun Roy 			frags = skb_advance_to_frag(skb, offset, &offset_frag);
21627fba5309SArjun Roy 			if (!frags || offset_frag)
216305255b82SEric Dumazet 				break;
216405255b82SEric Dumazet 		}
2165789762ceSSoheil Hassas Yeganeh 
216698917cf0SArjun Roy 		mappable_offset = find_next_mappable_frag(frags,
216798917cf0SArjun Roy 							  zc->recv_skip_hint);
216898917cf0SArjun Roy 		if (mappable_offset) {
216998917cf0SArjun Roy 			zc->recv_skip_hint = mappable_offset;
217005255b82SEric Dumazet 			break;
2171789762ceSSoheil Hassas Yeganeh 		}
217294ab9eb9SArjun Roy 		page = skb_frag_page(frags);
217394ab9eb9SArjun Roy 		prefetchw(page);
217494ab9eb9SArjun Roy 		pages[pages_to_map++] = page;
217505255b82SEric Dumazet 		length += PAGE_SIZE;
217605255b82SEric Dumazet 		zc->recv_skip_hint -= PAGE_SIZE;
217705255b82SEric Dumazet 		frags++;
217894ab9eb9SArjun Roy 		if (pages_to_map == TCP_ZEROCOPY_PAGE_BATCH_SIZE ||
217994ab9eb9SArjun Roy 		    zc->recv_skip_hint < PAGE_SIZE) {
218094ab9eb9SArjun Roy 			/* Either full batch, or we're about to go to next skb
218194ab9eb9SArjun Roy 			 * (and we cannot unroll failed ops across skbs).
218294ab9eb9SArjun Roy 			 */
218394ab9eb9SArjun Roy 			ret = tcp_zerocopy_vm_insert_batch(vma, pages,
218494ab9eb9SArjun Roy 							   pages_to_map,
218594ab9eb9SArjun Roy 							   &address, &length,
218694ab9eb9SArjun Roy 							   &seq, zc,
218794ab9eb9SArjun Roy 							   total_bytes_to_map);
21883763a24cSArjun Roy 			if (ret)
21893763a24cSArjun Roy 				goto out;
219094ab9eb9SArjun Roy 			pages_to_map = 0;
21913763a24cSArjun Roy 		}
21923763a24cSArjun Roy 	}
219394ab9eb9SArjun Roy 	if (pages_to_map) {
219494ab9eb9SArjun Roy 		ret = tcp_zerocopy_vm_insert_batch(vma, pages, pages_to_map,
219594ab9eb9SArjun Roy 						   &address, &length, &seq,
219694ab9eb9SArjun Roy 						   zc, total_bytes_to_map);
219793ab6cc6SEric Dumazet 	}
219805255b82SEric Dumazet out:
21997a7f0946SArjun Roy 	if (mmap_locked)
2200d8ed45c5SMichel Lespinasse 		mmap_read_unlock(current->mm);
22017a7f0946SArjun Roy 	else
22027a7f0946SArjun Roy 		vma_end_read(vma);
220318fb76edSArjun Roy 	/* Try to copy straggler data. */
220418fb76edSArjun Roy 	if (!ret)
22057eeba170SArjun Roy 		copylen = tcp_zc_handle_leftover(zc, sk, skb, &seq, copybuf_len, tss);
220618fb76edSArjun Roy 
220718fb76edSArjun Roy 	if (length + copylen) {
22087db48e98SEric Dumazet 		WRITE_ONCE(tp->copied_seq, seq);
220993ab6cc6SEric Dumazet 		tcp_rcv_space_adjust(sk);
221093ab6cc6SEric Dumazet 
221193ab6cc6SEric Dumazet 		/* Clean up data we have read: This will do ACK frames. */
221293ab6cc6SEric Dumazet 		tcp_recv_skb(sk, seq, &offset);
221318fb76edSArjun Roy 		tcp_cleanup_rbuf(sk, length + copylen);
221493ab6cc6SEric Dumazet 		ret = 0;
221505255b82SEric Dumazet 		if (length == zc->length)
221605255b82SEric Dumazet 			zc->recv_skip_hint = 0;
221705255b82SEric Dumazet 	} else {
221805255b82SEric Dumazet 		if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE))
221905255b82SEric Dumazet 			ret = -EIO;
222005255b82SEric Dumazet 	}
222105255b82SEric Dumazet 	zc->length = length;
222293ab6cc6SEric Dumazet 	return ret;
222393ab6cc6SEric Dumazet }
222405255b82SEric Dumazet #endif
222593ab6cc6SEric Dumazet 
222698aaa913SMike Maloney /* Similar to __sock_recv_timestamp, but does not require an skb */
2227892bfd3dSFlorian Westphal void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
22289718475eSDeepa Dinamani 			struct scm_timestamping_internal *tss)
222998aaa913SMike Maloney {
2230887feae3SDeepa Dinamani 	int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
223198aaa913SMike Maloney 	bool has_timestamping = false;
223298aaa913SMike Maloney 
223398aaa913SMike Maloney 	if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
223498aaa913SMike Maloney 		if (sock_flag(sk, SOCK_RCVTSTAMP)) {
223598aaa913SMike Maloney 			if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
2236887feae3SDeepa Dinamani 				if (new_tstamp) {
2237df1b4ba9SArnd Bergmann 					struct __kernel_timespec kts = {
2238df1b4ba9SArnd Bergmann 						.tv_sec = tss->ts[0].tv_sec,
2239df1b4ba9SArnd Bergmann 						.tv_nsec = tss->ts[0].tv_nsec,
2240df1b4ba9SArnd Bergmann 					};
2241887feae3SDeepa Dinamani 					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
2242887feae3SDeepa Dinamani 						 sizeof(kts), &kts);
2243887feae3SDeepa Dinamani 				} else {
2244df1b4ba9SArnd Bergmann 					struct __kernel_old_timespec ts_old = {
2245df1b4ba9SArnd Bergmann 						.tv_sec = tss->ts[0].tv_sec,
2246df1b4ba9SArnd Bergmann 						.tv_nsec = tss->ts[0].tv_nsec,
2247df1b4ba9SArnd Bergmann 					};
22487f1bc6e9SDeepa Dinamani 					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
22499718475eSDeepa Dinamani 						 sizeof(ts_old), &ts_old);
2250887feae3SDeepa Dinamani 				}
225198aaa913SMike Maloney 			} else {
2252887feae3SDeepa Dinamani 				if (new_tstamp) {
2253df1b4ba9SArnd Bergmann 					struct __kernel_sock_timeval stv = {
2254df1b4ba9SArnd Bergmann 						.tv_sec = tss->ts[0].tv_sec,
2255df1b4ba9SArnd Bergmann 						.tv_usec = tss->ts[0].tv_nsec / 1000,
2256df1b4ba9SArnd Bergmann 					};
2257887feae3SDeepa Dinamani 					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
2258887feae3SDeepa Dinamani 						 sizeof(stv), &stv);
2259887feae3SDeepa Dinamani 				} else {
2260df1b4ba9SArnd Bergmann 					struct __kernel_old_timeval tv = {
2261df1b4ba9SArnd Bergmann 						.tv_sec = tss->ts[0].tv_sec,
2262df1b4ba9SArnd Bergmann 						.tv_usec = tss->ts[0].tv_nsec / 1000,
2263df1b4ba9SArnd Bergmann 					};
22647f1bc6e9SDeepa Dinamani 					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
226598aaa913SMike Maloney 						 sizeof(tv), &tv);
226698aaa913SMike Maloney 				}
226798aaa913SMike Maloney 			}
2268887feae3SDeepa Dinamani 		}
226998aaa913SMike Maloney 
2270e3390b30SEric Dumazet 		if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_SOFTWARE)
227198aaa913SMike Maloney 			has_timestamping = true;
227298aaa913SMike Maloney 		else
22739718475eSDeepa Dinamani 			tss->ts[0] = (struct timespec64) {0};
227498aaa913SMike Maloney 	}
227598aaa913SMike Maloney 
227698aaa913SMike Maloney 	if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
2277e3390b30SEric Dumazet 		if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_RAW_HARDWARE)
227898aaa913SMike Maloney 			has_timestamping = true;
227998aaa913SMike Maloney 		else
22809718475eSDeepa Dinamani 			tss->ts[2] = (struct timespec64) {0};
228198aaa913SMike Maloney 	}
228298aaa913SMike Maloney 
228398aaa913SMike Maloney 	if (has_timestamping) {
22849718475eSDeepa Dinamani 		tss->ts[1] = (struct timespec64) {0};
22859718475eSDeepa Dinamani 		if (sock_flag(sk, SOCK_TSTAMP_NEW))
22869718475eSDeepa Dinamani 			put_cmsg_scm_timestamping64(msg, tss);
22879718475eSDeepa Dinamani 		else
22889718475eSDeepa Dinamani 			put_cmsg_scm_timestamping(msg, tss);
228998aaa913SMike Maloney 	}
229098aaa913SMike Maloney }
229198aaa913SMike Maloney 
2292b75eba76SSoheil Hassas Yeganeh static int tcp_inq_hint(struct sock *sk)
2293b75eba76SSoheil Hassas Yeganeh {
2294b75eba76SSoheil Hassas Yeganeh 	const struct tcp_sock *tp = tcp_sk(sk);
2295b75eba76SSoheil Hassas Yeganeh 	u32 copied_seq = READ_ONCE(tp->copied_seq);
2296b75eba76SSoheil Hassas Yeganeh 	u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);
2297b75eba76SSoheil Hassas Yeganeh 	int inq;
2298b75eba76SSoheil Hassas Yeganeh 
2299b75eba76SSoheil Hassas Yeganeh 	inq = rcv_nxt - copied_seq;
2300b75eba76SSoheil Hassas Yeganeh 	if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) {
2301b75eba76SSoheil Hassas Yeganeh 		lock_sock(sk);
2302b75eba76SSoheil Hassas Yeganeh 		inq = tp->rcv_nxt - tp->copied_seq;
2303b75eba76SSoheil Hassas Yeganeh 		release_sock(sk);
2304b75eba76SSoheil Hassas Yeganeh 	}
23056466e715SSoheil Hassas Yeganeh 	/* After receiving a FIN, tell the user-space to continue reading
23066466e715SSoheil Hassas Yeganeh 	 * by returning a non-zero inq.
23076466e715SSoheil Hassas Yeganeh 	 */
23086466e715SSoheil Hassas Yeganeh 	if (inq == 0 && sock_flag(sk, SOCK_DONE))
23096466e715SSoheil Hassas Yeganeh 		inq = 1;
2310b75eba76SSoheil Hassas Yeganeh 	return inq;
2311b75eba76SSoheil Hassas Yeganeh }
2312b75eba76SSoheil Hassas Yeganeh 
23131da177e4SLinus Torvalds /*
23141da177e4SLinus Torvalds  *	This routine copies from a sock struct into the user buffer.
23151da177e4SLinus Torvalds  *
23161da177e4SLinus Torvalds  *	Technical note: in 2.3 we work on _locked_ socket, so that
23171da177e4SLinus Torvalds  *	tricks with *seq access order and skb->users are not required.
23181da177e4SLinus Torvalds  *	Probably, code can be easily improved even more.
23191da177e4SLinus Torvalds  */
23201da177e4SLinus Torvalds 
23212cd81161SArjun Roy static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
2322ec095263SOliver Hartkopp 			      int flags, struct scm_timestamping_internal *tss,
23232cd81161SArjun Roy 			      int *cmsg_flags)
23241da177e4SLinus Torvalds {
23251da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
23261da177e4SLinus Torvalds 	int copied = 0;
23271da177e4SLinus Torvalds 	u32 peek_seq;
23281da177e4SLinus Torvalds 	u32 *seq;
23291da177e4SLinus Torvalds 	unsigned long used;
23302cd81161SArjun Roy 	int err;
23311da177e4SLinus Torvalds 	int target;		/* Read at least this many bytes */
23321da177e4SLinus Torvalds 	long timeo;
2333dfbafc99SSabrina Dubroca 	struct sk_buff *skb, *last;
233405ea4916SJon Maloy 	u32 peek_offset = 0;
233577527313SIlpo Järvinen 	u32 urg_hole = 0;
23361da177e4SLinus Torvalds 
23371da177e4SLinus Torvalds 	err = -ENOTCONN;
23381da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
23391da177e4SLinus Torvalds 		goto out;
23401da177e4SLinus Torvalds 
2341f94fd25cSJens Axboe 	if (tp->recvmsg_inq) {
2342925bba24SArjun Roy 		*cmsg_flags = TCP_CMSG_INQ;
2343f94fd25cSJens Axboe 		msg->msg_get_inq = 1;
2344f94fd25cSJens Axboe 	}
2345ec095263SOliver Hartkopp 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
23461da177e4SLinus Torvalds 
23471da177e4SLinus Torvalds 	/* Urgent data needs to be handled specially. */
23481da177e4SLinus Torvalds 	if (flags & MSG_OOB)
23491da177e4SLinus Torvalds 		goto recv_urg;
23501da177e4SLinus Torvalds 
2351c0e88ff0SPavel Emelyanov 	if (unlikely(tp->repair)) {
2352c0e88ff0SPavel Emelyanov 		err = -EPERM;
2353c0e88ff0SPavel Emelyanov 		if (!(flags & MSG_PEEK))
2354c0e88ff0SPavel Emelyanov 			goto out;
2355c0e88ff0SPavel Emelyanov 
2356c0e88ff0SPavel Emelyanov 		if (tp->repair_queue == TCP_SEND_QUEUE)
2357c0e88ff0SPavel Emelyanov 			goto recv_sndq;
2358c0e88ff0SPavel Emelyanov 
2359c0e88ff0SPavel Emelyanov 		err = -EINVAL;
2360c0e88ff0SPavel Emelyanov 		if (tp->repair_queue == TCP_NO_QUEUE)
2361c0e88ff0SPavel Emelyanov 			goto out;
2362c0e88ff0SPavel Emelyanov 
2363c0e88ff0SPavel Emelyanov 		/* 'common' recv queue MSG_PEEK-ing */
2364c0e88ff0SPavel Emelyanov 	}
2365c0e88ff0SPavel Emelyanov 
23661da177e4SLinus Torvalds 	seq = &tp->copied_seq;
23671da177e4SLinus Torvalds 	if (flags & MSG_PEEK) {
236805ea4916SJon Maloy 		peek_offset = max(sk_peek_offset(sk, flags), 0);
236905ea4916SJon Maloy 		peek_seq = tp->copied_seq + peek_offset;
23701da177e4SLinus Torvalds 		seq = &peek_seq;
23711da177e4SLinus Torvalds 	}
23721da177e4SLinus Torvalds 
23731da177e4SLinus Torvalds 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
23741da177e4SLinus Torvalds 
23751da177e4SLinus Torvalds 	do {
23761da177e4SLinus Torvalds 		u32 offset;
23771da177e4SLinus Torvalds 
23781da177e4SLinus Torvalds 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
2379b96c51bdSEric Dumazet 		if (unlikely(tp->urg_data) && tp->urg_seq == *seq) {
23801da177e4SLinus Torvalds 			if (copied)
23811da177e4SLinus Torvalds 				break;
23821da177e4SLinus Torvalds 			if (signal_pending(current)) {
23831da177e4SLinus Torvalds 				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
23841da177e4SLinus Torvalds 				break;
23851da177e4SLinus Torvalds 			}
23861da177e4SLinus Torvalds 		}
23871da177e4SLinus Torvalds 
23881da177e4SLinus Torvalds 		/* Next get a buffer. */
23891da177e4SLinus Torvalds 
2390dfbafc99SSabrina Dubroca 		last = skb_peek_tail(&sk->sk_receive_queue);
239191521944SDavid S. Miller 		skb_queue_walk(&sk->sk_receive_queue, skb) {
2392dfbafc99SSabrina Dubroca 			last = skb;
23931da177e4SLinus Torvalds 			/* Now that we have two receive queues this
23941da177e4SLinus Torvalds 			 * shouldn't happen.
23951da177e4SLinus Torvalds 			 */
2396d792c100SIlpo Järvinen 			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
2397e56b8ce3SRandy Dunlap 				 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
23982af6fd8bSJoe Perches 				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
2399d792c100SIlpo Järvinen 				 flags))
24001da177e4SLinus Torvalds 				break;
2401d792c100SIlpo Järvinen 
24021da177e4SLinus Torvalds 			offset = *seq - TCP_SKB_CB(skb)->seq;
24039d691539SEric Dumazet 			if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
24049d691539SEric Dumazet 				pr_err_once("%s: found a SYN, please report !\n", __func__);
24051da177e4SLinus Torvalds 				offset--;
24069d691539SEric Dumazet 			}
24071da177e4SLinus Torvalds 			if (offset < skb->len)
24081da177e4SLinus Torvalds 				goto found_ok_skb;
2409e11ecddfSEric Dumazet 			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
24101da177e4SLinus Torvalds 				goto found_fin_ok;
24112af6fd8bSJoe Perches 			WARN(!(flags & MSG_PEEK),
2412e56b8ce3SRandy Dunlap 			     "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
24132af6fd8bSJoe Perches 			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
241491521944SDavid S. Miller 		}
24151da177e4SLinus Torvalds 
24161da177e4SLinus Torvalds 		/* Well, if we have backlog, try to process it now yet. */
24171da177e4SLinus Torvalds 
24189ed498c6SEric Dumazet 		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
24191da177e4SLinus Torvalds 			break;
24201da177e4SLinus Torvalds 
24211da177e4SLinus Torvalds 		if (copied) {
24228bd172b7SEric Dumazet 			if (!timeo ||
24238bd172b7SEric Dumazet 			    sk->sk_err ||
24241da177e4SLinus Torvalds 			    sk->sk_state == TCP_CLOSE ||
24251da177e4SLinus Torvalds 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2426518a09efSDavid S. Miller 			    signal_pending(current))
24271da177e4SLinus Torvalds 				break;
24281da177e4SLinus Torvalds 		} else {
24291da177e4SLinus Torvalds 			if (sock_flag(sk, SOCK_DONE))
24301da177e4SLinus Torvalds 				break;
24311da177e4SLinus Torvalds 
24321da177e4SLinus Torvalds 			if (sk->sk_err) {
24331da177e4SLinus Torvalds 				copied = sock_error(sk);
24341da177e4SLinus Torvalds 				break;
24351da177e4SLinus Torvalds 			}
24361da177e4SLinus Torvalds 
24371da177e4SLinus Torvalds 			if (sk->sk_shutdown & RCV_SHUTDOWN)
24381da177e4SLinus Torvalds 				break;
24391da177e4SLinus Torvalds 
24401da177e4SLinus Torvalds 			if (sk->sk_state == TCP_CLOSE) {
24411da177e4SLinus Torvalds 				/* This occurs when user tries to read
24421da177e4SLinus Torvalds 				 * from never connected socket.
24431da177e4SLinus Torvalds 				 */
24441da177e4SLinus Torvalds 				copied = -ENOTCONN;
24451da177e4SLinus Torvalds 				break;
24461da177e4SLinus Torvalds 			}
24471da177e4SLinus Torvalds 
24481da177e4SLinus Torvalds 			if (!timeo) {
24491da177e4SLinus Torvalds 				copied = -EAGAIN;
24501da177e4SLinus Torvalds 				break;
24511da177e4SLinus Torvalds 			}
24521da177e4SLinus Torvalds 
24531da177e4SLinus Torvalds 			if (signal_pending(current)) {
24541da177e4SLinus Torvalds 				copied = sock_intr_errno(timeo);
24551da177e4SLinus Torvalds 				break;
24561da177e4SLinus Torvalds 			}
24571da177e4SLinus Torvalds 		}
24581da177e4SLinus Torvalds 
24591da177e4SLinus Torvalds 		if (copied >= target) {
24601da177e4SLinus Torvalds 			/* Do not sleep, just process backlog. */
246193afcfd1SEric Dumazet 			__sk_flush_backlog(sk);
2462dfbafc99SSabrina Dubroca 		} else {
246329fbc26eSEric Dumazet 			tcp_cleanup_rbuf(sk, copied);
2464419ce133SPaolo Abeni 			err = sk_wait_data(sk, &timeo, last);
2465419ce133SPaolo Abeni 			if (err < 0) {
2466419ce133SPaolo Abeni 				err = copied ? : err;
2467419ce133SPaolo Abeni 				goto out;
2468419ce133SPaolo Abeni 			}
2469dfbafc99SSabrina Dubroca 		}
24701da177e4SLinus Torvalds 
247177527313SIlpo Järvinen 		if ((flags & MSG_PEEK) &&
247205ea4916SJon Maloy 		    (peek_seq - peek_offset - copied - urg_hole != tp->copied_seq)) {
2473e87cc472SJoe Perches 			net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
2474e87cc472SJoe Perches 					    current->comm,
2475e87cc472SJoe Perches 					    task_pid_nr(current));
247605ea4916SJon Maloy 			peek_seq = tp->copied_seq + peek_offset;
24771da177e4SLinus Torvalds 		}
24781da177e4SLinus Torvalds 		continue;
24791da177e4SLinus Torvalds 
24801da177e4SLinus Torvalds found_ok_skb:
24811da177e4SLinus Torvalds 		/* Ok so how much can we use? */
24821da177e4SLinus Torvalds 		used = skb->len - offset;
24831da177e4SLinus Torvalds 		if (len < used)
24841da177e4SLinus Torvalds 			used = len;
24851da177e4SLinus Torvalds 
24861da177e4SLinus Torvalds 		/* Do we have urgent data here? */
2487b96c51bdSEric Dumazet 		if (unlikely(tp->urg_data)) {
24881da177e4SLinus Torvalds 			u32 urg_offset = tp->urg_seq - *seq;
24891da177e4SLinus Torvalds 			if (urg_offset < used) {
24901da177e4SLinus Torvalds 				if (!urg_offset) {
24911da177e4SLinus Torvalds 					if (!sock_flag(sk, SOCK_URGINLINE)) {
24927db48e98SEric Dumazet 						WRITE_ONCE(*seq, *seq + 1);
249377527313SIlpo Järvinen 						urg_hole++;
24941da177e4SLinus Torvalds 						offset++;
24951da177e4SLinus Torvalds 						used--;
24961da177e4SLinus Torvalds 						if (!used)
24971da177e4SLinus Torvalds 							goto skip_copy;
24981da177e4SLinus Torvalds 					}
24991da177e4SLinus Torvalds 				} else
25001da177e4SLinus Torvalds 					used = urg_offset;
25011da177e4SLinus Torvalds 			}
25021da177e4SLinus Torvalds 		}
25031da177e4SLinus Torvalds 
25041da177e4SLinus Torvalds 		if (!(flags & MSG_TRUNC)) {
250551f3d02bSDavid S. Miller 			err = skb_copy_datagram_msg(skb, offset, msg, used);
25061da177e4SLinus Torvalds 			if (err) {
25071da177e4SLinus Torvalds 				/* Exception. Bailout! */
25081da177e4SLinus Torvalds 				if (!copied)
25091da177e4SLinus Torvalds 					copied = -EFAULT;
25101da177e4SLinus Torvalds 				break;
25111da177e4SLinus Torvalds 			}
25121da177e4SLinus Torvalds 		}
25131da177e4SLinus Torvalds 
25147db48e98SEric Dumazet 		WRITE_ONCE(*seq, *seq + used);
25151da177e4SLinus Torvalds 		copied += used;
25161da177e4SLinus Torvalds 		len -= used;
251705ea4916SJon Maloy 		if (flags & MSG_PEEK)
251805ea4916SJon Maloy 			sk_peek_offset_fwd(sk, used);
251905ea4916SJon Maloy 		else
252005ea4916SJon Maloy 			sk_peek_offset_bwd(sk, used);
25211da177e4SLinus Torvalds 		tcp_rcv_space_adjust(sk);
25221da177e4SLinus Torvalds 
25231da177e4SLinus Torvalds skip_copy:
2524b96c51bdSEric Dumazet 		if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) {
25257b6a893aSEric Dumazet 			WRITE_ONCE(tp->urg_data, 0);
252631770e34SFlorian Westphal 			tcp_fast_path_check(sk);
252731770e34SFlorian Westphal 		}
25281da177e4SLinus Torvalds 
252998aaa913SMike Maloney 		if (TCP_SKB_CB(skb)->has_rxtstamp) {
25302cd81161SArjun Roy 			tcp_update_recv_tstamps(skb, tss);
2531925bba24SArjun Roy 			*cmsg_flags |= TCP_CMSG_TS;
253298aaa913SMike Maloney 		}
2533cc4de047SKelly Littlepage 
2534cc4de047SKelly Littlepage 		if (used + offset < skb->len)
2535cc4de047SKelly Littlepage 			continue;
2536cc4de047SKelly Littlepage 
2537e11ecddfSEric Dumazet 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
25381da177e4SLinus Torvalds 			goto found_fin_ok;
25397bced397SDan Williams 		if (!(flags & MSG_PEEK))
25403df684c1SEric Dumazet 			tcp_eat_recv_skb(sk, skb);
25411da177e4SLinus Torvalds 		continue;
25421da177e4SLinus Torvalds 
25431da177e4SLinus Torvalds found_fin_ok:
25441da177e4SLinus Torvalds 		/* Process the FIN. */
25457db48e98SEric Dumazet 		WRITE_ONCE(*seq, *seq + 1);
25467bced397SDan Williams 		if (!(flags & MSG_PEEK))
25473df684c1SEric Dumazet 			tcp_eat_recv_skb(sk, skb);
25481da177e4SLinus Torvalds 		break;
25491da177e4SLinus Torvalds 	} while (len > 0);
25501da177e4SLinus Torvalds 
25511da177e4SLinus Torvalds 	/* According to UNIX98, msg_name/msg_namelen are ignored
25521da177e4SLinus Torvalds 	 * on connected socket. I was just happy when found this 8) --ANK
25531da177e4SLinus Torvalds 	 */
25541da177e4SLinus Torvalds 
25551da177e4SLinus Torvalds 	/* Clean up data we have read: This will do ACK frames. */
25560e4b4992SChris Leech 	tcp_cleanup_rbuf(sk, copied);
25571da177e4SLinus Torvalds 	return copied;
25581da177e4SLinus Torvalds 
25591da177e4SLinus Torvalds out:
25601da177e4SLinus Torvalds 	return err;
25611da177e4SLinus Torvalds 
25621da177e4SLinus Torvalds recv_urg:
2563377f0a08SRami Rosen 	err = tcp_recv_urg(sk, msg, len, flags);
25641da177e4SLinus Torvalds 	goto out;
2565c0e88ff0SPavel Emelyanov 
2566c0e88ff0SPavel Emelyanov recv_sndq:
2567c0e88ff0SPavel Emelyanov 	err = tcp_peek_sndq(sk, msg, len);
2568c0e88ff0SPavel Emelyanov 	goto out;
25691da177e4SLinus Torvalds }
25702cd81161SArjun Roy 
2571ec095263SOliver Hartkopp int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags,
2572ec095263SOliver Hartkopp 		int *addr_len)
25732cd81161SArjun Roy {
2574f94fd25cSJens Axboe 	int cmsg_flags = 0, ret;
25752cd81161SArjun Roy 	struct scm_timestamping_internal tss;
25762cd81161SArjun Roy 
25772cd81161SArjun Roy 	if (unlikely(flags & MSG_ERRQUEUE))
25782cd81161SArjun Roy 		return inet_recv_error(sk, msg, len, addr_len);
25792cd81161SArjun Roy 
25802cd81161SArjun Roy 	if (sk_can_busy_loop(sk) &&
25812cd81161SArjun Roy 	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
25822cd81161SArjun Roy 	    sk->sk_state == TCP_ESTABLISHED)
2583ec095263SOliver Hartkopp 		sk_busy_loop(sk, flags & MSG_DONTWAIT);
25842cd81161SArjun Roy 
25852cd81161SArjun Roy 	lock_sock(sk);
2586ec095263SOliver Hartkopp 	ret = tcp_recvmsg_locked(sk, msg, len, flags, &tss, &cmsg_flags);
25872cd81161SArjun Roy 	release_sock(sk);
25882cd81161SArjun Roy 
2589f94fd25cSJens Axboe 	if ((cmsg_flags || msg->msg_get_inq) && ret >= 0) {
2590925bba24SArjun Roy 		if (cmsg_flags & TCP_CMSG_TS)
25912cd81161SArjun Roy 			tcp_recv_timestamp(msg, sk, &tss);
2592f94fd25cSJens Axboe 		if (msg->msg_get_inq) {
2593f94fd25cSJens Axboe 			msg->msg_inq = tcp_inq_hint(sk);
2594f94fd25cSJens Axboe 			if (cmsg_flags & TCP_CMSG_INQ)
2595f94fd25cSJens Axboe 				put_cmsg(msg, SOL_TCP, TCP_CM_INQ,
2596f94fd25cSJens Axboe 					 sizeof(msg->msg_inq), &msg->msg_inq);
25972cd81161SArjun Roy 		}
25982cd81161SArjun Roy 	}
25992cd81161SArjun Roy 	return ret;
26002cd81161SArjun Roy }
26014bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_recvmsg);
26021da177e4SLinus Torvalds 
2603490d5046SIlpo Järvinen void tcp_set_state(struct sock *sk, int state)
2604490d5046SIlpo Järvinen {
2605490d5046SIlpo Järvinen 	int oldstate = sk->sk_state;
2606490d5046SIlpo Järvinen 
2607d4487491SLawrence Brakmo 	/* We defined a new enum for TCP states that are exported in BPF
2608d4487491SLawrence Brakmo 	 * so as not force the internal TCP states to be frozen. The
2609d4487491SLawrence Brakmo 	 * following checks will detect if an internal state value ever
2610d4487491SLawrence Brakmo 	 * differs from the BPF value. If this ever happens, then we will
2611d4487491SLawrence Brakmo 	 * need to remap the internal value to the BPF value before calling
2612d4487491SLawrence Brakmo 	 * tcp_call_bpf_2arg.
2613d4487491SLawrence Brakmo 	 */
2614d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
2615d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
2616d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
2617d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
2618d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
2619d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
2620d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
2621d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
2622d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
2623d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
2624d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
2625d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
262691051f00SGuillaume Nault 	BUILD_BUG_ON((int)BPF_TCP_BOUND_INACTIVE != (int)TCP_BOUND_INACTIVE);
2627d4487491SLawrence Brakmo 	BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);
2628d4487491SLawrence Brakmo 
262997a19cafSYonghong Song 	/* bpf uapi header bpf.h defines an anonymous enum with values
263097a19cafSYonghong Song 	 * BPF_TCP_* used by bpf programs. Currently gcc built vmlinux
263197a19cafSYonghong Song 	 * is able to emit this enum in DWARF due to the above BUILD_BUG_ON.
263297a19cafSYonghong Song 	 * But clang built vmlinux does not have this enum in DWARF
263397a19cafSYonghong Song 	 * since clang removes the above code before generating IR/debuginfo.
263497a19cafSYonghong Song 	 * Let us explicitly emit the type debuginfo to ensure the
263597a19cafSYonghong Song 	 * above-mentioned anonymous enum in the vmlinux DWARF and hence BTF
263697a19cafSYonghong Song 	 * regardless of which compiler is used.
263797a19cafSYonghong Song 	 */
263897a19cafSYonghong Song 	BTF_TYPE_EMIT_ENUM(BPF_TCP_ESTABLISHED);
263997a19cafSYonghong Song 
2640d4487491SLawrence Brakmo 	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
2641d4487491SLawrence Brakmo 		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
2642e8fce239SSong Liu 
2643490d5046SIlpo Järvinen 	switch (state) {
2644490d5046SIlpo Järvinen 	case TCP_ESTABLISHED:
2645490d5046SIlpo Järvinen 		if (oldstate != TCP_ESTABLISHED)
264681cc8a75SPavel Emelyanov 			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2647490d5046SIlpo Järvinen 		break;
2648490d5046SIlpo Järvinen 
2649490d5046SIlpo Järvinen 	case TCP_CLOSE:
2650490d5046SIlpo Järvinen 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
265181cc8a75SPavel Emelyanov 			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2652490d5046SIlpo Järvinen 
2653490d5046SIlpo Järvinen 		sk->sk_prot->unhash(sk);
2654490d5046SIlpo Järvinen 		if (inet_csk(sk)->icsk_bind_hash &&
2655490d5046SIlpo Järvinen 		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2656ab1e0a13SArnaldo Carvalho de Melo 			inet_put_port(sk);
2657a8eceea8SJoe Perches 		fallthrough;
2658490d5046SIlpo Järvinen 	default:
2659490d5046SIlpo Järvinen 		if (oldstate == TCP_ESTABLISHED)
266074688e48SPavel Emelyanov 			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2661490d5046SIlpo Järvinen 	}
2662490d5046SIlpo Järvinen 
2663490d5046SIlpo Järvinen 	/* Change state AFTER socket is unhashed to avoid closed
2664490d5046SIlpo Järvinen 	 * socket sitting in hash tables.
2665490d5046SIlpo Järvinen 	 */
2666563e0bb0SYafang Shao 	inet_sk_state_store(sk, state);
2667490d5046SIlpo Järvinen }
2668490d5046SIlpo Järvinen EXPORT_SYMBOL_GPL(tcp_set_state);
2669490d5046SIlpo Järvinen 
26701da177e4SLinus Torvalds /*
26711da177e4SLinus Torvalds  *	State processing on a close. This implements the state shift for
26721da177e4SLinus Torvalds  *	sending our FIN frame. Note that we only send a FIN for some
26731da177e4SLinus Torvalds  *	states. A shutdown() may have already sent the FIN, or we may be
26741da177e4SLinus Torvalds  *	closed.
26751da177e4SLinus Torvalds  */
26761da177e4SLinus Torvalds 
26779b5b5cffSArjan van de Ven static const unsigned char new_state[16] = {
26781da177e4SLinus Torvalds   /* current state:        new state:      action:	*/
26790980c1e3SEric Dumazet   [0 /* (Invalid) */]	= TCP_CLOSE,
26800980c1e3SEric Dumazet   [TCP_ESTABLISHED]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
26810980c1e3SEric Dumazet   [TCP_SYN_SENT]	= TCP_CLOSE,
26820980c1e3SEric Dumazet   [TCP_SYN_RECV]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
26830980c1e3SEric Dumazet   [TCP_FIN_WAIT1]	= TCP_FIN_WAIT1,
26840980c1e3SEric Dumazet   [TCP_FIN_WAIT2]	= TCP_FIN_WAIT2,
26850980c1e3SEric Dumazet   [TCP_TIME_WAIT]	= TCP_CLOSE,
26860980c1e3SEric Dumazet   [TCP_CLOSE]		= TCP_CLOSE,
26870980c1e3SEric Dumazet   [TCP_CLOSE_WAIT]	= TCP_LAST_ACK  | TCP_ACTION_FIN,
26880980c1e3SEric Dumazet   [TCP_LAST_ACK]	= TCP_LAST_ACK,
26890980c1e3SEric Dumazet   [TCP_LISTEN]		= TCP_CLOSE,
26900980c1e3SEric Dumazet   [TCP_CLOSING]		= TCP_CLOSING,
26910980c1e3SEric Dumazet   [TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen ! */
26921da177e4SLinus Torvalds };
26931da177e4SLinus Torvalds 
26941da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk)
26951da177e4SLinus Torvalds {
26961da177e4SLinus Torvalds 	int next = (int)new_state[sk->sk_state];
26971da177e4SLinus Torvalds 	int ns = next & TCP_STATE_MASK;
26981da177e4SLinus Torvalds 
26991da177e4SLinus Torvalds 	tcp_set_state(sk, ns);
27001da177e4SLinus Torvalds 
27011da177e4SLinus Torvalds 	return next & TCP_ACTION_FIN;
27021da177e4SLinus Torvalds }
27031da177e4SLinus Torvalds 
27041da177e4SLinus Torvalds /*
27051da177e4SLinus Torvalds  *	Shutdown the sending side of a connection. Much like close except
27061f29b058SSatoru SATOH  *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
27071da177e4SLinus Torvalds  */
27081da177e4SLinus Torvalds 
27091da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how)
27101da177e4SLinus Torvalds {
27111da177e4SLinus Torvalds 	/*	We need to grab some memory, and put together a FIN,
27121da177e4SLinus Torvalds 	 *	and then put it into the queue to be sent.
27131da177e4SLinus Torvalds 	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
27141da177e4SLinus Torvalds 	 */
27151da177e4SLinus Torvalds 	if (!(how & SEND_SHUTDOWN))
27161da177e4SLinus Torvalds 		return;
27171da177e4SLinus Torvalds 
27181da177e4SLinus Torvalds 	/* If we've already sent a FIN, or it's a closed state, skip this. */
27191da177e4SLinus Torvalds 	if ((1 << sk->sk_state) &
27201da177e4SLinus Torvalds 	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
27211da177e4SLinus Torvalds 	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
27221da177e4SLinus Torvalds 		/* Clear out any half completed packets.  FIN if needed. */
27231da177e4SLinus Torvalds 		if (tcp_close_state(sk))
27241da177e4SLinus Torvalds 			tcp_send_fin(sk);
27251da177e4SLinus Torvalds 	}
27261da177e4SLinus Torvalds }
27274bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_shutdown);
27281da177e4SLinus Torvalds 
272919757cebSEric Dumazet int tcp_orphan_count_sum(void)
273019757cebSEric Dumazet {
273119757cebSEric Dumazet 	int i, total = 0;
273219757cebSEric Dumazet 
273319757cebSEric Dumazet 	for_each_possible_cpu(i)
273419757cebSEric Dumazet 		total += per_cpu(tcp_orphan_count, i);
273519757cebSEric Dumazet 
273619757cebSEric Dumazet 	return max(total, 0);
273719757cebSEric Dumazet }
273819757cebSEric Dumazet 
273919757cebSEric Dumazet static int tcp_orphan_cache;
274019757cebSEric Dumazet static struct timer_list tcp_orphan_timer;
274119757cebSEric Dumazet #define TCP_ORPHAN_TIMER_PERIOD msecs_to_jiffies(100)
274219757cebSEric Dumazet 
274319757cebSEric Dumazet static void tcp_orphan_update(struct timer_list *unused)
274419757cebSEric Dumazet {
274519757cebSEric Dumazet 	WRITE_ONCE(tcp_orphan_cache, tcp_orphan_count_sum());
274619757cebSEric Dumazet 	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
274719757cebSEric Dumazet }
274819757cebSEric Dumazet 
274919757cebSEric Dumazet static bool tcp_too_many_orphans(int shift)
275019757cebSEric Dumazet {
275147e6ab24SKuniyuki Iwashima 	return READ_ONCE(tcp_orphan_cache) << shift >
275247e6ab24SKuniyuki Iwashima 		READ_ONCE(sysctl_tcp_max_orphans);
275319757cebSEric Dumazet }
275419757cebSEric Dumazet 
2755*dda4d96aSEric Dumazet static bool tcp_out_of_memory(const struct sock *sk)
2756*dda4d96aSEric Dumazet {
2757*dda4d96aSEric Dumazet 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
2758*dda4d96aSEric Dumazet 	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
2759*dda4d96aSEric Dumazet 		return true;
2760*dda4d96aSEric Dumazet 	return false;
2761*dda4d96aSEric Dumazet }
2762*dda4d96aSEric Dumazet 
2763*dda4d96aSEric Dumazet bool tcp_check_oom(const struct sock *sk, int shift)
2764efcdbf24SArun Sharma {
2765efcdbf24SArun Sharma 	bool too_many_orphans, out_of_socket_memory;
2766efcdbf24SArun Sharma 
276719757cebSEric Dumazet 	too_many_orphans = tcp_too_many_orphans(shift);
2768efcdbf24SArun Sharma 	out_of_socket_memory = tcp_out_of_memory(sk);
2769efcdbf24SArun Sharma 
2770e87cc472SJoe Perches 	if (too_many_orphans)
2771e87cc472SJoe Perches 		net_info_ratelimited("too many orphaned sockets\n");
2772e87cc472SJoe Perches 	if (out_of_socket_memory)
2773e87cc472SJoe Perches 		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2774efcdbf24SArun Sharma 	return too_many_orphans || out_of_socket_memory;
2775efcdbf24SArun Sharma }
2776efcdbf24SArun Sharma 
277777c3c956SPaolo Abeni void __tcp_close(struct sock *sk, long timeout)
27781da177e4SLinus Torvalds {
27791da177e4SLinus Torvalds 	struct sk_buff *skb;
27801da177e4SLinus Torvalds 	int data_was_unread = 0;
278175c2d907SHerbert Xu 	int state;
27821da177e4SLinus Torvalds 
2783e14cadfdSEric Dumazet 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
27841da177e4SLinus Torvalds 
27851da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN) {
27861da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
27871da177e4SLinus Torvalds 
27881da177e4SLinus Torvalds 		/* Special case. */
27890a5578cfSArnaldo Carvalho de Melo 		inet_csk_listen_stop(sk);
27901da177e4SLinus Torvalds 
27911da177e4SLinus Torvalds 		goto adjudge_to_death;
27921da177e4SLinus Torvalds 	}
27931da177e4SLinus Torvalds 
27941da177e4SLinus Torvalds 	/*  We need to flush the recv. buffs.  We do this only on the
27951da177e4SLinus Torvalds 	 *  descriptor close, not protocol-sourced closes, because the
27961da177e4SLinus Torvalds 	 *  reader process may not have drained the data yet!
27971da177e4SLinus Torvalds 	 */
27981da177e4SLinus Torvalds 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2799e11ecddfSEric Dumazet 		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
2800e11ecddfSEric Dumazet 
2801e11ecddfSEric Dumazet 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2802e11ecddfSEric Dumazet 			len--;
28031da177e4SLinus Torvalds 		data_was_unread += len;
28041da177e4SLinus Torvalds 		__kfree_skb(skb);
28051da177e4SLinus Torvalds 	}
28061da177e4SLinus Torvalds 
2807565b7b2dSKonstantin Khorenko 	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
2808565b7b2dSKonstantin Khorenko 	if (sk->sk_state == TCP_CLOSE)
2809565b7b2dSKonstantin Khorenko 		goto adjudge_to_death;
2810565b7b2dSKonstantin Khorenko 
281165bb723cSGerrit Renker 	/* As outlined in RFC 2525, section 2.17, we send a RST here because
281265bb723cSGerrit Renker 	 * data was lost. To witness the awful effects of the old behavior of
281365bb723cSGerrit Renker 	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
281465bb723cSGerrit Renker 	 * GET in an FTP client, suspend the process, wait for the client to
281565bb723cSGerrit Renker 	 * advertise a zero window, then kill -9 the FTP client, wheee...
281665bb723cSGerrit Renker 	 * Note: timeout is always zero in such a case.
28171da177e4SLinus Torvalds 	 */
2818ee995283SPavel Emelyanov 	if (unlikely(tcp_sk(sk)->repair)) {
2819ee995283SPavel Emelyanov 		sk->sk_prot->disconnect(sk, 0);
2820ee995283SPavel Emelyanov 	} else if (data_was_unread) {
28211da177e4SLinus Torvalds 		/* Unread data was tossed, zap the connection. */
28226aef70a8SEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
28231da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
28245691276bSJason Xing 		tcp_send_active_reset(sk, sk->sk_allocation,
28255691276bSJason Xing 				      SK_RST_REASON_NOT_SPECIFIED);
28261da177e4SLinus Torvalds 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
28271da177e4SLinus Torvalds 		/* Check zero linger _after_ checking for unread data. */
28281da177e4SLinus Torvalds 		sk->sk_prot->disconnect(sk, 0);
28296aef70a8SEric Dumazet 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
28301da177e4SLinus Torvalds 	} else if (tcp_close_state(sk)) {
28311da177e4SLinus Torvalds 		/* We FIN if the application ate all the data before
28321da177e4SLinus Torvalds 		 * zapping the connection.
28331da177e4SLinus Torvalds 		 */
28341da177e4SLinus Torvalds 
28351da177e4SLinus Torvalds 		/* RED-PEN. Formally speaking, we have broken TCP state
28361da177e4SLinus Torvalds 		 * machine. State transitions:
28371da177e4SLinus Torvalds 		 *
28381da177e4SLinus Torvalds 		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
28391da177e4SLinus Torvalds 		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
28401da177e4SLinus Torvalds 		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
28411da177e4SLinus Torvalds 		 *
28421da177e4SLinus Torvalds 		 * are legal only when FIN has been sent (i.e. in window),
28431da177e4SLinus Torvalds 		 * rather than queued out of window. Purists blame.
28441da177e4SLinus Torvalds 		 *
28451da177e4SLinus Torvalds 		 * F.e. "RFC state" is ESTABLISHED,
28461da177e4SLinus Torvalds 		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
28471da177e4SLinus Torvalds 		 *
28481da177e4SLinus Torvalds 		 * The visible declinations are that sometimes
28491da177e4SLinus Torvalds 		 * we enter time-wait state, when it is not required really
28501da177e4SLinus Torvalds 		 * (harmless), do not send active resets, when they are
28511da177e4SLinus Torvalds 		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
28521da177e4SLinus Torvalds 		 * they look as CLOSING or LAST_ACK for Linux)
28531da177e4SLinus Torvalds 		 * Probably, I missed some more holelets.
28541da177e4SLinus Torvalds 		 * 						--ANK
28558336886fSJerry Chu 		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
28568336886fSJerry Chu 		 * in a single packet! (May consider it later but will
28578336886fSJerry Chu 		 * probably need API support or TCP_CORK SYN-ACK until
28588336886fSJerry Chu 		 * data is written and socket is closed.)
28591da177e4SLinus Torvalds 		 */
28601da177e4SLinus Torvalds 		tcp_send_fin(sk);
28611da177e4SLinus Torvalds 	}
28621da177e4SLinus Torvalds 
28631da177e4SLinus Torvalds 	sk_stream_wait_close(sk, timeout);
28641da177e4SLinus Torvalds 
28651da177e4SLinus Torvalds adjudge_to_death:
286675c2d907SHerbert Xu 	state = sk->sk_state;
286775c2d907SHerbert Xu 	sock_hold(sk);
286875c2d907SHerbert Xu 	sock_orphan(sk);
286975c2d907SHerbert Xu 
28701da177e4SLinus Torvalds 	local_bh_disable();
28711da177e4SLinus Torvalds 	bh_lock_sock(sk);
28728873c064SEric Dumazet 	/* remove backlog if any, without releasing ownership. */
28738873c064SEric Dumazet 	__release_sock(sk);
28741da177e4SLinus Torvalds 
287519757cebSEric Dumazet 	this_cpu_inc(tcp_orphan_count);
2876eb4dea58SHerbert Xu 
287775c2d907SHerbert Xu 	/* Have we already been destroyed by a softirq or backlog? */
287875c2d907SHerbert Xu 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
287975c2d907SHerbert Xu 		goto out;
28801da177e4SLinus Torvalds 
28811da177e4SLinus Torvalds 	/*	This is a (useful) BSD violating of the RFC. There is a
28821da177e4SLinus Torvalds 	 *	problem with TCP as specified in that the other end could
28831da177e4SLinus Torvalds 	 *	keep a socket open forever with no application left this end.
2884b10bd54cSJesper Juhl 	 *	We use a 1 minute timeout (about the same as BSD) then kill
28851da177e4SLinus Torvalds 	 *	our end. If they send after that then tough - BUT: long enough
28861da177e4SLinus Torvalds 	 *	that we won't make the old 4*rto = almost no time - whoops
28871da177e4SLinus Torvalds 	 *	reset mistake.
28881da177e4SLinus Torvalds 	 *
28891da177e4SLinus Torvalds 	 *	Nope, it was not mistake. It is really desired behaviour
28901da177e4SLinus Torvalds 	 *	f.e. on http servers, when such sockets are useless, but
28911da177e4SLinus Torvalds 	 *	consume significant resources. Let's do it with special
28921da177e4SLinus Torvalds 	 *	linger2	option.					--ANK
28931da177e4SLinus Torvalds 	 */
28941da177e4SLinus Torvalds 
28951da177e4SLinus Torvalds 	if (sk->sk_state == TCP_FIN_WAIT2) {
28961da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
2897a81722ddSEric Dumazet 		if (READ_ONCE(tp->linger2) < 0) {
28981da177e4SLinus Torvalds 			tcp_set_state(sk, TCP_CLOSE);
28995691276bSJason Xing 			tcp_send_active_reset(sk, GFP_ATOMIC,
29005691276bSJason Xing 					      SK_RST_REASON_NOT_SPECIFIED);
290102a1d6e7SEric Dumazet 			__NET_INC_STATS(sock_net(sk),
2902de0744afSPavel Emelyanov 					LINUX_MIB_TCPABORTONLINGER);
29031da177e4SLinus Torvalds 		} else {
2904463c84b9SArnaldo Carvalho de Melo 			const int tmo = tcp_fin_time(sk);
29051da177e4SLinus Torvalds 
29061da177e4SLinus Torvalds 			if (tmo > TCP_TIMEWAIT_LEN) {
290752499afeSDavid S. Miller 				inet_csk_reset_keepalive_timer(sk,
290852499afeSDavid S. Miller 						tmo - TCP_TIMEWAIT_LEN);
29091da177e4SLinus Torvalds 			} else {
29101da177e4SLinus Torvalds 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
29111da177e4SLinus Torvalds 				goto out;
29121da177e4SLinus Torvalds 			}
29131da177e4SLinus Torvalds 		}
29141da177e4SLinus Torvalds 	}
29151da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
2916efcdbf24SArun Sharma 		if (tcp_check_oom(sk, 0)) {
29171da177e4SLinus Torvalds 			tcp_set_state(sk, TCP_CLOSE);
29185691276bSJason Xing 			tcp_send_active_reset(sk, GFP_ATOMIC,
29195691276bSJason Xing 					      SK_RST_REASON_NOT_SPECIFIED);
292002a1d6e7SEric Dumazet 			__NET_INC_STATS(sock_net(sk),
2921de0744afSPavel Emelyanov 					LINUX_MIB_TCPABORTONMEMORY);
29224ee806d5SDan Streetman 		} else if (!check_net(sock_net(sk))) {
29234ee806d5SDan Streetman 			/* Not possible to send reset; just close */
29244ee806d5SDan Streetman 			tcp_set_state(sk, TCP_CLOSE);
29251da177e4SLinus Torvalds 		}
29261da177e4SLinus Torvalds 	}
29271da177e4SLinus Torvalds 
29288336886fSJerry Chu 	if (sk->sk_state == TCP_CLOSE) {
2929d983ea6fSEric Dumazet 		struct request_sock *req;
2930d983ea6fSEric Dumazet 
2931d983ea6fSEric Dumazet 		req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
2932d983ea6fSEric Dumazet 						lockdep_sock_is_held(sk));
29338336886fSJerry Chu 		/* We could get here with a non-NULL req if the socket is
29348336886fSJerry Chu 		 * aborted (e.g., closed with unread data) before 3WHS
29358336886fSJerry Chu 		 * finishes.
29368336886fSJerry Chu 		 */
293700db4124SIan Morris 		if (req)
29388336886fSJerry Chu 			reqsk_fastopen_remove(sk, req, false);
29390a5578cfSArnaldo Carvalho de Melo 		inet_csk_destroy_sock(sk);
29408336886fSJerry Chu 	}
29411da177e4SLinus Torvalds 	/* Otherwise, socket is reprieved until protocol close. */
29421da177e4SLinus Torvalds 
29431da177e4SLinus Torvalds out:
29441da177e4SLinus Torvalds 	bh_unlock_sock(sk);
29451da177e4SLinus Torvalds 	local_bh_enable();
294677c3c956SPaolo Abeni }
294777c3c956SPaolo Abeni 
294877c3c956SPaolo Abeni void tcp_close(struct sock *sk, long timeout)
294977c3c956SPaolo Abeni {
295077c3c956SPaolo Abeni 	lock_sock(sk);
295177c3c956SPaolo Abeni 	__tcp_close(sk, timeout);
29528873c064SEric Dumazet 	release_sock(sk);
2953151c9c72SEric Dumazet 	if (!sk->sk_net_refcnt)
2954151c9c72SEric Dumazet 		inet_csk_clear_xmit_timers_sync(sk);
29551da177e4SLinus Torvalds 	sock_put(sk);
29561da177e4SLinus Torvalds }
29574bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_close);
29581da177e4SLinus Torvalds 
29591da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */
29601da177e4SLinus Torvalds 
2961a2a385d6SEric Dumazet static inline bool tcp_need_reset(int state)
29621da177e4SLinus Torvalds {
29631da177e4SLinus Torvalds 	return (1 << state) &
29641da177e4SLinus Torvalds 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2965a7150e38SEric Dumazet 		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
29661da177e4SLinus Torvalds }
29671da177e4SLinus Torvalds 
296875c119afSEric Dumazet static void tcp_rtx_queue_purge(struct sock *sk)
296975c119afSEric Dumazet {
297075c119afSEric Dumazet 	struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
297175c119afSEric Dumazet 
29722bec445fSEric Dumazet 	tcp_sk(sk)->highest_sack = NULL;
297375c119afSEric Dumazet 	while (p) {
297475c119afSEric Dumazet 		struct sk_buff *skb = rb_to_skb(p);
297575c119afSEric Dumazet 
297675c119afSEric Dumazet 		p = rb_next(p);
297775c119afSEric Dumazet 		/* Since we are deleting whole queue, no need to
297875c119afSEric Dumazet 		 * list_del(&skb->tcp_tsorted_anchor)
297975c119afSEric Dumazet 		 */
298075c119afSEric Dumazet 		tcp_rtx_queue_unlink(skb, sk);
298103271f3aSTalal Ahmad 		tcp_wmem_free_skb(sk, skb);
298275c119afSEric Dumazet 	}
298375c119afSEric Dumazet }
298475c119afSEric Dumazet 
2985ac3f09baSEric Dumazet void tcp_write_queue_purge(struct sock *sk)
2986ac3f09baSEric Dumazet {
2987ac3f09baSEric Dumazet 	struct sk_buff *skb;
2988ac3f09baSEric Dumazet 
2989ac3f09baSEric Dumazet 	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
2990ac3f09baSEric Dumazet 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
2991ac3f09baSEric Dumazet 		tcp_skb_tsorted_anchor_cleanup(skb);
299203271f3aSTalal Ahmad 		tcp_wmem_free_skb(sk, skb);
2993ac3f09baSEric Dumazet 	}
299475c119afSEric Dumazet 	tcp_rtx_queue_purge(sk);
2995ac3f09baSEric Dumazet 	INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
2996ac3f09baSEric Dumazet 	tcp_clear_all_retrans_hints(tcp_sk(sk));
2997bffd168cSSoheil Hassas Yeganeh 	tcp_sk(sk)->packets_out = 0;
299804c03114SEric Dumazet 	inet_csk(sk)->icsk_backoff = 0;
2999ac3f09baSEric Dumazet }
3000ac3f09baSEric Dumazet 
30011da177e4SLinus Torvalds int tcp_disconnect(struct sock *sk, int flags)
30021da177e4SLinus Torvalds {
30031da177e4SLinus Torvalds 	struct inet_sock *inet = inet_sk(sk);
3004463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
30051da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
30061da177e4SLinus Torvalds 	int old_state = sk->sk_state;
30070f317464SEric Dumazet 	u32 seq;
30081da177e4SLinus Torvalds 
30091da177e4SLinus Torvalds 	if (old_state != TCP_CLOSE)
30101da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
30111da177e4SLinus Torvalds 
30121da177e4SLinus Torvalds 	/* ABORT function of RFC793 */
30131da177e4SLinus Torvalds 	if (old_state == TCP_LISTEN) {
30140a5578cfSArnaldo Carvalho de Melo 		inet_csk_listen_stop(sk);
3015ee995283SPavel Emelyanov 	} else if (unlikely(tp->repair)) {
3016e13ec3daSEric Dumazet 		WRITE_ONCE(sk->sk_err, ECONNABORTED);
30171da177e4SLinus Torvalds 	} else if (tcp_need_reset(old_state) ||
30181da177e4SLinus Torvalds 		   (tp->snd_nxt != tp->write_seq &&
30191da177e4SLinus Torvalds 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
3020caa20d9aSStephen Hemminger 		/* The last check adjusts for discrepancy of Linux wrt. RFC
30211da177e4SLinus Torvalds 		 * states
30221da177e4SLinus Torvalds 		 */
30235691276bSJason Xing 		tcp_send_active_reset(sk, gfp_any(), SK_RST_REASON_NOT_SPECIFIED);
3024e13ec3daSEric Dumazet 		WRITE_ONCE(sk->sk_err, ECONNRESET);
3025a7150e38SEric Dumazet 	} else if (old_state == TCP_SYN_SENT)
3026e13ec3daSEric Dumazet 		WRITE_ONCE(sk->sk_err, ECONNRESET);
30271da177e4SLinus Torvalds 
30281da177e4SLinus Torvalds 	tcp_clear_xmit_timers(sk);
30291da177e4SLinus Torvalds 	__skb_queue_purge(&sk->sk_receive_queue);
30307db48e98SEric Dumazet 	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
30317b6a893aSEric Dumazet 	WRITE_ONCE(tp->urg_data, 0);
303205ea4916SJon Maloy 	sk_set_peek_off(sk, -1);
3033fe067e8aSDavid S. Miller 	tcp_write_queue_purge(sk);
3034cf1ef3f0SWei Wang 	tcp_fastopen_active_disable_ofo_check(sk);
30359f5afeaeSYaogong Wang 	skb_rbtree_purge(&tp->out_of_order_queue);
30361da177e4SLinus Torvalds 
3037c720c7e8SEric Dumazet 	inet->inet_dport = 0;
30381da177e4SLinus Torvalds 
3039e0833d1fSKuniyuki Iwashima 	inet_bhash2_reset_saddr(sk);
30401da177e4SLinus Torvalds 
3041e14cadfdSEric Dumazet 	WRITE_ONCE(sk->sk_shutdown, 0);
30421da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
3043740b0f18SEric Dumazet 	tp->srtt_us = 0;
3044b9e2e689SEric Dumazet 	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
30453f6c65d6SWei Wang 	tp->rcv_rtt_last_tsecr = 0;
30460f317464SEric Dumazet 
30470f317464SEric Dumazet 	seq = tp->write_seq + tp->max_window + 2;
30480f317464SEric Dumazet 	if (!seq)
30490f317464SEric Dumazet 		seq = 1;
30500f317464SEric Dumazet 	WRITE_ONCE(tp->write_seq, seq);
30510f317464SEric Dumazet 
3052463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_backoff = 0;
30536687e988SArnaldo Carvalho de Melo 	icsk->icsk_probes_out = 0;
30549d9b1ee0SEnke Chen 	icsk->icsk_probes_tstamp = 0;
30556a408147SEric Dumazet 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
3056ca584ba0SMartin KaFai Lau 	icsk->icsk_rto_min = TCP_RTO_MIN;
30572b8ee4f0SMartin KaFai Lau 	icsk->icsk_delack_max = TCP_DELACK_MAX;
30580b6a05c1SIlpo Järvinen 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
305940570375SEric Dumazet 	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
30601da177e4SLinus Torvalds 	tp->snd_cwnd_cnt = 0;
3061f4ce91ceSNeal Cardwell 	tp->is_cwnd_limited = 0;
3062f4ce91ceSNeal Cardwell 	tp->max_packets_out = 0;
30631fdf475aSEric Dumazet 	tp->window_clamp = 0;
30642fbdd562SEric Dumazet 	tp->delivered = 0;
3065e21db6f6SYuchung Cheng 	tp->delivered_ce = 0;
3066ce69e563SChristoph Paasch 	if (icsk->icsk_ca_ops->release)
3067ce69e563SChristoph Paasch 		icsk->icsk_ca_ops->release(sk);
3068ce69e563SChristoph Paasch 	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
30698919a9b3SNeal Cardwell 	icsk->icsk_ca_initialized = 0;
30706687e988SArnaldo Carvalho de Melo 	tcp_set_ca_state(sk, TCP_CA_Open);
3071d4761754SYousuk Seung 	tp->is_sack_reneg = 0;
30721da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
3073c13c48c0SEric Dumazet 	tp->total_retrans = 0;
3074463c84b9SArnaldo Carvalho de Melo 	inet_csk_delack_init(sk);
3075499350a5SWei Wang 	/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
3076499350a5SWei Wang 	 * issue in __tcp_select_window()
3077499350a5SWei Wang 	 */
3078499350a5SWei Wang 	icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
3079b40b4f79SSrinivas Aji 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
30801da177e4SLinus Torvalds 	__sk_dst_reset(sk);
30818f905c0eSEric Dumazet 	dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL));
308217c3060bSEric Dumazet 	tcp_saved_syn_free(tp);
30835d9f4262SEric Dumazet 	tp->compressed_ack = 0;
3084784f8344SEric Dumazet 	tp->segs_in = 0;
3085784f8344SEric Dumazet 	tp->segs_out = 0;
3086ba113c3aSWei Wang 	tp->bytes_sent = 0;
3087e858faf5SChristoph Paasch 	tp->bytes_acked = 0;
3088e858faf5SChristoph Paasch 	tp->bytes_received = 0;
3089fb31c9b9SWei Wang 	tp->bytes_retrans = 0;
3090db7ffee6SEric Dumazet 	tp->data_segs_in = 0;
3091db7ffee6SEric Dumazet 	tp->data_segs_out = 0;
30927788174eSYuchung Cheng 	tp->duplicate_sack[0].start_seq = 0;
30937788174eSYuchung Cheng 	tp->duplicate_sack[0].end_seq = 0;
30947e10b655SWei Wang 	tp->dsack_dups = 0;
30957ec65372SWei Wang 	tp->reord_seen = 0;
30965c701549SEric Dumazet 	tp->retrans_out = 0;
30975c701549SEric Dumazet 	tp->sacked_out = 0;
30985c701549SEric Dumazet 	tp->tlp_high_seq = 0;
30995c701549SEric Dumazet 	tp->last_oow_ack_time = 0;
310029c1c446SMubashir Adnan Qureshi 	tp->plb_rehash = 0;
31016cda8b74SEric Dumazet 	/* There's a bubble in the pipe until at least the first ACK. */
31026cda8b74SEric Dumazet 	tp->app_limited = ~0U;
3103300b655dSDavid Morley 	tp->rate_app_limited = 1;
3104792c4354SEric Dumazet 	tp->rack.mstamp = 0;
3105792c4354SEric Dumazet 	tp->rack.advanced = 0;
3106792c4354SEric Dumazet 	tp->rack.reo_wnd_steps = 1;
3107792c4354SEric Dumazet 	tp->rack.last_delivered = 0;
3108792c4354SEric Dumazet 	tp->rack.reo_wnd_persist = 0;
3109792c4354SEric Dumazet 	tp->rack.dsack_seen = 0;
31106bcdc40dSEric Dumazet 	tp->syn_data_acked = 0;
31116bcdc40dSEric Dumazet 	tp->rx_opt.saw_tstamp = 0;
31126bcdc40dSEric Dumazet 	tp->rx_opt.dsack = 0;
31136bcdc40dSEric Dumazet 	tp->rx_opt.num_sacks = 0;
3114f9af2dbbSThomas Higdon 	tp->rcv_ooopack = 0;
31156cda8b74SEric Dumazet 
31161da177e4SLinus Torvalds 
31177db92362SWei Wang 	/* Clean up fastopen related fields */
31187db92362SWei Wang 	tcp_free_fastopen_req(tp);
311908e39c0dSEric Dumazet 	inet_clear_bit(DEFER_CONNECT, sk);
312048027478SJason Baron 	tp->fastopen_client_fail = 0;
31217db92362SWei Wang 
3122c720c7e8SEric Dumazet 	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
31231da177e4SLinus Torvalds 
31249b42d55aSLi RongQing 	if (sk->sk_frag.page) {
31259b42d55aSLi RongQing 		put_page(sk->sk_frag.page);
31269b42d55aSLi RongQing 		sk->sk_frag.page = NULL;
31279b42d55aSLi RongQing 		sk->sk_frag.offset = 0;
31289b42d55aSLi RongQing 	}
3129e3ae2365SAlexander Aring 	sk_error_report(sk);
3130a01512b1SYueHaibing 	return 0;
31311da177e4SLinus Torvalds }
31324bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_disconnect);
31331da177e4SLinus Torvalds 
3134a2a385d6SEric Dumazet static inline bool tcp_can_repair_sock(const struct sock *sk)
3135ee995283SPavel Emelyanov {
3136cb388e7eSMartin KaFai Lau 	return sockopt_ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
3137319b0534SAndrey Vagin 		(sk->sk_state != TCP_LISTEN);
3138ee995283SPavel Emelyanov }
3139ee995283SPavel Emelyanov 
3140d38d2b00SChristoph Hellwig static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len)
3141b1ed4c4fSAndrey Vagin {
3142b1ed4c4fSAndrey Vagin 	struct tcp_repair_window opt;
3143b1ed4c4fSAndrey Vagin 
3144b1ed4c4fSAndrey Vagin 	if (!tp->repair)
3145b1ed4c4fSAndrey Vagin 		return -EPERM;
3146b1ed4c4fSAndrey Vagin 
3147b1ed4c4fSAndrey Vagin 	if (len != sizeof(opt))
3148b1ed4c4fSAndrey Vagin 		return -EINVAL;
3149b1ed4c4fSAndrey Vagin 
3150d38d2b00SChristoph Hellwig 	if (copy_from_sockptr(&opt, optbuf, sizeof(opt)))
3151b1ed4c4fSAndrey Vagin 		return -EFAULT;
3152b1ed4c4fSAndrey Vagin 
3153b1ed4c4fSAndrey Vagin 	if (opt.max_window < opt.snd_wnd)
3154b1ed4c4fSAndrey Vagin 		return -EINVAL;
3155b1ed4c4fSAndrey Vagin 
3156b1ed4c4fSAndrey Vagin 	if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
3157b1ed4c4fSAndrey Vagin 		return -EINVAL;
3158b1ed4c4fSAndrey Vagin 
3159b1ed4c4fSAndrey Vagin 	if (after(opt.rcv_wup, tp->rcv_nxt))
3160b1ed4c4fSAndrey Vagin 		return -EINVAL;
3161b1ed4c4fSAndrey Vagin 
3162b1ed4c4fSAndrey Vagin 	tp->snd_wl1	= opt.snd_wl1;
3163b1ed4c4fSAndrey Vagin 	tp->snd_wnd	= opt.snd_wnd;
3164b1ed4c4fSAndrey Vagin 	tp->max_window	= opt.max_window;
3165b1ed4c4fSAndrey Vagin 
3166b1ed4c4fSAndrey Vagin 	tp->rcv_wnd	= opt.rcv_wnd;
3167b1ed4c4fSAndrey Vagin 	tp->rcv_wup	= opt.rcv_wup;
3168b1ed4c4fSAndrey Vagin 
3169b1ed4c4fSAndrey Vagin 	return 0;
3170b1ed4c4fSAndrey Vagin }
3171b1ed4c4fSAndrey Vagin 
3172d38d2b00SChristoph Hellwig static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf,
3173d38d2b00SChristoph Hellwig 		unsigned int len)
3174b139ba4eSPavel Emelyanov {
317515e56515SDouglas Caetano dos Santos 	struct tcp_sock *tp = tcp_sk(sk);
3176de248a75SPavel Emelyanov 	struct tcp_repair_opt opt;
3177d3c48151SChristoph Hellwig 	size_t offset = 0;
3178b139ba4eSPavel Emelyanov 
3179de248a75SPavel Emelyanov 	while (len >= sizeof(opt)) {
3180d3c48151SChristoph Hellwig 		if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt)))
3181b139ba4eSPavel Emelyanov 			return -EFAULT;
3182b139ba4eSPavel Emelyanov 
3183d3c48151SChristoph Hellwig 		offset += sizeof(opt);
3184de248a75SPavel Emelyanov 		len -= sizeof(opt);
3185b139ba4eSPavel Emelyanov 
3186de248a75SPavel Emelyanov 		switch (opt.opt_code) {
3187de248a75SPavel Emelyanov 		case TCPOPT_MSS:
3188de248a75SPavel Emelyanov 			tp->rx_opt.mss_clamp = opt.opt_val;
318915e56515SDouglas Caetano dos Santos 			tcp_mtup_init(sk);
3190b139ba4eSPavel Emelyanov 			break;
3191de248a75SPavel Emelyanov 		case TCPOPT_WINDOW:
3192bc26ccd8SAndrey Vagin 			{
3193bc26ccd8SAndrey Vagin 				u16 snd_wscale = opt.opt_val & 0xFFFF;
3194bc26ccd8SAndrey Vagin 				u16 rcv_wscale = opt.opt_val >> 16;
3195bc26ccd8SAndrey Vagin 
3196589c49cbSGao Feng 				if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE)
3197b139ba4eSPavel Emelyanov 					return -EFBIG;
3198b139ba4eSPavel Emelyanov 
3199bc26ccd8SAndrey Vagin 				tp->rx_opt.snd_wscale = snd_wscale;
3200bc26ccd8SAndrey Vagin 				tp->rx_opt.rcv_wscale = rcv_wscale;
3201bc26ccd8SAndrey Vagin 				tp->rx_opt.wscale_ok = 1;
3202bc26ccd8SAndrey Vagin 			}
3203b139ba4eSPavel Emelyanov 			break;
3204b139ba4eSPavel Emelyanov 		case TCPOPT_SACK_PERM:
3205de248a75SPavel Emelyanov 			if (opt.opt_val != 0)
3206de248a75SPavel Emelyanov 				return -EINVAL;
3207de248a75SPavel Emelyanov 
3208b139ba4eSPavel Emelyanov 			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
3209b139ba4eSPavel Emelyanov 			break;
3210b139ba4eSPavel Emelyanov 		case TCPOPT_TIMESTAMP:
3211de248a75SPavel Emelyanov 			if (opt.opt_val != 0)
3212de248a75SPavel Emelyanov 				return -EINVAL;
3213de248a75SPavel Emelyanov 
3214b139ba4eSPavel Emelyanov 			tp->rx_opt.tstamp_ok = 1;
3215b139ba4eSPavel Emelyanov 			break;
3216b139ba4eSPavel Emelyanov 		}
3217b139ba4eSPavel Emelyanov 	}
3218b139ba4eSPavel Emelyanov 
3219b139ba4eSPavel Emelyanov 	return 0;
3220b139ba4eSPavel Emelyanov }
3221b139ba4eSPavel Emelyanov 
3222a842fe14SEric Dumazet DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
3223a842fe14SEric Dumazet EXPORT_SYMBOL(tcp_tx_delay_enabled);
3224a842fe14SEric Dumazet 
3225a842fe14SEric Dumazet static void tcp_enable_tx_delay(void)
3226a842fe14SEric Dumazet {
3227a842fe14SEric Dumazet 	if (!static_branch_unlikely(&tcp_tx_delay_enabled)) {
3228a842fe14SEric Dumazet 		static int __tcp_tx_delay_enabled = 0;
3229a842fe14SEric Dumazet 
3230a842fe14SEric Dumazet 		if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) {
3231a842fe14SEric Dumazet 			static_branch_enable(&tcp_tx_delay_enabled);
3232a842fe14SEric Dumazet 			pr_info("TCP_TX_DELAY enabled\n");
3233a842fe14SEric Dumazet 		}
3234a842fe14SEric Dumazet 	}
3235a842fe14SEric Dumazet }
3236a842fe14SEric Dumazet 
3237db10538aSChristoph Hellwig /* When set indicates to always queue non-full frames.  Later the user clears
3238db10538aSChristoph Hellwig  * this option and we transmit any pending partial frames in the queue.  This is
3239db10538aSChristoph Hellwig  * meant to be used alongside sendfile() to get properly filled frames when the
3240db10538aSChristoph Hellwig  * user (for example) must write out headers with a write() call first and then
3241db10538aSChristoph Hellwig  * use sendfile to send out the data parts.
3242db10538aSChristoph Hellwig  *
3243db10538aSChristoph Hellwig  * TCP_CORK can be set together with TCP_NODELAY and it is stronger than
3244db10538aSChristoph Hellwig  * TCP_NODELAY.
3245db10538aSChristoph Hellwig  */
32466fadaa56SMaxim Galaganov void __tcp_sock_set_cork(struct sock *sk, bool on)
3247db10538aSChristoph Hellwig {
3248db10538aSChristoph Hellwig 	struct tcp_sock *tp = tcp_sk(sk);
3249db10538aSChristoph Hellwig 
3250db10538aSChristoph Hellwig 	if (on) {
3251db10538aSChristoph Hellwig 		tp->nonagle |= TCP_NAGLE_CORK;
3252db10538aSChristoph Hellwig 	} else {
3253db10538aSChristoph Hellwig 		tp->nonagle &= ~TCP_NAGLE_CORK;
3254db10538aSChristoph Hellwig 		if (tp->nonagle & TCP_NAGLE_OFF)
3255db10538aSChristoph Hellwig 			tp->nonagle |= TCP_NAGLE_PUSH;
3256db10538aSChristoph Hellwig 		tcp_push_pending_frames(sk);
3257db10538aSChristoph Hellwig 	}
3258db10538aSChristoph Hellwig }
3259db10538aSChristoph Hellwig 
3260db10538aSChristoph Hellwig void tcp_sock_set_cork(struct sock *sk, bool on)
3261db10538aSChristoph Hellwig {
3262db10538aSChristoph Hellwig 	lock_sock(sk);
3263db10538aSChristoph Hellwig 	__tcp_sock_set_cork(sk, on);
3264db10538aSChristoph Hellwig 	release_sock(sk);
3265db10538aSChristoph Hellwig }
3266db10538aSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_cork);
3267db10538aSChristoph Hellwig 
326812abc5eeSChristoph Hellwig /* TCP_NODELAY is weaker than TCP_CORK, so that this option on corked socket is
326912abc5eeSChristoph Hellwig  * remembered, but it is not activated until cork is cleared.
327012abc5eeSChristoph Hellwig  *
327112abc5eeSChristoph Hellwig  * However, when TCP_NODELAY is set we make an explicit push, which overrides
327212abc5eeSChristoph Hellwig  * even TCP_CORK for currently queued segments.
327312abc5eeSChristoph Hellwig  */
32746fadaa56SMaxim Galaganov void __tcp_sock_set_nodelay(struct sock *sk, bool on)
327512abc5eeSChristoph Hellwig {
327612abc5eeSChristoph Hellwig 	if (on) {
327712abc5eeSChristoph Hellwig 		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
327812abc5eeSChristoph Hellwig 		tcp_push_pending_frames(sk);
327912abc5eeSChristoph Hellwig 	} else {
328012abc5eeSChristoph Hellwig 		tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF;
328112abc5eeSChristoph Hellwig 	}
328212abc5eeSChristoph Hellwig }
328312abc5eeSChristoph Hellwig 
328412abc5eeSChristoph Hellwig void tcp_sock_set_nodelay(struct sock *sk)
328512abc5eeSChristoph Hellwig {
328612abc5eeSChristoph Hellwig 	lock_sock(sk);
328712abc5eeSChristoph Hellwig 	__tcp_sock_set_nodelay(sk, true);
328812abc5eeSChristoph Hellwig 	release_sock(sk);
328912abc5eeSChristoph Hellwig }
329012abc5eeSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_nodelay);
329112abc5eeSChristoph Hellwig 
3292ddd061b8SChristoph Hellwig static void __tcp_sock_set_quickack(struct sock *sk, int val)
3293ddd061b8SChristoph Hellwig {
3294ddd061b8SChristoph Hellwig 	if (!val) {
3295ddd061b8SChristoph Hellwig 		inet_csk_enter_pingpong_mode(sk);
3296ddd061b8SChristoph Hellwig 		return;
3297ddd061b8SChristoph Hellwig 	}
3298ddd061b8SChristoph Hellwig 
3299ddd061b8SChristoph Hellwig 	inet_csk_exit_pingpong_mode(sk);
3300ddd061b8SChristoph Hellwig 	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
3301ddd061b8SChristoph Hellwig 	    inet_csk_ack_scheduled(sk)) {
3302ddd061b8SChristoph Hellwig 		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED;
3303ddd061b8SChristoph Hellwig 		tcp_cleanup_rbuf(sk, 1);
3304ddd061b8SChristoph Hellwig 		if (!(val & 1))
3305ddd061b8SChristoph Hellwig 			inet_csk_enter_pingpong_mode(sk);
3306ddd061b8SChristoph Hellwig 	}
3307ddd061b8SChristoph Hellwig }
3308ddd061b8SChristoph Hellwig 
3309ddd061b8SChristoph Hellwig void tcp_sock_set_quickack(struct sock *sk, int val)
3310ddd061b8SChristoph Hellwig {
3311ddd061b8SChristoph Hellwig 	lock_sock(sk);
3312ddd061b8SChristoph Hellwig 	__tcp_sock_set_quickack(sk, val);
3313ddd061b8SChristoph Hellwig 	release_sock(sk);
3314ddd061b8SChristoph Hellwig }
3315ddd061b8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_quickack);
3316ddd061b8SChristoph Hellwig 
3317557eadfcSChristoph Hellwig int tcp_sock_set_syncnt(struct sock *sk, int val)
3318557eadfcSChristoph Hellwig {
3319557eadfcSChristoph Hellwig 	if (val < 1 || val > MAX_TCP_SYNCNT)
3320557eadfcSChristoph Hellwig 		return -EINVAL;
3321557eadfcSChristoph Hellwig 
33223a037f0fSEric Dumazet 	WRITE_ONCE(inet_csk(sk)->icsk_syn_retries, val);
3323557eadfcSChristoph Hellwig 	return 0;
3324557eadfcSChristoph Hellwig }
3325557eadfcSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_syncnt);
3326557eadfcSChristoph Hellwig 
3327d58f2e15SEric Dumazet int tcp_sock_set_user_timeout(struct sock *sk, int val)
3328c488aeadSChristoph Hellwig {
3329d58f2e15SEric Dumazet 	/* Cap the max time in ms TCP will retry or probe the window
3330d58f2e15SEric Dumazet 	 * before giving up and aborting (ETIMEDOUT) a connection.
3331d58f2e15SEric Dumazet 	 */
3332d58f2e15SEric Dumazet 	if (val < 0)
3333d58f2e15SEric Dumazet 		return -EINVAL;
3334d58f2e15SEric Dumazet 
333526023e91SEric Dumazet 	WRITE_ONCE(inet_csk(sk)->icsk_user_timeout, val);
3336d58f2e15SEric Dumazet 	return 0;
3337c488aeadSChristoph Hellwig }
3338c488aeadSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_user_timeout);
3339c488aeadSChristoph Hellwig 
3340aad4a0a9SDmitry Yakunin int tcp_sock_set_keepidle_locked(struct sock *sk, int val)
334171c48eb8SChristoph Hellwig {
334271c48eb8SChristoph Hellwig 	struct tcp_sock *tp = tcp_sk(sk);
334371c48eb8SChristoph Hellwig 
334471c48eb8SChristoph Hellwig 	if (val < 1 || val > MAX_TCP_KEEPIDLE)
334571c48eb8SChristoph Hellwig 		return -EINVAL;
334671c48eb8SChristoph Hellwig 
33474164245cSEric Dumazet 	/* Paired with WRITE_ONCE() in keepalive_time_when() */
33484164245cSEric Dumazet 	WRITE_ONCE(tp->keepalive_time, val * HZ);
334971c48eb8SChristoph Hellwig 	if (sock_flag(sk, SOCK_KEEPOPEN) &&
335071c48eb8SChristoph Hellwig 	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
335171c48eb8SChristoph Hellwig 		u32 elapsed = keepalive_time_elapsed(tp);
335271c48eb8SChristoph Hellwig 
335371c48eb8SChristoph Hellwig 		if (tp->keepalive_time > elapsed)
335471c48eb8SChristoph Hellwig 			elapsed = tp->keepalive_time - elapsed;
335571c48eb8SChristoph Hellwig 		else
335671c48eb8SChristoph Hellwig 			elapsed = 0;
335771c48eb8SChristoph Hellwig 		inet_csk_reset_keepalive_timer(sk, elapsed);
335871c48eb8SChristoph Hellwig 	}
335971c48eb8SChristoph Hellwig 
336071c48eb8SChristoph Hellwig 	return 0;
336171c48eb8SChristoph Hellwig }
336271c48eb8SChristoph Hellwig 
336371c48eb8SChristoph Hellwig int tcp_sock_set_keepidle(struct sock *sk, int val)
336471c48eb8SChristoph Hellwig {
336571c48eb8SChristoph Hellwig 	int err;
336671c48eb8SChristoph Hellwig 
336771c48eb8SChristoph Hellwig 	lock_sock(sk);
3368aad4a0a9SDmitry Yakunin 	err = tcp_sock_set_keepidle_locked(sk, val);
336971c48eb8SChristoph Hellwig 	release_sock(sk);
337071c48eb8SChristoph Hellwig 	return err;
337171c48eb8SChristoph Hellwig }
337271c48eb8SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepidle);
337371c48eb8SChristoph Hellwig 
3374d41ecaacSChristoph Hellwig int tcp_sock_set_keepintvl(struct sock *sk, int val)
3375d41ecaacSChristoph Hellwig {
3376d41ecaacSChristoph Hellwig 	if (val < 1 || val > MAX_TCP_KEEPINTVL)
3377d41ecaacSChristoph Hellwig 		return -EINVAL;
3378d41ecaacSChristoph Hellwig 
33795ecf9d4fSEric Dumazet 	WRITE_ONCE(tcp_sk(sk)->keepalive_intvl, val * HZ);
3380d41ecaacSChristoph Hellwig 	return 0;
3381d41ecaacSChristoph Hellwig }
3382d41ecaacSChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepintvl);
3383d41ecaacSChristoph Hellwig 
3384480aeb96SChristoph Hellwig int tcp_sock_set_keepcnt(struct sock *sk, int val)
3385480aeb96SChristoph Hellwig {
3386480aeb96SChristoph Hellwig 	if (val < 1 || val > MAX_TCP_KEEPCNT)
3387480aeb96SChristoph Hellwig 		return -EINVAL;
3388480aeb96SChristoph Hellwig 
33896e5e1de6SEric Dumazet 	/* Paired with READ_ONCE() in keepalive_probes() */
33906e5e1de6SEric Dumazet 	WRITE_ONCE(tcp_sk(sk)->keepalive_probes, val);
3391480aeb96SChristoph Hellwig 	return 0;
3392480aeb96SChristoph Hellwig }
3393480aeb96SChristoph Hellwig EXPORT_SYMBOL(tcp_sock_set_keepcnt);
3394480aeb96SChristoph Hellwig 
3395cb811109SPrankur gupta int tcp_set_window_clamp(struct sock *sk, int val)
3396cb811109SPrankur gupta {
3397cb811109SPrankur gupta 	struct tcp_sock *tp = tcp_sk(sk);
3398cb811109SPrankur gupta 
3399cb811109SPrankur gupta 	if (!val) {
3400cb811109SPrankur gupta 		if (sk->sk_state != TCP_CLOSE)
3401cb811109SPrankur gupta 			return -EINVAL;
3402f410cbeaSEric Dumazet 		WRITE_ONCE(tp->window_clamp, 0);
3403cb811109SPrankur gupta 	} else {
340458d3aadeSPaolo Abeni 		u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
340558d3aadeSPaolo Abeni 		u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
3406cb811109SPrankur gupta 						SOCK_MIN_RCVBUF / 2 : val;
340758d3aadeSPaolo Abeni 
340858d3aadeSPaolo Abeni 		if (new_window_clamp == old_window_clamp)
340958d3aadeSPaolo Abeni 			return 0;
341058d3aadeSPaolo Abeni 
3411f410cbeaSEric Dumazet 		WRITE_ONCE(tp->window_clamp, new_window_clamp);
341258d3aadeSPaolo Abeni 		if (new_window_clamp < old_window_clamp) {
341358d3aadeSPaolo Abeni 			/* need to apply the reserved mem provisioning only
341458d3aadeSPaolo Abeni 			 * when shrinking the window clamp
341558d3aadeSPaolo Abeni 			 */
341658d3aadeSPaolo Abeni 			__tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
341758d3aadeSPaolo Abeni 
341858d3aadeSPaolo Abeni 		} else {
341958d3aadeSPaolo Abeni 			new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
342058d3aadeSPaolo Abeni 			tp->rcv_ssthresh = max(new_rcv_ssthresh,
342158d3aadeSPaolo Abeni 					       tp->rcv_ssthresh);
342258d3aadeSPaolo Abeni 		}
3423cb811109SPrankur gupta 	}
3424cb811109SPrankur gupta 	return 0;
3425cb811109SPrankur gupta }
3426cb811109SPrankur gupta 
34271da177e4SLinus Torvalds /*
34281da177e4SLinus Torvalds  *	Socket option code for TCP.
34291da177e4SLinus Torvalds  */
34300c751f70SMartin KaFai Lau int do_tcp_setsockopt(struct sock *sk, int level, int optname,
3431d38d2b00SChristoph Hellwig 		      sockptr_t optval, unsigned int optlen)
34321da177e4SLinus Torvalds {
34331da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3434463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
34351e579caaSNikolay Borisov 	struct net *net = sock_net(sk);
34361da177e4SLinus Torvalds 	int val;
34371da177e4SLinus Torvalds 	int err = 0;
34381da177e4SLinus Torvalds 
3439e56fb50fSWilliam Allen Simpson 	/* These are data/string values, all the others are ints */
3440e56fb50fSWilliam Allen Simpson 	switch (optname) {
3441e56fb50fSWilliam Allen Simpson 	case TCP_CONGESTION: {
34425f8ef48dSStephen Hemminger 		char name[TCP_CA_NAME_MAX];
34435f8ef48dSStephen Hemminger 
34445f8ef48dSStephen Hemminger 		if (optlen < 1)
34455f8ef48dSStephen Hemminger 			return -EINVAL;
34465f8ef48dSStephen Hemminger 
3447d38d2b00SChristoph Hellwig 		val = strncpy_from_sockptr(name, optval,
34484fdb78d3SAndrew Morton 					min_t(long, TCP_CA_NAME_MAX-1, optlen));
34495f8ef48dSStephen Hemminger 		if (val < 0)
34505f8ef48dSStephen Hemminger 			return -EFAULT;
34515f8ef48dSStephen Hemminger 		name[val] = 0;
34525f8ef48dSStephen Hemminger 
3453cb388e7eSMartin KaFai Lau 		sockopt_lock_sock(sk);
345484e5a0f2SMartin KaFai Lau 		err = tcp_set_congestion_control(sk, name, !has_current_bpf_ctx(),
3455cb388e7eSMartin KaFai Lau 						 sockopt_ns_capable(sock_net(sk)->user_ns,
34568d650cdeSEric Dumazet 								    CAP_NET_ADMIN));
3457cb388e7eSMartin KaFai Lau 		sockopt_release_sock(sk);
34585f8ef48dSStephen Hemminger 		return err;
34595f8ef48dSStephen Hemminger 	}
3460734942ccSDave Watson 	case TCP_ULP: {
3461734942ccSDave Watson 		char name[TCP_ULP_NAME_MAX];
3462734942ccSDave Watson 
3463734942ccSDave Watson 		if (optlen < 1)
3464734942ccSDave Watson 			return -EINVAL;
3465734942ccSDave Watson 
3466d38d2b00SChristoph Hellwig 		val = strncpy_from_sockptr(name, optval,
3467734942ccSDave Watson 					min_t(long, TCP_ULP_NAME_MAX - 1,
3468734942ccSDave Watson 					      optlen));
3469734942ccSDave Watson 		if (val < 0)
3470734942ccSDave Watson 			return -EFAULT;
3471734942ccSDave Watson 		name[val] = 0;
3472734942ccSDave Watson 
3473cb388e7eSMartin KaFai Lau 		sockopt_lock_sock(sk);
3474734942ccSDave Watson 		err = tcp_set_ulp(sk, name);
3475cb388e7eSMartin KaFai Lau 		sockopt_release_sock(sk);
3476734942ccSDave Watson 		return err;
3477734942ccSDave Watson 	}
34781fba70e5SYuchung Cheng 	case TCP_FASTOPEN_KEY: {
34790f1ce023SJason Baron 		__u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
34800f1ce023SJason Baron 		__u8 *backup_key = NULL;
34811fba70e5SYuchung Cheng 
34820f1ce023SJason Baron 		/* Allow a backup key as well to facilitate key rotation
34830f1ce023SJason Baron 		 * First key is the active one.
34840f1ce023SJason Baron 		 */
34850f1ce023SJason Baron 		if (optlen != TCP_FASTOPEN_KEY_LENGTH &&
34860f1ce023SJason Baron 		    optlen != TCP_FASTOPEN_KEY_BUF_LENGTH)
34871fba70e5SYuchung Cheng 			return -EINVAL;
34881fba70e5SYuchung Cheng 
3489d38d2b00SChristoph Hellwig 		if (copy_from_sockptr(key, optval, optlen))
34901fba70e5SYuchung Cheng 			return -EFAULT;
34911fba70e5SYuchung Cheng 
34920f1ce023SJason Baron 		if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH)
34930f1ce023SJason Baron 			backup_key = key + TCP_FASTOPEN_KEY_LENGTH;
34940f1ce023SJason Baron 
3495438ac880SArd Biesheuvel 		return tcp_fastopen_reset_cipher(net, sk, key, backup_key);
34961fba70e5SYuchung Cheng 	}
3497e56fb50fSWilliam Allen Simpson 	default:
3498e56fb50fSWilliam Allen Simpson 		/* fallthru */
3499e56fb50fSWilliam Allen Simpson 		break;
3500ccbd6a5aSJoe Perches 	}
35015f8ef48dSStephen Hemminger 
35021da177e4SLinus Torvalds 	if (optlen < sizeof(int))
35031da177e4SLinus Torvalds 		return -EINVAL;
35041da177e4SLinus Torvalds 
3505d38d2b00SChristoph Hellwig 	if (copy_from_sockptr(&val, optval, sizeof(val)))
35061da177e4SLinus Torvalds 		return -EFAULT;
35071da177e4SLinus Torvalds 
3508d44fd4a7SEric Dumazet 	/* Handle options that can be set without locking the socket. */
3509d44fd4a7SEric Dumazet 	switch (optname) {
3510d44fd4a7SEric Dumazet 	case TCP_SYNCNT:
3511d44fd4a7SEric Dumazet 		return tcp_sock_set_syncnt(sk, val);
3512d58f2e15SEric Dumazet 	case TCP_USER_TIMEOUT:
3513d58f2e15SEric Dumazet 		return tcp_sock_set_user_timeout(sk, val);
35146fd70a6bSEric Dumazet 	case TCP_KEEPINTVL:
35156fd70a6bSEric Dumazet 		return tcp_sock_set_keepintvl(sk, val);
351684485080SEric Dumazet 	case TCP_KEEPCNT:
351784485080SEric Dumazet 		return tcp_sock_set_keepcnt(sk, val);
3518a81722ddSEric Dumazet 	case TCP_LINGER2:
3519a81722ddSEric Dumazet 		if (val < 0)
3520a81722ddSEric Dumazet 			WRITE_ONCE(tp->linger2, -1);
3521a81722ddSEric Dumazet 		else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
3522a81722ddSEric Dumazet 			WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);
3523a81722ddSEric Dumazet 		else
3524a81722ddSEric Dumazet 			WRITE_ONCE(tp->linger2, val * HZ);
3525a81722ddSEric Dumazet 		return 0;
35266e97ba55SEric Dumazet 	case TCP_DEFER_ACCEPT:
35276e97ba55SEric Dumazet 		/* Translate value in seconds to number of retransmits */
35286e97ba55SEric Dumazet 		WRITE_ONCE(icsk->icsk_accept_queue.rskq_defer_accept,
35296e97ba55SEric Dumazet 			   secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
35306e97ba55SEric Dumazet 					   TCP_RTO_MAX / HZ));
35316e97ba55SEric Dumazet 		return 0;
3532d44fd4a7SEric Dumazet 	}
3533d44fd4a7SEric Dumazet 
3534cb388e7eSMartin KaFai Lau 	sockopt_lock_sock(sk);
35351da177e4SLinus Torvalds 
35361da177e4SLinus Torvalds 	switch (optname) {
35371da177e4SLinus Torvalds 	case TCP_MAXSEG:
35381da177e4SLinus Torvalds 		/* Values greater than interface MTU won't take effect. However
35391da177e4SLinus Torvalds 		 * at the point when this call is done we typically don't yet
3540a777f715SRohit Chavan 		 * know which interface is going to be used
3541a777f715SRohit Chavan 		 */
3542cfc62d87SGao Feng 		if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
35431da177e4SLinus Torvalds 			err = -EINVAL;
35441da177e4SLinus Torvalds 			break;
35451da177e4SLinus Torvalds 		}
35461da177e4SLinus Torvalds 		tp->rx_opt.user_mss = val;
35471da177e4SLinus Torvalds 		break;
35481da177e4SLinus Torvalds 
35491da177e4SLinus Torvalds 	case TCP_NODELAY:
355012abc5eeSChristoph Hellwig 		__tcp_sock_set_nodelay(sk, val);
35511da177e4SLinus Torvalds 		break;
35521da177e4SLinus Torvalds 
355336e31b0aSAndreas Petlund 	case TCP_THIN_LINEAR_TIMEOUTS:
355436e31b0aSAndreas Petlund 		if (val < 0 || val > 1)
355536e31b0aSAndreas Petlund 			err = -EINVAL;
355636e31b0aSAndreas Petlund 		else
355736e31b0aSAndreas Petlund 			tp->thin_lto = val;
355836e31b0aSAndreas Petlund 		break;
355936e31b0aSAndreas Petlund 
35607e380175SAndreas Petlund 	case TCP_THIN_DUPACK:
35617e380175SAndreas Petlund 		if (val < 0 || val > 1)
35627e380175SAndreas Petlund 			err = -EINVAL;
35637e380175SAndreas Petlund 		break;
35647e380175SAndreas Petlund 
3565ee995283SPavel Emelyanov 	case TCP_REPAIR:
3566ee995283SPavel Emelyanov 		if (!tcp_can_repair_sock(sk))
3567ee995283SPavel Emelyanov 			err = -EPERM;
356831048d7aSStefan Baranoff 		else if (val == TCP_REPAIR_ON) {
3569ee995283SPavel Emelyanov 			tp->repair = 1;
3570ee995283SPavel Emelyanov 			sk->sk_reuse = SK_FORCE_REUSE;
3571ee995283SPavel Emelyanov 			tp->repair_queue = TCP_NO_QUEUE;
357231048d7aSStefan Baranoff 		} else if (val == TCP_REPAIR_OFF) {
3573ee995283SPavel Emelyanov 			tp->repair = 0;
3574ee995283SPavel Emelyanov 			sk->sk_reuse = SK_NO_REUSE;
3575ee995283SPavel Emelyanov 			tcp_send_window_probe(sk);
357631048d7aSStefan Baranoff 		} else if (val == TCP_REPAIR_OFF_NO_WP) {
357731048d7aSStefan Baranoff 			tp->repair = 0;
357831048d7aSStefan Baranoff 			sk->sk_reuse = SK_NO_REUSE;
3579ee995283SPavel Emelyanov 		} else
3580ee995283SPavel Emelyanov 			err = -EINVAL;
3581ee995283SPavel Emelyanov 
3582ee995283SPavel Emelyanov 		break;
3583ee995283SPavel Emelyanov 
3584ee995283SPavel Emelyanov 	case TCP_REPAIR_QUEUE:
3585ee995283SPavel Emelyanov 		if (!tp->repair)
3586ee995283SPavel Emelyanov 			err = -EPERM;
3587bf2acc94SEric Dumazet 		else if ((unsigned int)val < TCP_QUEUES_NR)
3588ee995283SPavel Emelyanov 			tp->repair_queue = val;
3589ee995283SPavel Emelyanov 		else
3590ee995283SPavel Emelyanov 			err = -EINVAL;
3591ee995283SPavel Emelyanov 		break;
3592ee995283SPavel Emelyanov 
3593ee995283SPavel Emelyanov 	case TCP_QUEUE_SEQ:
35948811f4a9SEric Dumazet 		if (sk->sk_state != TCP_CLOSE) {
3595ee995283SPavel Emelyanov 			err = -EPERM;
35968811f4a9SEric Dumazet 		} else if (tp->repair_queue == TCP_SEND_QUEUE) {
35978811f4a9SEric Dumazet 			if (!tcp_rtx_queue_empty(sk))
35988811f4a9SEric Dumazet 				err = -EPERM;
35998811f4a9SEric Dumazet 			else
36000f317464SEric Dumazet 				WRITE_ONCE(tp->write_seq, val);
36018811f4a9SEric Dumazet 		} else if (tp->repair_queue == TCP_RECV_QUEUE) {
36028811f4a9SEric Dumazet 			if (tp->rcv_nxt != tp->copied_seq) {
36038811f4a9SEric Dumazet 				err = -EPERM;
36048811f4a9SEric Dumazet 			} else {
3605dba7d9b8SEric Dumazet 				WRITE_ONCE(tp->rcv_nxt, val);
36066cd6cbf5SEric Dumazet 				WRITE_ONCE(tp->copied_seq, val);
36076cd6cbf5SEric Dumazet 			}
36088811f4a9SEric Dumazet 		} else {
3609ee995283SPavel Emelyanov 			err = -EINVAL;
36108811f4a9SEric Dumazet 		}
3611ee995283SPavel Emelyanov 		break;
3612ee995283SPavel Emelyanov 
3613b139ba4eSPavel Emelyanov 	case TCP_REPAIR_OPTIONS:
3614b139ba4eSPavel Emelyanov 		if (!tp->repair)
3615b139ba4eSPavel Emelyanov 			err = -EINVAL;
36160c175da7SLu Wei 		else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent)
3617d38d2b00SChristoph Hellwig 			err = tcp_repair_options_est(sk, optval, optlen);
3618b139ba4eSPavel Emelyanov 		else
3619b139ba4eSPavel Emelyanov 			err = -EPERM;
3620b139ba4eSPavel Emelyanov 		break;
3621b139ba4eSPavel Emelyanov 
36221da177e4SLinus Torvalds 	case TCP_CORK:
3623db10538aSChristoph Hellwig 		__tcp_sock_set_cork(sk, val);
36241da177e4SLinus Torvalds 		break;
36251da177e4SLinus Torvalds 
36261da177e4SLinus Torvalds 	case TCP_KEEPIDLE:
3627aad4a0a9SDmitry Yakunin 		err = tcp_sock_set_keepidle_locked(sk, val);
36281da177e4SLinus Torvalds 		break;
3629cd8ae852SEric Dumazet 	case TCP_SAVE_SYN:
3630267cf9faSMartin KaFai Lau 		/* 0: disable, 1: enable, 2: start from ether_header */
3631267cf9faSMartin KaFai Lau 		if (val < 0 || val > 2)
3632cd8ae852SEric Dumazet 			err = -EINVAL;
3633cd8ae852SEric Dumazet 		else
3634cd8ae852SEric Dumazet 			tp->save_syn = val;
3635cd8ae852SEric Dumazet 		break;
3636cd8ae852SEric Dumazet 
36371da177e4SLinus Torvalds 	case TCP_WINDOW_CLAMP:
3638cb811109SPrankur gupta 		err = tcp_set_window_clamp(sk, val);
36391da177e4SLinus Torvalds 		break;
36401da177e4SLinus Torvalds 
36411da177e4SLinus Torvalds 	case TCP_QUICKACK:
3642ddd061b8SChristoph Hellwig 		__tcp_sock_set_quickack(sk, val);
36431da177e4SLinus Torvalds 		break;
36441da177e4SLinus Torvalds 
3645faadfabaSDmitry Safonov 	case TCP_AO_REPAIR:
3646965c00e4SDmitry Safonov 		if (!tcp_can_repair_sock(sk)) {
3647965c00e4SDmitry Safonov 			err = -EPERM;
3648965c00e4SDmitry Safonov 			break;
3649965c00e4SDmitry Safonov 		}
3650faadfabaSDmitry Safonov 		err = tcp_ao_set_repair(sk, optval, optlen);
3651faadfabaSDmitry Safonov 		break;
36524954f17dSDmitry Safonov #ifdef CONFIG_TCP_AO
36534954f17dSDmitry Safonov 	case TCP_AO_ADD_KEY:
36544954f17dSDmitry Safonov 	case TCP_AO_DEL_KEY:
36554954f17dSDmitry Safonov 	case TCP_AO_INFO: {
36564954f17dSDmitry Safonov 		/* If this is the first TCP-AO setsockopt() on the socket,
3657faadfabaSDmitry Safonov 		 * sk_state has to be LISTEN or CLOSE. Allow TCP_REPAIR
3658faadfabaSDmitry Safonov 		 * in any state.
36594954f17dSDmitry Safonov 		 */
3660faadfabaSDmitry Safonov 		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
3661faadfabaSDmitry Safonov 			goto ao_parse;
3662faadfabaSDmitry Safonov 		if (rcu_dereference_protected(tcp_sk(sk)->ao_info,
36634954f17dSDmitry Safonov 					      lockdep_sock_is_held(sk)))
3664faadfabaSDmitry Safonov 			goto ao_parse;
3665faadfabaSDmitry Safonov 		if (tp->repair)
3666faadfabaSDmitry Safonov 			goto ao_parse;
36674954f17dSDmitry Safonov 		err = -EISCONN;
36684954f17dSDmitry Safonov 		break;
3669faadfabaSDmitry Safonov ao_parse:
3670faadfabaSDmitry Safonov 		err = tp->af_specific->ao_parse(sk, optname, optval, optlen);
3671faadfabaSDmitry Safonov 		break;
36724954f17dSDmitry Safonov 	}
36734954f17dSDmitry Safonov #endif
3674cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
3675cfb6eeb4SYOSHIFUJI Hideaki 	case TCP_MD5SIG:
36768917a777SIvan Delalande 	case TCP_MD5SIG_EXT:
3677d38d2b00SChristoph Hellwig 		err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
3678cfb6eeb4SYOSHIFUJI Hideaki 		break;
3679cfb6eeb4SYOSHIFUJI Hideaki #endif
36808336886fSJerry Chu 	case TCP_FASTOPEN:
36818336886fSJerry Chu 		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
3682dfea2aa6SChristoph Paasch 		    TCPF_LISTEN))) {
368343713848SHaishuang Yan 			tcp_fastopen_init_key_once(net);
3684dfea2aa6SChristoph Paasch 
36850536fcc0SEric Dumazet 			fastopen_queue_tune(sk, val);
3686dfea2aa6SChristoph Paasch 		} else {
36878336886fSJerry Chu 			err = -EINVAL;
3688dfea2aa6SChristoph Paasch 		}
36898336886fSJerry Chu 		break;
369019f6d3f3SWei Wang 	case TCP_FASTOPEN_CONNECT:
369119f6d3f3SWei Wang 		if (val > 1 || val < 0) {
369219f6d3f3SWei Wang 			err = -EINVAL;
36935a542133SKuniyuki Iwashima 		} else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) &
36945a542133SKuniyuki Iwashima 			   TFO_CLIENT_ENABLE) {
369519f6d3f3SWei Wang 			if (sk->sk_state == TCP_CLOSE)
369619f6d3f3SWei Wang 				tp->fastopen_connect = val;
369719f6d3f3SWei Wang 			else
369819f6d3f3SWei Wang 				err = -EINVAL;
369919f6d3f3SWei Wang 		} else {
370019f6d3f3SWei Wang 			err = -EOPNOTSUPP;
370119f6d3f3SWei Wang 		}
370219f6d3f3SWei Wang 		break;
370371c02379SChristoph Paasch 	case TCP_FASTOPEN_NO_COOKIE:
370471c02379SChristoph Paasch 		if (val > 1 || val < 0)
370571c02379SChristoph Paasch 			err = -EINVAL;
370671c02379SChristoph Paasch 		else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
370771c02379SChristoph Paasch 			err = -EINVAL;
370871c02379SChristoph Paasch 		else
370971c02379SChristoph Paasch 			tp->fastopen_no_cookie = val;
371071c02379SChristoph Paasch 		break;
371193be6ce0SAndrey Vagin 	case TCP_TIMESTAMP:
3712614e8316SEric Dumazet 		if (!tp->repair) {
371393be6ce0SAndrey Vagin 			err = -EPERM;
3714614e8316SEric Dumazet 			break;
3715614e8316SEric Dumazet 		}
3716614e8316SEric Dumazet 		/* val is an opaque field,
3717614e8316SEric Dumazet 		 * and low order bit contains usec_ts enable bit.
3718614e8316SEric Dumazet 		 * Its a best effort, and we do not care if user makes an error.
3719614e8316SEric Dumazet 		 */
3720614e8316SEric Dumazet 		tp->tcp_usec_ts = val & 1;
3721614e8316SEric Dumazet 		WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts));
372293be6ce0SAndrey Vagin 		break;
3723b1ed4c4fSAndrey Vagin 	case TCP_REPAIR_WINDOW:
3724b1ed4c4fSAndrey Vagin 		err = tcp_repair_set_window(tp, optval, optlen);
3725b1ed4c4fSAndrey Vagin 		break;
3726c9bee3b7SEric Dumazet 	case TCP_NOTSENT_LOWAT:
37271aeb87bcSEric Dumazet 		WRITE_ONCE(tp->notsent_lowat, val);
3728c9bee3b7SEric Dumazet 		sk->sk_write_space(sk);
3729c9bee3b7SEric Dumazet 		break;
3730b75eba76SSoheil Hassas Yeganeh 	case TCP_INQ:
3731b75eba76SSoheil Hassas Yeganeh 		if (val > 1 || val < 0)
3732b75eba76SSoheil Hassas Yeganeh 			err = -EINVAL;
3733b75eba76SSoheil Hassas Yeganeh 		else
3734b75eba76SSoheil Hassas Yeganeh 			tp->recvmsg_inq = val;
3735b75eba76SSoheil Hassas Yeganeh 		break;
3736a842fe14SEric Dumazet 	case TCP_TX_DELAY:
3737a842fe14SEric Dumazet 		if (val)
3738a842fe14SEric Dumazet 			tcp_enable_tx_delay();
3739348b81b6SEric Dumazet 		WRITE_ONCE(tp->tcp_tx_delay, val);
3740a842fe14SEric Dumazet 		break;
37411da177e4SLinus Torvalds 	default:
37421da177e4SLinus Torvalds 		err = -ENOPROTOOPT;
37431da177e4SLinus Torvalds 		break;
37443ff50b79SStephen Hemminger 	}
37453ff50b79SStephen Hemminger 
3746cb388e7eSMartin KaFai Lau 	sockopt_release_sock(sk);
37471da177e4SLinus Torvalds 	return err;
37481da177e4SLinus Torvalds }
37491da177e4SLinus Torvalds 
3750a7b75c5aSChristoph Hellwig int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
3751b7058842SDavid S. Miller 		   unsigned int optlen)
37523fdadf7dSDmitry Mishin {
3753cf533ea5SEric Dumazet 	const struct inet_connection_sock *icsk = inet_csk(sk);
37543fdadf7dSDmitry Mishin 
37553fdadf7dSDmitry Mishin 	if (level != SOL_TCP)
3756f49cd2f4SKuniyuki Iwashima 		/* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
3757f49cd2f4SKuniyuki Iwashima 		return READ_ONCE(icsk->icsk_af_ops)->setsockopt(sk, level, optname,
37583fdadf7dSDmitry Mishin 								optval, optlen);
3759a7b75c5aSChristoph Hellwig 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
37603fdadf7dSDmitry Mishin }
37614bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_setsockopt);
37623fdadf7dSDmitry Mishin 
3763efd90174SFrancis Yan static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
3764efd90174SFrancis Yan 				      struct tcp_info *info)
3765efd90174SFrancis Yan {
3766efd90174SFrancis Yan 	u64 stats[__TCP_CHRONO_MAX], total = 0;
3767efd90174SFrancis Yan 	enum tcp_chrono i;
3768efd90174SFrancis Yan 
3769efd90174SFrancis Yan 	for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
3770efd90174SFrancis Yan 		stats[i] = tp->chrono_stat[i - 1];
3771efd90174SFrancis Yan 		if (i == tp->chrono_type)
3772628174ccSEric Dumazet 			stats[i] += tcp_jiffies32 - tp->chrono_start;
3773efd90174SFrancis Yan 		stats[i] *= USEC_PER_SEC / HZ;
3774efd90174SFrancis Yan 		total += stats[i];
3775efd90174SFrancis Yan 	}
3776efd90174SFrancis Yan 
3777efd90174SFrancis Yan 	info->tcpi_busy_time = total;
3778efd90174SFrancis Yan 	info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
3779efd90174SFrancis Yan 	info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
3780efd90174SFrancis Yan }
3781efd90174SFrancis Yan 
37821da177e4SLinus Torvalds /* Return information about state of tcp endpoint in API format. */
37830df48c26SEric Dumazet void tcp_get_info(struct sock *sk, struct tcp_info *info)
37841da177e4SLinus Torvalds {
378535ac838aSCraig Gallek 	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
3786463c84b9SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
378776a9ebe8SEric Dumazet 	unsigned long rate;
37880263598cSWei Wang 	u32 now;
3789ff5d7497SEric Dumazet 	u64 rate64;
379067db3e4bSEric Dumazet 	bool slow;
37911da177e4SLinus Torvalds 
37921da177e4SLinus Torvalds 	memset(info, 0, sizeof(*info));
379335ac838aSCraig Gallek 	if (sk->sk_type != SOCK_STREAM)
379435ac838aSCraig Gallek 		return;
37951da177e4SLinus Torvalds 
3796986ffdfdSYafang Shao 	info->tcpi_state = inet_sk_state_load(sk);
379700fd38d9SEric Dumazet 
3798ccbf3bfaSEric Dumazet 	/* Report meaningful fields for all TCP states, including listeners */
3799ccbf3bfaSEric Dumazet 	rate = READ_ONCE(sk->sk_pacing_rate);
380076a9ebe8SEric Dumazet 	rate64 = (rate != ~0UL) ? rate : ~0ULL;
3801f522a5fcSEric Dumazet 	info->tcpi_pacing_rate = rate64;
3802ccbf3bfaSEric Dumazet 
3803ccbf3bfaSEric Dumazet 	rate = READ_ONCE(sk->sk_max_pacing_rate);
380476a9ebe8SEric Dumazet 	rate64 = (rate != ~0UL) ? rate : ~0ULL;
3805f522a5fcSEric Dumazet 	info->tcpi_max_pacing_rate = rate64;
3806ccbf3bfaSEric Dumazet 
3807ccbf3bfaSEric Dumazet 	info->tcpi_reordering = tp->reordering;
380840570375SEric Dumazet 	info->tcpi_snd_cwnd = tcp_snd_cwnd(tp);
3809ccbf3bfaSEric Dumazet 
3810ccbf3bfaSEric Dumazet 	if (info->tcpi_state == TCP_LISTEN) {
3811ccbf3bfaSEric Dumazet 		/* listeners aliased fields :
3812ccbf3bfaSEric Dumazet 		 * tcpi_unacked -> Number of children ready for accept()
3813ccbf3bfaSEric Dumazet 		 * tcpi_sacked  -> max backlog
3814ccbf3bfaSEric Dumazet 		 */
3815288efe86SEric Dumazet 		info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
3816099ecf59SEric Dumazet 		info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);
3817ccbf3bfaSEric Dumazet 		return;
3818ccbf3bfaSEric Dumazet 	}
3819b369e7fdSEric Dumazet 
3820b369e7fdSEric Dumazet 	slow = lock_sock_fast(sk);
3821b369e7fdSEric Dumazet 
38226687e988SArnaldo Carvalho de Melo 	info->tcpi_ca_state = icsk->icsk_ca_state;
3823463c84b9SArnaldo Carvalho de Melo 	info->tcpi_retransmits = icsk->icsk_retransmits;
38246687e988SArnaldo Carvalho de Melo 	info->tcpi_probes = icsk->icsk_probes_out;
3825463c84b9SArnaldo Carvalho de Melo 	info->tcpi_backoff = icsk->icsk_backoff;
38261da177e4SLinus Torvalds 
38271da177e4SLinus Torvalds 	if (tp->rx_opt.tstamp_ok)
38281da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
3829e60402d0SIlpo Järvinen 	if (tcp_is_sack(tp))
38301da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_SACK;
38311da177e4SLinus Torvalds 	if (tp->rx_opt.wscale_ok) {
38321da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_WSCALE;
38331da177e4SLinus Torvalds 		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
38341da177e4SLinus Torvalds 		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
38351da177e4SLinus Torvalds 	}
38361da177e4SLinus Torvalds 
38371da177e4SLinus Torvalds 	if (tp->ecn_flags & TCP_ECN_OK)
38381da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_ECN;
3839b5c5693bSEric Dumazet 	if (tp->ecn_flags & TCP_ECN_SEEN)
3840b5c5693bSEric Dumazet 		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
38416f73601eSYuchung Cheng 	if (tp->syn_data_acked)
38426f73601eSYuchung Cheng 		info->tcpi_options |= TCPI_OPT_SYN_DATA;
3843a77a0f5cSEric Dumazet 	if (tp->tcp_usec_ts)
3844a77a0f5cSEric Dumazet 		info->tcpi_options |= TCPI_OPT_USEC_TS;
38451da177e4SLinus Torvalds 
3846463c84b9SArnaldo Carvalho de Melo 	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
384795b9a87cSDavid Morley 	info->tcpi_ato = jiffies_to_usecs(min_t(u32, icsk->icsk_ack.ato,
3848bbf80d71SEric Dumazet 						tcp_delack_max(sk)));
3849c1b4a7e6SDavid S. Miller 	info->tcpi_snd_mss = tp->mss_cache;
3850463c84b9SArnaldo Carvalho de Melo 	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
38511da177e4SLinus Torvalds 
38521da177e4SLinus Torvalds 	info->tcpi_unacked = tp->packets_out;
38531da177e4SLinus Torvalds 	info->tcpi_sacked = tp->sacked_out;
3854ccbf3bfaSEric Dumazet 
38551da177e4SLinus Torvalds 	info->tcpi_lost = tp->lost_out;
38561da177e4SLinus Torvalds 	info->tcpi_retrans = tp->retrans_out;
38571da177e4SLinus Torvalds 
3858d635fbe2SEric Dumazet 	now = tcp_jiffies32;
38591da177e4SLinus Torvalds 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
3860463c84b9SArnaldo Carvalho de Melo 	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
38611da177e4SLinus Torvalds 	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
38621da177e4SLinus Torvalds 
3863d83d8461SArnaldo Carvalho de Melo 	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
38641da177e4SLinus Torvalds 	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
3865740b0f18SEric Dumazet 	info->tcpi_rtt = tp->srtt_us >> 3;
3866740b0f18SEric Dumazet 	info->tcpi_rttvar = tp->mdev_us >> 2;
38671da177e4SLinus Torvalds 	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
38681da177e4SLinus Torvalds 	info->tcpi_advmss = tp->advmss;
38691da177e4SLinus Torvalds 
3870645f4c6fSEric Dumazet 	info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
38711da177e4SLinus Torvalds 	info->tcpi_rcv_space = tp->rcvq_space.space;
38721da177e4SLinus Torvalds 
38731da177e4SLinus Torvalds 	info->tcpi_total_retrans = tp->total_retrans;
3874977cb0ecSEric Dumazet 
3875f522a5fcSEric Dumazet 	info->tcpi_bytes_acked = tp->bytes_acked;
3876f522a5fcSEric Dumazet 	info->tcpi_bytes_received = tp->bytes_received;
387767db3e4bSEric Dumazet 	info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
3878efd90174SFrancis Yan 	tcp_get_info_chrono_stats(tp, info);
387967db3e4bSEric Dumazet 
38802efd055cSMarcelo Ricardo Leitner 	info->tcpi_segs_out = tp->segs_out;
38810307a0b7SEric Dumazet 
38820307a0b7SEric Dumazet 	/* segs_in and data_segs_in can be updated from tcp_segs_in() from BH */
38830307a0b7SEric Dumazet 	info->tcpi_segs_in = READ_ONCE(tp->segs_in);
38840307a0b7SEric Dumazet 	info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in);
3885cd9b2660SEric Dumazet 
3886cd9b2660SEric Dumazet 	info->tcpi_min_rtt = tcp_min_rtt(tp);
3887a44d6eacSMartin KaFai Lau 	info->tcpi_data_segs_out = tp->data_segs_out;
3888eb8329e0SYuchung Cheng 
3889eb8329e0SYuchung Cheng 	info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
38900263598cSWei Wang 	rate64 = tcp_compute_delivery_rate(tp);
38910263598cSWei Wang 	if (rate64)
3892f522a5fcSEric Dumazet 		info->tcpi_delivery_rate = rate64;
3893feb5f2ecSYuchung Cheng 	info->tcpi_delivered = tp->delivered;
3894feb5f2ecSYuchung Cheng 	info->tcpi_delivered_ce = tp->delivered_ce;
3895ba113c3aSWei Wang 	info->tcpi_bytes_sent = tp->bytes_sent;
3896fb31c9b9SWei Wang 	info->tcpi_bytes_retrans = tp->bytes_retrans;
38977e10b655SWei Wang 	info->tcpi_dsack_dups = tp->dsack_dups;
38987ec65372SWei Wang 	info->tcpi_reord_seen = tp->reord_seen;
3899f9af2dbbSThomas Higdon 	info->tcpi_rcv_ooopack = tp->rcv_ooopack;
39008f7baad7SThomas Higdon 	info->tcpi_snd_wnd = tp->snd_wnd;
390171fc7047SMubashir Adnan Qureshi 	info->tcpi_rcv_wnd = tp->rcv_wnd;
390271fc7047SMubashir Adnan Qureshi 	info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash;
390348027478SJason Baron 	info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
39043868ab0fSAananth V 
39053868ab0fSAananth V 	info->tcpi_total_rto = tp->total_rto;
39063868ab0fSAananth V 	info->tcpi_total_rto_recoveries = tp->total_rto_recoveries;
39073868ab0fSAananth V 	info->tcpi_total_rto_time = tp->total_rto_time;
39082a7c8d29SEric Dumazet 	if (tp->rto_stamp)
39092a7c8d29SEric Dumazet 		info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp;
39103868ab0fSAananth V 
3911b369e7fdSEric Dumazet 	unlock_sock_fast(sk, slow);
39121da177e4SLinus Torvalds }
39131da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info);
39141da177e4SLinus Torvalds 
3915984988aaSWei Wang static size_t tcp_opt_stats_get_size(void)
3916984988aaSWei Wang {
3917984988aaSWei Wang 	return
3918984988aaSWei Wang 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */
3919984988aaSWei Wang 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */
3920984988aaSWei Wang 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */
3921984988aaSWei Wang 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */
3922984988aaSWei Wang 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */
3923984988aaSWei Wang 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */
3924984988aaSWei Wang 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */
3925984988aaSWei Wang 		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */
3926984988aaSWei Wang 		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */
3927984988aaSWei Wang 		nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */
3928984988aaSWei Wang 		nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */
3929984988aaSWei Wang 		nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */
3930984988aaSWei Wang 		nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */
3931984988aaSWei Wang 		nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */
3932984988aaSWei Wang 		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */
3933984988aaSWei Wang 		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */
3934984988aaSWei Wang 		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */
3935ba113c3aSWei Wang 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
3936fb31c9b9SWei Wang 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
39377e10b655SWei Wang 		nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
39387ec65372SWei Wang 		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
3939e8bd8fcaSYousuk Seung 		nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
394032efcc06SAbdul Kabbani 		nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */
3941e08ab0b3SYousuk Seung 		nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */
394248040793SYousuk Seung 		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */
3943e7ed11eeSYousuk Seung 		nla_total_size(sizeof(u8)) + /* TCP_NLA_TTL */
394429c1c446SMubashir Adnan Qureshi 		nla_total_size(sizeof(u32)) + /* TCP_NLA_REHASH */
3945984988aaSWei Wang 		0;
3946984988aaSWei Wang }
3947984988aaSWei Wang 
3948e7ed11eeSYousuk Seung /* Returns TTL or hop limit of an incoming packet from skb. */
3949e7ed11eeSYousuk Seung static u8 tcp_skb_ttl_or_hop_limit(const struct sk_buff *skb)
3950e7ed11eeSYousuk Seung {
3951e7ed11eeSYousuk Seung 	if (skb->protocol == htons(ETH_P_IP))
3952e7ed11eeSYousuk Seung 		return ip_hdr(skb)->ttl;
3953e7ed11eeSYousuk Seung 	else if (skb->protocol == htons(ETH_P_IPV6))
3954e7ed11eeSYousuk Seung 		return ipv6_hdr(skb)->hop_limit;
3955e7ed11eeSYousuk Seung 	else
3956e7ed11eeSYousuk Seung 		return 0;
3957e7ed11eeSYousuk Seung }
3958e7ed11eeSYousuk Seung 
395948040793SYousuk Seung struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
3960e7ed11eeSYousuk Seung 					       const struct sk_buff *orig_skb,
3961e7ed11eeSYousuk Seung 					       const struct sk_buff *ack_skb)
39621c885808SFrancis Yan {
39631c885808SFrancis Yan 	const struct tcp_sock *tp = tcp_sk(sk);
39641c885808SFrancis Yan 	struct sk_buff *stats;
39651c885808SFrancis Yan 	struct tcp_info info;
396676a9ebe8SEric Dumazet 	unsigned long rate;
3967bb7c19f9SWei Wang 	u64 rate64;
39681c885808SFrancis Yan 
3969984988aaSWei Wang 	stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
39701c885808SFrancis Yan 	if (!stats)
39711c885808SFrancis Yan 		return NULL;
39721c885808SFrancis Yan 
39731c885808SFrancis Yan 	tcp_get_info_chrono_stats(tp, &info);
39741c885808SFrancis Yan 	nla_put_u64_64bit(stats, TCP_NLA_BUSY,
39751c885808SFrancis Yan 			  info.tcpi_busy_time, TCP_NLA_PAD);
39761c885808SFrancis Yan 	nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
39771c885808SFrancis Yan 			  info.tcpi_rwnd_limited, TCP_NLA_PAD);
39781c885808SFrancis Yan 	nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
39791c885808SFrancis Yan 			  info.tcpi_sndbuf_limited, TCP_NLA_PAD);
39807e98102fSYuchung Cheng 	nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
39817e98102fSYuchung Cheng 			  tp->data_segs_out, TCP_NLA_PAD);
39827e98102fSYuchung Cheng 	nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
39837e98102fSYuchung Cheng 			  tp->total_retrans, TCP_NLA_PAD);
3984bb7c19f9SWei Wang 
3985bb7c19f9SWei Wang 	rate = READ_ONCE(sk->sk_pacing_rate);
398676a9ebe8SEric Dumazet 	rate64 = (rate != ~0UL) ? rate : ~0ULL;
3987bb7c19f9SWei Wang 	nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
3988bb7c19f9SWei Wang 
3989bb7c19f9SWei Wang 	rate64 = tcp_compute_delivery_rate(tp);
3990bb7c19f9SWei Wang 	nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
3991bb7c19f9SWei Wang 
399240570375SEric Dumazet 	nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
3993bb7c19f9SWei Wang 	nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
3994bb7c19f9SWei Wang 	nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
3995bb7c19f9SWei Wang 
3996bb7c19f9SWei Wang 	nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
3997bb7c19f9SWei Wang 	nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
39987156d194SYousuk Seung 	nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
3999feb5f2ecSYuchung Cheng 	nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
4000feb5f2ecSYuchung Cheng 	nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);
400187ecc95dSPriyaranjan Jha 
400287ecc95dSPriyaranjan Jha 	nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
4003be631892SPriyaranjan Jha 	nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
4004feb5f2ecSYuchung Cheng 
4005ba113c3aSWei Wang 	nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
4006ba113c3aSWei Wang 			  TCP_NLA_PAD);
4007fb31c9b9SWei Wang 	nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
4008fb31c9b9SWei Wang 			  TCP_NLA_PAD);
40097e10b655SWei Wang 	nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
40107ec65372SWei Wang 	nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
4011e8bd8fcaSYousuk Seung 	nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
401232efcc06SAbdul Kabbani 	nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
4013e08ab0b3SYousuk Seung 	nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT,
4014e08ab0b3SYousuk Seung 		    max_t(int, 0, tp->write_seq - tp->snd_nxt));
401548040793SYousuk Seung 	nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns,
401648040793SYousuk Seung 			  TCP_NLA_PAD);
4017e7ed11eeSYousuk Seung 	if (ack_skb)
4018e7ed11eeSYousuk Seung 		nla_put_u8(stats, TCP_NLA_TTL,
4019e7ed11eeSYousuk Seung 			   tcp_skb_ttl_or_hop_limit(ack_skb));
4020ba113c3aSWei Wang 
402129c1c446SMubashir Adnan Qureshi 	nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash);
40221c885808SFrancis Yan 	return stats;
40231c885808SFrancis Yan }
40241c885808SFrancis Yan 
4025273b7f0fSMartin KaFai Lau int do_tcp_getsockopt(struct sock *sk, int level,
402634704ef0SMartin KaFai Lau 		      int optname, sockptr_t optval, sockptr_t optlen)
40271da177e4SLinus Torvalds {
4028295f7324SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
40291da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
40306fa25166SNikolay Borisov 	struct net *net = sock_net(sk);
40311da177e4SLinus Torvalds 	int val, len;
40321da177e4SLinus Torvalds 
403334704ef0SMartin KaFai Lau 	if (copy_from_sockptr(&len, optlen, sizeof(int)))
40341da177e4SLinus Torvalds 		return -EFAULT;
40351da177e4SLinus Torvalds 
40361da177e4SLinus Torvalds 	if (len < 0)
40371da177e4SLinus Torvalds 		return -EINVAL;
40381da177e4SLinus Torvalds 
4039716edc97SGavrilov Ilia 	len = min_t(unsigned int, len, sizeof(int));
4040716edc97SGavrilov Ilia 
40411da177e4SLinus Torvalds 	switch (optname) {
40421da177e4SLinus Torvalds 	case TCP_MAXSEG:
4043c1b4a7e6SDavid S. Miller 		val = tp->mss_cache;
404434dfde4aSCambda Zhu 		if (tp->rx_opt.user_mss &&
404534dfde4aSCambda Zhu 		    ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
40461da177e4SLinus Torvalds 			val = tp->rx_opt.user_mss;
40475e6a3ce6SPavel Emelyanov 		if (tp->repair)
40485e6a3ce6SPavel Emelyanov 			val = tp->rx_opt.mss_clamp;
40491da177e4SLinus Torvalds 		break;
40501da177e4SLinus Torvalds 	case TCP_NODELAY:
40511da177e4SLinus Torvalds 		val = !!(tp->nonagle&TCP_NAGLE_OFF);
40521da177e4SLinus Torvalds 		break;
40531da177e4SLinus Torvalds 	case TCP_CORK:
40541da177e4SLinus Torvalds 		val = !!(tp->nonagle&TCP_NAGLE_CORK);
40551da177e4SLinus Torvalds 		break;
40561da177e4SLinus Torvalds 	case TCP_KEEPIDLE:
4057df19a626SEric Dumazet 		val = keepalive_time_when(tp) / HZ;
40581da177e4SLinus Torvalds 		break;
40591da177e4SLinus Torvalds 	case TCP_KEEPINTVL:
4060df19a626SEric Dumazet 		val = keepalive_intvl_when(tp) / HZ;
40611da177e4SLinus Torvalds 		break;
40621da177e4SLinus Torvalds 	case TCP_KEEPCNT:
4063df19a626SEric Dumazet 		val = keepalive_probes(tp);
40641da177e4SLinus Torvalds 		break;
40651da177e4SLinus Torvalds 	case TCP_SYNCNT:
40663a037f0fSEric Dumazet 		val = READ_ONCE(icsk->icsk_syn_retries) ? :
406720a3b1c0SKuniyuki Iwashima 			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
40681da177e4SLinus Torvalds 		break;
40691da177e4SLinus Torvalds 	case TCP_LINGER2:
40709df5335cSEric Dumazet 		val = READ_ONCE(tp->linger2);
40711da177e4SLinus Torvalds 		if (val >= 0)
407239e24435SKuniyuki Iwashima 			val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
40731da177e4SLinus Torvalds 		break;
40741da177e4SLinus Torvalds 	case TCP_DEFER_ACCEPT:
4075ae488c74SEric Dumazet 		val = READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept);
4076ae488c74SEric Dumazet 		val = retrans_to_secs(val, TCP_TIMEOUT_INIT / HZ,
4077ae488c74SEric Dumazet 				      TCP_RTO_MAX / HZ);
40781da177e4SLinus Torvalds 		break;
40791da177e4SLinus Torvalds 	case TCP_WINDOW_CLAMP:
4080f410cbeaSEric Dumazet 		val = READ_ONCE(tp->window_clamp);
40811da177e4SLinus Torvalds 		break;
40821da177e4SLinus Torvalds 	case TCP_INFO: {
40831da177e4SLinus Torvalds 		struct tcp_info info;
40841da177e4SLinus Torvalds 
408534704ef0SMartin KaFai Lau 		if (copy_from_sockptr(&len, optlen, sizeof(int)))
40861da177e4SLinus Torvalds 			return -EFAULT;
40871da177e4SLinus Torvalds 
40881da177e4SLinus Torvalds 		tcp_get_info(sk, &info);
40891da177e4SLinus Torvalds 
40901da177e4SLinus Torvalds 		len = min_t(unsigned int, len, sizeof(info));
409134704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optlen, &len, sizeof(int)))
40921da177e4SLinus Torvalds 			return -EFAULT;
409334704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optval, &info, len))
40941da177e4SLinus Torvalds 			return -EFAULT;
40951da177e4SLinus Torvalds 		return 0;
40961da177e4SLinus Torvalds 	}
40976e9250f5SEric Dumazet 	case TCP_CC_INFO: {
40986e9250f5SEric Dumazet 		const struct tcp_congestion_ops *ca_ops;
40996e9250f5SEric Dumazet 		union tcp_cc_info info;
41006e9250f5SEric Dumazet 		size_t sz = 0;
41016e9250f5SEric Dumazet 		int attr;
41026e9250f5SEric Dumazet 
410334704ef0SMartin KaFai Lau 		if (copy_from_sockptr(&len, optlen, sizeof(int)))
41046e9250f5SEric Dumazet 			return -EFAULT;
41056e9250f5SEric Dumazet 
41066e9250f5SEric Dumazet 		ca_ops = icsk->icsk_ca_ops;
41076e9250f5SEric Dumazet 		if (ca_ops && ca_ops->get_info)
41086e9250f5SEric Dumazet 			sz = ca_ops->get_info(sk, ~0U, &attr, &info);
41096e9250f5SEric Dumazet 
41106e9250f5SEric Dumazet 		len = min_t(unsigned int, len, sz);
411134704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optlen, &len, sizeof(int)))
41126e9250f5SEric Dumazet 			return -EFAULT;
411334704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optval, &info, len))
41146e9250f5SEric Dumazet 			return -EFAULT;
41156e9250f5SEric Dumazet 		return 0;
41166e9250f5SEric Dumazet 	}
41171da177e4SLinus Torvalds 	case TCP_QUICKACK:
411831954cd8SWei Wang 		val = !inet_csk_in_pingpong_mode(sk);
41191da177e4SLinus Torvalds 		break;
41205f8ef48dSStephen Hemminger 
41215f8ef48dSStephen Hemminger 	case TCP_CONGESTION:
412234704ef0SMartin KaFai Lau 		if (copy_from_sockptr(&len, optlen, sizeof(int)))
41235f8ef48dSStephen Hemminger 			return -EFAULT;
41245f8ef48dSStephen Hemminger 		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
412534704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optlen, &len, sizeof(int)))
41265f8ef48dSStephen Hemminger 			return -EFAULT;
412734704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optval, icsk->icsk_ca_ops->name, len))
41285f8ef48dSStephen Hemminger 			return -EFAULT;
41295f8ef48dSStephen Hemminger 		return 0;
4130e56fb50fSWilliam Allen Simpson 
4131734942ccSDave Watson 	case TCP_ULP:
413234704ef0SMartin KaFai Lau 		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4133734942ccSDave Watson 			return -EFAULT;
4134734942ccSDave Watson 		len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
4135d97af30fSDave Watson 		if (!icsk->icsk_ulp_ops) {
413634704ef0SMartin KaFai Lau 			len = 0;
413734704ef0SMartin KaFai Lau 			if (copy_to_sockptr(optlen, &len, sizeof(int)))
4138d97af30fSDave Watson 				return -EFAULT;
4139d97af30fSDave Watson 			return 0;
4140d97af30fSDave Watson 		}
414134704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optlen, &len, sizeof(int)))
4142734942ccSDave Watson 			return -EFAULT;
414334704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optval, icsk->icsk_ulp_ops->name, len))
4144734942ccSDave Watson 			return -EFAULT;
4145734942ccSDave Watson 		return 0;
4146734942ccSDave Watson 
41471fba70e5SYuchung Cheng 	case TCP_FASTOPEN_KEY: {
4148f19008e6SJason Baron 		u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
4149f19008e6SJason Baron 		unsigned int key_len;
41501fba70e5SYuchung Cheng 
415134704ef0SMartin KaFai Lau 		if (copy_from_sockptr(&len, optlen, sizeof(int)))
41521fba70e5SYuchung Cheng 			return -EFAULT;
41531fba70e5SYuchung Cheng 
4154f19008e6SJason Baron 		key_len = tcp_fastopen_get_cipher(net, icsk, key) *
41550f1ce023SJason Baron 				TCP_FASTOPEN_KEY_LENGTH;
41560f1ce023SJason Baron 		len = min_t(unsigned int, len, key_len);
415734704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optlen, &len, sizeof(int)))
41581fba70e5SYuchung Cheng 			return -EFAULT;
415934704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optval, key, len))
41601fba70e5SYuchung Cheng 			return -EFAULT;
41611fba70e5SYuchung Cheng 		return 0;
41621fba70e5SYuchung Cheng 	}
41633c0fef0bSJosh Hunt 	case TCP_THIN_LINEAR_TIMEOUTS:
41643c0fef0bSJosh Hunt 		val = tp->thin_lto;
41653c0fef0bSJosh Hunt 		break;
41664a7f6009SYuchung Cheng 
41673c0fef0bSJosh Hunt 	case TCP_THIN_DUPACK:
41684a7f6009SYuchung Cheng 		val = 0;
41693c0fef0bSJosh Hunt 		break;
4170dca43c75SJerry Chu 
4171ee995283SPavel Emelyanov 	case TCP_REPAIR:
4172ee995283SPavel Emelyanov 		val = tp->repair;
4173ee995283SPavel Emelyanov 		break;
4174ee995283SPavel Emelyanov 
4175ee995283SPavel Emelyanov 	case TCP_REPAIR_QUEUE:
4176ee995283SPavel Emelyanov 		if (tp->repair)
4177ee995283SPavel Emelyanov 			val = tp->repair_queue;
4178ee995283SPavel Emelyanov 		else
4179ee995283SPavel Emelyanov 			return -EINVAL;
4180ee995283SPavel Emelyanov 		break;
4181ee995283SPavel Emelyanov 
4182b1ed4c4fSAndrey Vagin 	case TCP_REPAIR_WINDOW: {
4183b1ed4c4fSAndrey Vagin 		struct tcp_repair_window opt;
4184b1ed4c4fSAndrey Vagin 
418534704ef0SMartin KaFai Lau 		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4186b1ed4c4fSAndrey Vagin 			return -EFAULT;
4187b1ed4c4fSAndrey Vagin 
4188b1ed4c4fSAndrey Vagin 		if (len != sizeof(opt))
4189b1ed4c4fSAndrey Vagin 			return -EINVAL;
4190b1ed4c4fSAndrey Vagin 
4191b1ed4c4fSAndrey Vagin 		if (!tp->repair)
4192b1ed4c4fSAndrey Vagin 			return -EPERM;
4193b1ed4c4fSAndrey Vagin 
4194b1ed4c4fSAndrey Vagin 		opt.snd_wl1	= tp->snd_wl1;
4195b1ed4c4fSAndrey Vagin 		opt.snd_wnd	= tp->snd_wnd;
4196b1ed4c4fSAndrey Vagin 		opt.max_window	= tp->max_window;
4197b1ed4c4fSAndrey Vagin 		opt.rcv_wnd	= tp->rcv_wnd;
4198b1ed4c4fSAndrey Vagin 		opt.rcv_wup	= tp->rcv_wup;
4199b1ed4c4fSAndrey Vagin 
420034704ef0SMartin KaFai Lau 		if (copy_to_sockptr(optval, &opt, len))
4201b1ed4c4fSAndrey Vagin 			return -EFAULT;
4202b1ed4c4fSAndrey Vagin 		return 0;
4203b1ed4c4fSAndrey Vagin 	}
4204ee995283SPavel Emelyanov 	case TCP_QUEUE_SEQ:
4205ee995283SPavel Emelyanov 		if (tp->repair_queue == TCP_SEND_QUEUE)
4206ee995283SPavel Emelyanov 			val = tp->write_seq;
4207ee995283SPavel Emelyanov 		else if (tp->repair_queue == TCP_RECV_QUEUE)
4208ee995283SPavel Emelyanov 			val = tp->rcv_nxt;
4209ee995283SPavel Emelyanov 		else
4210ee995283SPavel Emelyanov 			return -EINVAL;
4211ee995283SPavel Emelyanov 		break;
4212ee995283SPavel Emelyanov 
4213dca43c75SJerry Chu 	case TCP_USER_TIMEOUT:
421426023e91SEric Dumazet 		val = READ_ONCE(icsk->icsk_user_timeout);
4215dca43c75SJerry Chu 		break;
42161536e285SKenjiro Nakayama 
42171536e285SKenjiro Nakayama 	case TCP_FASTOPEN:
421870f360ddSEric Dumazet 		val = READ_ONCE(icsk->icsk_accept_queue.fastopenq.max_qlen);
42191536e285SKenjiro Nakayama 		break;
42201536e285SKenjiro Nakayama 
422119f6d3f3SWei Wang 	case TCP_FASTOPEN_CONNECT:
422219f6d3f3SWei Wang 		val = tp->fastopen_connect;
422319f6d3f3SWei Wang 		break;
422419f6d3f3SWei Wang 
422571c02379SChristoph Paasch 	case TCP_FASTOPEN_NO_COOKIE:
422671c02379SChristoph Paasch 		val = tp->fastopen_no_cookie;
422771c02379SChristoph Paasch 		break;
422871c02379SChristoph Paasch 
4229a842fe14SEric Dumazet 	case TCP_TX_DELAY:
4230348b81b6SEric Dumazet 		val = READ_ONCE(tp->tcp_tx_delay);
4231a842fe14SEric Dumazet 		break;
4232a842fe14SEric Dumazet 
423393be6ce0SAndrey Vagin 	case TCP_TIMESTAMP:
4234614e8316SEric Dumazet 		val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset);
4235614e8316SEric Dumazet 		if (tp->tcp_usec_ts)
4236614e8316SEric Dumazet 			val |= 1;
4237614e8316SEric Dumazet 		else
4238614e8316SEric Dumazet 			val &= ~1;
423993be6ce0SAndrey Vagin 		break;
4240c9bee3b7SEric Dumazet 	case TCP_NOTSENT_LOWAT:
42411aeb87bcSEric Dumazet 		val = READ_ONCE(tp->notsent_lowat);
4242c9bee3b7SEric Dumazet 		break;
4243b75eba76SSoheil Hassas Yeganeh 	case TCP_INQ:
4244b75eba76SSoheil Hassas Yeganeh 		val = tp->recvmsg_inq;
4245b75eba76SSoheil Hassas Yeganeh 		break;
4246cd8ae852SEric Dumazet 	case TCP_SAVE_SYN:
4247cd8ae852SEric Dumazet 		val = tp->save_syn;
4248cd8ae852SEric Dumazet 		break;
4249cd8ae852SEric Dumazet 	case TCP_SAVED_SYN: {
425034704ef0SMartin KaFai Lau 		if (copy_from_sockptr(&len, optlen, sizeof(int)))
4251cd8ae852SEric Dumazet 			return -EFAULT;
4252cd8ae852SEric Dumazet 
4253d51bbff2SMartin KaFai Lau 		sockopt_lock_sock(sk);
4254cd8ae852SEric Dumazet 		if (tp->saved_syn) {
425570a217f1SMartin KaFai Lau 			if (len < tcp_saved_syn_len(tp->saved_syn)) {
425634704ef0SMartin KaFai Lau 				len = tcp_saved_syn_len(tp->saved_syn);
425734704ef0SMartin KaFai Lau 				if (copy_to_sockptr(optlen, &len, sizeof(int))) {
4258d51bbff2SMartin KaFai Lau 					sockopt_release_sock(sk);
4259aea0929eSEric B Munson 					return -EFAULT;
4260aea0929eSEric B Munson 				}
4261d51bbff2SMartin KaFai Lau 				sockopt_release_sock(sk);
4262aea0929eSEric B Munson 				return -EINVAL;
4263aea0929eSEric B Munson 			}
426470a217f1SMartin KaFai Lau 			len = tcp_saved_syn_len(tp->saved_syn);
426534704ef0SMartin KaFai Lau 			if (copy_to_sockptr(optlen, &len, sizeof(int))) {
4266d51bbff2SMartin KaFai Lau 				sockopt_release_sock(sk);
4267cd8ae852SEric Dumazet 				return -EFAULT;
4268cd8ae852SEric Dumazet 			}
426934704ef0SMartin KaFai Lau 			if (copy_to_sockptr(optval, tp->saved_syn->data, len)) {
4270d51bbff2SMartin KaFai Lau 				sockopt_release_sock(sk);
4271cd8ae852SEric Dumazet 				return -EFAULT;
4272cd8ae852SEric Dumazet 			}
4273cd8ae852SEric Dumazet 			tcp_saved_syn_free(tp);
4274d51bbff2SMartin KaFai Lau 			sockopt_release_sock(sk);
4275cd8ae852SEric Dumazet 		} else {
4276d51bbff2SMartin KaFai Lau 			sockopt_release_sock(sk);
4277cd8ae852SEric Dumazet 			len = 0;
427834704ef0SMartin KaFai Lau 			if (copy_to_sockptr(optlen, &len, sizeof(int)))
4279cd8ae852SEric Dumazet 				return -EFAULT;
4280cd8ae852SEric Dumazet 		}
4281cd8ae852SEric Dumazet 		return 0;
4282cd8ae852SEric Dumazet 	}
428305255b82SEric Dumazet #ifdef CONFIG_MMU
428405255b82SEric Dumazet 	case TCP_ZEROCOPY_RECEIVE: {
42857eeba170SArjun Roy 		struct scm_timestamping_internal tss;
4286e0fecb28SArjun Roy 		struct tcp_zerocopy_receive zc = {};
428705255b82SEric Dumazet 		int err;
428805255b82SEric Dumazet 
428934704ef0SMartin KaFai Lau 		if (copy_from_sockptr(&len, optlen, sizeof(int)))
429005255b82SEric Dumazet 			return -EFAULT;
42912107d45fSArjun Roy 		if (len < 0 ||
42922107d45fSArjun Roy 		    len < offsetofend(struct tcp_zerocopy_receive, length))
429305255b82SEric Dumazet 			return -EINVAL;
42943c5a2fd0SArjun Roy 		if (unlikely(len > sizeof(zc))) {
429534704ef0SMartin KaFai Lau 			err = check_zeroed_sockptr(optval, sizeof(zc),
42963c5a2fd0SArjun Roy 						   len - sizeof(zc));
42973c5a2fd0SArjun Roy 			if (err < 1)
42983c5a2fd0SArjun Roy 				return err == 0 ? -EINVAL : err;
4299c8856c05SArjun Roy 			len = sizeof(zc);
430034704ef0SMartin KaFai Lau 			if (copy_to_sockptr(optlen, &len, sizeof(int)))
43010b7f41f6SArjun Roy 				return -EFAULT;
43020b7f41f6SArjun Roy 		}
430334704ef0SMartin KaFai Lau 		if (copy_from_sockptr(&zc, optval, len))
430405255b82SEric Dumazet 			return -EFAULT;
43053c5a2fd0SArjun Roy 		if (zc.reserved)
43063c5a2fd0SArjun Roy 			return -EINVAL;
43073c5a2fd0SArjun Roy 		if (zc.msg_flags &  ~(TCP_VALID_ZC_MSG_FLAGS))
43083c5a2fd0SArjun Roy 			return -EINVAL;
4309d51bbff2SMartin KaFai Lau 		sockopt_lock_sock(sk);
43107eeba170SArjun Roy 		err = tcp_zerocopy_receive(sk, &zc, &tss);
43119cacf81fSStanislav Fomichev 		err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level, optname,
43129cacf81fSStanislav Fomichev 							  &zc, &len, err);
4313d51bbff2SMartin KaFai Lau 		sockopt_release_sock(sk);
43147eeba170SArjun Roy 		if (len >= offsetofend(struct tcp_zerocopy_receive, msg_flags))
43157eeba170SArjun Roy 			goto zerocopy_rcv_cmsg;
4316c8856c05SArjun Roy 		switch (len) {
43177eeba170SArjun Roy 		case offsetofend(struct tcp_zerocopy_receive, msg_flags):
43187eeba170SArjun Roy 			goto zerocopy_rcv_cmsg;
43197eeba170SArjun Roy 		case offsetofend(struct tcp_zerocopy_receive, msg_controllen):
43207eeba170SArjun Roy 		case offsetofend(struct tcp_zerocopy_receive, msg_control):
43217eeba170SArjun Roy 		case offsetofend(struct tcp_zerocopy_receive, flags):
43227eeba170SArjun Roy 		case offsetofend(struct tcp_zerocopy_receive, copybuf_len):
43237eeba170SArjun Roy 		case offsetofend(struct tcp_zerocopy_receive, copybuf_address):
432433946518SArjun Roy 		case offsetofend(struct tcp_zerocopy_receive, err):
432533946518SArjun Roy 			goto zerocopy_rcv_sk_err;
4326c8856c05SArjun Roy 		case offsetofend(struct tcp_zerocopy_receive, inq):
4327c8856c05SArjun Roy 			goto zerocopy_rcv_inq;
4328c8856c05SArjun Roy 		case offsetofend(struct tcp_zerocopy_receive, length):
4329c8856c05SArjun Roy 		default:
4330c8856c05SArjun Roy 			goto zerocopy_rcv_out;
4331c8856c05SArjun Roy 		}
43327eeba170SArjun Roy zerocopy_rcv_cmsg:
43337eeba170SArjun Roy 		if (zc.msg_flags & TCP_CMSG_TS)
43347eeba170SArjun Roy 			tcp_zc_finalize_rx_tstamp(sk, &zc, &tss);
43357eeba170SArjun Roy 		else
43367eeba170SArjun Roy 			zc.msg_flags = 0;
433733946518SArjun Roy zerocopy_rcv_sk_err:
433833946518SArjun Roy 		if (!err)
433933946518SArjun Roy 			zc.err = sock_error(sk);
4340c8856c05SArjun Roy zerocopy_rcv_inq:
4341c8856c05SArjun Roy 		zc.inq = tcp_inq_hint(sk);
4342c8856c05SArjun Roy zerocopy_rcv_out:
434334704ef0SMartin KaFai Lau 		if (!err && copy_to_sockptr(optval, &zc, len))
434405255b82SEric Dumazet 			err = -EFAULT;
434505255b82SEric Dumazet 		return err;
434605255b82SEric Dumazet 	}
434705255b82SEric Dumazet #endif
4348faadfabaSDmitry Safonov 	case TCP_AO_REPAIR:
4349965c00e4SDmitry Safonov 		if (!tcp_can_repair_sock(sk))
4350965c00e4SDmitry Safonov 			return -EPERM;
4351faadfabaSDmitry Safonov 		return tcp_ao_get_repair(sk, optval, optlen);
4352ef84703aSDmitry Safonov 	case TCP_AO_GET_KEYS:
4353ef84703aSDmitry Safonov 	case TCP_AO_INFO: {
4354ef84703aSDmitry Safonov 		int err;
4355ef84703aSDmitry Safonov 
4356ef84703aSDmitry Safonov 		sockopt_lock_sock(sk);
4357ef84703aSDmitry Safonov 		if (optname == TCP_AO_GET_KEYS)
4358ef84703aSDmitry Safonov 			err = tcp_ao_get_mkts(sk, optval, optlen);
4359ef84703aSDmitry Safonov 		else
4360ef84703aSDmitry Safonov 			err = tcp_ao_get_sock_info(sk, optval, optlen);
4361ef84703aSDmitry Safonov 		sockopt_release_sock(sk);
4362ef84703aSDmitry Safonov 
4363ef84703aSDmitry Safonov 		return err;
4364ef84703aSDmitry Safonov 	}
43651da177e4SLinus Torvalds 	default:
43661da177e4SLinus Torvalds 		return -ENOPROTOOPT;
43673ff50b79SStephen Hemminger 	}
43681da177e4SLinus Torvalds 
436934704ef0SMartin KaFai Lau 	if (copy_to_sockptr(optlen, &len, sizeof(int)))
43701da177e4SLinus Torvalds 		return -EFAULT;
437134704ef0SMartin KaFai Lau 	if (copy_to_sockptr(optval, &val, len))
43721da177e4SLinus Torvalds 		return -EFAULT;
43731da177e4SLinus Torvalds 	return 0;
43741da177e4SLinus Torvalds }
43751da177e4SLinus Torvalds 
43769cacf81fSStanislav Fomichev bool tcp_bpf_bypass_getsockopt(int level, int optname)
43779cacf81fSStanislav Fomichev {
43789cacf81fSStanislav Fomichev 	/* TCP do_tcp_getsockopt has optimized getsockopt implementation
43799cacf81fSStanislav Fomichev 	 * to avoid extra socket lock for TCP_ZEROCOPY_RECEIVE.
43809cacf81fSStanislav Fomichev 	 */
43819cacf81fSStanislav Fomichev 	if (level == SOL_TCP && optname == TCP_ZEROCOPY_RECEIVE)
43829cacf81fSStanislav Fomichev 		return true;
43839cacf81fSStanislav Fomichev 
43849cacf81fSStanislav Fomichev 	return false;
43859cacf81fSStanislav Fomichev }
43869cacf81fSStanislav Fomichev EXPORT_SYMBOL(tcp_bpf_bypass_getsockopt);
43879cacf81fSStanislav Fomichev 
43883fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
43893fdadf7dSDmitry Mishin 		   int __user *optlen)
43903fdadf7dSDmitry Mishin {
43913fdadf7dSDmitry Mishin 	struct inet_connection_sock *icsk = inet_csk(sk);
43923fdadf7dSDmitry Mishin 
43933fdadf7dSDmitry Mishin 	if (level != SOL_TCP)
4394f49cd2f4SKuniyuki Iwashima 		/* Paired with WRITE_ONCE() in do_ipv6_setsockopt() and tcp_v6_connect() */
4395f49cd2f4SKuniyuki Iwashima 		return READ_ONCE(icsk->icsk_af_ops)->getsockopt(sk, level, optname,
43963fdadf7dSDmitry Mishin 								optval, optlen);
439734704ef0SMartin KaFai Lau 	return do_tcp_getsockopt(sk, level, optname, USER_SOCKPTR(optval),
439834704ef0SMartin KaFai Lau 				 USER_SOCKPTR(optlen));
43993fdadf7dSDmitry Mishin }
44004bc2f18bSEric Dumazet EXPORT_SYMBOL(tcp_getsockopt);
44013fdadf7dSDmitry Mishin 
4402cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
44038c73b263SDmitry Safonov int tcp_md5_sigpool_id = -1;
44048c73b263SDmitry Safonov EXPORT_SYMBOL_GPL(tcp_md5_sigpool_id);
4405cfb6eeb4SYOSHIFUJI Hideaki 
44068c73b263SDmitry Safonov int tcp_md5_alloc_sigpool(void)
4407cfb6eeb4SYOSHIFUJI Hideaki {
44088c73b263SDmitry Safonov 	size_t scratch_size;
44098c73b263SDmitry Safonov 	int ret;
4410cfb6eeb4SYOSHIFUJI Hideaki 
44118c73b263SDmitry Safonov 	scratch_size = sizeof(union tcp_md5sum_block) + sizeof(struct tcphdr);
44128c73b263SDmitry Safonov 	ret = tcp_sigpool_alloc_ahash("md5", scratch_size);
44138c73b263SDmitry Safonov 	if (ret >= 0) {
44148c73b263SDmitry Safonov 		/* As long as any md5 sigpool was allocated, the return
44158c73b263SDmitry Safonov 		 * id would stay the same. Re-write the id only for the case
44168c73b263SDmitry Safonov 		 * when previously all MD5 keys were deleted and this call
44178c73b263SDmitry Safonov 		 * allocates the first MD5 key, which may return a different
44188c73b263SDmitry Safonov 		 * sigpool id than was used previously.
441971cea17eSEric Dumazet 		 */
44208c73b263SDmitry Safonov 		WRITE_ONCE(tcp_md5_sigpool_id, ret); /* Avoids the compiler potentially being smart here */
442149a72dfbSAdam Langley 		return 0;
442249a72dfbSAdam Langley 	}
44238c73b263SDmitry Safonov 	return ret;
44248c73b263SDmitry Safonov }
442549a72dfbSAdam Langley 
44268c73b263SDmitry Safonov void tcp_md5_release_sigpool(void)
44278c73b263SDmitry Safonov {
44288c73b263SDmitry Safonov 	tcp_sigpool_release(READ_ONCE(tcp_md5_sigpool_id));
44298c73b263SDmitry Safonov }
44308c73b263SDmitry Safonov 
44318c73b263SDmitry Safonov void tcp_md5_add_sigpool(void)
44328c73b263SDmitry Safonov {
44338c73b263SDmitry Safonov 	tcp_sigpool_get(READ_ONCE(tcp_md5_sigpool_id));
44348c73b263SDmitry Safonov }
44358c73b263SDmitry Safonov 
44368c73b263SDmitry Safonov int tcp_md5_hash_key(struct tcp_sigpool *hp,
44378c73b263SDmitry Safonov 		     const struct tcp_md5sig_key *key)
443849a72dfbSAdam Langley {
4439e6ced831SEric Dumazet 	u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
444049a72dfbSAdam Langley 	struct scatterlist sg;
444149a72dfbSAdam Langley 
44426a2febecSEric Dumazet 	sg_init_one(&sg, key->key, keylen);
44438c73b263SDmitry Safonov 	ahash_request_set_crypt(hp->req, &sg, NULL, keylen);
4444e6ced831SEric Dumazet 
44458c73b263SDmitry Safonov 	/* We use data_race() because tcp_md5_do_add() might change
44468c73b263SDmitry Safonov 	 * key->key under us
44478c73b263SDmitry Safonov 	 */
44488c73b263SDmitry Safonov 	return data_race(crypto_ahash_update(hp->req));
444949a72dfbSAdam Langley }
445049a72dfbSAdam Langley EXPORT_SYMBOL(tcp_md5_hash_key);
445149a72dfbSAdam Langley 
44527bbb765bSDmitry Safonov /* Called with rcu_read_lock() */
44531330b6efSJakub Kicinski enum skb_drop_reason
44541330b6efSJakub Kicinski tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb,
44557bbb765bSDmitry Safonov 		     const void *saddr, const void *daddr,
44560a3a8090SDmitry Safonov 		     int family, int l3index, const __u8 *hash_location)
44577bbb765bSDmitry Safonov {
44580a3a8090SDmitry Safonov 	/* This gets called for each TCP segment that has TCP-MD5 option.
44597bbb765bSDmitry Safonov 	 * We have 3 drop cases:
44607bbb765bSDmitry Safonov 	 * o No MD5 hash and one expected.
44617bbb765bSDmitry Safonov 	 * o MD5 hash and we're not expecting one.
44627bbb765bSDmitry Safonov 	 * o MD5 hash and its wrong.
44637bbb765bSDmitry Safonov 	 */
4464e9d9da91SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
44650a3a8090SDmitry Safonov 	struct tcp_md5sig_key *key;
44667bbb765bSDmitry Safonov 	u8 newhash[16];
44670a3a8090SDmitry Safonov 	int genhash;
44687bbb765bSDmitry Safonov 
44690a3a8090SDmitry Safonov 	key = tcp_md5_do_lookup(sk, l3index, saddr, family);
44707bbb765bSDmitry Safonov 
44710a3a8090SDmitry Safonov 	if (!key && hash_location) {
44727bbb765bSDmitry Safonov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
44732717b5adSDmitry Safonov 		tcp_hash_fail("Unexpected MD5 Hash found", family, skb, "");
44741330b6efSJakub Kicinski 		return SKB_DROP_REASON_TCP_MD5UNEXPECTED;
44757bbb765bSDmitry Safonov 	}
44767bbb765bSDmitry Safonov 
4477e62d2e11SEric Dumazet 	/* Check the signature.
4478e62d2e11SEric Dumazet 	 * To support dual stack listeners, we need to handle
4479e62d2e11SEric Dumazet 	 * IPv4-mapped case.
4480e62d2e11SEric Dumazet 	 */
4481e62d2e11SEric Dumazet 	if (family == AF_INET)
44820a3a8090SDmitry Safonov 		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
4483e62d2e11SEric Dumazet 	else
44840a3a8090SDmitry Safonov 		genhash = tp->af_specific->calc_md5_hash(newhash, key,
44857bbb765bSDmitry Safonov 							 NULL, skb);
44867bbb765bSDmitry Safonov 	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
44877bbb765bSDmitry Safonov 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
44887bbb765bSDmitry Safonov 		if (family == AF_INET) {
44892717b5adSDmitry Safonov 			tcp_hash_fail("MD5 Hash failed", AF_INET, skb, "%s L3 index %d",
44907bbb765bSDmitry Safonov 				      genhash ? "tcp_v4_calc_md5_hash failed"
44917bbb765bSDmitry Safonov 				      : "", l3index);
44927bbb765bSDmitry Safonov 		} else {
44932717b5adSDmitry Safonov 			if (genhash) {
44942717b5adSDmitry Safonov 				tcp_hash_fail("MD5 Hash failed",
44952717b5adSDmitry Safonov 					      AF_INET6, skb, "L3 index %d",
44962717b5adSDmitry Safonov 					      l3index);
44972717b5adSDmitry Safonov 			} else {
44982717b5adSDmitry Safonov 				tcp_hash_fail("MD5 Hash mismatch",
44992717b5adSDmitry Safonov 					      AF_INET6, skb, "L3 index %d",
45002717b5adSDmitry Safonov 					      l3index);
45012717b5adSDmitry Safonov 			}
45027bbb765bSDmitry Safonov 		}
45031330b6efSJakub Kicinski 		return SKB_DROP_REASON_TCP_MD5FAILURE;
45047bbb765bSDmitry Safonov 	}
45051330b6efSJakub Kicinski 	return SKB_NOT_DROPPED_YET;
45067bbb765bSDmitry Safonov }
45077bbb765bSDmitry Safonov EXPORT_SYMBOL(tcp_inbound_md5_hash);
45087bbb765bSDmitry Safonov 
4509cfb6eeb4SYOSHIFUJI Hideaki #endif
4510cfb6eeb4SYOSHIFUJI Hideaki 
45114ac02babSAndi Kleen void tcp_done(struct sock *sk)
45124ac02babSAndi Kleen {
4513d983ea6fSEric Dumazet 	struct request_sock *req;
45148336886fSJerry Chu 
4515cab209e5SEric Dumazet 	/* We might be called with a new socket, after
4516cab209e5SEric Dumazet 	 * inet_csk_prepare_forced_close() has been called
4517cab209e5SEric Dumazet 	 * so we can not use lockdep_sock_is_held(sk)
4518cab209e5SEric Dumazet 	 */
4519cab209e5SEric Dumazet 	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
45204ac02babSAndi Kleen 
45214ac02babSAndi Kleen 	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
4522c10d9310SEric Dumazet 		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
45234ac02babSAndi Kleen 
45244ac02babSAndi Kleen 	tcp_set_state(sk, TCP_CLOSE);
45254ac02babSAndi Kleen 	tcp_clear_xmit_timers(sk);
452600db4124SIan Morris 	if (req)
45278336886fSJerry Chu 		reqsk_fastopen_remove(sk, req, false);
45284ac02babSAndi Kleen 
4529e14cadfdSEric Dumazet 	WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
45304ac02babSAndi Kleen 
45314ac02babSAndi Kleen 	if (!sock_flag(sk, SOCK_DEAD))
45324ac02babSAndi Kleen 		sk->sk_state_change(sk);
45334ac02babSAndi Kleen 	else
45344ac02babSAndi Kleen 		inet_csk_destroy_sock(sk);
45354ac02babSAndi Kleen }
45364ac02babSAndi Kleen EXPORT_SYMBOL_GPL(tcp_done);
45374ac02babSAndi Kleen 
4538c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err)
4539c1e64e29SLorenzo Colitti {
4540af9784d0SEric Dumazet 	int state = inet_sk_state_load(sk);
4541af9784d0SEric Dumazet 
4542af9784d0SEric Dumazet 	if (state == TCP_NEW_SYN_RECV) {
454307f6f4a3SEric Dumazet 		struct request_sock *req = inet_reqsk(sk);
454407f6f4a3SEric Dumazet 
454507f6f4a3SEric Dumazet 		local_bh_disable();
4546acc2cf4eSLorenzo Colitti 		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
454707f6f4a3SEric Dumazet 		local_bh_enable();
454807f6f4a3SEric Dumazet 		return 0;
454907f6f4a3SEric Dumazet 	}
4550af9784d0SEric Dumazet 	if (state == TCP_TIME_WAIT) {
4551af9784d0SEric Dumazet 		struct inet_timewait_sock *tw = inet_twsk(sk);
4552af9784d0SEric Dumazet 
4553af9784d0SEric Dumazet 		refcount_inc(&tw->tw_refcnt);
4554af9784d0SEric Dumazet 		local_bh_disable();
4555af9784d0SEric Dumazet 		inet_twsk_deschedule_put(tw);
4556af9784d0SEric Dumazet 		local_bh_enable();
4557af9784d0SEric Dumazet 		return 0;
4558c1e64e29SLorenzo Colitti 	}
4559c1e64e29SLorenzo Colitti 
45604ddbcb88SAditi Ghag 	/* BPF context ensures sock locking. */
45614ddbcb88SAditi Ghag 	if (!has_current_bpf_ctx())
4562c1e64e29SLorenzo Colitti 		/* Don't race with userspace socket closes such as tcp_close. */
4563c1e64e29SLorenzo Colitti 		lock_sock(sk);
4564c1e64e29SLorenzo Colitti 
45652010b93eSLorenzo Colitti 	if (sk->sk_state == TCP_LISTEN) {
45662010b93eSLorenzo Colitti 		tcp_set_state(sk, TCP_CLOSE);
45672010b93eSLorenzo Colitti 		inet_csk_listen_stop(sk);
45682010b93eSLorenzo Colitti 	}
45692010b93eSLorenzo Colitti 
4570c1e64e29SLorenzo Colitti 	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
4571c1e64e29SLorenzo Colitti 	local_bh_disable();
4572c1e64e29SLorenzo Colitti 	bh_lock_sock(sk);
4573c1e64e29SLorenzo Colitti 
4574c1e64e29SLorenzo Colitti 	if (!sock_flag(sk, SOCK_DEAD)) {
4575e13ec3daSEric Dumazet 		WRITE_ONCE(sk->sk_err, err);
4576c1e64e29SLorenzo Colitti 		/* This barrier is coupled with smp_rmb() in tcp_poll() */
4577c1e64e29SLorenzo Colitti 		smp_wmb();
4578e3ae2365SAlexander Aring 		sk_error_report(sk);
4579c1e64e29SLorenzo Colitti 		if (tcp_need_reset(sk->sk_state))
45805691276bSJason Xing 			tcp_send_active_reset(sk, GFP_ATOMIC,
45815691276bSJason Xing 					      SK_RST_REASON_NOT_SPECIFIED);
4582c1e64e29SLorenzo Colitti 		tcp_done(sk);
4583c1e64e29SLorenzo Colitti 	}
4584c1e64e29SLorenzo Colitti 
4585c1e64e29SLorenzo Colitti 	bh_unlock_sock(sk);
4586c1e64e29SLorenzo Colitti 	local_bh_enable();
4587e05836acSSoheil Hassas Yeganeh 	tcp_write_queue_purge(sk);
45884ddbcb88SAditi Ghag 	if (!has_current_bpf_ctx())
4589c1e64e29SLorenzo Colitti 		release_sock(sk);
4590c1e64e29SLorenzo Colitti 	return 0;
4591c1e64e29SLorenzo Colitti }
4592c1e64e29SLorenzo Colitti EXPORT_SYMBOL_GPL(tcp_abort);
4593c1e64e29SLorenzo Colitti 
45945f8ef48dSStephen Hemminger extern struct tcp_congestion_ops tcp_reno;
45951da177e4SLinus Torvalds 
45961da177e4SLinus Torvalds static __initdata unsigned long thash_entries;
45971da177e4SLinus Torvalds static int __init set_thash_entries(char *str)
45981da177e4SLinus Torvalds {
4599413c27d8SEldad Zack 	ssize_t ret;
4600413c27d8SEldad Zack 
46011da177e4SLinus Torvalds 	if (!str)
46021da177e4SLinus Torvalds 		return 0;
4603413c27d8SEldad Zack 
4604413c27d8SEldad Zack 	ret = kstrtoul(str, 0, &thash_entries);
4605413c27d8SEldad Zack 	if (ret)
4606413c27d8SEldad Zack 		return 0;
4607413c27d8SEldad Zack 
46081da177e4SLinus Torvalds 	return 1;
46091da177e4SLinus Torvalds }
46101da177e4SLinus Torvalds __setup("thash_entries=", set_thash_entries);
46111da177e4SLinus Torvalds 
461247d7a88cSFabian Frederick static void __init tcp_init_mem(void)
46134acb4190SGlauber Costa {
4614b66e91ccSEric Dumazet 	unsigned long limit = nr_free_buffer_pages() / 16;
4615b66e91ccSEric Dumazet 
46164acb4190SGlauber Costa 	limit = max(limit, 128UL);
4617b66e91ccSEric Dumazet 	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
4618b66e91ccSEric Dumazet 	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
4619b66e91ccSEric Dumazet 	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
46204acb4190SGlauber Costa }
46214acb4190SGlauber Costa 
4622d5fed5adSCoco Li static void __init tcp_struct_check(void)
4623d5fed5adSCoco Li {
4624d5fed5adSCoco Li 	/* TX read-mostly hotpath cache lines */
4625d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, max_window);
4626d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, rcv_ssthresh);
4627d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, reordering);
4628d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, notsent_lowat);
4629d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, gso_segs);
4630d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, lost_skb_hint);
4631d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_tx, retransmit_skb_hint);
4632d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_tx, 40);
4633d5fed5adSCoco Li 
4634d5fed5adSCoco Li 	/* TXRX read-mostly hotpath cache lines */
4635d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, tsoffset);
4636d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_wnd);
4637d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, mss_cache);
4638d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, snd_cwnd);
4639d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, prr_out);
4640d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, lost_out);
4641d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, sacked_out);
4642119ff048SEric Dumazet 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_txrx, scaling_ratio);
4643119ff048SEric Dumazet 	CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_txrx, 32);
4644d5fed5adSCoco Li 
4645d5fed5adSCoco Li 	/* RX read-mostly hotpath cache lines */
4646d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, copied_seq);
4647d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rcv_tstamp);
4648d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_wl1);
4649d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, tlp_high_seq);
4650d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rttvar_us);
4651d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, retrans_out);
4652d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, advmss);
4653d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, urg_data);
4654d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, lost);
4655d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, rtt_min);
4656d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, out_of_order_queue);
4657d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_read_rx, snd_ssthresh);
4658d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_read_rx, 69);
4659d5fed5adSCoco Li 
4660d5fed5adSCoco Li 	/* TX read-write hotpath cache lines */
4661d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, segs_out);
4662d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, data_segs_out);
4663d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, bytes_sent);
4664d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, snd_sml);
4665d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_start);
4666d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, chrono_stat);
4667d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, write_seq);
4668d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, pushed_seq);
4669d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime);
4670d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us);
4671d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns);
4672d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq);
4673d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue);
4674d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack);
4675d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags);
4676d2c3a7ebSEric Dumazet 	CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 89);
4677d5fed5adSCoco Li 
4678d5fed5adSCoco Li 	/* TXRX read-write hotpath cache lines */
4679d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags);
4680d2c3a7ebSEric Dumazet 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_clock_cache);
4681d2c3a7ebSEric Dumazet 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_mstamp);
4682d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt);
4683d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt);
4684d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una);
4685d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, window_clamp);
4686d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, srtt_us);
4687d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, packets_out);
4688d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_up);
4689d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered);
4690d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, delivered_ce);
4691d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited);
4692d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd);
4693d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt);
46949b9fd458SEric Dumazet 
46959b9fd458SEric Dumazet 	/* 32bit arches with 8byte alignment on u64 fields might need padding
46969b9fd458SEric Dumazet 	 * before tcp_clock_cache.
46979b9fd458SEric Dumazet 	 */
46989b9fd458SEric Dumazet 	CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92 + 4);
4699d5fed5adSCoco Li 
4700d5fed5adSCoco Li 	/* RX read-write hotpath cache lines */
4701d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received);
4702d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, segs_in);
4703d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, data_segs_in);
4704d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_wup);
4705d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, max_packets_out);
4706d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, cwnd_usage_seq);
4707d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_delivered);
4708d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rate_interval_us);
4709d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_last_tsecr);
4710d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, first_tx_mstamp);
4711d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, delivered_mstamp);
4712d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_acked);
4713d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcv_rtt_est);
4714d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, rcvq_space);
4715d5fed5adSCoco Li 	CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_rx, 99);
4716d5fed5adSCoco Li }
4717d5fed5adSCoco Li 
47181da177e4SLinus Torvalds void __init tcp_init(void)
47191da177e4SLinus Torvalds {
4720b49960a0SEric Dumazet 	int max_rshare, max_wshare, cnt;
4721b2d3ea4aSEric Dumazet 	unsigned long limit;
4722074b8517SDimitri Sivanich 	unsigned int i;
47231da177e4SLinus Torvalds 
47243b4929f6SEric Dumazet 	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
4725b2d3ea4aSEric Dumazet 	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
4726c593642cSPankaj Bharadiya 		     sizeof_field(struct sk_buff, cb));
47271da177e4SLinus Torvalds 
4728d5fed5adSCoco Li 	tcp_struct_check();
4729d5fed5adSCoco Li 
4730908c7f19STejun Heo 	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
473119757cebSEric Dumazet 
473219757cebSEric Dumazet 	timer_setup(&tcp_orphan_timer, tcp_orphan_update, TIMER_DEFERRABLE);
473319757cebSEric Dumazet 	mod_timer(&tcp_orphan_timer, jiffies + TCP_ORPHAN_TIMER_PERIOD);
473419757cebSEric Dumazet 
473527da6d37SMartin KaFai Lau 	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
473627da6d37SMartin KaFai Lau 			    thash_entries, 21,  /* one slot per 2 MB*/
473727da6d37SMartin KaFai Lau 			    0, 64 * 1024);
47386e04e021SArnaldo Carvalho de Melo 	tcp_hashinfo.bind_bucket_cachep =
47396e04e021SArnaldo Carvalho de Melo 		kmem_cache_create("tcp_bind_bucket",
47406e04e021SArnaldo Carvalho de Melo 				  sizeof(struct inet_bind_bucket), 0,
4741990c74e3SVasily Averin 				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
4742990c74e3SVasily Averin 				  SLAB_ACCOUNT,
4743990c74e3SVasily Averin 				  NULL);
474428044fc1SJoanne Koong 	tcp_hashinfo.bind2_bucket_cachep =
474528044fc1SJoanne Koong 		kmem_cache_create("tcp_bind2_bucket",
474628044fc1SJoanne Koong 				  sizeof(struct inet_bind2_bucket), 0,
474728044fc1SJoanne Koong 				  SLAB_HWCACHE_ALIGN | SLAB_PANIC |
474828044fc1SJoanne Koong 				  SLAB_ACCOUNT,
474928044fc1SJoanne Koong 				  NULL);
47501da177e4SLinus Torvalds 
47511da177e4SLinus Torvalds 	/* Size and allocate the main established and bind bucket
47521da177e4SLinus Torvalds 	 * hash tables.
47531da177e4SLinus Torvalds 	 *
47541da177e4SLinus Torvalds 	 * The methodology is similar to that of the buffer cache.
47551da177e4SLinus Torvalds 	 */
47566e04e021SArnaldo Carvalho de Melo 	tcp_hashinfo.ehash =
47571da177e4SLinus Torvalds 		alloc_large_system_hash("TCP established",
47580f7ff927SArnaldo Carvalho de Melo 					sizeof(struct inet_ehash_bucket),
47591da177e4SLinus Torvalds 					thash_entries,
4760fd90b29dSEric Dumazet 					17, /* one slot per 128 KB of memory */
47619e950efaSJohn Heffner 					0,
47621da177e4SLinus Torvalds 					NULL,
4763f373b53bSEric Dumazet 					&tcp_hashinfo.ehash_mask,
476431fe62b9STim Bird 					0,
47650ccfe618SJean Delvare 					thash_entries ? 0 : 512 * 1024);
476605dbc7b5SEric Dumazet 	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
47673ab5aee7SEric Dumazet 		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
476805dbc7b5SEric Dumazet 
4769230140cfSEric Dumazet 	if (inet_ehash_locks_alloc(&tcp_hashinfo))
4770230140cfSEric Dumazet 		panic("TCP: failed to alloc ehash_locks");
47716e04e021SArnaldo Carvalho de Melo 	tcp_hashinfo.bhash =
4772593d1ebeSJoanne Koong 		alloc_large_system_hash("TCP bind",
477328044fc1SJoanne Koong 					2 * sizeof(struct inet_bind_hashbucket),
4774f373b53bSEric Dumazet 					tcp_hashinfo.ehash_mask + 1,
4775fd90b29dSEric Dumazet 					17, /* one slot per 128 KB of memory */
47769e950efaSJohn Heffner 					0,
47776e04e021SArnaldo Carvalho de Melo 					&tcp_hashinfo.bhash_size,
47781da177e4SLinus Torvalds 					NULL,
477931fe62b9STim Bird 					0,
47801da177e4SLinus Torvalds 					64 * 1024);
4781074b8517SDimitri Sivanich 	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
478228044fc1SJoanne Koong 	tcp_hashinfo.bhash2 = tcp_hashinfo.bhash + tcp_hashinfo.bhash_size;
47836e04e021SArnaldo Carvalho de Melo 	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
47846e04e021SArnaldo Carvalho de Melo 		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
47856e04e021SArnaldo Carvalho de Melo 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
478628044fc1SJoanne Koong 		spin_lock_init(&tcp_hashinfo.bhash2[i].lock);
478728044fc1SJoanne Koong 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash2[i].chain);
47881da177e4SLinus Torvalds 	}
47891da177e4SLinus Torvalds 
4790d1e5e640SKuniyuki Iwashima 	tcp_hashinfo.pernet = false;
4791c5ed63d6SEric Dumazet 
4792c5ed63d6SEric Dumazet 	cnt = tcp_hashinfo.ehash_mask + 1;
4793c5ed63d6SEric Dumazet 	sysctl_tcp_max_orphans = cnt / 2;
47941da177e4SLinus Torvalds 
4795a4fe34bfSEric W. Biederman 	tcp_init_mem();
4796c43b874dSJason Wang 	/* Set per-socket limits to no more than 1/128 the pressure threshold */
47975fb84b14SEric Dumazet 	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
4798b49960a0SEric Dumazet 	max_wshare = min(4UL*1024*1024, limit);
4799b49960a0SEric Dumazet 	max_rshare = min(6UL*1024*1024, limit);
48007b4f4b5eSJohn Heffner 
4801100fdd1fSEric Dumazet 	init_net.ipv4.sysctl_tcp_wmem[0] = PAGE_SIZE;
4802356d1833SEric Dumazet 	init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
4803356d1833SEric Dumazet 	init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
48047b4f4b5eSJohn Heffner 
4805100fdd1fSEric Dumazet 	init_net.ipv4.sysctl_tcp_rmem[0] = PAGE_SIZE;
4806a337531bSYuchung Cheng 	init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
4807a337531bSYuchung Cheng 	init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
48081da177e4SLinus Torvalds 
4809afd46503SJoe Perches 	pr_info("Hash tables configured (established %u bind %u)\n",
4810f373b53bSEric Dumazet 		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
4811317a76f9SStephen Hemminger 
48121946e672SHaishuang Yan 	tcp_v4_init();
481351c5d0c4SDavid S. Miller 	tcp_metrics_init();
481455d8694fSFlorian Westphal 	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
481546d3ceabSEric Dumazet 	tcp_tasklet_init();
4816f870fa0bSMat Martineau 	mptcp_init();
48171da177e4SLinus Torvalds }
4818