xref: /linux/net/ipv4/tcp.c (revision 293ad60401da621b8b329abbe8c388edb25f658a)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
131da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
141da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
151da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
161da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
171da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
181da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
191da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
201da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
211da177e4SLinus Torvalds  *
221da177e4SLinus Torvalds  * Fixes:
231da177e4SLinus Torvalds  *		Alan Cox	:	Numerous verify_area() calls
241da177e4SLinus Torvalds  *		Alan Cox	:	Set the ACK bit on a reset
251da177e4SLinus Torvalds  *		Alan Cox	:	Stopped it crashing if it closed while
261da177e4SLinus Torvalds  *					sk->inuse=1 and was trying to connect
271da177e4SLinus Torvalds  *					(tcp_err()).
281da177e4SLinus Torvalds  *		Alan Cox	:	All icmp error handling was broken
291da177e4SLinus Torvalds  *					pointers passed where wrong and the
301da177e4SLinus Torvalds  *					socket was looked up backwards. Nobody
311da177e4SLinus Torvalds  *					tested any icmp error code obviously.
321da177e4SLinus Torvalds  *		Alan Cox	:	tcp_err() now handled properly. It
331da177e4SLinus Torvalds  *					wakes people on errors. poll
341da177e4SLinus Torvalds  *					behaves and the icmp error race
351da177e4SLinus Torvalds  *					has gone by moving it into sock.c
361da177e4SLinus Torvalds  *		Alan Cox	:	tcp_send_reset() fixed to work for
371da177e4SLinus Torvalds  *					everything not just packets for
381da177e4SLinus Torvalds  *					unknown sockets.
391da177e4SLinus Torvalds  *		Alan Cox	:	tcp option processing.
401da177e4SLinus Torvalds  *		Alan Cox	:	Reset tweaked (still not 100%) [Had
411da177e4SLinus Torvalds  *					syn rule wrong]
421da177e4SLinus Torvalds  *		Herp Rosmanith  :	More reset fixes
431da177e4SLinus Torvalds  *		Alan Cox	:	No longer acks invalid rst frames.
441da177e4SLinus Torvalds  *					Acking any kind of RST is right out.
451da177e4SLinus Torvalds  *		Alan Cox	:	Sets an ignore me flag on an rst
461da177e4SLinus Torvalds  *					receive otherwise odd bits of prattle
471da177e4SLinus Torvalds  *					escape still
481da177e4SLinus Torvalds  *		Alan Cox	:	Fixed another acking RST frame bug.
491da177e4SLinus Torvalds  *					Should stop LAN workplace lockups.
501da177e4SLinus Torvalds  *		Alan Cox	: 	Some tidyups using the new skb list
511da177e4SLinus Torvalds  *					facilities
521da177e4SLinus Torvalds  *		Alan Cox	:	sk->keepopen now seems to work
531da177e4SLinus Torvalds  *		Alan Cox	:	Pulls options out correctly on accepts
541da177e4SLinus Torvalds  *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
551da177e4SLinus Torvalds  *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
561da177e4SLinus Torvalds  *					bit to skb ops.
571da177e4SLinus Torvalds  *		Alan Cox	:	Tidied tcp_data to avoid a potential
581da177e4SLinus Torvalds  *					nasty.
591da177e4SLinus Torvalds  *		Alan Cox	:	Added some better commenting, as the
601da177e4SLinus Torvalds  *					tcp is hard to follow
611da177e4SLinus Torvalds  *		Alan Cox	:	Removed incorrect check for 20 * psh
621da177e4SLinus Torvalds  *	Michael O'Reilly	:	ack < copied bug fix.
631da177e4SLinus Torvalds  *	Johannes Stille		:	Misc tcp fixes (not all in yet).
641da177e4SLinus Torvalds  *		Alan Cox	:	FIN with no memory -> CRASH
651da177e4SLinus Torvalds  *		Alan Cox	:	Added socket option proto entries.
661da177e4SLinus Torvalds  *					Also added awareness of them to accept.
671da177e4SLinus Torvalds  *		Alan Cox	:	Added TCP options (SOL_TCP)
681da177e4SLinus Torvalds  *		Alan Cox	:	Switched wakeup calls to callbacks,
691da177e4SLinus Torvalds  *					so the kernel can layer network
701da177e4SLinus Torvalds  *					sockets.
711da177e4SLinus Torvalds  *		Alan Cox	:	Use ip_tos/ip_ttl settings.
721da177e4SLinus Torvalds  *		Alan Cox	:	Handle FIN (more) properly (we hope).
731da177e4SLinus Torvalds  *		Alan Cox	:	RST frames sent on unsynchronised
741da177e4SLinus Torvalds  *					state ack error.
751da177e4SLinus Torvalds  *		Alan Cox	:	Put in missing check for SYN bit.
761da177e4SLinus Torvalds  *		Alan Cox	:	Added tcp_select_window() aka NET2E
771da177e4SLinus Torvalds  *					window non shrink trick.
781da177e4SLinus Torvalds  *		Alan Cox	:	Added a couple of small NET2E timer
791da177e4SLinus Torvalds  *					fixes
801da177e4SLinus Torvalds  *		Charles Hedrick :	TCP fixes
811da177e4SLinus Torvalds  *		Toomas Tamm	:	TCP window fixes
821da177e4SLinus Torvalds  *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
831da177e4SLinus Torvalds  *		Charles Hedrick	:	Rewrote most of it to actually work
841da177e4SLinus Torvalds  *		Linus		:	Rewrote tcp_read() and URG handling
851da177e4SLinus Torvalds  *					completely
861da177e4SLinus Torvalds  *		Gerhard Koerting:	Fixed some missing timer handling
871da177e4SLinus Torvalds  *		Matthew Dillon  :	Reworked TCP machine states as per RFC
881da177e4SLinus Torvalds  *		Gerhard Koerting:	PC/TCP workarounds
891da177e4SLinus Torvalds  *		Adam Caldwell	:	Assorted timer/timing errors
901da177e4SLinus Torvalds  *		Matthew Dillon	:	Fixed another RST bug
911da177e4SLinus Torvalds  *		Alan Cox	:	Move to kernel side addressing changes.
921da177e4SLinus Torvalds  *		Alan Cox	:	Beginning work on TCP fastpathing
931da177e4SLinus Torvalds  *					(not yet usable)
941da177e4SLinus Torvalds  *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
951da177e4SLinus Torvalds  *		Alan Cox	:	TCP fast path debugging
961da177e4SLinus Torvalds  *		Alan Cox	:	Window clamping
971da177e4SLinus Torvalds  *		Michael Riepe	:	Bug in tcp_check()
981da177e4SLinus Torvalds  *		Matt Dillon	:	More TCP improvements and RST bug fixes
991da177e4SLinus Torvalds  *		Matt Dillon	:	Yet more small nasties remove from the
1001da177e4SLinus Torvalds  *					TCP code (Be very nice to this man if
1011da177e4SLinus Torvalds  *					tcp finally works 100%) 8)
1021da177e4SLinus Torvalds  *		Alan Cox	:	BSD accept semantics.
1031da177e4SLinus Torvalds  *		Alan Cox	:	Reset on closedown bug.
1041da177e4SLinus Torvalds  *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
1051da177e4SLinus Torvalds  *		Michael Pall	:	Handle poll() after URG properly in
1061da177e4SLinus Torvalds  *					all cases.
1071da177e4SLinus Torvalds  *		Michael Pall	:	Undo the last fix in tcp_read_urg()
1081da177e4SLinus Torvalds  *					(multi URG PUSH broke rlogin).
1091da177e4SLinus Torvalds  *		Michael Pall	:	Fix the multi URG PUSH problem in
1101da177e4SLinus Torvalds  *					tcp_readable(), poll() after URG
1111da177e4SLinus Torvalds  *					works now.
1121da177e4SLinus Torvalds  *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
1131da177e4SLinus Torvalds  *					BSD api.
1141da177e4SLinus Torvalds  *		Alan Cox	:	Changed the semantics of sk->socket to
1151da177e4SLinus Torvalds  *					fix a race and a signal problem with
1161da177e4SLinus Torvalds  *					accept() and async I/O.
1171da177e4SLinus Torvalds  *		Alan Cox	:	Relaxed the rules on tcp_sendto().
1181da177e4SLinus Torvalds  *		Yury Shevchuk	:	Really fixed accept() blocking problem.
1191da177e4SLinus Torvalds  *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
1201da177e4SLinus Torvalds  *					clients/servers which listen in on
1211da177e4SLinus Torvalds  *					fixed ports.
1221da177e4SLinus Torvalds  *		Alan Cox	:	Cleaned the above up and shrank it to
1231da177e4SLinus Torvalds  *					a sensible code size.
1241da177e4SLinus Torvalds  *		Alan Cox	:	Self connect lockup fix.
1251da177e4SLinus Torvalds  *		Alan Cox	:	No connect to multicast.
1261da177e4SLinus Torvalds  *		Ross Biro	:	Close unaccepted children on master
1271da177e4SLinus Torvalds  *					socket close.
1281da177e4SLinus Torvalds  *		Alan Cox	:	Reset tracing code.
1291da177e4SLinus Torvalds  *		Alan Cox	:	Spurious resets on shutdown.
1301da177e4SLinus Torvalds  *		Alan Cox	:	Giant 15 minute/60 second timer error
1311da177e4SLinus Torvalds  *		Alan Cox	:	Small whoops in polling before an
1321da177e4SLinus Torvalds  *					accept.
1331da177e4SLinus Torvalds  *		Alan Cox	:	Kept the state trace facility since
1341da177e4SLinus Torvalds  *					it's handy for debugging.
1351da177e4SLinus Torvalds  *		Alan Cox	:	More reset handler fixes.
1361da177e4SLinus Torvalds  *		Alan Cox	:	Started rewriting the code based on
1371da177e4SLinus Torvalds  *					the RFC's for other useful protocol
1381da177e4SLinus Torvalds  *					references see: Comer, KA9Q NOS, and
1391da177e4SLinus Torvalds  *					for a reference on the difference
1401da177e4SLinus Torvalds  *					between specifications and how BSD
1411da177e4SLinus Torvalds  *					works see the 4.4lite source.
1421da177e4SLinus Torvalds  *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
1431da177e4SLinus Torvalds  *					close.
1441da177e4SLinus Torvalds  *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
1451da177e4SLinus Torvalds  *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
1461da177e4SLinus Torvalds  *		Alan Cox	:	Reimplemented timers as per the RFC
1471da177e4SLinus Torvalds  *					and using multiple timers for sanity.
1481da177e4SLinus Torvalds  *		Alan Cox	:	Small bug fixes, and a lot of new
1491da177e4SLinus Torvalds  *					comments.
1501da177e4SLinus Torvalds  *		Alan Cox	:	Fixed dual reader crash by locking
1511da177e4SLinus Torvalds  *					the buffers (much like datagram.c)
1521da177e4SLinus Torvalds  *		Alan Cox	:	Fixed stuck sockets in probe. A probe
1531da177e4SLinus Torvalds  *					now gets fed up of retrying without
1541da177e4SLinus Torvalds  *					(even a no space) answer.
1551da177e4SLinus Torvalds  *		Alan Cox	:	Extracted closing code better
1561da177e4SLinus Torvalds  *		Alan Cox	:	Fixed the closing state machine to
1571da177e4SLinus Torvalds  *					resemble the RFC.
1581da177e4SLinus Torvalds  *		Alan Cox	:	More 'per spec' fixes.
1591da177e4SLinus Torvalds  *		Jorge Cwik	:	Even faster checksumming.
1601da177e4SLinus Torvalds  *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
1611da177e4SLinus Torvalds  *					only frames. At least one pc tcp stack
1621da177e4SLinus Torvalds  *					generates them.
1631da177e4SLinus Torvalds  *		Alan Cox	:	Cache last socket.
1641da177e4SLinus Torvalds  *		Alan Cox	:	Per route irtt.
1651da177e4SLinus Torvalds  *		Matt Day	:	poll()->select() match BSD precisely on error
1661da177e4SLinus Torvalds  *		Alan Cox	:	New buffers
1671da177e4SLinus Torvalds  *		Marc Tamsky	:	Various sk->prot->retransmits and
1681da177e4SLinus Torvalds  *					sk->retransmits misupdating fixed.
1691da177e4SLinus Torvalds  *					Fixed tcp_write_timeout: stuck close,
1701da177e4SLinus Torvalds  *					and TCP syn retries gets used now.
1711da177e4SLinus Torvalds  *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
1721da177e4SLinus Torvalds  *					ack if state is TCP_CLOSED.
1731da177e4SLinus Torvalds  *		Alan Cox	:	Look up device on a retransmit - routes may
1741da177e4SLinus Torvalds  *					change. Doesn't yet cope with MSS shrink right
1751da177e4SLinus Torvalds  *					but it's a start!
1761da177e4SLinus Torvalds  *		Marc Tamsky	:	Closing in closing fixes.
1771da177e4SLinus Torvalds  *		Mike Shaver	:	RFC1122 verifications.
1781da177e4SLinus Torvalds  *		Alan Cox	:	rcv_saddr errors.
1791da177e4SLinus Torvalds  *		Alan Cox	:	Block double connect().
1801da177e4SLinus Torvalds  *		Alan Cox	:	Small hooks for enSKIP.
1811da177e4SLinus Torvalds  *		Alexey Kuznetsov:	Path MTU discovery.
1821da177e4SLinus Torvalds  *		Alan Cox	:	Support soft errors.
1831da177e4SLinus Torvalds  *		Alan Cox	:	Fix MTU discovery pathological case
1841da177e4SLinus Torvalds  *					when the remote claims no mtu!
1851da177e4SLinus Torvalds  *		Marc Tamsky	:	TCP_CLOSE fix.
1861da177e4SLinus Torvalds  *		Colin (G3TNE)	:	Send a reset on syn ack replies in
1871da177e4SLinus Torvalds  *					window but wrong (fixes NT lpd problems)
1881da177e4SLinus Torvalds  *		Pedro Roque	:	Better TCP window handling, delayed ack.
1891da177e4SLinus Torvalds  *		Joerg Reuter	:	No modification of locked buffers in
1901da177e4SLinus Torvalds  *					tcp_do_retransmit()
1911da177e4SLinus Torvalds  *		Eric Schenk	:	Changed receiver side silly window
1921da177e4SLinus Torvalds  *					avoidance algorithm to BSD style
1931da177e4SLinus Torvalds  *					algorithm. This doubles throughput
1941da177e4SLinus Torvalds  *					against machines running Solaris,
1951da177e4SLinus Torvalds  *					and seems to result in general
1961da177e4SLinus Torvalds  *					improvement.
1971da177e4SLinus Torvalds  *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
1981da177e4SLinus Torvalds  *	Willy Konynenberg	:	Transparent proxying support.
1991da177e4SLinus Torvalds  *	Mike McLagan		:	Routing by source
2001da177e4SLinus Torvalds  *		Keith Owens	:	Do proper merging with partial SKB's in
2011da177e4SLinus Torvalds  *					tcp_do_sendmsg to avoid burstiness.
2021da177e4SLinus Torvalds  *		Eric Schenk	:	Fix fast close down bug with
2031da177e4SLinus Torvalds  *					shutdown() followed by close().
2041da177e4SLinus Torvalds  *		Andi Kleen 	:	Make poll agree with SIGIO
2051da177e4SLinus Torvalds  *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
2061da177e4SLinus Torvalds  *					lingertime == 0 (RFC 793 ABORT Call)
2071da177e4SLinus Torvalds  *	Hirokazu Takahashi	:	Use copy_from_user() instead of
2081da177e4SLinus Torvalds  *					csum_and_copy_from_user() if possible.
2091da177e4SLinus Torvalds  *
2101da177e4SLinus Torvalds  *		This program is free software; you can redistribute it and/or
2111da177e4SLinus Torvalds  *		modify it under the terms of the GNU General Public License
2121da177e4SLinus Torvalds  *		as published by the Free Software Foundation; either version
2131da177e4SLinus Torvalds  *		2 of the License, or (at your option) any later version.
2141da177e4SLinus Torvalds  *
2151da177e4SLinus Torvalds  * Description of States:
2161da177e4SLinus Torvalds  *
2171da177e4SLinus Torvalds  *	TCP_SYN_SENT		sent a connection request, waiting for ack
2181da177e4SLinus Torvalds  *
2191da177e4SLinus Torvalds  *	TCP_SYN_RECV		received a connection request, sent ack,
2201da177e4SLinus Torvalds  *				waiting for final ack in three-way handshake.
2211da177e4SLinus Torvalds  *
2221da177e4SLinus Torvalds  *	TCP_ESTABLISHED		connection established
2231da177e4SLinus Torvalds  *
2241da177e4SLinus Torvalds  *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
2251da177e4SLinus Torvalds  *				transmission of remaining buffered data
2261da177e4SLinus Torvalds  *
2271da177e4SLinus Torvalds  *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
2281da177e4SLinus Torvalds  *				to shutdown
2291da177e4SLinus Torvalds  *
2301da177e4SLinus Torvalds  *	TCP_CLOSING		both sides have shutdown but we still have
2311da177e4SLinus Torvalds  *				data we have to finish sending
2321da177e4SLinus Torvalds  *
2331da177e4SLinus Torvalds  *	TCP_TIME_WAIT		timeout to catch resent junk before entering
2341da177e4SLinus Torvalds  *				closed, can only be entered from FIN_WAIT2
2351da177e4SLinus Torvalds  *				or CLOSING.  Required because the other end
2361da177e4SLinus Torvalds  *				may not have gotten our last ACK causing it
2371da177e4SLinus Torvalds  *				to retransmit the data packet (which we ignore)
2381da177e4SLinus Torvalds  *
2391da177e4SLinus Torvalds  *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
2401da177e4SLinus Torvalds  *				us to finish writing our data and to shutdown
2411da177e4SLinus Torvalds  *				(we have to close() to move on to LAST_ACK)
2421da177e4SLinus Torvalds  *
2431da177e4SLinus Torvalds  *	TCP_LAST_ACK		our side has shutdown after remote has
2441da177e4SLinus Torvalds  *				shutdown.  There may still be data in our
2451da177e4SLinus Torvalds  *				buffer that we have to finish sending
2461da177e4SLinus Torvalds  *
2471da177e4SLinus Torvalds  *	TCP_CLOSE		socket is finished
2481da177e4SLinus Torvalds  */
2491da177e4SLinus Torvalds 
250172589ccSIlpo Järvinen #include <linux/kernel.h>
2511da177e4SLinus Torvalds #include <linux/module.h>
2521da177e4SLinus Torvalds #include <linux/types.h>
2531da177e4SLinus Torvalds #include <linux/fcntl.h>
2541da177e4SLinus Torvalds #include <linux/poll.h>
2551da177e4SLinus Torvalds #include <linux/init.h>
2561da177e4SLinus Torvalds #include <linux/fs.h>
2579c55e01cSJens Axboe #include <linux/skbuff.h>
2589c55e01cSJens Axboe #include <linux/splice.h>
2599c55e01cSJens Axboe #include <linux/net.h>
2609c55e01cSJens Axboe #include <linux/socket.h>
2611da177e4SLinus Torvalds #include <linux/random.h>
2621da177e4SLinus Torvalds #include <linux/bootmem.h>
263b8059eadSDavid S. Miller #include <linux/cache.h>
264f4c50d99SHerbert Xu #include <linux/err.h>
265cfb6eeb4SYOSHIFUJI Hideaki #include <linux/crypto.h>
2661da177e4SLinus Torvalds 
2671da177e4SLinus Torvalds #include <net/icmp.h>
2681da177e4SLinus Torvalds #include <net/tcp.h>
2691da177e4SLinus Torvalds #include <net/xfrm.h>
2701da177e4SLinus Torvalds #include <net/ip.h>
2711a2449a8SChris Leech #include <net/netdma.h>
2729c55e01cSJens Axboe #include <net/sock.h>
2731da177e4SLinus Torvalds 
2741da177e4SLinus Torvalds #include <asm/uaccess.h>
2751da177e4SLinus Torvalds #include <asm/ioctls.h>
2761da177e4SLinus Torvalds 
/* FIN timeout sysctl; starts at the TCP_FIN_TIMEOUT compile-time default. */
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

/* SNMP statistics for the TCP MIB (struct tcp_mib). */
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

/* Count of orphaned TCP sockets; starts at zero. */
atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

/* Three-element memory/buffer sysctl vectors.  NOTE(review): the meaning
 * of each slot (min/pressure/max) is documented in
 * Documentation/networking/ip-sysctl.txt — confirm there, not here. */
int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);
2981da177e4SLinus Torvalds 
/*
 * TCP splice context, carried through read_descriptor_t so the
 * tcp_read_sock() actor knows where to splice and with what limits.
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;	/* destination pipe */
	size_t len;			/* number of bytes to splice */
	unsigned int flags;		/* splice modifier flags (SPLICE_F_*) */
};
3079c55e01cSJens Axboe 
3089c55e01cSJens Axboe /*
3091da177e4SLinus Torvalds  * Pressure flag: try to collapse.
3101da177e4SLinus Torvalds  * Technical note: it is used by multiple contexts non atomically.
3113ab224beSHideo Aoki  * All the __sk_mem_schedule() is of this nature: accounting
3121da177e4SLinus Torvalds  * is strict, actions are advisory and have some latency.
3131da177e4SLinus Torvalds  */
3144103f8cdSEric Dumazet int tcp_memory_pressure __read_mostly;
3151da177e4SLinus Torvalds 
3161da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_memory_pressure);
3171da177e4SLinus Torvalds 
3181da177e4SLinus Torvalds void tcp_enter_memory_pressure(void)
3191da177e4SLinus Torvalds {
3201da177e4SLinus Torvalds 	if (!tcp_memory_pressure) {
3211da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
3221da177e4SLinus Torvalds 		tcp_memory_pressure = 1;
3231da177e4SLinus Torvalds 	}
3241da177e4SLinus Torvalds }
3251da177e4SLinus Torvalds 
3261da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_enter_memory_pressure);
3271da177e4SLinus Torvalds 
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 *
 *	Returns the poll event mask (POLLIN/POLLOUT/POLLERR/POLLHUP/...).
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	/* A listening socket only ever becomes readable; hand off to the
	 * connection-request helper. */
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by another threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awaken
		 * in SYN_* states. */

		/* Readable when unread data exists, unless that data is a
		 * lone out-of-line urgent byte (which read() skips). */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		/* Pending urgent data is signalled out-of-band. */
		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
4191da177e4SLinus Torvalds 
/*
 * ioctl handler for TCP sockets.
 *
 * SIOCINQ:    amount of readable data queued (a queued FIN is not
 *             counted; when urgent data is pending and SOCK_URGINLINE
 *             is unset, only data up to the urgent mark is reported).
 * SIOCATMARK: non-zero when the read pointer sits at the urgent mark.
 * SIOCOUTQ:   tp->write_seq - tp->snd_una, i.e. bytes written by the
 *             application that the peer has not acknowledged yet.
 *
 * Returns 0 with the answer stored at *arg, or a negative errno
 * (-EINVAL for listening sockets, -ENOIOCTLCMD for unknown commands).
 */
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;	/* nothing readable before handshake completes */
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			/* No urgent mark between the reader and rcv_nxt. */
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
4651da177e4SLinus Torvalds 
/* Tag @skb with the PSH flag and record the sequence number at which we
 * pushed, so forced_push() can measure how much unpushed data follows. */
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}
4711da177e4SLinus Torvalds 
/* True once more than half of tp->max_window has been written since the
 * last PSH mark — callers then push even if MSG_MORE was requested. */
static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
4761da177e4SLinus Torvalds 
/* Append a freshly built @skb to the tail of @sk's write queue and charge
 * its memory against the socket's send-buffer accounting. */
static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	/* Initialize the control block: the skb covers no sequence space
	 * yet (seq == end_seq at write_seq) and will carry an ACK. */
	skb->csum    = 0;
	tcb->seq     = tcb->end_seq = tp->write_seq;
	tcb->flags   = TCPCB_FLAG_ACK;
	tcb->sacked  = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	/* Account the true size of the skb to the socket. */
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	/* TCP_NAGLE_PUSH is a one-shot request: consume it now that a new
	 * skb has been queued. */
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}
4931da177e4SLinus Torvalds 
4941da177e4SLinus Torvalds static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
4951da177e4SLinus Torvalds 				struct sk_buff *skb)
4961da177e4SLinus Torvalds {
4971da177e4SLinus Torvalds 	if (flags & MSG_OOB) {
4981da177e4SLinus Torvalds 		tp->urg_mode = 1;
4991da177e4SLinus Torvalds 		tp->snd_up = tp->write_seq;
5001da177e4SLinus Torvalds 	}
5011da177e4SLinus Torvalds }
5021da177e4SLinus Torvalds 
5039e412ba7SIlpo Järvinen static inline void tcp_push(struct sock *sk, int flags, int mss_now,
5049e412ba7SIlpo Järvinen 			    int nonagle)
5051da177e4SLinus Torvalds {
5069e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
5079e412ba7SIlpo Järvinen 
508fe067e8aSDavid S. Miller 	if (tcp_send_head(sk)) {
509fe067e8aSDavid S. Miller 		struct sk_buff *skb = tcp_write_queue_tail(sk);
5101da177e4SLinus Torvalds 		if (!(flags & MSG_MORE) || forced_push(tp))
5111da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
5121da177e4SLinus Torvalds 		tcp_mark_urg(tp, flags, skb);
5139e412ba7SIlpo Järvinen 		__tcp_push_pending_frames(sk, mss_now,
5141da177e4SLinus Torvalds 					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
5151da177e4SLinus Torvalds 	}
5161da177e4SLinus Torvalds }
5171da177e4SLinus Torvalds 
/* Actor callback for tcp_read_sock(): splice data from @skb, starting at
 * @offset, into the pipe carried in the tcp_splice_state.  Note @len is
 * unused — tss->len bounds the splice instead.  The return value is
 * passed straight through from skb_splice_bits(). */
static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;

	return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
}
5259c55e01cSJens Axboe 
5269c55e01cSJens Axboe static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
5279c55e01cSJens Axboe {
5289c55e01cSJens Axboe 	/* Store TCP splice context information in read_descriptor_t. */
5299c55e01cSJens Axboe 	read_descriptor_t rd_desc = {
5309c55e01cSJens Axboe 		.arg.data = tss,
5319c55e01cSJens Axboe 	};
5329c55e01cSJens Axboe 
5339c55e01cSJens Axboe 	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
5349c55e01cSJens Axboe }
5359c55e01cSJens Axboe 
/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *    Returns the number of bytes spliced, 0 on EOF, or a negative
 *    errno if nothing was spliced.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			/* Nothing spliced this pass: return what we have,
			 * report an error/EOF condition, or wait for data.
			 */
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		/* Cycle the socket lock so packets that landed on the
		 * backlog while we held it get processed.
		 */
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	/* Prefer reporting progress over a late error; the error will
	 * be picked up by the next call.
	 */
	if (spliced)
		return spliced;

	return ret;
}
6319c55e01cSJens Axboe 
/* Allocate a transmit skb with exactly @size bytes of linear space for
 * the caller, plus headroom for the protocol's maximum header, and
 * charge it against the socket's write-memory accounting.
 *
 * Returns the skb on success; NULL if the allocation failed or the
 * write-memory quota could not be charged.  On allocation failure the
 * protocol is told to enter memory pressure and the send buffer is
 * moderated.
 */
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned.  */
	size = ALIGN(size, 4);

	/* fclone allocation: transmit skbs are commonly cloned for
	 * retransmission, so pair each with a companion clone slot.
	 */
	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		/* Quota charge failed: give the skb back. */
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}
656f561d0f2SPavel Emelyanov 
/* Core of tcp_sendpage(): attach @psize bytes described by @pages and
 * @poffset to the socket write queue as page fragments (zero-copy),
 * pushing segments out as they reach size_goal.
 *
 * Called with the socket locked.  Returns the number of bytes queued,
 * or a negative errno if nothing could be queued.
 */
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
			 size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	/* mss_now is the per-segment payload limit; size_goal may be
	 * larger when segmentation offload lets us build super-skbs.
	 */
	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		/* Start a new segment when the queue is fully sent or the
		 * tail skb has already reached its size goal.
		 */
		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			/* Fragment slots exhausted: push this skb out and
			 * begin a fresh segment.
			 */
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			/* Extend the last fragment rather than adding one. */
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			/* Take a reference: the skb now shares the page. */
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		/* Account the attached bytes on the skb and the socket. */
		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		/* Keep filling until the segment reaches its goal; OOB
		 * data is pushed by the caller.
		 */
		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		/* Flush what we queued so far before sleeping for memory. */
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		/* The MSS and size goal may have changed while we slept. */
		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	/* Partial progress wins over the error; report the bytes queued. */
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}
7721da177e4SLinus Torvalds 
7731da177e4SLinus Torvalds ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
7741da177e4SLinus Torvalds 		     size_t size, int flags)
7751da177e4SLinus Torvalds {
7761da177e4SLinus Torvalds 	ssize_t res;
7771da177e4SLinus Torvalds 	struct sock *sk = sock->sk;
7781da177e4SLinus Torvalds 
7791da177e4SLinus Torvalds 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
7808648b305SHerbert Xu 	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
7811da177e4SLinus Torvalds 		return sock_no_sendpage(sock, page, offset, size, flags);
7821da177e4SLinus Torvalds 
7831da177e4SLinus Torvalds 	lock_sock(sk);
7841da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
7851da177e4SLinus Torvalds 	res = do_tcp_sendpages(sk, &page, offset, size, flags);
7861da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
7871da177e4SLinus Torvalds 	release_sock(sk);
7881da177e4SLinus Torvalds 	return res;
7891da177e4SLinus Torvalds }
7901da177e4SLinus Torvalds 
/* Per-socket staging page and current offset within it, used by
 * tcp_sendmsg() to coalesce sub-page user copies into fragments.
 */
#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)
7931da177e4SLinus Torvalds 
7949e412ba7SIlpo Järvinen static inline int select_size(struct sock *sk)
7951da177e4SLinus Torvalds {
7969e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
797c1b4a7e6SDavid S. Miller 	int tmp = tp->mss_cache;
7981da177e4SLinus Torvalds 
799b4e26f5eSDavid S. Miller 	if (sk->sk_route_caps & NETIF_F_SG) {
800bcd76111SHerbert Xu 		if (sk_can_gso(sk))
801c65f7f00SDavid S. Miller 			tmp = 0;
802b4e26f5eSDavid S. Miller 		else {
803b4e26f5eSDavid S. Miller 			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
804b4e26f5eSDavid S. Miller 
805b4e26f5eSDavid S. Miller 			if (tmp >= pgbreak &&
806b4e26f5eSDavid S. Miller 			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
807b4e26f5eSDavid S. Miller 				tmp = pgbreak;
808b4e26f5eSDavid S. Miller 		}
809b4e26f5eSDavid S. Miller 	}
8101da177e4SLinus Torvalds 
8111da177e4SLinus Torvalds 	return tmp;
8121da177e4SLinus Torvalds }
8131da177e4SLinus Torvalds 
/* Copy data from the user's iovec into the socket write queue,
 * segmenting it into skbs of at most size_goal bytes, and push
 * segments out as they fill.
 *
 * Data lands in the skb's linear area while there is tailroom;
 * afterwards it is copied into page fragments, using the per-socket
 * staging page (TCP_PAGE/TCP_OFF) to coalesce sub-page copies.
 *
 * Returns the number of bytes copied, or a negative errno if nothing
 * was copied.
 */
int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size)
{
	struct sock *sk = sock->sk;
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = tcp_write_queue_tail(sk);

			/* Start a new segment when everything queued has
			 * been sent, or the tail skb reached its goal.
			 */
			if (!tcp_send_head(sk) ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk, select_size(sk),
						sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				/* No tailroom: copy into page fragments,
				 * staging through TCP_PAGE(sk) when the data
				 * cannot extend the last fragment.
				 */
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					   !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						/* Staging page is full; drop
						 * our reference and start a
						 * fresh one.
						 */
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						/* Page still has room: keep it
						 * as the staging page, with an
						 * extra reference for us.
						 */
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			/* Keep filling until the segment reaches its goal;
			 * OOB data is pushed below at out:.
			 */
			if (skb->len < size_goal || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			/* Flush what was queued before sleeping for memory. */
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			/* MSS and size goal may have changed while asleep. */
			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	/* Partial progress wins over the error; report bytes copied. */
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}
10341da177e4SLinus Torvalds 
/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 *
 *	Returns 1 and stores the single OOB byte into @msg when valid
 *	urgent data is present, 0 on EOF/shutdown, or a negative errno.
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read: inline-OOB sockets deliver it in the
	 * normal stream, and already-consumed data cannot be reread. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;	/* low byte is the OOB byte itself */

		/* Mark consumed unless the caller is only peeking. */
		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
10851da177e4SLinus Torvalds 
/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	/* The head of the receive queue must still contain unread data. */
	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
11491da177e4SLinus Torvalds 
/* Drain the prequeue: feed every skb that was deferred to process
 * context through the normal receive path via sk->sk_backlog_rcv.
 * Called with the socket locked, from tcp_recvmsg.
 */
static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}
11671da177e4SLinus Torvalds 
11681da177e4SLinus Torvalds static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
11691da177e4SLinus Torvalds {
11701da177e4SLinus Torvalds 	struct sk_buff *skb;
11711da177e4SLinus Torvalds 	u32 offset;
11721da177e4SLinus Torvalds 
11731da177e4SLinus Torvalds 	skb_queue_walk(&sk->sk_receive_queue, skb) {
11741da177e4SLinus Torvalds 		offset = seq - TCP_SKB_CB(skb)->seq;
1175aa8223c7SArnaldo Carvalho de Melo 		if (tcp_hdr(skb)->syn)
11761da177e4SLinus Torvalds 			offset--;
1177aa8223c7SArnaldo Carvalho de Melo 		if (offset < skb->len || tcp_hdr(skb)->fin) {
11781da177e4SLinus Torvalds 			*off = offset;
11791da177e4SLinus Torvalds 			return skb;
11801da177e4SLinus Torvalds 		}
11811da177e4SLinus Torvalds 	}
11821da177e4SLinus Torvalds 	return NULL;
11831da177e4SLinus Torvalds }
11841da177e4SLinus Torvalds 
11851da177e4SLinus Torvalds /*
11861da177e4SLinus Torvalds  * This routine provides an alternative to tcp_recvmsg() for routines
11871da177e4SLinus Torvalds  * that would like to handle copying from skbuffs directly in 'sendfile'
11881da177e4SLinus Torvalds  * fashion.
11891da177e4SLinus Torvalds  * Note:
11901da177e4SLinus Torvalds  *	- It is assumed that the socket was locked by the caller.
11911da177e4SLinus Torvalds  *	- The routine does not block.
11921da177e4SLinus Torvalds  *	- At present, there is no support for reading OOB data
11931da177e4SLinus Torvalds  *	  or for 'peeking' the socket using this routine
11941da177e4SLinus Torvalds  *	  (although both would be easy to implement).
11951da177e4SLinus Torvalds  */
11961da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
11971da177e4SLinus Torvalds 		  sk_read_actor_t recv_actor)
11981da177e4SLinus Torvalds {
11991da177e4SLinus Torvalds 	struct sk_buff *skb;
12001da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12011da177e4SLinus Torvalds 	u32 seq = tp->copied_seq;
12021da177e4SLinus Torvalds 	u32 offset;
12031da177e4SLinus Torvalds 	int copied = 0;
12041da177e4SLinus Torvalds 
12051da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
12061da177e4SLinus Torvalds 		return -ENOTCONN;
12071da177e4SLinus Torvalds 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
12081da177e4SLinus Torvalds 		if (offset < skb->len) {
12091da177e4SLinus Torvalds 			size_t used, len;
12101da177e4SLinus Torvalds 
12111da177e4SLinus Torvalds 			len = skb->len - offset;
12121da177e4SLinus Torvalds 			/* Stop reading if we hit a patch of urgent data */
12131da177e4SLinus Torvalds 			if (tp->urg_data) {
12141da177e4SLinus Torvalds 				u32 urg_offset = tp->urg_seq - seq;
12151da177e4SLinus Torvalds 				if (urg_offset < len)
12161da177e4SLinus Torvalds 					len = urg_offset;
12171da177e4SLinus Torvalds 				if (!len)
12181da177e4SLinus Torvalds 					break;
12191da177e4SLinus Torvalds 			}
12201da177e4SLinus Torvalds 			used = recv_actor(desc, skb, offset, len);
1221ddb61a57SJens Axboe 			if (used < 0) {
1222ddb61a57SJens Axboe 				if (!copied)
1223ddb61a57SJens Axboe 					copied = used;
1224ddb61a57SJens Axboe 				break;
1225ddb61a57SJens Axboe 			} else if (used <= len) {
12261da177e4SLinus Torvalds 				seq += used;
12271da177e4SLinus Torvalds 				copied += used;
12281da177e4SLinus Torvalds 				offset += used;
12291da177e4SLinus Torvalds 			}
1230*293ad604SOctavian Purdila 			/*
1231*293ad604SOctavian Purdila 			 * If recv_actor drops the lock (e.g. TCP splice
1232*293ad604SOctavian Purdila 			 * receive) the skb pointer might be invalid when
1233*293ad604SOctavian Purdila 			 * getting here: tcp_collapse might have deleted it
1234*293ad604SOctavian Purdila 			 * while aggregating skbs from the socket queue.
1235*293ad604SOctavian Purdila 			 */
1236*293ad604SOctavian Purdila 			skb = tcp_recv_skb(sk, seq-1, &offset);
1237*293ad604SOctavian Purdila 			if (!skb || (offset+1 != skb->len))
12381da177e4SLinus Torvalds 				break;
12391da177e4SLinus Torvalds 		}
1240aa8223c7SArnaldo Carvalho de Melo 		if (tcp_hdr(skb)->fin) {
1241624d1164SChris Leech 			sk_eat_skb(sk, skb, 0);
12421da177e4SLinus Torvalds 			++seq;
12431da177e4SLinus Torvalds 			break;
12441da177e4SLinus Torvalds 		}
1245624d1164SChris Leech 		sk_eat_skb(sk, skb, 0);
12461da177e4SLinus Torvalds 		if (!desc->count)
12471da177e4SLinus Torvalds 			break;
12481da177e4SLinus Torvalds 	}
12491da177e4SLinus Torvalds 	tp->copied_seq = seq;
12501da177e4SLinus Torvalds 
12511da177e4SLinus Torvalds 	tcp_rcv_space_adjust(sk);
12521da177e4SLinus Torvalds 
12531da177e4SLinus Torvalds 	/* Clean up data we have read: This will do ACK frames. */
1254ddb61a57SJens Axboe 	if (copied > 0)
12550e4b4992SChris Leech 		tcp_cleanup_rbuf(sk, copied);
12561da177e4SLinus Torvalds 	return copied;
12571da177e4SLinus Torvalds }
12581da177e4SLinus Torvalds 
12591da177e4SLinus Torvalds /*
12601da177e4SLinus Torvalds  *	This routine copies from a sock struct into the user buffer.
12611da177e4SLinus Torvalds  *
12621da177e4SLinus Torvalds  *	Technical note: in 2.3 we work on _locked_ socket, so that
12631da177e4SLinus Torvalds  *	tricks with *seq access order and skb->users are not required.
12641da177e4SLinus Torvalds  *	Probably, code can be easily improved even more.
12651da177e4SLinus Torvalds  */
12661da177e4SLinus Torvalds 
12671da177e4SLinus Torvalds int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
12681da177e4SLinus Torvalds 		size_t len, int nonblock, int flags, int *addr_len)
12691da177e4SLinus Torvalds {
12701da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12711da177e4SLinus Torvalds 	int copied = 0;
12721da177e4SLinus Torvalds 	u32 peek_seq;
12731da177e4SLinus Torvalds 	u32 *seq;
12741da177e4SLinus Torvalds 	unsigned long used;
12751da177e4SLinus Torvalds 	int err;
12761da177e4SLinus Torvalds 	int target;		/* Read at least this many bytes */
12771da177e4SLinus Torvalds 	long timeo;
12781da177e4SLinus Torvalds 	struct task_struct *user_recv = NULL;
12791a2449a8SChris Leech 	int copied_early = 0;
12802b1244a4SChris Leech 	struct sk_buff *skb;
12811da177e4SLinus Torvalds 
12821da177e4SLinus Torvalds 	lock_sock(sk);
12831da177e4SLinus Torvalds 
12841da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
12851da177e4SLinus Torvalds 
12861da177e4SLinus Torvalds 	err = -ENOTCONN;
12871da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
12881da177e4SLinus Torvalds 		goto out;
12891da177e4SLinus Torvalds 
12901da177e4SLinus Torvalds 	timeo = sock_rcvtimeo(sk, nonblock);
12911da177e4SLinus Torvalds 
12921da177e4SLinus Torvalds 	/* Urgent data needs to be handled specially. */
12931da177e4SLinus Torvalds 	if (flags & MSG_OOB)
12941da177e4SLinus Torvalds 		goto recv_urg;
12951da177e4SLinus Torvalds 
12961da177e4SLinus Torvalds 	seq = &tp->copied_seq;
12971da177e4SLinus Torvalds 	if (flags & MSG_PEEK) {
12981da177e4SLinus Torvalds 		peek_seq = tp->copied_seq;
12991da177e4SLinus Torvalds 		seq = &peek_seq;
13001da177e4SLinus Torvalds 	}
13011da177e4SLinus Torvalds 
13021da177e4SLinus Torvalds 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
13031da177e4SLinus Torvalds 
13041a2449a8SChris Leech #ifdef CONFIG_NET_DMA
13051a2449a8SChris Leech 	tp->ucopy.dma_chan = NULL;
13061a2449a8SChris Leech 	preempt_disable();
13072b1244a4SChris Leech 	skb = skb_peek_tail(&sk->sk_receive_queue);
1308e00c5d8bSAndrew Morton 	{
1309e00c5d8bSAndrew Morton 		int available = 0;
1310e00c5d8bSAndrew Morton 
13112b1244a4SChris Leech 		if (skb)
13122b1244a4SChris Leech 			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
13132b1244a4SChris Leech 		if ((available < target) &&
13142b1244a4SChris Leech 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1315e00c5d8bSAndrew Morton 		    !sysctl_tcp_low_latency &&
1316e00c5d8bSAndrew Morton 		    __get_cpu_var(softnet_data).net_dma) {
13171a2449a8SChris Leech 			preempt_enable_no_resched();
1318e00c5d8bSAndrew Morton 			tp->ucopy.pinned_list =
1319e00c5d8bSAndrew Morton 					dma_pin_iovec_pages(msg->msg_iov, len);
1320e00c5d8bSAndrew Morton 		} else {
13211a2449a8SChris Leech 			preempt_enable_no_resched();
1322e00c5d8bSAndrew Morton 		}
1323e00c5d8bSAndrew Morton 	}
13241a2449a8SChris Leech #endif
13251a2449a8SChris Leech 
13261da177e4SLinus Torvalds 	do {
13271da177e4SLinus Torvalds 		u32 offset;
13281da177e4SLinus Torvalds 
13291da177e4SLinus Torvalds 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
13301da177e4SLinus Torvalds 		if (tp->urg_data && tp->urg_seq == *seq) {
13311da177e4SLinus Torvalds 			if (copied)
13321da177e4SLinus Torvalds 				break;
13331da177e4SLinus Torvalds 			if (signal_pending(current)) {
13341da177e4SLinus Torvalds 				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
13351da177e4SLinus Torvalds 				break;
13361da177e4SLinus Torvalds 			}
13371da177e4SLinus Torvalds 		}
13381da177e4SLinus Torvalds 
13391da177e4SLinus Torvalds 		/* Next get a buffer. */
13401da177e4SLinus Torvalds 
13411da177e4SLinus Torvalds 		skb = skb_peek(&sk->sk_receive_queue);
13421da177e4SLinus Torvalds 		do {
13431da177e4SLinus Torvalds 			if (!skb)
13441da177e4SLinus Torvalds 				break;
13451da177e4SLinus Torvalds 
13461da177e4SLinus Torvalds 			/* Now that we have two receive queues this
13471da177e4SLinus Torvalds 			 * shouldn't happen.
13481da177e4SLinus Torvalds 			 */
13491da177e4SLinus Torvalds 			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
13501da177e4SLinus Torvalds 				printk(KERN_INFO "recvmsg bug: copied %X "
13511da177e4SLinus Torvalds 				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
13521da177e4SLinus Torvalds 				break;
13531da177e4SLinus Torvalds 			}
13541da177e4SLinus Torvalds 			offset = *seq - TCP_SKB_CB(skb)->seq;
1355aa8223c7SArnaldo Carvalho de Melo 			if (tcp_hdr(skb)->syn)
13561da177e4SLinus Torvalds 				offset--;
13571da177e4SLinus Torvalds 			if (offset < skb->len)
13581da177e4SLinus Torvalds 				goto found_ok_skb;
1359aa8223c7SArnaldo Carvalho de Melo 			if (tcp_hdr(skb)->fin)
13601da177e4SLinus Torvalds 				goto found_fin_ok;
13611da177e4SLinus Torvalds 			BUG_TRAP(flags & MSG_PEEK);
13621da177e4SLinus Torvalds 			skb = skb->next;
13631da177e4SLinus Torvalds 		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
13641da177e4SLinus Torvalds 
13651da177e4SLinus Torvalds 		/* Well, if we have backlog, try to process it now yet. */
13661da177e4SLinus Torvalds 
13671da177e4SLinus Torvalds 		if (copied >= target && !sk->sk_backlog.tail)
13681da177e4SLinus Torvalds 			break;
13691da177e4SLinus Torvalds 
13701da177e4SLinus Torvalds 		if (copied) {
13711da177e4SLinus Torvalds 			if (sk->sk_err ||
13721da177e4SLinus Torvalds 			    sk->sk_state == TCP_CLOSE ||
13731da177e4SLinus Torvalds 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
13741da177e4SLinus Torvalds 			    !timeo ||
13751da177e4SLinus Torvalds 			    signal_pending(current) ||
13761da177e4SLinus Torvalds 			    (flags & MSG_PEEK))
13771da177e4SLinus Torvalds 				break;
13781da177e4SLinus Torvalds 		} else {
13791da177e4SLinus Torvalds 			if (sock_flag(sk, SOCK_DONE))
13801da177e4SLinus Torvalds 				break;
13811da177e4SLinus Torvalds 
13821da177e4SLinus Torvalds 			if (sk->sk_err) {
13831da177e4SLinus Torvalds 				copied = sock_error(sk);
13841da177e4SLinus Torvalds 				break;
13851da177e4SLinus Torvalds 			}
13861da177e4SLinus Torvalds 
13871da177e4SLinus Torvalds 			if (sk->sk_shutdown & RCV_SHUTDOWN)
13881da177e4SLinus Torvalds 				break;
13891da177e4SLinus Torvalds 
13901da177e4SLinus Torvalds 			if (sk->sk_state == TCP_CLOSE) {
13911da177e4SLinus Torvalds 				if (!sock_flag(sk, SOCK_DONE)) {
13921da177e4SLinus Torvalds 					/* This occurs when user tries to read
13931da177e4SLinus Torvalds 					 * from never connected socket.
13941da177e4SLinus Torvalds 					 */
13951da177e4SLinus Torvalds 					copied = -ENOTCONN;
13961da177e4SLinus Torvalds 					break;
13971da177e4SLinus Torvalds 				}
13981da177e4SLinus Torvalds 				break;
13991da177e4SLinus Torvalds 			}
14001da177e4SLinus Torvalds 
14011da177e4SLinus Torvalds 			if (!timeo) {
14021da177e4SLinus Torvalds 				copied = -EAGAIN;
14031da177e4SLinus Torvalds 				break;
14041da177e4SLinus Torvalds 			}
14051da177e4SLinus Torvalds 
14061da177e4SLinus Torvalds 			if (signal_pending(current)) {
14071da177e4SLinus Torvalds 				copied = sock_intr_errno(timeo);
14081da177e4SLinus Torvalds 				break;
14091da177e4SLinus Torvalds 			}
14101da177e4SLinus Torvalds 		}
14111da177e4SLinus Torvalds 
14120e4b4992SChris Leech 		tcp_cleanup_rbuf(sk, copied);
14131da177e4SLinus Torvalds 
14147df55125SDavid S. Miller 		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
14151da177e4SLinus Torvalds 			/* Install new reader */
14161da177e4SLinus Torvalds 			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
14171da177e4SLinus Torvalds 				user_recv = current;
14181da177e4SLinus Torvalds 				tp->ucopy.task = user_recv;
14191da177e4SLinus Torvalds 				tp->ucopy.iov = msg->msg_iov;
14201da177e4SLinus Torvalds 			}
14211da177e4SLinus Torvalds 
14221da177e4SLinus Torvalds 			tp->ucopy.len = len;
14231da177e4SLinus Torvalds 
14241da177e4SLinus Torvalds 			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
14251da177e4SLinus Torvalds 				 (flags & (MSG_PEEK | MSG_TRUNC)));
14261da177e4SLinus Torvalds 
14271da177e4SLinus Torvalds 			/* Ugly... If prequeue is not empty, we have to
14281da177e4SLinus Torvalds 			 * process it before releasing socket, otherwise
14291da177e4SLinus Torvalds 			 * order will be broken at second iteration.
14301da177e4SLinus Torvalds 			 * More elegant solution is required!!!
14311da177e4SLinus Torvalds 			 *
14321da177e4SLinus Torvalds 			 * Look: we have the following (pseudo)queues:
14331da177e4SLinus Torvalds 			 *
14341da177e4SLinus Torvalds 			 * 1. packets in flight
14351da177e4SLinus Torvalds 			 * 2. backlog
14361da177e4SLinus Torvalds 			 * 3. prequeue
14371da177e4SLinus Torvalds 			 * 4. receive_queue
14381da177e4SLinus Torvalds 			 *
14391da177e4SLinus Torvalds 			 * Each queue can be processed only if the next ones
14401da177e4SLinus Torvalds 			 * are empty. At this point we have empty receive_queue.
14411da177e4SLinus Torvalds 			 * But prequeue _can_ be not empty after 2nd iteration,
14421da177e4SLinus Torvalds 			 * when we jumped to start of loop because backlog
14431da177e4SLinus Torvalds 			 * processing added something to receive_queue.
14441da177e4SLinus Torvalds 			 * We cannot release_sock(), because backlog contains
14451da177e4SLinus Torvalds 			 * packets arrived _after_ prequeued ones.
14461da177e4SLinus Torvalds 			 *
14471da177e4SLinus Torvalds 			 * Shortly, algorithm is clear --- to process all
14481da177e4SLinus Torvalds 			 * the queues in order. We could make it more directly,
14491da177e4SLinus Torvalds 			 * requeueing packets from backlog to prequeue, if
14501da177e4SLinus Torvalds 			 * is not empty. It is more elegant, but eats cycles,
14511da177e4SLinus Torvalds 			 * unfortunately.
14521da177e4SLinus Torvalds 			 */
1453b03efcfbSDavid S. Miller 			if (!skb_queue_empty(&tp->ucopy.prequeue))
14541da177e4SLinus Torvalds 				goto do_prequeue;
14551da177e4SLinus Torvalds 
14561da177e4SLinus Torvalds 			/* __ Set realtime policy in scheduler __ */
14571da177e4SLinus Torvalds 		}
14581da177e4SLinus Torvalds 
14591da177e4SLinus Torvalds 		if (copied >= target) {
14601da177e4SLinus Torvalds 			/* Do not sleep, just process backlog. */
14611da177e4SLinus Torvalds 			release_sock(sk);
14621da177e4SLinus Torvalds 			lock_sock(sk);
14631da177e4SLinus Torvalds 		} else
14641da177e4SLinus Torvalds 			sk_wait_data(sk, &timeo);
14651da177e4SLinus Torvalds 
14661a2449a8SChris Leech #ifdef CONFIG_NET_DMA
14671a2449a8SChris Leech 		tp->ucopy.wakeup = 0;
14681a2449a8SChris Leech #endif
14691a2449a8SChris Leech 
14701da177e4SLinus Torvalds 		if (user_recv) {
14711da177e4SLinus Torvalds 			int chunk;
14721da177e4SLinus Torvalds 
14731da177e4SLinus Torvalds 			/* __ Restore normal policy in scheduler __ */
14741da177e4SLinus Torvalds 
14751da177e4SLinus Torvalds 			if ((chunk = len - tp->ucopy.len) != 0) {
14761da177e4SLinus Torvalds 				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
14771da177e4SLinus Torvalds 				len -= chunk;
14781da177e4SLinus Torvalds 				copied += chunk;
14791da177e4SLinus Torvalds 			}
14801da177e4SLinus Torvalds 
14811da177e4SLinus Torvalds 			if (tp->rcv_nxt == tp->copied_seq &&
1482b03efcfbSDavid S. Miller 			    !skb_queue_empty(&tp->ucopy.prequeue)) {
14831da177e4SLinus Torvalds do_prequeue:
14841da177e4SLinus Torvalds 				tcp_prequeue_process(sk);
14851da177e4SLinus Torvalds 
14861da177e4SLinus Torvalds 				if ((chunk = len - tp->ucopy.len) != 0) {
14871da177e4SLinus Torvalds 					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
14881da177e4SLinus Torvalds 					len -= chunk;
14891da177e4SLinus Torvalds 					copied += chunk;
14901da177e4SLinus Torvalds 				}
14911da177e4SLinus Torvalds 			}
14921da177e4SLinus Torvalds 		}
14931da177e4SLinus Torvalds 		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
14941da177e4SLinus Torvalds 			if (net_ratelimit())
14951da177e4SLinus Torvalds 				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1496ba25f9dcSPavel Emelyanov 				       current->comm, task_pid_nr(current));
14971da177e4SLinus Torvalds 			peek_seq = tp->copied_seq;
14981da177e4SLinus Torvalds 		}
14991da177e4SLinus Torvalds 		continue;
15001da177e4SLinus Torvalds 
15011da177e4SLinus Torvalds 	found_ok_skb:
15021da177e4SLinus Torvalds 		/* Ok so how much can we use? */
15031da177e4SLinus Torvalds 		used = skb->len - offset;
15041da177e4SLinus Torvalds 		if (len < used)
15051da177e4SLinus Torvalds 			used = len;
15061da177e4SLinus Torvalds 
15071da177e4SLinus Torvalds 		/* Do we have urgent data here? */
15081da177e4SLinus Torvalds 		if (tp->urg_data) {
15091da177e4SLinus Torvalds 			u32 urg_offset = tp->urg_seq - *seq;
15101da177e4SLinus Torvalds 			if (urg_offset < used) {
15111da177e4SLinus Torvalds 				if (!urg_offset) {
15121da177e4SLinus Torvalds 					if (!sock_flag(sk, SOCK_URGINLINE)) {
15131da177e4SLinus Torvalds 						++*seq;
15141da177e4SLinus Torvalds 						offset++;
15151da177e4SLinus Torvalds 						used--;
15161da177e4SLinus Torvalds 						if (!used)
15171da177e4SLinus Torvalds 							goto skip_copy;
15181da177e4SLinus Torvalds 					}
15191da177e4SLinus Torvalds 				} else
15201da177e4SLinus Torvalds 					used = urg_offset;
15211da177e4SLinus Torvalds 			}
15221da177e4SLinus Torvalds 		}
15231da177e4SLinus Torvalds 
15241da177e4SLinus Torvalds 		if (!(flags & MSG_TRUNC)) {
15251a2449a8SChris Leech #ifdef CONFIG_NET_DMA
15261a2449a8SChris Leech 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
15271a2449a8SChris Leech 				tp->ucopy.dma_chan = get_softnet_dma();
15281a2449a8SChris Leech 
15291a2449a8SChris Leech 			if (tp->ucopy.dma_chan) {
15301a2449a8SChris Leech 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
15311a2449a8SChris Leech 					tp->ucopy.dma_chan, skb, offset,
15321a2449a8SChris Leech 					msg->msg_iov, used,
15331a2449a8SChris Leech 					tp->ucopy.pinned_list);
15341a2449a8SChris Leech 
15351a2449a8SChris Leech 				if (tp->ucopy.dma_cookie < 0) {
15361a2449a8SChris Leech 
15371a2449a8SChris Leech 					printk(KERN_ALERT "dma_cookie < 0\n");
15381a2449a8SChris Leech 
15391a2449a8SChris Leech 					/* Exception. Bailout! */
15401a2449a8SChris Leech 					if (!copied)
15411a2449a8SChris Leech 						copied = -EFAULT;
15421a2449a8SChris Leech 					break;
15431a2449a8SChris Leech 				}
15441a2449a8SChris Leech 				if ((offset + used) == skb->len)
15451a2449a8SChris Leech 					copied_early = 1;
15461a2449a8SChris Leech 
15471a2449a8SChris Leech 			} else
15481a2449a8SChris Leech #endif
15491a2449a8SChris Leech 			{
15501da177e4SLinus Torvalds 				err = skb_copy_datagram_iovec(skb, offset,
15511da177e4SLinus Torvalds 						msg->msg_iov, used);
15521da177e4SLinus Torvalds 				if (err) {
15531da177e4SLinus Torvalds 					/* Exception. Bailout! */
15541da177e4SLinus Torvalds 					if (!copied)
15551da177e4SLinus Torvalds 						copied = -EFAULT;
15561da177e4SLinus Torvalds 					break;
15571da177e4SLinus Torvalds 				}
15581da177e4SLinus Torvalds 			}
15591a2449a8SChris Leech 		}
15601da177e4SLinus Torvalds 
15611da177e4SLinus Torvalds 		*seq += used;
15621da177e4SLinus Torvalds 		copied += used;
15631da177e4SLinus Torvalds 		len -= used;
15641da177e4SLinus Torvalds 
15651da177e4SLinus Torvalds 		tcp_rcv_space_adjust(sk);
15661da177e4SLinus Torvalds 
15671da177e4SLinus Torvalds skip_copy:
15681da177e4SLinus Torvalds 		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
15691da177e4SLinus Torvalds 			tp->urg_data = 0;
15709e412ba7SIlpo Järvinen 			tcp_fast_path_check(sk);
15711da177e4SLinus Torvalds 		}
15721da177e4SLinus Torvalds 		if (used + offset < skb->len)
15731da177e4SLinus Torvalds 			continue;
15741da177e4SLinus Torvalds 
1575aa8223c7SArnaldo Carvalho de Melo 		if (tcp_hdr(skb)->fin)
15761da177e4SLinus Torvalds 			goto found_fin_ok;
15771a2449a8SChris Leech 		if (!(flags & MSG_PEEK)) {
15781a2449a8SChris Leech 			sk_eat_skb(sk, skb, copied_early);
15791a2449a8SChris Leech 			copied_early = 0;
15801a2449a8SChris Leech 		}
15811da177e4SLinus Torvalds 		continue;
15821da177e4SLinus Torvalds 
15831da177e4SLinus Torvalds 	found_fin_ok:
15841da177e4SLinus Torvalds 		/* Process the FIN. */
15851da177e4SLinus Torvalds 		++*seq;
15861a2449a8SChris Leech 		if (!(flags & MSG_PEEK)) {
15871a2449a8SChris Leech 			sk_eat_skb(sk, skb, copied_early);
15881a2449a8SChris Leech 			copied_early = 0;
15891a2449a8SChris Leech 		}
15901da177e4SLinus Torvalds 		break;
15911da177e4SLinus Torvalds 	} while (len > 0);
15921da177e4SLinus Torvalds 
15931da177e4SLinus Torvalds 	if (user_recv) {
1594b03efcfbSDavid S. Miller 		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
15951da177e4SLinus Torvalds 			int chunk;
15961da177e4SLinus Torvalds 
15971da177e4SLinus Torvalds 			tp->ucopy.len = copied > 0 ? len : 0;
15981da177e4SLinus Torvalds 
15991da177e4SLinus Torvalds 			tcp_prequeue_process(sk);
16001da177e4SLinus Torvalds 
16011da177e4SLinus Torvalds 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
16021da177e4SLinus Torvalds 				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
16031da177e4SLinus Torvalds 				len -= chunk;
16041da177e4SLinus Torvalds 				copied += chunk;
16051da177e4SLinus Torvalds 			}
16061da177e4SLinus Torvalds 		}
16071da177e4SLinus Torvalds 
16081da177e4SLinus Torvalds 		tp->ucopy.task = NULL;
16091da177e4SLinus Torvalds 		tp->ucopy.len = 0;
16101da177e4SLinus Torvalds 	}
16111da177e4SLinus Torvalds 
16121a2449a8SChris Leech #ifdef CONFIG_NET_DMA
16131a2449a8SChris Leech 	if (tp->ucopy.dma_chan) {
16141a2449a8SChris Leech 		dma_cookie_t done, used;
16151a2449a8SChris Leech 
16161a2449a8SChris Leech 		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
16171a2449a8SChris Leech 
16181a2449a8SChris Leech 		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
16191a2449a8SChris Leech 						 tp->ucopy.dma_cookie, &done,
16201a2449a8SChris Leech 						 &used) == DMA_IN_PROGRESS) {
16211a2449a8SChris Leech 			/* do partial cleanup of sk_async_wait_queue */
16221a2449a8SChris Leech 			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
16231a2449a8SChris Leech 			       (dma_async_is_complete(skb->dma_cookie, done,
16241a2449a8SChris Leech 						      used) == DMA_SUCCESS)) {
16251a2449a8SChris Leech 				__skb_dequeue(&sk->sk_async_wait_queue);
16261a2449a8SChris Leech 				kfree_skb(skb);
16271a2449a8SChris Leech 			}
16281a2449a8SChris Leech 		}
16291a2449a8SChris Leech 
16301a2449a8SChris Leech 		/* Safe to free early-copied skbs now */
16311a2449a8SChris Leech 		__skb_queue_purge(&sk->sk_async_wait_queue);
16321a2449a8SChris Leech 		dma_chan_put(tp->ucopy.dma_chan);
16331a2449a8SChris Leech 		tp->ucopy.dma_chan = NULL;
16341a2449a8SChris Leech 	}
16351a2449a8SChris Leech 	if (tp->ucopy.pinned_list) {
16361a2449a8SChris Leech 		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
16371a2449a8SChris Leech 		tp->ucopy.pinned_list = NULL;
16381a2449a8SChris Leech 	}
16391a2449a8SChris Leech #endif
16401a2449a8SChris Leech 
16411da177e4SLinus Torvalds 	/* According to UNIX98, msg_name/msg_namelen are ignored
16421da177e4SLinus Torvalds 	 * on connected socket. I was just happy when found this 8) --ANK
16431da177e4SLinus Torvalds 	 */
16441da177e4SLinus Torvalds 
16451da177e4SLinus Torvalds 	/* Clean up data we have read: This will do ACK frames. */
16460e4b4992SChris Leech 	tcp_cleanup_rbuf(sk, copied);
16471da177e4SLinus Torvalds 
16481da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
16491da177e4SLinus Torvalds 	release_sock(sk);
16501da177e4SLinus Torvalds 	return copied;
16511da177e4SLinus Torvalds 
16521da177e4SLinus Torvalds out:
16531da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
16541da177e4SLinus Torvalds 	release_sock(sk);
16551da177e4SLinus Torvalds 	return err;
16561da177e4SLinus Torvalds 
16571da177e4SLinus Torvalds recv_urg:
16581da177e4SLinus Torvalds 	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
16591da177e4SLinus Torvalds 	goto out;
16601da177e4SLinus Torvalds }
16611da177e4SLinus Torvalds 
1662490d5046SIlpo Järvinen void tcp_set_state(struct sock *sk, int state)
1663490d5046SIlpo Järvinen {
1664490d5046SIlpo Järvinen 	int oldstate = sk->sk_state;
1665490d5046SIlpo Järvinen 
1666490d5046SIlpo Järvinen 	switch (state) {
1667490d5046SIlpo Järvinen 	case TCP_ESTABLISHED:
1668490d5046SIlpo Järvinen 		if (oldstate != TCP_ESTABLISHED)
1669490d5046SIlpo Järvinen 			TCP_INC_STATS(TCP_MIB_CURRESTAB);
1670490d5046SIlpo Järvinen 		break;
1671490d5046SIlpo Järvinen 
1672490d5046SIlpo Järvinen 	case TCP_CLOSE:
1673490d5046SIlpo Järvinen 		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1674490d5046SIlpo Järvinen 			TCP_INC_STATS(TCP_MIB_ESTABRESETS);
1675490d5046SIlpo Järvinen 
1676490d5046SIlpo Järvinen 		sk->sk_prot->unhash(sk);
1677490d5046SIlpo Järvinen 		if (inet_csk(sk)->icsk_bind_hash &&
1678490d5046SIlpo Järvinen 		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1679ab1e0a13SArnaldo Carvalho de Melo 			inet_put_port(sk);
1680490d5046SIlpo Järvinen 		/* fall through */
1681490d5046SIlpo Järvinen 	default:
1682490d5046SIlpo Järvinen 		if (oldstate==TCP_ESTABLISHED)
1683490d5046SIlpo Järvinen 			TCP_DEC_STATS(TCP_MIB_CURRESTAB);
1684490d5046SIlpo Järvinen 	}
1685490d5046SIlpo Järvinen 
1686490d5046SIlpo Järvinen 	/* Change state AFTER socket is unhashed to avoid closed
1687490d5046SIlpo Järvinen 	 * socket sitting in hash tables.
1688490d5046SIlpo Järvinen 	 */
1689490d5046SIlpo Järvinen 	sk->sk_state = state;
1690490d5046SIlpo Järvinen 
1691490d5046SIlpo Järvinen #ifdef STATE_TRACE
1692490d5046SIlpo Järvinen 	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
1693490d5046SIlpo Järvinen #endif
1694490d5046SIlpo Järvinen }
1695490d5046SIlpo Järvinen EXPORT_SYMBOL_GPL(tcp_set_state);
1696490d5046SIlpo Järvinen 
16971da177e4SLinus Torvalds /*
16981da177e4SLinus Torvalds  *	State processing on a close. This implements the state shift for
16991da177e4SLinus Torvalds  *	sending our FIN frame. Note that we only send a FIN for some
17001da177e4SLinus Torvalds  *	states. A shutdown() may have already sent the FIN, or we may be
17011da177e4SLinus Torvalds  *	closed.
17021da177e4SLinus Torvalds  */
17031da177e4SLinus Torvalds 
17049b5b5cffSArjan van de Ven static const unsigned char new_state[16] = {
17051da177e4SLinus Torvalds   /* current state:        new state:      action:	*/
17061da177e4SLinus Torvalds   /* (Invalid)		*/ TCP_CLOSE,
17071da177e4SLinus Torvalds   /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
17081da177e4SLinus Torvalds   /* TCP_SYN_SENT	*/ TCP_CLOSE,
17091da177e4SLinus Torvalds   /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
17101da177e4SLinus Torvalds   /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
17111da177e4SLinus Torvalds   /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
17121da177e4SLinus Torvalds   /* TCP_TIME_WAIT	*/ TCP_CLOSE,
17131da177e4SLinus Torvalds   /* TCP_CLOSE		*/ TCP_CLOSE,
17141da177e4SLinus Torvalds   /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
17151da177e4SLinus Torvalds   /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
17161da177e4SLinus Torvalds   /* TCP_LISTEN		*/ TCP_CLOSE,
17171da177e4SLinus Torvalds   /* TCP_CLOSING	*/ TCP_CLOSING,
17181da177e4SLinus Torvalds };
17191da177e4SLinus Torvalds 
17201da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk)
17211da177e4SLinus Torvalds {
17221da177e4SLinus Torvalds 	int next = (int)new_state[sk->sk_state];
17231da177e4SLinus Torvalds 	int ns = next & TCP_STATE_MASK;
17241da177e4SLinus Torvalds 
17251da177e4SLinus Torvalds 	tcp_set_state(sk, ns);
17261da177e4SLinus Torvalds 
17271da177e4SLinus Torvalds 	return next & TCP_ACTION_FIN;
17281da177e4SLinus Torvalds }
17291da177e4SLinus Torvalds 
17301da177e4SLinus Torvalds /*
17311da177e4SLinus Torvalds  *	Shutdown the sending side of a connection. Much like close except
17321f29b058SSatoru SATOH  *	that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
17331da177e4SLinus Torvalds  */
17341da177e4SLinus Torvalds 
17351da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how)
17361da177e4SLinus Torvalds {
17371da177e4SLinus Torvalds 	/*	We need to grab some memory, and put together a FIN,
17381da177e4SLinus Torvalds 	 *	and then put it into the queue to be sent.
17391da177e4SLinus Torvalds 	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
17401da177e4SLinus Torvalds 	 */
17411da177e4SLinus Torvalds 	if (!(how & SEND_SHUTDOWN))
17421da177e4SLinus Torvalds 		return;
17431da177e4SLinus Torvalds 
17441da177e4SLinus Torvalds 	/* If we've already sent a FIN, or it's a closed state, skip this. */
17451da177e4SLinus Torvalds 	if ((1 << sk->sk_state) &
17461da177e4SLinus Torvalds 	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
17471da177e4SLinus Torvalds 	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
17481da177e4SLinus Torvalds 		/* Clear out any half completed packets.  FIN if needed. */
17491da177e4SLinus Torvalds 		if (tcp_close_state(sk))
17501da177e4SLinus Torvalds 			tcp_send_fin(sk);
17511da177e4SLinus Torvalds 	}
17521da177e4SLinus Torvalds }
17531da177e4SLinus Torvalds 
17541da177e4SLinus Torvalds void tcp_close(struct sock *sk, long timeout)
17551da177e4SLinus Torvalds {
17561da177e4SLinus Torvalds 	struct sk_buff *skb;
17571da177e4SLinus Torvalds 	int data_was_unread = 0;
175875c2d907SHerbert Xu 	int state;
17591da177e4SLinus Torvalds 
17601da177e4SLinus Torvalds 	lock_sock(sk);
17611da177e4SLinus Torvalds 	sk->sk_shutdown = SHUTDOWN_MASK;
17621da177e4SLinus Torvalds 
17631da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN) {
17641da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
17651da177e4SLinus Torvalds 
17661da177e4SLinus Torvalds 		/* Special case. */
17670a5578cfSArnaldo Carvalho de Melo 		inet_csk_listen_stop(sk);
17681da177e4SLinus Torvalds 
17691da177e4SLinus Torvalds 		goto adjudge_to_death;
17701da177e4SLinus Torvalds 	}
17711da177e4SLinus Torvalds 
17721da177e4SLinus Torvalds 	/*  We need to flush the recv. buffs.  We do this only on the
17731da177e4SLinus Torvalds 	 *  descriptor close, not protocol-sourced closes, because the
17741da177e4SLinus Torvalds 	 *  reader process may not have drained the data yet!
17751da177e4SLinus Torvalds 	 */
17761da177e4SLinus Torvalds 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
17771da177e4SLinus Torvalds 		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1778aa8223c7SArnaldo Carvalho de Melo 			  tcp_hdr(skb)->fin;
17791da177e4SLinus Torvalds 		data_was_unread += len;
17801da177e4SLinus Torvalds 		__kfree_skb(skb);
17811da177e4SLinus Torvalds 	}
17821da177e4SLinus Torvalds 
17833ab224beSHideo Aoki 	sk_mem_reclaim(sk);
17841da177e4SLinus Torvalds 
178565bb723cSGerrit Renker 	/* As outlined in RFC 2525, section 2.17, we send a RST here because
178665bb723cSGerrit Renker 	 * data was lost. To witness the awful effects of the old behavior of
178765bb723cSGerrit Renker 	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
178865bb723cSGerrit Renker 	 * GET in an FTP client, suspend the process, wait for the client to
178965bb723cSGerrit Renker 	 * advertise a zero window, then kill -9 the FTP client, wheee...
179065bb723cSGerrit Renker 	 * Note: timeout is always zero in such a case.
17911da177e4SLinus Torvalds 	 */
17921da177e4SLinus Torvalds 	if (data_was_unread) {
17931da177e4SLinus Torvalds 		/* Unread data was tossed, zap the connection. */
17941da177e4SLinus Torvalds 		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
17951da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
17961da177e4SLinus Torvalds 		tcp_send_active_reset(sk, GFP_KERNEL);
17971da177e4SLinus Torvalds 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
17981da177e4SLinus Torvalds 		/* Check zero linger _after_ checking for unread data. */
17991da177e4SLinus Torvalds 		sk->sk_prot->disconnect(sk, 0);
18001da177e4SLinus Torvalds 		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
18011da177e4SLinus Torvalds 	} else if (tcp_close_state(sk)) {
18021da177e4SLinus Torvalds 		/* We FIN if the application ate all the data before
18031da177e4SLinus Torvalds 		 * zapping the connection.
18041da177e4SLinus Torvalds 		 */
18051da177e4SLinus Torvalds 
18061da177e4SLinus Torvalds 		/* RED-PEN. Formally speaking, we have broken TCP state
18071da177e4SLinus Torvalds 		 * machine. State transitions:
18081da177e4SLinus Torvalds 		 *
18091da177e4SLinus Torvalds 		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
18101da177e4SLinus Torvalds 		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
18111da177e4SLinus Torvalds 		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
18121da177e4SLinus Torvalds 		 *
18131da177e4SLinus Torvalds 		 * are legal only when FIN has been sent (i.e. in window),
18141da177e4SLinus Torvalds 		 * rather than queued out of window. Purists blame.
18151da177e4SLinus Torvalds 		 *
18161da177e4SLinus Torvalds 		 * F.e. "RFC state" is ESTABLISHED,
18171da177e4SLinus Torvalds 		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
18181da177e4SLinus Torvalds 		 *
18191da177e4SLinus Torvalds 		 * The visible declinations are that sometimes
18201da177e4SLinus Torvalds 		 * we enter time-wait state, when it is not required really
18211da177e4SLinus Torvalds 		 * (harmless), do not send active resets, when they are
18221da177e4SLinus Torvalds 		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
18231da177e4SLinus Torvalds 		 * they look as CLOSING or LAST_ACK for Linux)
18241da177e4SLinus Torvalds 		 * Probably, I missed some more holelets.
18251da177e4SLinus Torvalds 		 * 						--ANK
18261da177e4SLinus Torvalds 		 */
18271da177e4SLinus Torvalds 		tcp_send_fin(sk);
18281da177e4SLinus Torvalds 	}
18291da177e4SLinus Torvalds 
18301da177e4SLinus Torvalds 	sk_stream_wait_close(sk, timeout);
18311da177e4SLinus Torvalds 
18321da177e4SLinus Torvalds adjudge_to_death:
183375c2d907SHerbert Xu 	state = sk->sk_state;
183475c2d907SHerbert Xu 	sock_hold(sk);
183575c2d907SHerbert Xu 	sock_orphan(sk);
183675c2d907SHerbert Xu 	atomic_inc(sk->sk_prot->orphan_count);
183775c2d907SHerbert Xu 
18381da177e4SLinus Torvalds 	/* It is the last release_sock in its life. It will remove backlog. */
18391da177e4SLinus Torvalds 	release_sock(sk);
18401da177e4SLinus Torvalds 
18411da177e4SLinus Torvalds 
18421da177e4SLinus Torvalds 	/* Now socket is owned by kernel and we acquire BH lock
18431da177e4SLinus Torvalds 	   to finish close. No need to check for user refs.
18441da177e4SLinus Torvalds 	 */
18451da177e4SLinus Torvalds 	local_bh_disable();
18461da177e4SLinus Torvalds 	bh_lock_sock(sk);
18471da177e4SLinus Torvalds 	BUG_TRAP(!sock_owned_by_user(sk));
18481da177e4SLinus Torvalds 
184975c2d907SHerbert Xu 	/* Have we already been destroyed by a softirq or backlog? */
185075c2d907SHerbert Xu 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
185175c2d907SHerbert Xu 		goto out;
18521da177e4SLinus Torvalds 
18531da177e4SLinus Torvalds 	/*	This is a (useful) BSD violating of the RFC. There is a
18541da177e4SLinus Torvalds 	 *	problem with TCP as specified in that the other end could
18551da177e4SLinus Torvalds 	 *	keep a socket open forever with no application left this end.
18561da177e4SLinus Torvalds 	 *	We use a 3 minute timeout (about the same as BSD) then kill
18571da177e4SLinus Torvalds 	 *	our end. If they send after that then tough - BUT: long enough
18581da177e4SLinus Torvalds 	 *	that we won't make the old 4*rto = almost no time - whoops
18591da177e4SLinus Torvalds 	 *	reset mistake.
18601da177e4SLinus Torvalds 	 *
18611da177e4SLinus Torvalds 	 *	Nope, it was not mistake. It is really desired behaviour
18621da177e4SLinus Torvalds 	 *	f.e. on http servers, when such sockets are useless, but
18631da177e4SLinus Torvalds 	 *	consume significant resources. Let's do it with special
18641da177e4SLinus Torvalds 	 *	linger2	option.					--ANK
18651da177e4SLinus Torvalds 	 */
18661da177e4SLinus Torvalds 
18671da177e4SLinus Torvalds 	if (sk->sk_state == TCP_FIN_WAIT2) {
18681da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
18691da177e4SLinus Torvalds 		if (tp->linger2 < 0) {
18701da177e4SLinus Torvalds 			tcp_set_state(sk, TCP_CLOSE);
18711da177e4SLinus Torvalds 			tcp_send_active_reset(sk, GFP_ATOMIC);
18721da177e4SLinus Torvalds 			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
18731da177e4SLinus Torvalds 		} else {
1874463c84b9SArnaldo Carvalho de Melo 			const int tmo = tcp_fin_time(sk);
18751da177e4SLinus Torvalds 
18761da177e4SLinus Torvalds 			if (tmo > TCP_TIMEWAIT_LEN) {
187752499afeSDavid S. Miller 				inet_csk_reset_keepalive_timer(sk,
187852499afeSDavid S. Miller 						tmo - TCP_TIMEWAIT_LEN);
18791da177e4SLinus Torvalds 			} else {
18801da177e4SLinus Torvalds 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
18811da177e4SLinus Torvalds 				goto out;
18821da177e4SLinus Torvalds 			}
18831da177e4SLinus Torvalds 		}
18841da177e4SLinus Torvalds 	}
18851da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
18863ab224beSHideo Aoki 		sk_mem_reclaim(sk);
1887e4fd5da3SPavel Emelianov 		if (tcp_too_many_orphans(sk,
1888e4fd5da3SPavel Emelianov 				atomic_read(sk->sk_prot->orphan_count))) {
18891da177e4SLinus Torvalds 			if (net_ratelimit())
18901da177e4SLinus Torvalds 				printk(KERN_INFO "TCP: too many of orphaned "
18911da177e4SLinus Torvalds 				       "sockets\n");
18921da177e4SLinus Torvalds 			tcp_set_state(sk, TCP_CLOSE);
18931da177e4SLinus Torvalds 			tcp_send_active_reset(sk, GFP_ATOMIC);
18941da177e4SLinus Torvalds 			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
18951da177e4SLinus Torvalds 		}
18961da177e4SLinus Torvalds 	}
18971da177e4SLinus Torvalds 
18981da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE)
18990a5578cfSArnaldo Carvalho de Melo 		inet_csk_destroy_sock(sk);
19001da177e4SLinus Torvalds 	/* Otherwise, socket is reprieved until protocol close. */
19011da177e4SLinus Torvalds 
19021da177e4SLinus Torvalds out:
19031da177e4SLinus Torvalds 	bh_unlock_sock(sk);
19041da177e4SLinus Torvalds 	local_bh_enable();
19051da177e4SLinus Torvalds 	sock_put(sk);
19061da177e4SLinus Torvalds }
19071da177e4SLinus Torvalds 
19081da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */
19091da177e4SLinus Torvalds 
19101da177e4SLinus Torvalds static inline int tcp_need_reset(int state)
19111da177e4SLinus Torvalds {
19121da177e4SLinus Torvalds 	return (1 << state) &
19131da177e4SLinus Torvalds 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
19141da177e4SLinus Torvalds 		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
19151da177e4SLinus Torvalds }
19161da177e4SLinus Torvalds 
/* Abort the connection and reset the socket to a pristine CLOSE state
 * so it can be reused (e.g. connect() after connect() failure, or an
 * AF_UNSPEC "disconnect").  Implements the ABORT function of RFC 793:
 * a RST is sent when the old state requires one.  Returns 0 (err is
 * never set to anything else here); errors are reported to the user
 * via sk->sk_err / sk_error_report().
 */
int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		/* Listener: just flush the accept queue, nothing to reset. */
		inet_csk_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 * states
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	/* Drop every queue: timers, receive, write, out-of-order (and the
	 * async DMA wait queue when NET_DMA is configured).
	 */
	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	tcp_write_queue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	inet->dport = 0;

	/* Keep a user-bound source address; otherwise forget it. */
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	/* Step write_seq past the old window so a new incarnation's
	 * sequence numbers can't be confused with the old one's; avoid
	 * the forbidden value 0.
	 */
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tp->bytes_acked = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	tcp_init_send_head(sk);
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
	__sk_dst_reset(sk);

	/* A bound socket must keep its bind bucket across disconnect. */
	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
19791da177e4SLinus Torvalds 
19801da177e4SLinus Torvalds /*
19811da177e4SLinus Torvalds  *	Socket option code for TCP.
19821da177e4SLinus Torvalds  */
/* Worker for tcp_setsockopt()/compat_tcp_setsockopt(): handle all
 * SOL_TCP options.  All options take an int from userspace except
 * TCP_CONGESTION (a string) and TCP_MD5SIG (a parsed structure).
 * Runs under lock_sock(); returns 0 or a negative errno.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int val;
	int err = 0;

	/* This is a string value all the others are int's */
	if (optname == TCP_CONGESTION) {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		/* Copy at most TCP_CA_NAME_MAX-1 bytes so the NUL below
		 * always fits.
		 */
		val = strncpy_from_user(name, optval,
					min(TCP_CA_NAME_MAX-1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name);
		release_sock(sk);
		return err;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than interface MTU won't take effect. However
		 * at the point when this call is done we typically don't yet
		 * know which interface is going to be used */
		if (val < 8 || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option on corked socket is remembered, but
			 * it is not activated until cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue.  This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle&TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk);
		}
		break;

	case TCP_KEEPIDLE:
		/* Idle time (seconds) before the first keepalive probe. */
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				/* Keepalive already armed: re-arm the timer
				 * for the remainder of the new idle period.
				 */
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		/* Interval (seconds) between keepalive probes. */
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		/* Number of unanswered probes before the connection dies. */
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		/* Maximum SYN (re)transmissions for an active open. */
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;

	case TCP_LINGER2:
		/* FIN_WAIT2 lifetime: <0 disables the timeout entirely,
		 * values above the sysctl cap are stored as 0 (use default).
		 */
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		/* Defer waking the accepting process until data arrives,
		 * clamped to the implementation maximum.
		 */
		if (val < 0) {
			err = -EINVAL;
		} else {
			if (val > MAX_TCP_ACCEPT_DEFERRED)
				val = MAX_TCP_ACCEPT_DEFERRED;
			icsk->icsk_accept_queue.rskq_defer_accept = val;
		}
		break;

	case TCP_WINDOW_CLAMP:
		/* Clearing the clamp is only allowed before the connection
		 * is established; nonzero values are floored at half the
		 * minimum receive buffer.
		 */
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
						SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		/* val==0 enables delayed-ACK "pingpong" mode; nonzero val
		 * leaves it and flushes any ACK that is already scheduled.
		 * An even val re-enters pingpong after the flush, making
		 * quickack a one-shot.
		 */
		if (!val) {
			icsk->icsk_ack.pingpong = 1;
		} else {
			icsk->icsk_ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				tcp_cleanup_rbuf(sk, 1);
				if (!(val & 1))
					icsk->icsk_ack.pingpong = 1;
			}
		}
		break;

#ifdef CONFIG_TCP_MD5SIG
	case TCP_MD5SIG:
		/* Read the IP->Key mappings from userspace */
		err = tp->af_specific->md5_parse(sk, optval, optlen);
		break;
#endif

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
21671da177e4SLinus Torvalds 
21683fdadf7dSDmitry Mishin int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
21693fdadf7dSDmitry Mishin 		   int optlen)
21703fdadf7dSDmitry Mishin {
21713fdadf7dSDmitry Mishin 	struct inet_connection_sock *icsk = inet_csk(sk);
21723fdadf7dSDmitry Mishin 
21733fdadf7dSDmitry Mishin 	if (level != SOL_TCP)
21743fdadf7dSDmitry Mishin 		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
21753fdadf7dSDmitry Mishin 						     optval, optlen);
21763fdadf7dSDmitry Mishin 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
21773fdadf7dSDmitry Mishin }
21783fdadf7dSDmitry Mishin 
21793fdadf7dSDmitry Mishin #ifdef CONFIG_COMPAT
2180543d9cfeSArnaldo Carvalho de Melo int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2181543d9cfeSArnaldo Carvalho de Melo 			  char __user *optval, int optlen)
21823fdadf7dSDmitry Mishin {
2183dec73ff0SArnaldo Carvalho de Melo 	if (level != SOL_TCP)
2184dec73ff0SArnaldo Carvalho de Melo 		return inet_csk_compat_setsockopt(sk, level, optname,
2185dec73ff0SArnaldo Carvalho de Melo 						  optval, optlen);
21863fdadf7dSDmitry Mishin 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
21873fdadf7dSDmitry Mishin }
2188543d9cfeSArnaldo Carvalho de Melo 
2189543d9cfeSArnaldo Carvalho de Melo EXPORT_SYMBOL(compat_tcp_setsockopt);
21903fdadf7dSDmitry Mishin #endif
21913fdadf7dSDmitry Mishin 
21921da177e4SLinus Torvalds /* Return information about state of tcp endpoint in API format. */
/* Fill *info with a snapshot of the connection for the TCP_INFO
 * getsockopt and inet_diag.  Unused fields are zeroed first, so the
 * structure may be safely extended.
 */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	/* Negotiated TCP options, reported as TCPI_OPT_* flag bits. */
	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tcp_is_sack(tp))
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags&TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	/* Timer and MSS values, converted from jiffies to microseconds. */
	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	/* For listeners, reuse the unacked/sacked fields to report the
	 * accept-queue occupancy and limit (as ss/netstat expect).
	 */
	if (sk->sk_state == TCP_LISTEN) {
		info->tcpi_unacked = sk->sk_ack_backlog;
		info->tcpi_sacked = sk->sk_max_ack_backlog;
	} else {
		info->tcpi_unacked = tp->packets_out;
		info->tcpi_sacked = tp->sacked_out;
	}
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	/* srtt is stored <<3 and mdev <<2 internally; shift the scaled
	 * values back after converting to microseconds.
	 */
	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
22561da177e4SLinus Torvalds 
22573fdadf7dSDmitry Mishin static int do_tcp_getsockopt(struct sock *sk, int level,
22583fdadf7dSDmitry Mishin 		int optname, char __user *optval, int __user *optlen)
22591da177e4SLinus Torvalds {
2260295f7324SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
22611da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
22621da177e4SLinus Torvalds 	int val, len;
22631da177e4SLinus Torvalds 
22641da177e4SLinus Torvalds 	if (get_user(len, optlen))
22651da177e4SLinus Torvalds 		return -EFAULT;
22661da177e4SLinus Torvalds 
22671da177e4SLinus Torvalds 	len = min_t(unsigned int, len, sizeof(int));
22681da177e4SLinus Torvalds 
22691da177e4SLinus Torvalds 	if (len < 0)
22701da177e4SLinus Torvalds 		return -EINVAL;
22711da177e4SLinus Torvalds 
22721da177e4SLinus Torvalds 	switch (optname) {
22731da177e4SLinus Torvalds 	case TCP_MAXSEG:
2274c1b4a7e6SDavid S. Miller 		val = tp->mss_cache;
22751da177e4SLinus Torvalds 		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
22761da177e4SLinus Torvalds 			val = tp->rx_opt.user_mss;
22771da177e4SLinus Torvalds 		break;
22781da177e4SLinus Torvalds 	case TCP_NODELAY:
22791da177e4SLinus Torvalds 		val = !!(tp->nonagle&TCP_NAGLE_OFF);
22801da177e4SLinus Torvalds 		break;
22811da177e4SLinus Torvalds 	case TCP_CORK:
22821da177e4SLinus Torvalds 		val = !!(tp->nonagle&TCP_NAGLE_CORK);
22831da177e4SLinus Torvalds 		break;
22841da177e4SLinus Torvalds 	case TCP_KEEPIDLE:
22851da177e4SLinus Torvalds 		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
22861da177e4SLinus Torvalds 		break;
22871da177e4SLinus Torvalds 	case TCP_KEEPINTVL:
22881da177e4SLinus Torvalds 		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
22891da177e4SLinus Torvalds 		break;
22901da177e4SLinus Torvalds 	case TCP_KEEPCNT:
22911da177e4SLinus Torvalds 		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
22921da177e4SLinus Torvalds 		break;
22931da177e4SLinus Torvalds 	case TCP_SYNCNT:
2294295f7324SArnaldo Carvalho de Melo 		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
22951da177e4SLinus Torvalds 		break;
22961da177e4SLinus Torvalds 	case TCP_LINGER2:
22971da177e4SLinus Torvalds 		val = tp->linger2;
22981da177e4SLinus Torvalds 		if (val >= 0)
22991da177e4SLinus Torvalds 			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
23001da177e4SLinus Torvalds 		break;
23011da177e4SLinus Torvalds 	case TCP_DEFER_ACCEPT:
2302ec3c0982SPatrick McManus 		val = icsk->icsk_accept_queue.rskq_defer_accept;
23031da177e4SLinus Torvalds 		break;
23041da177e4SLinus Torvalds 	case TCP_WINDOW_CLAMP:
23051da177e4SLinus Torvalds 		val = tp->window_clamp;
23061da177e4SLinus Torvalds 		break;
23071da177e4SLinus Torvalds 	case TCP_INFO: {
23081da177e4SLinus Torvalds 		struct tcp_info info;
23091da177e4SLinus Torvalds 
23101da177e4SLinus Torvalds 		if (get_user(len, optlen))
23111da177e4SLinus Torvalds 			return -EFAULT;
23121da177e4SLinus Torvalds 
23131da177e4SLinus Torvalds 		tcp_get_info(sk, &info);
23141da177e4SLinus Torvalds 
23151da177e4SLinus Torvalds 		len = min_t(unsigned int, len, sizeof(info));
23161da177e4SLinus Torvalds 		if (put_user(len, optlen))
23171da177e4SLinus Torvalds 			return -EFAULT;
23181da177e4SLinus Torvalds 		if (copy_to_user(optval, &info, len))
23191da177e4SLinus Torvalds 			return -EFAULT;
23201da177e4SLinus Torvalds 		return 0;
23211da177e4SLinus Torvalds 	}
23221da177e4SLinus Torvalds 	case TCP_QUICKACK:
2323295f7324SArnaldo Carvalho de Melo 		val = !icsk->icsk_ack.pingpong;
23241da177e4SLinus Torvalds 		break;
23255f8ef48dSStephen Hemminger 
23265f8ef48dSStephen Hemminger 	case TCP_CONGESTION:
23275f8ef48dSStephen Hemminger 		if (get_user(len, optlen))
23285f8ef48dSStephen Hemminger 			return -EFAULT;
23295f8ef48dSStephen Hemminger 		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
23305f8ef48dSStephen Hemminger 		if (put_user(len, optlen))
23315f8ef48dSStephen Hemminger 			return -EFAULT;
23326687e988SArnaldo Carvalho de Melo 		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
23335f8ef48dSStephen Hemminger 			return -EFAULT;
23345f8ef48dSStephen Hemminger 		return 0;
23351da177e4SLinus Torvalds 	default:
23361da177e4SLinus Torvalds 		return -ENOPROTOOPT;
23373ff50b79SStephen Hemminger 	}
23381da177e4SLinus Torvalds 
23391da177e4SLinus Torvalds 	if (put_user(len, optlen))
23401da177e4SLinus Torvalds 		return -EFAULT;
23411da177e4SLinus Torvalds 	if (copy_to_user(optval, &val, len))
23421da177e4SLinus Torvalds 		return -EFAULT;
23431da177e4SLinus Torvalds 	return 0;
23441da177e4SLinus Torvalds }
23451da177e4SLinus Torvalds 
23463fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
23473fdadf7dSDmitry Mishin 		   int __user *optlen)
23483fdadf7dSDmitry Mishin {
23493fdadf7dSDmitry Mishin 	struct inet_connection_sock *icsk = inet_csk(sk);
23503fdadf7dSDmitry Mishin 
23513fdadf7dSDmitry Mishin 	if (level != SOL_TCP)
23523fdadf7dSDmitry Mishin 		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
23533fdadf7dSDmitry Mishin 						     optval, optlen);
23543fdadf7dSDmitry Mishin 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
23553fdadf7dSDmitry Mishin }
23563fdadf7dSDmitry Mishin 
#ifdef CONFIG_COMPAT
/* 32-bit compat getsockopt(): TCP-level options share the native
 * implementation; anything else goes through the generic
 * inet_connection_sock compat helper.
 */
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level == SOL_TCP)
		return do_tcp_getsockopt(sk, level, optname, optval, optlen);

	return inet_csk_compat_getsockopt(sk, level, optname,
					  optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif
23691da177e4SLinus Torvalds 
/* Software segmentation of a TSO/GSO super-packet into MSS-sized TCP
 * segments.  @skb's transport header must point at the TCP header.
 * Returns the list of segments from skb_segment(), NULL when the
 * packet may be passed through as-is (trusted GSO), or ERR_PTR on
 * malformed input.
 */
struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int len;

	/* Need at least a minimal TCP header in the linear area. */
	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;	/* header length incl. options, in bytes */
	if (thlen < sizeof(*th))
		goto out;

	/* Pull the full header (with options) into the linear area. */
	if (!pskb_may_pull(skb, thlen))
		goto out;

	/* One's complement of the pre-pull length, folded to 16 bits;
	 * used below for incremental checksum adjustment when the
	 * per-segment length replaces the original super-packet length.
	 */
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;
		int mss;

		/* Reject unknown GSO type bits, and require a TCP type. */
		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		mss = skb_shinfo(skb)->gso_size;
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		/* NULL tells the caller the skb can be sent unsegmented. */
		segs = NULL;
		goto out;
	}

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	len = skb_shinfo(skb)->gso_size;
	/* Checksum delta: old length removed, new (header + mss) added. */
	delta = htonl(oldlen + (thlen + len));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	/* Fix up every segment except the last: clear FIN/PSH (only the
	 * final segment may carry them), adjust the checksum for the new
	 * length, and assign the advancing sequence number.
	 */
	do {
		th->fin = th->psh = 0;

		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				       (__force u32)delta));
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += len;
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;	/* CWR is only meaningful on the first segment */
	} while (skb->next);

	/* Last segment: its payload may be shorter than mss, so compute
	 * the delta from its actual linear + paged length.
	 */
	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));

out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);
2456f4c50d99SHerbert Xu 
2457cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
/* Reference count of MD5 signature pool users; both it and the pool
 * pointer below are protected by tcp_md5sig_pool_lock.
 */
static unsigned long tcp_md5sig_users;
/* Per-cpu array of MD5 crypto contexts, allocated on first use. */
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2461cfb6eeb4SYOSHIFUJI Hideaki 
2462cfb6eeb4SYOSHIFUJI Hideaki static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2463cfb6eeb4SYOSHIFUJI Hideaki {
2464cfb6eeb4SYOSHIFUJI Hideaki 	int cpu;
2465cfb6eeb4SYOSHIFUJI Hideaki 	for_each_possible_cpu(cpu) {
2466cfb6eeb4SYOSHIFUJI Hideaki 		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2467cfb6eeb4SYOSHIFUJI Hideaki 		if (p) {
2468cfb6eeb4SYOSHIFUJI Hideaki 			if (p->md5_desc.tfm)
2469cfb6eeb4SYOSHIFUJI Hideaki 				crypto_free_hash(p->md5_desc.tfm);
2470cfb6eeb4SYOSHIFUJI Hideaki 			kfree(p);
2471cfb6eeb4SYOSHIFUJI Hideaki 			p = NULL;
2472cfb6eeb4SYOSHIFUJI Hideaki 		}
2473cfb6eeb4SYOSHIFUJI Hideaki 	}
2474cfb6eeb4SYOSHIFUJI Hideaki 	free_percpu(pool);
2475cfb6eeb4SYOSHIFUJI Hideaki }
2476cfb6eeb4SYOSHIFUJI Hideaki 
/* Drop one reference on the global MD5 signature pool; the last user
 * frees it.  The pool pointer is detached under the lock but freed
 * outside it, since the teardown need not run with the lock held.
 */
void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool = NULL;

	spin_lock_bh(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		/* Last user: take ownership of the pool for freeing. */
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock_bh(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}

EXPORT_SYMBOL(tcp_free_md5sig_pool);
2492cfb6eeb4SYOSHIFUJI Hideaki 
2493f5b99bcdSAdrian Bunk static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
2494cfb6eeb4SYOSHIFUJI Hideaki {
2495cfb6eeb4SYOSHIFUJI Hideaki 	int cpu;
2496cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_pool **pool;
2497cfb6eeb4SYOSHIFUJI Hideaki 
2498cfb6eeb4SYOSHIFUJI Hideaki 	pool = alloc_percpu(struct tcp_md5sig_pool *);
2499cfb6eeb4SYOSHIFUJI Hideaki 	if (!pool)
2500cfb6eeb4SYOSHIFUJI Hideaki 		return NULL;
2501cfb6eeb4SYOSHIFUJI Hideaki 
2502cfb6eeb4SYOSHIFUJI Hideaki 	for_each_possible_cpu(cpu) {
2503cfb6eeb4SYOSHIFUJI Hideaki 		struct tcp_md5sig_pool *p;
2504cfb6eeb4SYOSHIFUJI Hideaki 		struct crypto_hash *hash;
2505cfb6eeb4SYOSHIFUJI Hideaki 
2506cfb6eeb4SYOSHIFUJI Hideaki 		p = kzalloc(sizeof(*p), GFP_KERNEL);
2507cfb6eeb4SYOSHIFUJI Hideaki 		if (!p)
2508cfb6eeb4SYOSHIFUJI Hideaki 			goto out_free;
2509cfb6eeb4SYOSHIFUJI Hideaki 		*per_cpu_ptr(pool, cpu) = p;
2510cfb6eeb4SYOSHIFUJI Hideaki 
2511cfb6eeb4SYOSHIFUJI Hideaki 		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2512cfb6eeb4SYOSHIFUJI Hideaki 		if (!hash || IS_ERR(hash))
2513cfb6eeb4SYOSHIFUJI Hideaki 			goto out_free;
2514cfb6eeb4SYOSHIFUJI Hideaki 
2515cfb6eeb4SYOSHIFUJI Hideaki 		p->md5_desc.tfm = hash;
2516cfb6eeb4SYOSHIFUJI Hideaki 	}
2517cfb6eeb4SYOSHIFUJI Hideaki 	return pool;
2518cfb6eeb4SYOSHIFUJI Hideaki out_free:
2519cfb6eeb4SYOSHIFUJI Hideaki 	__tcp_free_md5sig_pool(pool);
2520cfb6eeb4SYOSHIFUJI Hideaki 	return NULL;
2521cfb6eeb4SYOSHIFUJI Hideaki }
2522cfb6eeb4SYOSHIFUJI Hideaki 
/* Take a reference on the global MD5 pool, allocating it on first use.
 *
 * Allocation may sleep, so it is done with the lock dropped: the
 * first caller (users 0 -> 1) performs it, while any concurrent
 * caller that sees users > 0 but no pool yet backs off and retries
 * until the pool is published.  If two allocations race, the loser's
 * pool is freed and the published one is returned.  Returns NULL
 * (with the reference dropped again) if allocation fails.
 */
struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool;
	int alloc = 0;

retry:
	spin_lock_bh(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		/* First user: we are responsible for allocating. */
		alloc = 1;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		/* Someone else is allocating right now; spin-wait. */
		tcp_md5sig_users--;
		spin_unlock_bh(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock_bh(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* we cannot hold spinlock here because this may sleep. */
		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
		spin_lock_bh(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* oops, it has already been assigned. */
			spin_unlock_bh(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock_bh(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}

EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2565cfb6eeb4SYOSHIFUJI Hideaki 
2566cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2567cfb6eeb4SYOSHIFUJI Hideaki {
2568cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_pool **p;
25692c4f6219SDavid S. Miller 	spin_lock_bh(&tcp_md5sig_pool_lock);
2570cfb6eeb4SYOSHIFUJI Hideaki 	p = tcp_md5sig_pool;
2571cfb6eeb4SYOSHIFUJI Hideaki 	if (p)
2572cfb6eeb4SYOSHIFUJI Hideaki 		tcp_md5sig_users++;
25732c4f6219SDavid S. Miller 	spin_unlock_bh(&tcp_md5sig_pool_lock);
2574cfb6eeb4SYOSHIFUJI Hideaki 	return (p ? *per_cpu_ptr(p, cpu) : NULL);
2575cfb6eeb4SYOSHIFUJI Hideaki }
2576cfb6eeb4SYOSHIFUJI Hideaki 
2577cfb6eeb4SYOSHIFUJI Hideaki EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2578cfb6eeb4SYOSHIFUJI Hideaki 
/* Drop the reference taken by __tcp_get_md5sig_pool(); the last
 * reference frees the pool via tcp_free_md5sig_pool().
 */
void __tcp_put_md5sig_pool(void)
{
	tcp_free_md5sig_pool();
}

EXPORT_SYMBOL(__tcp_put_md5sig_pool);
#endif
2586cfb6eeb4SYOSHIFUJI Hideaki 
25874ac02babSAndi Kleen void tcp_done(struct sock *sk)
25884ac02babSAndi Kleen {
25894ac02babSAndi Kleen 	if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
25904ac02babSAndi Kleen 		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
25914ac02babSAndi Kleen 
25924ac02babSAndi Kleen 	tcp_set_state(sk, TCP_CLOSE);
25934ac02babSAndi Kleen 	tcp_clear_xmit_timers(sk);
25944ac02babSAndi Kleen 
25954ac02babSAndi Kleen 	sk->sk_shutdown = SHUTDOWN_MASK;
25964ac02babSAndi Kleen 
25974ac02babSAndi Kleen 	if (!sock_flag(sk, SOCK_DEAD))
25984ac02babSAndi Kleen 		sk->sk_state_change(sk);
25994ac02babSAndi Kleen 	else
26004ac02babSAndi Kleen 		inet_csk_destroy_sock(sk);
26014ac02babSAndi Kleen }
26024ac02babSAndi Kleen EXPORT_SYMBOL_GPL(tcp_done);
26034ac02babSAndi Kleen 
26045f8ef48dSStephen Hemminger extern struct tcp_congestion_ops tcp_reno;
26051da177e4SLinus Torvalds 
26061da177e4SLinus Torvalds static __initdata unsigned long thash_entries;
26071da177e4SLinus Torvalds static int __init set_thash_entries(char *str)
26081da177e4SLinus Torvalds {
26091da177e4SLinus Torvalds 	if (!str)
26101da177e4SLinus Torvalds 		return 0;
26111da177e4SLinus Torvalds 	thash_entries = simple_strtoul(str, &str, 0);
26121da177e4SLinus Torvalds 	return 1;
26131da177e4SLinus Torvalds }
26141da177e4SLinus Torvalds __setup("thash_entries=", set_thash_entries);
26151da177e4SLinus Torvalds 
/* Boot-time TCP initialization: allocate the established and bind
 * hash tables, scale orphan/time-wait/backlog limits and the tcp_mem
 * sysctls to available memory, and register the default (Reno)
 * congestion control.
 */
void __init tcp_init(void)
{
	/* skb is never allocated; it only anchors the sizeof(skb->cb)
	 * compile-time check below.
	 */
	struct sk_buff *skb = NULL;
	unsigned long limit;
	int order, i, max_share;

	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.ehash_size,
					NULL,
					thash_entries ? 0 : 512 * 1024);
	/* ehash_size comes back as a shift; convert to entry count. */
	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
	}
	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	/* Same shift-to-count conversion for the bind hash. */
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
			order++)
		;
	if (order >= 4) {
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}

	/* Set the pressure threshold to be a fraction of global memory that
	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
	 * memory, with a floor of 128 pages.
	 */
	limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
	limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;
	sysctl_tcp_mem[1] = limit;
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;

	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);

	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}
27131da177e4SLinus Torvalds 
27141da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_close);
27151da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_disconnect);
27161da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_getsockopt);
27171da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_ioctl);
27181da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_poll);
27191da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_read_sock);
27201da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_recvmsg);
27211da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sendmsg);
27229c55e01cSJens Axboe EXPORT_SYMBOL(tcp_splice_read);
27231da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sendpage);
27241da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_setsockopt);
27251da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_shutdown);
27261da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_statistics);
2727