xref: /linux/net/ipv4/tcp.c (revision 89ebd197eb2cd31d6187db344d5117064e19fdde)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
131da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
141da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
151da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
161da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
171da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
181da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
191da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
201da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
211da177e4SLinus Torvalds  *
221da177e4SLinus Torvalds  * Fixes:
231da177e4SLinus Torvalds  *		Alan Cox	:	Numerous verify_area() calls
241da177e4SLinus Torvalds  *		Alan Cox	:	Set the ACK bit on a reset
251da177e4SLinus Torvalds  *		Alan Cox	:	Stopped it crashing if it closed while
261da177e4SLinus Torvalds  *					sk->inuse=1 and was trying to connect
271da177e4SLinus Torvalds  *					(tcp_err()).
281da177e4SLinus Torvalds  *		Alan Cox	:	All icmp error handling was broken
291da177e4SLinus Torvalds  *					pointers passed where wrong and the
301da177e4SLinus Torvalds  *					socket was looked up backwards. Nobody
311da177e4SLinus Torvalds  *					tested any icmp error code obviously.
321da177e4SLinus Torvalds  *		Alan Cox	:	tcp_err() now handled properly. It
331da177e4SLinus Torvalds  *					wakes people on errors. poll
341da177e4SLinus Torvalds  *					behaves and the icmp error race
351da177e4SLinus Torvalds  *					has gone by moving it into sock.c
361da177e4SLinus Torvalds  *		Alan Cox	:	tcp_send_reset() fixed to work for
371da177e4SLinus Torvalds  *					everything not just packets for
381da177e4SLinus Torvalds  *					unknown sockets.
391da177e4SLinus Torvalds  *		Alan Cox	:	tcp option processing.
401da177e4SLinus Torvalds  *		Alan Cox	:	Reset tweaked (still not 100%) [Had
411da177e4SLinus Torvalds  *					syn rule wrong]
421da177e4SLinus Torvalds  *		Herp Rosmanith  :	More reset fixes
431da177e4SLinus Torvalds  *		Alan Cox	:	No longer acks invalid rst frames.
441da177e4SLinus Torvalds  *					Acking any kind of RST is right out.
451da177e4SLinus Torvalds  *		Alan Cox	:	Sets an ignore me flag on an rst
461da177e4SLinus Torvalds  *					receive otherwise odd bits of prattle
471da177e4SLinus Torvalds  *					escape still
481da177e4SLinus Torvalds  *		Alan Cox	:	Fixed another acking RST frame bug.
491da177e4SLinus Torvalds  *					Should stop LAN workplace lockups.
501da177e4SLinus Torvalds  *		Alan Cox	: 	Some tidyups using the new skb list
511da177e4SLinus Torvalds  *					facilities
521da177e4SLinus Torvalds  *		Alan Cox	:	sk->keepopen now seems to work
531da177e4SLinus Torvalds  *		Alan Cox	:	Pulls options out correctly on accepts
541da177e4SLinus Torvalds  *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
551da177e4SLinus Torvalds  *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
561da177e4SLinus Torvalds  *					bit to skb ops.
571da177e4SLinus Torvalds  *		Alan Cox	:	Tidied tcp_data to avoid a potential
581da177e4SLinus Torvalds  *					nasty.
591da177e4SLinus Torvalds  *		Alan Cox	:	Added some better commenting, as the
601da177e4SLinus Torvalds  *					tcp is hard to follow
611da177e4SLinus Torvalds  *		Alan Cox	:	Removed incorrect check for 20 * psh
621da177e4SLinus Torvalds  *	Michael O'Reilly	:	ack < copied bug fix.
631da177e4SLinus Torvalds  *	Johannes Stille		:	Misc tcp fixes (not all in yet).
641da177e4SLinus Torvalds  *		Alan Cox	:	FIN with no memory -> CRASH
651da177e4SLinus Torvalds  *		Alan Cox	:	Added socket option proto entries.
661da177e4SLinus Torvalds  *					Also added awareness of them to accept.
671da177e4SLinus Torvalds  *		Alan Cox	:	Added TCP options (SOL_TCP)
681da177e4SLinus Torvalds  *		Alan Cox	:	Switched wakeup calls to callbacks,
691da177e4SLinus Torvalds  *					so the kernel can layer network
701da177e4SLinus Torvalds  *					sockets.
711da177e4SLinus Torvalds  *		Alan Cox	:	Use ip_tos/ip_ttl settings.
721da177e4SLinus Torvalds  *		Alan Cox	:	Handle FIN (more) properly (we hope).
731da177e4SLinus Torvalds  *		Alan Cox	:	RST frames sent on unsynchronised
741da177e4SLinus Torvalds  *					state ack error.
751da177e4SLinus Torvalds  *		Alan Cox	:	Put in missing check for SYN bit.
761da177e4SLinus Torvalds  *		Alan Cox	:	Added tcp_select_window() aka NET2E
771da177e4SLinus Torvalds  *					window non shrink trick.
781da177e4SLinus Torvalds  *		Alan Cox	:	Added a couple of small NET2E timer
791da177e4SLinus Torvalds  *					fixes
801da177e4SLinus Torvalds  *		Charles Hedrick :	TCP fixes
811da177e4SLinus Torvalds  *		Toomas Tamm	:	TCP window fixes
821da177e4SLinus Torvalds  *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
831da177e4SLinus Torvalds  *		Charles Hedrick	:	Rewrote most of it to actually work
841da177e4SLinus Torvalds  *		Linus		:	Rewrote tcp_read() and URG handling
851da177e4SLinus Torvalds  *					completely
861da177e4SLinus Torvalds  *		Gerhard Koerting:	Fixed some missing timer handling
871da177e4SLinus Torvalds  *		Matthew Dillon  :	Reworked TCP machine states as per RFC
881da177e4SLinus Torvalds  *		Gerhard Koerting:	PC/TCP workarounds
891da177e4SLinus Torvalds  *		Adam Caldwell	:	Assorted timer/timing errors
901da177e4SLinus Torvalds  *		Matthew Dillon	:	Fixed another RST bug
911da177e4SLinus Torvalds  *		Alan Cox	:	Move to kernel side addressing changes.
921da177e4SLinus Torvalds  *		Alan Cox	:	Beginning work on TCP fastpathing
931da177e4SLinus Torvalds  *					(not yet usable)
941da177e4SLinus Torvalds  *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
951da177e4SLinus Torvalds  *		Alan Cox	:	TCP fast path debugging
961da177e4SLinus Torvalds  *		Alan Cox	:	Window clamping
971da177e4SLinus Torvalds  *		Michael Riepe	:	Bug in tcp_check()
981da177e4SLinus Torvalds  *		Matt Dillon	:	More TCP improvements and RST bug fixes
991da177e4SLinus Torvalds  *		Matt Dillon	:	Yet more small nasties removed from the
1001da177e4SLinus Torvalds  *					TCP code (Be very nice to this man if
1011da177e4SLinus Torvalds  *					tcp finally works 100%) 8)
1021da177e4SLinus Torvalds  *		Alan Cox	:	BSD accept semantics.
1031da177e4SLinus Torvalds  *		Alan Cox	:	Reset on closedown bug.
1041da177e4SLinus Torvalds  *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
1051da177e4SLinus Torvalds  *		Michael Pall	:	Handle poll() after URG properly in
1061da177e4SLinus Torvalds  *					all cases.
1071da177e4SLinus Torvalds  *		Michael Pall	:	Undo the last fix in tcp_read_urg()
1081da177e4SLinus Torvalds  *					(multi URG PUSH broke rlogin).
1091da177e4SLinus Torvalds  *		Michael Pall	:	Fix the multi URG PUSH problem in
1101da177e4SLinus Torvalds  *					tcp_readable(), poll() after URG
1111da177e4SLinus Torvalds  *					works now.
1121da177e4SLinus Torvalds  *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
1131da177e4SLinus Torvalds  *					BSD api.
1141da177e4SLinus Torvalds  *		Alan Cox	:	Changed the semantics of sk->socket to
1151da177e4SLinus Torvalds  *					fix a race and a signal problem with
1161da177e4SLinus Torvalds  *					accept() and async I/O.
1171da177e4SLinus Torvalds  *		Alan Cox	:	Relaxed the rules on tcp_sendto().
1181da177e4SLinus Torvalds  *		Yury Shevchuk	:	Really fixed accept() blocking problem.
1191da177e4SLinus Torvalds  *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
1201da177e4SLinus Torvalds  *					clients/servers which listen in on
1211da177e4SLinus Torvalds  *					fixed ports.
1221da177e4SLinus Torvalds  *		Alan Cox	:	Cleaned the above up and shrank it to
1231da177e4SLinus Torvalds  *					a sensible code size.
1241da177e4SLinus Torvalds  *		Alan Cox	:	Self connect lockup fix.
1251da177e4SLinus Torvalds  *		Alan Cox	:	No connect to multicast.
1261da177e4SLinus Torvalds  *		Ross Biro	:	Close unaccepted children on master
1271da177e4SLinus Torvalds  *					socket close.
1281da177e4SLinus Torvalds  *		Alan Cox	:	Reset tracing code.
1291da177e4SLinus Torvalds  *		Alan Cox	:	Spurious resets on shutdown.
1301da177e4SLinus Torvalds  *		Alan Cox	:	Giant 15 minute/60 second timer error
1311da177e4SLinus Torvalds  *		Alan Cox	:	Small whoops in polling before an
1321da177e4SLinus Torvalds  *					accept.
1331da177e4SLinus Torvalds  *		Alan Cox	:	Kept the state trace facility since
1341da177e4SLinus Torvalds  *					it's handy for debugging.
1351da177e4SLinus Torvalds  *		Alan Cox	:	More reset handler fixes.
1361da177e4SLinus Torvalds  *		Alan Cox	:	Started rewriting the code based on
1371da177e4SLinus Torvalds  *					the RFC's for other useful protocol
1381da177e4SLinus Torvalds  *					references see: Comer, KA9Q NOS, and
1391da177e4SLinus Torvalds  *					for a reference on the difference
1401da177e4SLinus Torvalds  *					between specifications and how BSD
1411da177e4SLinus Torvalds  *					works see the 4.4lite source.
1421da177e4SLinus Torvalds  *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
1431da177e4SLinus Torvalds  *					close.
1441da177e4SLinus Torvalds  *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
1451da177e4SLinus Torvalds  *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
1461da177e4SLinus Torvalds  *		Alan Cox	:	Reimplemented timers as per the RFC
1471da177e4SLinus Torvalds  *					and using multiple timers for sanity.
1481da177e4SLinus Torvalds  *		Alan Cox	:	Small bug fixes, and a lot of new
1491da177e4SLinus Torvalds  *					comments.
1501da177e4SLinus Torvalds  *		Alan Cox	:	Fixed dual reader crash by locking
1511da177e4SLinus Torvalds  *					the buffers (much like datagram.c)
1521da177e4SLinus Torvalds  *		Alan Cox	:	Fixed stuck sockets in probe. A probe
1531da177e4SLinus Torvalds  *					now gets fed up of retrying without
1541da177e4SLinus Torvalds  *					(even a no space) answer.
1551da177e4SLinus Torvalds  *		Alan Cox	:	Extracted closing code better
1561da177e4SLinus Torvalds  *		Alan Cox	:	Fixed the closing state machine to
1571da177e4SLinus Torvalds  *					resemble the RFC.
1581da177e4SLinus Torvalds  *		Alan Cox	:	More 'per spec' fixes.
1591da177e4SLinus Torvalds  *		Jorge Cwik	:	Even faster checksumming.
1601da177e4SLinus Torvalds  *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
1611da177e4SLinus Torvalds  *					only frames. At least one pc tcp stack
1621da177e4SLinus Torvalds  *					generates them.
1631da177e4SLinus Torvalds  *		Alan Cox	:	Cache last socket.
1641da177e4SLinus Torvalds  *		Alan Cox	:	Per route irtt.
1651da177e4SLinus Torvalds  *		Matt Day	:	poll()->select() match BSD precisely on error
1661da177e4SLinus Torvalds  *		Alan Cox	:	New buffers
1671da177e4SLinus Torvalds  *		Marc Tamsky	:	Various sk->prot->retransmits and
1681da177e4SLinus Torvalds  *					sk->retransmits misupdating fixed.
1691da177e4SLinus Torvalds  *					Fixed tcp_write_timeout: stuck close,
1701da177e4SLinus Torvalds  *					and TCP syn retries gets used now.
1711da177e4SLinus Torvalds  *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
1721da177e4SLinus Torvalds  *					ack if state is TCP_CLOSED.
1731da177e4SLinus Torvalds  *		Alan Cox	:	Look up device on a retransmit - routes may
1741da177e4SLinus Torvalds  *					change. Doesn't yet cope with MSS shrink right
1751da177e4SLinus Torvalds  *					but it's a start!
1761da177e4SLinus Torvalds  *		Marc Tamsky	:	Closing in closing fixes.
1771da177e4SLinus Torvalds  *		Mike Shaver	:	RFC1122 verifications.
1781da177e4SLinus Torvalds  *		Alan Cox	:	rcv_saddr errors.
1791da177e4SLinus Torvalds  *		Alan Cox	:	Block double connect().
1801da177e4SLinus Torvalds  *		Alan Cox	:	Small hooks for enSKIP.
1811da177e4SLinus Torvalds  *		Alexey Kuznetsov:	Path MTU discovery.
1821da177e4SLinus Torvalds  *		Alan Cox	:	Support soft errors.
1831da177e4SLinus Torvalds  *		Alan Cox	:	Fix MTU discovery pathological case
1841da177e4SLinus Torvalds  *					when the remote claims no mtu!
1851da177e4SLinus Torvalds  *		Marc Tamsky	:	TCP_CLOSE fix.
1861da177e4SLinus Torvalds  *		Colin (G3TNE)	:	Send a reset on syn ack replies in
1871da177e4SLinus Torvalds  *					window but wrong (fixes NT lpd problems)
1881da177e4SLinus Torvalds  *		Pedro Roque	:	Better TCP window handling, delayed ack.
1891da177e4SLinus Torvalds  *		Joerg Reuter	:	No modification of locked buffers in
1901da177e4SLinus Torvalds  *					tcp_do_retransmit()
1911da177e4SLinus Torvalds  *		Eric Schenk	:	Changed receiver side silly window
1921da177e4SLinus Torvalds  *					avoidance algorithm to BSD style
1931da177e4SLinus Torvalds  *					algorithm. This doubles throughput
1941da177e4SLinus Torvalds  *					against machines running Solaris,
1951da177e4SLinus Torvalds  *					and seems to result in general
1961da177e4SLinus Torvalds  *					improvement.
1971da177e4SLinus Torvalds  *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
1981da177e4SLinus Torvalds  *	Willy Konynenberg	:	Transparent proxying support.
1991da177e4SLinus Torvalds  *	Mike McLagan		:	Routing by source
2001da177e4SLinus Torvalds  *		Keith Owens	:	Do proper merging with partial SKB's in
2011da177e4SLinus Torvalds  *					tcp_do_sendmsg to avoid burstiness.
2021da177e4SLinus Torvalds  *		Eric Schenk	:	Fix fast close down bug with
2031da177e4SLinus Torvalds  *					shutdown() followed by close().
2041da177e4SLinus Torvalds  *		Andi Kleen 	:	Make poll agree with SIGIO
2051da177e4SLinus Torvalds  *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
2061da177e4SLinus Torvalds  *					lingertime == 0 (RFC 793 ABORT Call)
2071da177e4SLinus Torvalds  *	Hirokazu Takahashi	:	Use copy_from_user() instead of
2081da177e4SLinus Torvalds  *					csum_and_copy_from_user() if possible.
2091da177e4SLinus Torvalds  *
2101da177e4SLinus Torvalds  *		This program is free software; you can redistribute it and/or
2111da177e4SLinus Torvalds  *		modify it under the terms of the GNU General Public License
2121da177e4SLinus Torvalds  *		as published by the Free Software Foundation; either version
2131da177e4SLinus Torvalds  *		2 of the License, or (at your option) any later version.
2141da177e4SLinus Torvalds  *
2151da177e4SLinus Torvalds  * Description of States:
2161da177e4SLinus Torvalds  *
2171da177e4SLinus Torvalds  *	TCP_SYN_SENT		sent a connection request, waiting for ack
2181da177e4SLinus Torvalds  *
2191da177e4SLinus Torvalds  *	TCP_SYN_RECV		received a connection request, sent ack,
2201da177e4SLinus Torvalds  *				waiting for final ack in three-way handshake.
2211da177e4SLinus Torvalds  *
2221da177e4SLinus Torvalds  *	TCP_ESTABLISHED		connection established
2231da177e4SLinus Torvalds  *
2241da177e4SLinus Torvalds  *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
2251da177e4SLinus Torvalds  *				transmission of remaining buffered data
2261da177e4SLinus Torvalds  *
2271da177e4SLinus Torvalds  *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
2281da177e4SLinus Torvalds  *				to shutdown
2291da177e4SLinus Torvalds  *
2301da177e4SLinus Torvalds  *	TCP_CLOSING		both sides have shutdown but we still have
2311da177e4SLinus Torvalds  *				data we have to finish sending
2321da177e4SLinus Torvalds  *
2331da177e4SLinus Torvalds  *	TCP_TIME_WAIT		timeout to catch resent junk before entering
2341da177e4SLinus Torvalds  *				closed, can only be entered from FIN_WAIT2
2351da177e4SLinus Torvalds  *				or CLOSING.  Required because the other end
2361da177e4SLinus Torvalds  *				may not have gotten our last ACK causing it
2371da177e4SLinus Torvalds  *				to retransmit the data packet (which we ignore)
2381da177e4SLinus Torvalds  *
2391da177e4SLinus Torvalds  *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
2401da177e4SLinus Torvalds  *				us to finish writing our data and to shutdown
2411da177e4SLinus Torvalds  *				(we have to close() to move on to LAST_ACK)
2421da177e4SLinus Torvalds  *
2431da177e4SLinus Torvalds  *	TCP_LAST_ACK		our side has shutdown after remote has
2441da177e4SLinus Torvalds  *				shutdown.  There may still be data in our
2451da177e4SLinus Torvalds  *				buffer that we have to finish sending
2461da177e4SLinus Torvalds  *
2471da177e4SLinus Torvalds  *	TCP_CLOSE		socket is finished
2481da177e4SLinus Torvalds  */
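
/*
 * The states above are also visible from user space.  A minimal sketch
 * (illustrative user-space code, assuming a glibc environment and an
 * already-created TCP socket descriptor): getsockopt(TCP_INFO) reports
 * the current state in the tcpi_state field as one of the TCP_* values
 * listed above.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Returns the TCP_* state of @fd, or -1 on error. */
static int query_tcp_state(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) < 0)
		return -1;
	return info.tcpi_state;
}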
2491da177e4SLinus Torvalds 
2501da177e4SLinus Torvalds #include <linux/config.h>
2511da177e4SLinus Torvalds #include <linux/module.h>
2521da177e4SLinus Torvalds #include <linux/types.h>
2531da177e4SLinus Torvalds #include <linux/fcntl.h>
2541da177e4SLinus Torvalds #include <linux/poll.h>
2551da177e4SLinus Torvalds #include <linux/init.h>
2561da177e4SLinus Torvalds #include <linux/smp_lock.h>
2571da177e4SLinus Torvalds #include <linux/fs.h>
2581da177e4SLinus Torvalds #include <linux/random.h>
2591da177e4SLinus Torvalds #include <linux/bootmem.h>
2601da177e4SLinus Torvalds 
2611da177e4SLinus Torvalds #include <net/icmp.h>
2621da177e4SLinus Torvalds #include <net/tcp.h>
2631da177e4SLinus Torvalds #include <net/xfrm.h>
2641da177e4SLinus Torvalds #include <net/ip.h>
2651da177e4SLinus Torvalds 
2661da177e4SLinus Torvalds 
2671da177e4SLinus Torvalds #include <asm/uaccess.h>
2681da177e4SLinus Torvalds #include <asm/ioctls.h>
2691da177e4SLinus Torvalds 
2701da177e4SLinus Torvalds int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2711da177e4SLinus Torvalds 
2721da177e4SLinus Torvalds DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
2731da177e4SLinus Torvalds 
2741da177e4SLinus Torvalds kmem_cache_t *tcp_bucket_cachep;
2751da177e4SLinus Torvalds kmem_cache_t *tcp_timewait_cachep;
2761da177e4SLinus Torvalds 
2771da177e4SLinus Torvalds atomic_t tcp_orphan_count = ATOMIC_INIT(0);
2781da177e4SLinus Torvalds 
2791da177e4SLinus Torvalds int sysctl_tcp_mem[3];
2801da177e4SLinus Torvalds int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
2811da177e4SLinus Torvalds int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
2821da177e4SLinus Torvalds 
2831da177e4SLinus Torvalds EXPORT_SYMBOL(sysctl_tcp_mem);
2841da177e4SLinus Torvalds EXPORT_SYMBOL(sysctl_tcp_rmem);
2851da177e4SLinus Torvalds EXPORT_SYMBOL(sysctl_tcp_wmem);
2861da177e4SLinus Torvalds 
2871da177e4SLinus Torvalds atomic_t tcp_memory_allocated;	/* Current allocated memory. */
2881da177e4SLinus Torvalds atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */
2891da177e4SLinus Torvalds 
2901da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_memory_allocated);
2911da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sockets_allocated);
2921da177e4SLinus Torvalds 
2931da177e4SLinus Torvalds /*
2941da177e4SLinus Torvalds  * Pressure flag: try to collapse.
2951da177e4SLinus Torvalds  * Technical note: it is used by multiple contexts non-atomically.
2961da177e4SLinus Torvalds  * All of the sk_stream_mem_schedule() logic is of this nature: the
2971da177e4SLinus Torvalds  * accounting is strict, while the actions are advisory and have some latency.
2981da177e4SLinus Torvalds  */
2991da177e4SLinus Torvalds int tcp_memory_pressure;
3001da177e4SLinus Torvalds 
3011da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_memory_pressure);
3021da177e4SLinus Torvalds 
3031da177e4SLinus Torvalds void tcp_enter_memory_pressure(void)
3041da177e4SLinus Torvalds {
3051da177e4SLinus Torvalds 	if (!tcp_memory_pressure) {
3061da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
3071da177e4SLinus Torvalds 		tcp_memory_pressure = 1;
3081da177e4SLinus Torvalds 	}
3091da177e4SLinus Torvalds }
3101da177e4SLinus Torvalds 
3111da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_enter_memory_pressure);
3121da177e4SLinus Torvalds 
3131da177e4SLinus Torvalds /*
3141da177e4SLinus Torvalds  * LISTEN is a special case for poll().
3151da177e4SLinus Torvalds  */
3161da177e4SLinus Torvalds static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
3171da177e4SLinus Torvalds 					       poll_table *wait)
3181da177e4SLinus Torvalds {
3190e87506fSArnaldo Carvalho de Melo 	return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0;
3201da177e4SLinus Torvalds }
3211da177e4SLinus Torvalds 
3221da177e4SLinus Torvalds /*
3231da177e4SLinus Torvalds  *	Wait for a TCP event.
3241da177e4SLinus Torvalds  *
3251da177e4SLinus Torvalds  *	Note that we don't need to lock the socket, as the upper poll layers
3261da177e4SLinus Torvalds  *	take care of normal races (between the test and the event) and we don't
3271da177e4SLinus Torvalds  *	go look at any of the socket buffers directly.
3281da177e4SLinus Torvalds  */
3291da177e4SLinus Torvalds unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
3301da177e4SLinus Torvalds {
3311da177e4SLinus Torvalds 	unsigned int mask;
3321da177e4SLinus Torvalds 	struct sock *sk = sock->sk;
3331da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3341da177e4SLinus Torvalds 
3351da177e4SLinus Torvalds 	poll_wait(file, sk->sk_sleep, wait);
3361da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
3371da177e4SLinus Torvalds 		return tcp_listen_poll(sk, wait);
3381da177e4SLinus Torvalds 
3391da177e4SLinus Torvalds 	/* Socket is not locked. We are protected from async events
3401da177e4SLinus Torvalds 	   by the poll logic, and correct handling of state changes
3411da177e4SLinus Torvalds 	   made by other threads is impossible in any case.
3421da177e4SLinus Torvalds 	 */
3431da177e4SLinus Torvalds 
3441da177e4SLinus Torvalds 	mask = 0;
3451da177e4SLinus Torvalds 	if (sk->sk_err)
3461da177e4SLinus Torvalds 		mask = POLLERR;
3471da177e4SLinus Torvalds 
3481da177e4SLinus Torvalds 	/*
3491da177e4SLinus Torvalds 	 * POLLHUP is certainly not done right. But poll() doesn't
3501da177e4SLinus Torvalds 	 * have a notion of HUP in just one direction, and for a
3511da177e4SLinus Torvalds 	 * socket the read side is more interesting.
3521da177e4SLinus Torvalds 	 *
3531da177e4SLinus Torvalds 	 * Some poll() documentation says that POLLHUP is incompatible
3541da177e4SLinus Torvalds 	 * with the POLLOUT/POLLWR flags, so somebody should check this
3551da177e4SLinus Torvalds 	 * all. But careful, it tends to be safer to return too many
3561da177e4SLinus Torvalds 	 * bits than too few, and you can easily break real applications
3571da177e4SLinus Torvalds 	 * if you don't tell them that something has hung up!
3581da177e4SLinus Torvalds 	 *
3591da177e4SLinus Torvalds 	 * Check-me.
3601da177e4SLinus Torvalds 	 *
3611da177e4SLinus Torvalds 	 * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
3621da177e4SLinus Torvalds 	 * our fs/select.c). It means that after we have received EOF,
3631da177e4SLinus Torvalds 	 * poll always returns immediately, making it impossible to poll()
3641da177e4SLinus Torvalds 	 * for write() in state CLOSE_WAIT. One solution is evident --- to set
3651da177e4SLinus Torvalds 	 * POLLHUP if and only if shutdown has been made in both directions.
3661da177e4SLinus Torvalds 	 * Actually, it is interesting to look at how Solaris and DUX
3671da177e4SLinus Torvalds 	 * solve this dilemma. If POLLHUP were maskable, we could set it
3681da177e4SLinus Torvalds 	 * on SND_SHUTDOWN. BTW the examples given in Stevens' books assume
3691da177e4SLinus Torvalds 	 * exactly this behaviour, which explains why POLLHUP is
3701da177e4SLinus Torvalds 	 * incompatible with POLLOUT.	--ANK
3711da177e4SLinus Torvalds 	 *
3721da177e4SLinus Torvalds 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
3731da177e4SLinus Torvalds 	 * blocking on fresh not-connected or disconnected socket. --ANK
3741da177e4SLinus Torvalds 	 */
3751da177e4SLinus Torvalds 	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
3761da177e4SLinus Torvalds 		mask |= POLLHUP;
3771da177e4SLinus Torvalds 	if (sk->sk_shutdown & RCV_SHUTDOWN)
3781da177e4SLinus Torvalds 		mask |= POLLIN | POLLRDNORM;
3791da177e4SLinus Torvalds 
3801da177e4SLinus Torvalds 	/* Connected? */
3811da177e4SLinus Torvalds 	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
3821da177e4SLinus Torvalds 		/* Potential race condition. If the read of tp below is
3831da177e4SLinus Torvalds 		 * reordered above the sk->sk_state check, we can be illegally
3841da177e4SLinus Torvalds 		 * awakened in SYN_* states. */
3851da177e4SLinus Torvalds 		if ((tp->rcv_nxt != tp->copied_seq) &&
3861da177e4SLinus Torvalds 		    (tp->urg_seq != tp->copied_seq ||
3871da177e4SLinus Torvalds 		     tp->rcv_nxt != tp->copied_seq + 1 ||
3881da177e4SLinus Torvalds 		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
3891da177e4SLinus Torvalds 			mask |= POLLIN | POLLRDNORM;
3901da177e4SLinus Torvalds 
3911da177e4SLinus Torvalds 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
3921da177e4SLinus Torvalds 			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
3931da177e4SLinus Torvalds 				mask |= POLLOUT | POLLWRNORM;
3941da177e4SLinus Torvalds 			} else {  /* send SIGIO later */
3951da177e4SLinus Torvalds 				set_bit(SOCK_ASYNC_NOSPACE,
3961da177e4SLinus Torvalds 					&sk->sk_socket->flags);
3971da177e4SLinus Torvalds 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
3981da177e4SLinus Torvalds 
3991da177e4SLinus Torvalds 				/* Race breaker. If space is freed after
4001da177e4SLinus Torvalds 				 * wspace test but before the flags are set,
4011da177e4SLinus Torvalds 				 * IO signal will be lost.
4021da177e4SLinus Torvalds 				 */
4031da177e4SLinus Torvalds 				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
4041da177e4SLinus Torvalds 					mask |= POLLOUT | POLLWRNORM;
4051da177e4SLinus Torvalds 			}
4061da177e4SLinus Torvalds 		}
4071da177e4SLinus Torvalds 
4081da177e4SLinus Torvalds 		if (tp->urg_data & TCP_URG_VALID)
4091da177e4SLinus Torvalds 			mask |= POLLPRI;
4101da177e4SLinus Torvalds 	}
4111da177e4SLinus Torvalds 	return mask;
4121da177e4SLinus Torvalds }
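
/*
 * A minimal user-space sketch of how the mask built above is consumed
 * (illustrative; assumes a connected TCP socket descriptor): POLLIN is
 * reported when data or a FIN is readable, POLLOUT when enough write
 * space is available, POLLPRI when urgent data is pending, and POLLHUP
 * once both directions have been shut down.
 */
#include <poll.h>
#include <stdio.h>

static void report_tcp_events(int fd)
{
	struct pollfd pfd = {
		.fd	= fd,
		.events	= POLLIN | POLLOUT | POLLPRI,
	};

	if (poll(&pfd, 1, 5000) <= 0)	/* wait up to five seconds */
		return;
	if (pfd.revents & POLLIN)
		printf("readable\n");
	if (pfd.revents & POLLOUT)
		printf("writable\n");
	if (pfd.revents & POLLPRI)
		printf("urgent data pending\n");
	if (pfd.revents & POLLHUP)	/* reported even though not requested */
		printf("hung up\n");
}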
4131da177e4SLinus Torvalds 
4141da177e4SLinus Torvalds int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
4151da177e4SLinus Torvalds {
4161da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
4171da177e4SLinus Torvalds 	int answ;
4181da177e4SLinus Torvalds 
4191da177e4SLinus Torvalds 	switch (cmd) {
4201da177e4SLinus Torvalds 	case SIOCINQ:
4211da177e4SLinus Torvalds 		if (sk->sk_state == TCP_LISTEN)
4221da177e4SLinus Torvalds 			return -EINVAL;
4231da177e4SLinus Torvalds 
4241da177e4SLinus Torvalds 		lock_sock(sk);
4251da177e4SLinus Torvalds 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
4261da177e4SLinus Torvalds 			answ = 0;
4271da177e4SLinus Torvalds 		else if (sock_flag(sk, SOCK_URGINLINE) ||
4281da177e4SLinus Torvalds 			 !tp->urg_data ||
4291da177e4SLinus Torvalds 			 before(tp->urg_seq, tp->copied_seq) ||
4301da177e4SLinus Torvalds 			 !before(tp->urg_seq, tp->rcv_nxt)) {
4311da177e4SLinus Torvalds 			answ = tp->rcv_nxt - tp->copied_seq;
4321da177e4SLinus Torvalds 
4331da177e4SLinus Torvalds 			/* Subtract 1, if FIN is in queue. */
4341da177e4SLinus Torvalds 			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
4351da177e4SLinus Torvalds 				answ -=
4361da177e4SLinus Torvalds 		       ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
4371da177e4SLinus Torvalds 		} else
4381da177e4SLinus Torvalds 			answ = tp->urg_seq - tp->copied_seq;
4391da177e4SLinus Torvalds 		release_sock(sk);
4401da177e4SLinus Torvalds 		break;
4411da177e4SLinus Torvalds 	case SIOCATMARK:
4421da177e4SLinus Torvalds 		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
4431da177e4SLinus Torvalds 		break;
4441da177e4SLinus Torvalds 	case SIOCOUTQ:
4451da177e4SLinus Torvalds 		if (sk->sk_state == TCP_LISTEN)
4461da177e4SLinus Torvalds 			return -EINVAL;
4471da177e4SLinus Torvalds 
4481da177e4SLinus Torvalds 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
4491da177e4SLinus Torvalds 			answ = 0;
4501da177e4SLinus Torvalds 		else
4511da177e4SLinus Torvalds 			answ = tp->write_seq - tp->snd_una;
4521da177e4SLinus Torvalds 		break;
4531da177e4SLinus Torvalds 	default:
4541da177e4SLinus Torvalds 		return -ENOIOCTLCMD;
4551da177e4SLinus Torvalds 	};
4561da177e4SLinus Torvalds 
4571da177e4SLinus Torvalds 	return put_user(answ, (int __user *)arg);
4581da177e4SLinus Torvalds }
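
/*
 * User-space sketch of the ioctls handled above (illustrative; assumes a
 * connected TCP socket descriptor): SIOCINQ returns the bytes queued for
 * reading, SIOCOUTQ the bytes still in the send queue (tp->write_seq -
 * tp->snd_una above), and SIOCATMARK whether the read pointer sits at
 * the urgent mark.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void dump_tcp_queues(int fd)
{
	int inq = 0, outq = 0, atmark = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0)
		printf("bytes readable:       %d\n", inq);
	if (ioctl(fd, SIOCOUTQ, &outq) == 0)
		printf("bytes unsent/unacked: %d\n", outq);
	if (ioctl(fd, SIOCATMARK, &atmark) == 0)
		printf("at urgent mark:       %d\n", atmark);
}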
4591da177e4SLinus Torvalds 
4601da177e4SLinus Torvalds 
4611da177e4SLinus Torvalds int tcp_listen_start(struct sock *sk)
4621da177e4SLinus Torvalds {
4631da177e4SLinus Torvalds 	struct inet_sock *inet = inet_sk(sk);
4641da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
4650e87506fSArnaldo Carvalho de Melo 	int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE);
4660e87506fSArnaldo Carvalho de Melo 
4670e87506fSArnaldo Carvalho de Melo 	if (rc != 0)
4680e87506fSArnaldo Carvalho de Melo 		return rc;
4691da177e4SLinus Torvalds 
4701da177e4SLinus Torvalds 	sk->sk_max_ack_backlog = 0;
4711da177e4SLinus Torvalds 	sk->sk_ack_backlog = 0;
4721da177e4SLinus Torvalds 	tcp_delack_init(tp);
4731da177e4SLinus Torvalds 
4741da177e4SLinus Torvalds 	/* There is a race window here: we announce ourselves as listening,
4751da177e4SLinus Torvalds 	 * but this transition has not yet been validated by get_port().
4761da177e4SLinus Torvalds 	 * It is OK, because this socket enters the hash table only
4771da177e4SLinus Torvalds 	 * after validation is complete.
4781da177e4SLinus Torvalds 	 */
4791da177e4SLinus Torvalds 	sk->sk_state = TCP_LISTEN;
4801da177e4SLinus Torvalds 	if (!sk->sk_prot->get_port(sk, inet->num)) {
4811da177e4SLinus Torvalds 		inet->sport = htons(inet->num);
4821da177e4SLinus Torvalds 
4831da177e4SLinus Torvalds 		sk_dst_reset(sk);
4841da177e4SLinus Torvalds 		sk->sk_prot->hash(sk);
4851da177e4SLinus Torvalds 
4861da177e4SLinus Torvalds 		return 0;
4871da177e4SLinus Torvalds 	}
4881da177e4SLinus Torvalds 
4891da177e4SLinus Torvalds 	sk->sk_state = TCP_CLOSE;
4900e87506fSArnaldo Carvalho de Melo 	reqsk_queue_destroy(&tp->accept_queue);
4911da177e4SLinus Torvalds 	return -EADDRINUSE;
4921da177e4SLinus Torvalds }
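
/*
 * User-space sketch of what reaches tcp_listen_start() (illustrative; the
 * port number is the caller's choice): listen(2) on a bound TCP socket.
 * The backlog passed to listen() bounds the accept queue drained by
 * accept(2); the SYN queue allocated above (TCP_SYNQ_HSIZE buckets) is a
 * separate structure.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int start_listening(unsigned short port, int backlog)
{
	struct sockaddr_in addr;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, backlog) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}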
4931da177e4SLinus Torvalds 
4941da177e4SLinus Torvalds /*
4951da177e4SLinus Torvalds  *	This routine closes sockets which have been at least partially
4961da177e4SLinus Torvalds  *	opened, but not yet accepted.
4971da177e4SLinus Torvalds  */
4981da177e4SLinus Torvalds 
4991da177e4SLinus Torvalds static void tcp_listen_stop (struct sock *sk)
5001da177e4SLinus Torvalds {
5011da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
5022ad69c55SArnaldo Carvalho de Melo 	struct listen_sock *lopt;
5030e87506fSArnaldo Carvalho de Melo 	struct request_sock *acc_req;
50460236fddSArnaldo Carvalho de Melo 	struct request_sock *req;
5051da177e4SLinus Torvalds 	int i;
5061da177e4SLinus Torvalds 
5071da177e4SLinus Torvalds 	tcp_delete_keepalive_timer(sk);
5081da177e4SLinus Torvalds 
5091da177e4SLinus Torvalds 	/* make all the listen_opt local to us */
5100e87506fSArnaldo Carvalho de Melo 	lopt = reqsk_queue_yank_listen_sk(&tp->accept_queue);
5110e87506fSArnaldo Carvalho de Melo 	acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue);
5121da177e4SLinus Torvalds 
5131da177e4SLinus Torvalds 	if (lopt->qlen) {
5141da177e4SLinus Torvalds 		for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
5151da177e4SLinus Torvalds 			while ((req = lopt->syn_table[i]) != NULL) {
5161da177e4SLinus Torvalds 				lopt->syn_table[i] = req->dl_next;
5171da177e4SLinus Torvalds 				lopt->qlen--;
51860236fddSArnaldo Carvalho de Melo 				reqsk_free(req);
5191da177e4SLinus Torvalds 
5201da177e4SLinus Torvalds 		/* Following the specs, it would be better either to send a FIN
5211da177e4SLinus Torvalds 		 * (and enter FIN-WAIT-1; that is the normal close)
5221da177e4SLinus Torvalds 		 * or to send an active reset (abort).
5231da177e4SLinus Torvalds 		 * Certainly, it is pretty dangerous during a synflood, but that
5241da177e4SLinus Torvalds 		 * is a bad justification for our negligence 8)
5251da177e4SLinus Torvalds 		 * To be honest, we are not able to implement either
5261da177e4SLinus Torvalds 		 * of the variants now.			--ANK
5271da177e4SLinus Torvalds 		 */
5281da177e4SLinus Torvalds 			}
5291da177e4SLinus Torvalds 		}
5301da177e4SLinus Torvalds 	}
5311da177e4SLinus Torvalds 	BUG_TRAP(!lopt->qlen);
5321da177e4SLinus Torvalds 
5331da177e4SLinus Torvalds 	kfree(lopt);
5341da177e4SLinus Torvalds 
5351da177e4SLinus Torvalds 	while ((req = acc_req) != NULL) {
5361da177e4SLinus Torvalds 		struct sock *child = req->sk;
5371da177e4SLinus Torvalds 
5381da177e4SLinus Torvalds 		acc_req = req->dl_next;
5391da177e4SLinus Torvalds 
5401da177e4SLinus Torvalds 		local_bh_disable();
5411da177e4SLinus Torvalds 		bh_lock_sock(child);
5421da177e4SLinus Torvalds 		BUG_TRAP(!sock_owned_by_user(child));
5431da177e4SLinus Torvalds 		sock_hold(child);
5441da177e4SLinus Torvalds 
5451da177e4SLinus Torvalds 		tcp_disconnect(child, O_NONBLOCK);
5461da177e4SLinus Torvalds 
5471da177e4SLinus Torvalds 		sock_orphan(child);
5481da177e4SLinus Torvalds 
5491da177e4SLinus Torvalds 		atomic_inc(&tcp_orphan_count);
5501da177e4SLinus Torvalds 
5511da177e4SLinus Torvalds 		tcp_destroy_sock(child);
5521da177e4SLinus Torvalds 
5531da177e4SLinus Torvalds 		bh_unlock_sock(child);
5541da177e4SLinus Torvalds 		local_bh_enable();
5551da177e4SLinus Torvalds 		sock_put(child);
5561da177e4SLinus Torvalds 
5571da177e4SLinus Torvalds 		sk_acceptq_removed(sk);
55860236fddSArnaldo Carvalho de Melo 		__reqsk_free(req);
5591da177e4SLinus Torvalds 	}
5601da177e4SLinus Torvalds 	BUG_TRAP(!sk->sk_ack_backlog);
5611da177e4SLinus Torvalds }
5621da177e4SLinus Torvalds 
5631da177e4SLinus Torvalds static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
5641da177e4SLinus Torvalds {
5651da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
5661da177e4SLinus Torvalds 	tp->pushed_seq = tp->write_seq;
5671da177e4SLinus Torvalds }
5681da177e4SLinus Torvalds 
5691da177e4SLinus Torvalds static inline int forced_push(struct tcp_sock *tp)
5701da177e4SLinus Torvalds {
5711da177e4SLinus Torvalds 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
5721da177e4SLinus Torvalds }
5731da177e4SLinus Torvalds 
5741da177e4SLinus Torvalds static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
5751da177e4SLinus Torvalds 			      struct sk_buff *skb)
5761da177e4SLinus Torvalds {
5771da177e4SLinus Torvalds 	skb->csum = 0;
5781da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq = tp->write_seq;
5791da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
5801da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
5811da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
5821da177e4SLinus Torvalds 	skb_header_release(skb);
5831da177e4SLinus Torvalds 	__skb_queue_tail(&sk->sk_write_queue, skb);
5841da177e4SLinus Torvalds 	sk_charge_skb(sk, skb);
5851da177e4SLinus Torvalds 	if (!sk->sk_send_head)
5861da177e4SLinus Torvalds 		sk->sk_send_head = skb;
587*89ebd197SDavid S. Miller 	if (tp->nonagle & TCP_NAGLE_PUSH)
5881da177e4SLinus Torvalds 		tp->nonagle &= ~TCP_NAGLE_PUSH;
5891da177e4SLinus Torvalds }
5901da177e4SLinus Torvalds 
5911da177e4SLinus Torvalds static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
5921da177e4SLinus Torvalds 				struct sk_buff *skb)
5931da177e4SLinus Torvalds {
5941da177e4SLinus Torvalds 	if (flags & MSG_OOB) {
5951da177e4SLinus Torvalds 		tp->urg_mode = 1;
5961da177e4SLinus Torvalds 		tp->snd_up = tp->write_seq;
5971da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
5981da177e4SLinus Torvalds 	}
5991da177e4SLinus Torvalds }
6001da177e4SLinus Torvalds 
6011da177e4SLinus Torvalds static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
6021da177e4SLinus Torvalds 			    int mss_now, int nonagle)
6031da177e4SLinus Torvalds {
6041da177e4SLinus Torvalds 	if (sk->sk_send_head) {
6051da177e4SLinus Torvalds 		struct sk_buff *skb = sk->sk_write_queue.prev;
6061da177e4SLinus Torvalds 		if (!(flags & MSG_MORE) || forced_push(tp))
6071da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
6081da177e4SLinus Torvalds 		tcp_mark_urg(tp, flags, skb);
6091da177e4SLinus Torvalds 		__tcp_push_pending_frames(sk, tp, mss_now,
6101da177e4SLinus Torvalds 					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
6111da177e4SLinus Torvalds 	}
6121da177e4SLinus Torvalds }
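
/*
 * User-space sketch of what drives the MSG_MORE / TCP_NAGLE_CORK handling
 * above (illustrative; assumes a connected TCP socket descriptor and a
 * caller-supplied header and body): MSG_MORE tells the stack that more
 * data is coming, so the partial frame is held back until the final send
 * without the flag, which lets tcp_push() push the pending frames.
 */
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t send_header_then_body(int fd, const void *hdr, size_t hdr_len,
				     const void *body, size_t body_len)
{
	/* Header is coalesced with the body into full-sized frames. */
	if (send(fd, hdr, hdr_len, MSG_MORE) < 0)
		return -1;
	return send(fd, body, body_len, 0);
}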
6131da177e4SLinus Torvalds 
6141da177e4SLinus Torvalds static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
6151da177e4SLinus Torvalds 			 size_t psize, int flags)
6161da177e4SLinus Torvalds {
6171da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
618c1b4a7e6SDavid S. Miller 	int mss_now, size_goal;
6191da177e4SLinus Torvalds 	int err;
6201da177e4SLinus Torvalds 	ssize_t copied;
6211da177e4SLinus Torvalds 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
6221da177e4SLinus Torvalds 
6231da177e4SLinus Torvalds 	/* Wait for a connection to finish. */
6241da177e4SLinus Torvalds 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
6251da177e4SLinus Torvalds 		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
6261da177e4SLinus Torvalds 			goto out_err;
6271da177e4SLinus Torvalds 
6281da177e4SLinus Torvalds 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
6291da177e4SLinus Torvalds 
6301da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
631c1b4a7e6SDavid S. Miller 	size_goal = tp->xmit_size_goal;
6321da177e4SLinus Torvalds 	copied = 0;
6331da177e4SLinus Torvalds 
6341da177e4SLinus Torvalds 	err = -EPIPE;
6351da177e4SLinus Torvalds 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
6361da177e4SLinus Torvalds 		goto do_error;
6371da177e4SLinus Torvalds 
6381da177e4SLinus Torvalds 	while (psize > 0) {
6391da177e4SLinus Torvalds 		struct sk_buff *skb = sk->sk_write_queue.prev;
6401da177e4SLinus Torvalds 		struct page *page = pages[poffset / PAGE_SIZE];
6411da177e4SLinus Torvalds 		int copy, i, can_coalesce;
6421da177e4SLinus Torvalds 		int offset = poffset % PAGE_SIZE;
6431da177e4SLinus Torvalds 		int size = min_t(size_t, psize, PAGE_SIZE - offset);
6441da177e4SLinus Torvalds 
645c1b4a7e6SDavid S. Miller 		if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
6461da177e4SLinus Torvalds new_segment:
6471da177e4SLinus Torvalds 			if (!sk_stream_memory_free(sk))
6481da177e4SLinus Torvalds 				goto wait_for_sndbuf;
6491da177e4SLinus Torvalds 
6501da177e4SLinus Torvalds 			skb = sk_stream_alloc_pskb(sk, 0, 0,
6511da177e4SLinus Torvalds 						   sk->sk_allocation);
6521da177e4SLinus Torvalds 			if (!skb)
6531da177e4SLinus Torvalds 				goto wait_for_memory;
6541da177e4SLinus Torvalds 
6551da177e4SLinus Torvalds 			skb_entail(sk, tp, skb);
656c1b4a7e6SDavid S. Miller 			copy = size_goal;
6571da177e4SLinus Torvalds 		}
6581da177e4SLinus Torvalds 
6591da177e4SLinus Torvalds 		if (copy > size)
6601da177e4SLinus Torvalds 			copy = size;
6611da177e4SLinus Torvalds 
6621da177e4SLinus Torvalds 		i = skb_shinfo(skb)->nr_frags;
6631da177e4SLinus Torvalds 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
6641da177e4SLinus Torvalds 		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
6651da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
6661da177e4SLinus Torvalds 			goto new_segment;
6671da177e4SLinus Torvalds 		}
6681da177e4SLinus Torvalds 		if (sk->sk_forward_alloc < copy &&
6691da177e4SLinus Torvalds 		    !sk_stream_mem_schedule(sk, copy, 0))
6701da177e4SLinus Torvalds 			goto wait_for_memory;
6711da177e4SLinus Torvalds 
6721da177e4SLinus Torvalds 		if (can_coalesce) {
6731da177e4SLinus Torvalds 			skb_shinfo(skb)->frags[i - 1].size += copy;
6741da177e4SLinus Torvalds 		} else {
6751da177e4SLinus Torvalds 			get_page(page);
6761da177e4SLinus Torvalds 			skb_fill_page_desc(skb, i, page, offset, copy);
6771da177e4SLinus Torvalds 		}
6781da177e4SLinus Torvalds 
6791da177e4SLinus Torvalds 		skb->len += copy;
6801da177e4SLinus Torvalds 		skb->data_len += copy;
6811da177e4SLinus Torvalds 		skb->truesize += copy;
6821da177e4SLinus Torvalds 		sk->sk_wmem_queued += copy;
6831da177e4SLinus Torvalds 		sk->sk_forward_alloc -= copy;
6841da177e4SLinus Torvalds 		skb->ip_summed = CHECKSUM_HW;
6851da177e4SLinus Torvalds 		tp->write_seq += copy;
6861da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq += copy;
6871da177e4SLinus Torvalds 		skb_shinfo(skb)->tso_segs = 0;
6881da177e4SLinus Torvalds 
6891da177e4SLinus Torvalds 		if (!copied)
6901da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
6911da177e4SLinus Torvalds 
6921da177e4SLinus Torvalds 		copied += copy;
6931da177e4SLinus Torvalds 		poffset += copy;
6941da177e4SLinus Torvalds 		if (!(psize -= copy))
6951da177e4SLinus Torvalds 			goto out;
6961da177e4SLinus Torvalds 
697c1b4a7e6SDavid S. Miller 		if (skb->len < mss_now || (flags & MSG_OOB))
6981da177e4SLinus Torvalds 			continue;
6991da177e4SLinus Torvalds 
7001da177e4SLinus Torvalds 		if (forced_push(tp)) {
7011da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
7021da177e4SLinus Torvalds 			__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
7031da177e4SLinus Torvalds 		} else if (skb == sk->sk_send_head)
7041da177e4SLinus Torvalds 			tcp_push_one(sk, mss_now);
7051da177e4SLinus Torvalds 		continue;
7061da177e4SLinus Torvalds 
7071da177e4SLinus Torvalds wait_for_sndbuf:
7081da177e4SLinus Torvalds 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
7091da177e4SLinus Torvalds wait_for_memory:
7101da177e4SLinus Torvalds 		if (copied)
7111da177e4SLinus Torvalds 			tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
7121da177e4SLinus Torvalds 
7131da177e4SLinus Torvalds 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
7141da177e4SLinus Torvalds 			goto do_error;
7151da177e4SLinus Torvalds 
7161da177e4SLinus Torvalds 		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
717c1b4a7e6SDavid S. Miller 		size_goal = tp->xmit_size_goal;
7181da177e4SLinus Torvalds 	}
7191da177e4SLinus Torvalds 
7201da177e4SLinus Torvalds out:
7211da177e4SLinus Torvalds 	if (copied)
7221da177e4SLinus Torvalds 		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
7231da177e4SLinus Torvalds 	return copied;
7241da177e4SLinus Torvalds 
7251da177e4SLinus Torvalds do_error:
7261da177e4SLinus Torvalds 	if (copied)
7271da177e4SLinus Torvalds 		goto out;
7281da177e4SLinus Torvalds out_err:
7291da177e4SLinus Torvalds 	return sk_stream_error(sk, flags, err);
7301da177e4SLinus Torvalds }
7311da177e4SLinus Torvalds 
7321da177e4SLinus Torvalds ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
7331da177e4SLinus Torvalds 		     size_t size, int flags)
7341da177e4SLinus Torvalds {
7351da177e4SLinus Torvalds 	ssize_t res;
7361da177e4SLinus Torvalds 	struct sock *sk = sock->sk;
7371da177e4SLinus Torvalds 
7381da177e4SLinus Torvalds #define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
7391da177e4SLinus Torvalds 
7401da177e4SLinus Torvalds 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
7411da177e4SLinus Torvalds 	    !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
7421da177e4SLinus Torvalds 		return sock_no_sendpage(sock, page, offset, size, flags);
7431da177e4SLinus Torvalds 
7441da177e4SLinus Torvalds #undef TCP_ZC_CSUM_FLAGS
7451da177e4SLinus Torvalds 
7461da177e4SLinus Torvalds 	lock_sock(sk);
7471da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
7481da177e4SLinus Torvalds 	res = do_tcp_sendpages(sk, &page, offset, size, flags);
7491da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
7501da177e4SLinus Torvalds 	release_sock(sk);
7511da177e4SLinus Torvalds 	return res;
7521da177e4SLinus Torvalds }
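
/*
 * Sketch of one common user-space path into tcp_sendpage() (illustrative;
 * assumes an open regular file and a connected TCP socket): sendfile(2)
 * with the socket as the output descriptor, which hands page references
 * to do_tcp_sendpages() when the route supports scatter/gather.
 */
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <unistd.h>

static ssize_t send_whole_file(int sock_fd, int file_fd)
{
	struct stat st;
	off_t offset = 0;

	if (fstat(file_fd, &st) < 0)
		return -1;
	return sendfile(sock_fd, file_fd, &offset, st.st_size);
}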
7531da177e4SLinus Torvalds 
7541da177e4SLinus Torvalds #define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
7551da177e4SLinus Torvalds #define TCP_OFF(sk)	(sk->sk_sndmsg_off)
7561da177e4SLinus Torvalds 
7571da177e4SLinus Torvalds static inline int select_size(struct sock *sk, struct tcp_sock *tp)
7581da177e4SLinus Torvalds {
759c1b4a7e6SDavid S. Miller 	int tmp = tp->mss_cache;
7601da177e4SLinus Torvalds 
761b4e26f5eSDavid S. Miller 	if (sk->sk_route_caps & NETIF_F_SG) {
762b4e26f5eSDavid S. Miller 		if (sk->sk_route_caps & NETIF_F_TSO)
763c65f7f00SDavid S. Miller 			tmp = 0;
764b4e26f5eSDavid S. Miller 		else {
765b4e26f5eSDavid S. Miller 			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
766b4e26f5eSDavid S. Miller 
767b4e26f5eSDavid S. Miller 			if (tmp >= pgbreak &&
768b4e26f5eSDavid S. Miller 			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
769b4e26f5eSDavid S. Miller 				tmp = pgbreak;
770b4e26f5eSDavid S. Miller 		}
771b4e26f5eSDavid S. Miller 	}
7721da177e4SLinus Torvalds 
7731da177e4SLinus Torvalds 	return tmp;
7741da177e4SLinus Torvalds }
7751da177e4SLinus Torvalds 
7761da177e4SLinus Torvalds int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
7771da177e4SLinus Torvalds 		size_t size)
7781da177e4SLinus Torvalds {
7791da177e4SLinus Torvalds 	struct iovec *iov;
7801da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
7811da177e4SLinus Torvalds 	struct sk_buff *skb;
7821da177e4SLinus Torvalds 	int iovlen, flags;
783c1b4a7e6SDavid S. Miller 	int mss_now, size_goal;
7841da177e4SLinus Torvalds 	int err, copied;
7851da177e4SLinus Torvalds 	long timeo;
7861da177e4SLinus Torvalds 
7871da177e4SLinus Torvalds 	lock_sock(sk);
7881da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
7891da177e4SLinus Torvalds 
7901da177e4SLinus Torvalds 	flags = msg->msg_flags;
7911da177e4SLinus Torvalds 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
7921da177e4SLinus Torvalds 
7931da177e4SLinus Torvalds 	/* Wait for a connection to finish. */
7941da177e4SLinus Torvalds 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
7951da177e4SLinus Torvalds 		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
7961da177e4SLinus Torvalds 			goto out_err;
7971da177e4SLinus Torvalds 
7981da177e4SLinus Torvalds 	/* This should be in poll */
7991da177e4SLinus Torvalds 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
8001da177e4SLinus Torvalds 
8011da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
802c1b4a7e6SDavid S. Miller 	size_goal = tp->xmit_size_goal;
8031da177e4SLinus Torvalds 
8041da177e4SLinus Torvalds 	/* Ok commence sending. */
8051da177e4SLinus Torvalds 	iovlen = msg->msg_iovlen;
8061da177e4SLinus Torvalds 	iov = msg->msg_iov;
8071da177e4SLinus Torvalds 	copied = 0;
8081da177e4SLinus Torvalds 
8091da177e4SLinus Torvalds 	err = -EPIPE;
8101da177e4SLinus Torvalds 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
8111da177e4SLinus Torvalds 		goto do_error;
8121da177e4SLinus Torvalds 
8131da177e4SLinus Torvalds 	while (--iovlen >= 0) {
8141da177e4SLinus Torvalds 		int seglen = iov->iov_len;
8151da177e4SLinus Torvalds 		unsigned char __user *from = iov->iov_base;
8161da177e4SLinus Torvalds 
8171da177e4SLinus Torvalds 		iov++;
8181da177e4SLinus Torvalds 
8191da177e4SLinus Torvalds 		while (seglen > 0) {
8201da177e4SLinus Torvalds 			int copy;
8211da177e4SLinus Torvalds 
8221da177e4SLinus Torvalds 			skb = sk->sk_write_queue.prev;
8231da177e4SLinus Torvalds 
8241da177e4SLinus Torvalds 			if (!sk->sk_send_head ||
825c1b4a7e6SDavid S. Miller 			    (copy = size_goal - skb->len) <= 0) {
8261da177e4SLinus Torvalds 
8271da177e4SLinus Torvalds new_segment:
8281da177e4SLinus Torvalds 				/* Allocate a new segment. If the interface is SG,
8291da177e4SLinus Torvalds 				 * allocate an skb that fits into a single page.
8301da177e4SLinus Torvalds 				 */
8311da177e4SLinus Torvalds 				if (!sk_stream_memory_free(sk))
8321da177e4SLinus Torvalds 					goto wait_for_sndbuf;
8331da177e4SLinus Torvalds 
8341da177e4SLinus Torvalds 				skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
8351da177e4SLinus Torvalds 							   0, sk->sk_allocation);
8361da177e4SLinus Torvalds 				if (!skb)
8371da177e4SLinus Torvalds 					goto wait_for_memory;
8381da177e4SLinus Torvalds 
8391da177e4SLinus Torvalds 				/*
8401da177e4SLinus Torvalds 				 * Check whether we can use HW checksum.
8411da177e4SLinus Torvalds 				 */
8421da177e4SLinus Torvalds 				if (sk->sk_route_caps &
8431da177e4SLinus Torvalds 				    (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
8441da177e4SLinus Torvalds 				     NETIF_F_HW_CSUM))
8451da177e4SLinus Torvalds 					skb->ip_summed = CHECKSUM_HW;
8461da177e4SLinus Torvalds 
8471da177e4SLinus Torvalds 				skb_entail(sk, tp, skb);
848c1b4a7e6SDavid S. Miller 				copy = size_goal;
8491da177e4SLinus Torvalds 			}
8501da177e4SLinus Torvalds 
8511da177e4SLinus Torvalds 			/* Try to append data to the end of skb. */
8521da177e4SLinus Torvalds 			if (copy > seglen)
8531da177e4SLinus Torvalds 				copy = seglen;
8541da177e4SLinus Torvalds 
8551da177e4SLinus Torvalds 			/* Where to copy to? */
8561da177e4SLinus Torvalds 			if (skb_tailroom(skb) > 0) {
8571da177e4SLinus Torvalds 				/* We have some space in skb head. Superb! */
8581da177e4SLinus Torvalds 				if (copy > skb_tailroom(skb))
8591da177e4SLinus Torvalds 					copy = skb_tailroom(skb);
8601da177e4SLinus Torvalds 				if ((err = skb_add_data(skb, from, copy)) != 0)
8611da177e4SLinus Torvalds 					goto do_fault;
8621da177e4SLinus Torvalds 			} else {
8631da177e4SLinus Torvalds 				int merge = 0;
8641da177e4SLinus Torvalds 				int i = skb_shinfo(skb)->nr_frags;
8651da177e4SLinus Torvalds 				struct page *page = TCP_PAGE(sk);
8661da177e4SLinus Torvalds 				int off = TCP_OFF(sk);
8671da177e4SLinus Torvalds 
8681da177e4SLinus Torvalds 				if (skb_can_coalesce(skb, i, page, off) &&
8691da177e4SLinus Torvalds 				    off != PAGE_SIZE) {
8701da177e4SLinus Torvalds 					/* We can extend the last page
8711da177e4SLinus Torvalds 					 * fragment. */
8721da177e4SLinus Torvalds 					merge = 1;
8731da177e4SLinus Torvalds 				} else if (i == MAX_SKB_FRAGS ||
8741da177e4SLinus Torvalds 					   (!i &&
8751da177e4SLinus Torvalds 					   !(sk->sk_route_caps & NETIF_F_SG))) {
8761da177e4SLinus Torvalds 					/* Need to add a new fragment and cannot
8771da177e4SLinus Torvalds 					 * do so because the interface is non-SG,
8781da177e4SLinus Torvalds 					 * or because all the page slots are
8791da177e4SLinus Torvalds 					 * busy. */
8801da177e4SLinus Torvalds 					tcp_mark_push(tp, skb);
8811da177e4SLinus Torvalds 					goto new_segment;
8821da177e4SLinus Torvalds 				} else if (page) {
8831da177e4SLinus Torvalds 					if (off == PAGE_SIZE) {
8841da177e4SLinus Torvalds 						put_page(page);
8851da177e4SLinus Torvalds 						TCP_PAGE(sk) = page = NULL;
8861da177e4SLinus Torvalds 					}
8871da177e4SLinus Torvalds 				}
8881da177e4SLinus Torvalds 
8891da177e4SLinus Torvalds 				if (!page) {
8901da177e4SLinus Torvalds 					/* Allocate new cache page. */
8911da177e4SLinus Torvalds 					if (!(page = sk_stream_alloc_page(sk)))
8921da177e4SLinus Torvalds 						goto wait_for_memory;
8931da177e4SLinus Torvalds 					off = 0;
8941da177e4SLinus Torvalds 				}
8951da177e4SLinus Torvalds 
8961da177e4SLinus Torvalds 				if (copy > PAGE_SIZE - off)
8971da177e4SLinus Torvalds 					copy = PAGE_SIZE - off;
8981da177e4SLinus Torvalds 
8991da177e4SLinus Torvalds 				/* Time to copy data. We are close to
9001da177e4SLinus Torvalds 				 * the end! */
9011da177e4SLinus Torvalds 				err = skb_copy_to_page(sk, from, skb, page,
9021da177e4SLinus Torvalds 						       off, copy);
9031da177e4SLinus Torvalds 				if (err) {
9041da177e4SLinus Torvalds 					/* If this page was new, give it to the
9051da177e4SLinus Torvalds 					 * socket so it does not get leaked.
9061da177e4SLinus Torvalds 					 */
9071da177e4SLinus Torvalds 					if (!TCP_PAGE(sk)) {
9081da177e4SLinus Torvalds 						TCP_PAGE(sk) = page;
9091da177e4SLinus Torvalds 						TCP_OFF(sk) = 0;
9101da177e4SLinus Torvalds 					}
9111da177e4SLinus Torvalds 					goto do_error;
9121da177e4SLinus Torvalds 				}
9131da177e4SLinus Torvalds 
9141da177e4SLinus Torvalds 				/* Update the skb. */
9151da177e4SLinus Torvalds 				if (merge) {
9161da177e4SLinus Torvalds 					skb_shinfo(skb)->frags[i - 1].size +=
9171da177e4SLinus Torvalds 									copy;
9181da177e4SLinus Torvalds 				} else {
9191da177e4SLinus Torvalds 					skb_fill_page_desc(skb, i, page, off, copy);
9201da177e4SLinus Torvalds 					if (TCP_PAGE(sk)) {
9211da177e4SLinus Torvalds 						get_page(page);
9221da177e4SLinus Torvalds 					} else if (off + copy < PAGE_SIZE) {
9231da177e4SLinus Torvalds 						get_page(page);
9241da177e4SLinus Torvalds 						TCP_PAGE(sk) = page;
9251da177e4SLinus Torvalds 					}
9261da177e4SLinus Torvalds 				}
9271da177e4SLinus Torvalds 
9281da177e4SLinus Torvalds 				TCP_OFF(sk) = off + copy;
9291da177e4SLinus Torvalds 			}
9301da177e4SLinus Torvalds 
9311da177e4SLinus Torvalds 			if (!copied)
9321da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
9331da177e4SLinus Torvalds 
9341da177e4SLinus Torvalds 			tp->write_seq += copy;
9351da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->end_seq += copy;
9361da177e4SLinus Torvalds 			skb_shinfo(skb)->tso_segs = 0;
9371da177e4SLinus Torvalds 
9381da177e4SLinus Torvalds 			from += copy;
9391da177e4SLinus Torvalds 			copied += copy;
9401da177e4SLinus Torvalds 			if ((seglen -= copy) == 0 && iovlen == 0)
9411da177e4SLinus Torvalds 				goto out;
9421da177e4SLinus Torvalds 
943c1b4a7e6SDavid S. Miller 			if (skb->len < mss_now || (flags & MSG_OOB))
9441da177e4SLinus Torvalds 				continue;
9451da177e4SLinus Torvalds 
9461da177e4SLinus Torvalds 			if (forced_push(tp)) {
9471da177e4SLinus Torvalds 				tcp_mark_push(tp, skb);
9481da177e4SLinus Torvalds 				__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
9491da177e4SLinus Torvalds 			} else if (skb == sk->sk_send_head)
9501da177e4SLinus Torvalds 				tcp_push_one(sk, mss_now);
9511da177e4SLinus Torvalds 			continue;
9521da177e4SLinus Torvalds 
9531da177e4SLinus Torvalds wait_for_sndbuf:
9541da177e4SLinus Torvalds 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
9551da177e4SLinus Torvalds wait_for_memory:
9561da177e4SLinus Torvalds 			if (copied)
9571da177e4SLinus Torvalds 				tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
9581da177e4SLinus Torvalds 
9591da177e4SLinus Torvalds 			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
9601da177e4SLinus Torvalds 				goto do_error;
9611da177e4SLinus Torvalds 
9621da177e4SLinus Torvalds 			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
963c1b4a7e6SDavid S. Miller 			size_goal = tp->xmit_size_goal;
9641da177e4SLinus Torvalds 		}
9651da177e4SLinus Torvalds 	}
9661da177e4SLinus Torvalds 
9671da177e4SLinus Torvalds out:
9681da177e4SLinus Torvalds 	if (copied)
9691da177e4SLinus Torvalds 		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
9701da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
9711da177e4SLinus Torvalds 	release_sock(sk);
9721da177e4SLinus Torvalds 	return copied;
9731da177e4SLinus Torvalds 
9741da177e4SLinus Torvalds do_fault:
9751da177e4SLinus Torvalds 	if (!skb->len) {
9761da177e4SLinus Torvalds 		if (sk->sk_send_head == skb)
9771da177e4SLinus Torvalds 			sk->sk_send_head = NULL;
9781da177e4SLinus Torvalds 		__skb_unlink(skb, skb->list);
9791da177e4SLinus Torvalds 		sk_stream_free_skb(sk, skb);
9801da177e4SLinus Torvalds 	}
9811da177e4SLinus Torvalds 
9821da177e4SLinus Torvalds do_error:
9831da177e4SLinus Torvalds 	if (copied)
9841da177e4SLinus Torvalds 		goto out;
9851da177e4SLinus Torvalds out_err:
9861da177e4SLinus Torvalds 	err = sk_stream_error(sk, flags, err);
9871da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
9881da177e4SLinus Torvalds 	release_sock(sk);
9891da177e4SLinus Torvalds 	return err;
9901da177e4SLinus Torvalds }
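
/*
 * User-space counterpart of the iovec walk above (illustrative; assumes a
 * connected TCP socket descriptor): sendmsg(2) supplies msg_iov and
 * msg_iovlen, which tcp_sendmsg() copies segment by segment into skbs.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static ssize_t send_two_chunks(int fd, const char *a, const char *b)
{
	struct iovec iov[2] = {
		{ .iov_base = (void *)a, .iov_len = strlen(a) },
		{ .iov_base = (void *)b, .iov_len = strlen(b) },
	};
	struct msghdr msg = {
		.msg_iov    = iov,
		.msg_iovlen = 2,
	};

	return sendmsg(fd, &msg, 0);
}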
9911da177e4SLinus Torvalds 
9921da177e4SLinus Torvalds /*
9931da177e4SLinus Torvalds  *	Handle reading urgent data. BSD has very simple semantics for
9941da177e4SLinus Torvalds  *	this, no blocking and very strange errors 8)
9951da177e4SLinus Torvalds  */
9961da177e4SLinus Torvalds 
9971da177e4SLinus Torvalds static int tcp_recv_urg(struct sock *sk, long timeo,
9981da177e4SLinus Torvalds 			struct msghdr *msg, int len, int flags,
9991da177e4SLinus Torvalds 			int *addr_len)
10001da177e4SLinus Torvalds {
10011da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
10021da177e4SLinus Torvalds 
10031da177e4SLinus Torvalds 	/* No URG data to read. */
10041da177e4SLinus Torvalds 	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
10051da177e4SLinus Torvalds 	    tp->urg_data == TCP_URG_READ)
10061da177e4SLinus Torvalds 		return -EINVAL;	/* Yes this is right ! */
10071da177e4SLinus Torvalds 
10081da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
10091da177e4SLinus Torvalds 		return -ENOTCONN;
10101da177e4SLinus Torvalds 
10111da177e4SLinus Torvalds 	if (tp->urg_data & TCP_URG_VALID) {
10121da177e4SLinus Torvalds 		int err = 0;
10131da177e4SLinus Torvalds 		char c = tp->urg_data;
10141da177e4SLinus Torvalds 
10151da177e4SLinus Torvalds 		if (!(flags & MSG_PEEK))
10161da177e4SLinus Torvalds 			tp->urg_data = TCP_URG_READ;
10171da177e4SLinus Torvalds 
10181da177e4SLinus Torvalds 		/* Read urgent data. */
10191da177e4SLinus Torvalds 		msg->msg_flags |= MSG_OOB;
10201da177e4SLinus Torvalds 
10211da177e4SLinus Torvalds 		if (len > 0) {
10221da177e4SLinus Torvalds 			if (!(flags & MSG_TRUNC))
10231da177e4SLinus Torvalds 				err = memcpy_toiovec(msg->msg_iov, &c, 1);
10241da177e4SLinus Torvalds 			len = 1;
10251da177e4SLinus Torvalds 		} else
10261da177e4SLinus Torvalds 			msg->msg_flags |= MSG_TRUNC;
10271da177e4SLinus Torvalds 
10281da177e4SLinus Torvalds 		return err ? -EFAULT : len;
10291da177e4SLinus Torvalds 	}
10301da177e4SLinus Torvalds 
10311da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
10321da177e4SLinus Torvalds 		return 0;
10331da177e4SLinus Torvalds 
10341da177e4SLinus Torvalds 	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
10351da177e4SLinus Torvalds 	 * the available implementations agree in this case:
10361da177e4SLinus Torvalds 	 * this call should never block, independent of the
10371da177e4SLinus Torvalds 	 * blocking state of the socket.
10381da177e4SLinus Torvalds 	 * Mike <pall@rz.uni-karlsruhe.de>
10391da177e4SLinus Torvalds 	 */
10401da177e4SLinus Torvalds 	return -EAGAIN;
10411da177e4SLinus Torvalds }
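
/*
 * A minimal userspace sketch of the MSG_OOB semantics implemented by
 * tcp_recv_urg() above: the call never blocks, returns the single urgent
 * byte when one is valid, fails with EINVAL when there is no urgent data
 * (or it was already read, or SO_OOBINLINE is set), and fails with EAGAIN
 * when the urgent pointer has been seen but the byte has not arrived yet.
 * The connected TCP socket "fd" is assumed to come from elsewhere.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

static void read_urgent_byte(int fd)
{
	char c;
	ssize_t n = recv(fd, &c, 1, MSG_OOB);	/* never blocks for OOB */

	if (n == 1)
		printf("urgent byte: 0x%02x\n", (unsigned char)c);
	else if (n < 0 && errno == EAGAIN)
		printf("urgent pointer seen, byte not here yet\n");
	else if (n < 0 && errno == EINVAL)
		printf("no urgent data (or already consumed / OOB inline)\n");
	else
		perror("recv(MSG_OOB)");
}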
10421da177e4SLinus Torvalds 
10431da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user,
10441da177e4SLinus Torvalds  * then send an ACK if necessary.  COPIED is the number of bytes
10451da177e4SLinus Torvalds  * tcp_recvmsg has given to the user so far; it speeds up
10461da177e4SLinus Torvalds  * calculation of whether or not we must ACK for the sake of
10471da177e4SLinus Torvalds  * a window update.
10481da177e4SLinus Torvalds  */
10491da177e4SLinus Torvalds static void cleanup_rbuf(struct sock *sk, int copied)
10501da177e4SLinus Torvalds {
10511da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
10521da177e4SLinus Torvalds 	int time_to_ack = 0;
10531da177e4SLinus Torvalds 
10541da177e4SLinus Torvalds #if TCP_DEBUG
10551da177e4SLinus Torvalds 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
10561da177e4SLinus Torvalds 
10571da177e4SLinus Torvalds 	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
10581da177e4SLinus Torvalds #endif
10591da177e4SLinus Torvalds 
10601da177e4SLinus Torvalds 	if (tcp_ack_scheduled(tp)) {
10611da177e4SLinus Torvalds 		/* Delayed ACKs frequently hit locked sockets during bulk
10621da177e4SLinus Torvalds 		 * receive. */
10631da177e4SLinus Torvalds 		if (tp->ack.blocked ||
10641da177e4SLinus Torvalds 		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
10651da177e4SLinus Torvalds 		    tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
10661da177e4SLinus Torvalds 		    /*
10671da177e4SLinus Torvalds 		     * If this read emptied the receive buffer, we send an ACK
10681da177e4SLinus Torvalds 		     * when the connection is not bidirectional, the user has
10691da177e4SLinus Torvalds 		     * drained the receive buffer, and there was a small
10701da177e4SLinus Torvalds 		     * segment in the queue.
10711da177e4SLinus Torvalds 		     */
10721da177e4SLinus Torvalds 		    (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
10731da177e4SLinus Torvalds 		     !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
10741da177e4SLinus Torvalds 			time_to_ack = 1;
10751da177e4SLinus Torvalds 	}
10761da177e4SLinus Torvalds 
10771da177e4SLinus Torvalds 	/* We send an ACK if we can now advertise a non-zero window
10781da177e4SLinus Torvalds 	 * which has been raised "significantly".
10791da177e4SLinus Torvalds 	 *
10801da177e4SLinus Torvalds 	 * Even if the window is raised up to infinity, do not send a window-
10811da177e4SLinus Torvalds 	 * opening ACK in states where we will not receive more data; it is useless.
10821da177e4SLinus Torvalds 	 */
10831da177e4SLinus Torvalds 	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
10841da177e4SLinus Torvalds 		__u32 rcv_window_now = tcp_receive_window(tp);
10851da177e4SLinus Torvalds 
10861da177e4SLinus Torvalds 		/* Optimize, __tcp_select_window() is not cheap. */
10871da177e4SLinus Torvalds 		if (2*rcv_window_now <= tp->window_clamp) {
10881da177e4SLinus Torvalds 			__u32 new_window = __tcp_select_window(sk);
10891da177e4SLinus Torvalds 
10901da177e4SLinus Torvalds 			/* Send an ACK now if this read freed lots of space
10911da177e4SLinus Torvalds 			 * in our buffer. We can advertise the new window now,
10921da177e4SLinus Torvalds 			 * provided it is not smaller than the current one.
10931da177e4SLinus Torvalds 			 * "Lots" means "at least twice" here.
10941da177e4SLinus Torvalds 			 */
10951da177e4SLinus Torvalds 			if (new_window && new_window >= 2 * rcv_window_now)
10961da177e4SLinus Torvalds 				time_to_ack = 1;
10971da177e4SLinus Torvalds 		}
10981da177e4SLinus Torvalds 	}
10991da177e4SLinus Torvalds 	if (time_to_ack)
11001da177e4SLinus Torvalds 		tcp_send_ack(sk);
11011da177e4SLinus Torvalds }
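
/*
 * The window-update half of the heuristic above, restated as a standalone
 * predicate (a sketch with invented names, not kernel code): only bother
 * computing a new window once the advertised window has fallen to half of
 * the clamp or less, and ACK only when the freshly selected window would
 * be at least twice the current one.
 */
static int window_update_worth_an_ack(unsigned int rcv_window_now,
				      unsigned int window_clamp,
				      unsigned int new_window)
{
	if (2 * rcv_window_now > window_clamp)
		return 0;	/* window has not shrunk "significantly" */

	/* "Lots" of freed space means the window at least doubled. */
	return new_window && new_window >= 2 * rcv_window_now;
}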
11021da177e4SLinus Torvalds 
11031da177e4SLinus Torvalds static void tcp_prequeue_process(struct sock *sk)
11041da177e4SLinus Torvalds {
11051da177e4SLinus Torvalds 	struct sk_buff *skb;
11061da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11071da177e4SLinus Torvalds 
1108b03efcfbSDavid S. Miller 	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
11091da177e4SLinus Torvalds 
11101da177e4SLinus Torvalds 	/* The RX path wants to run with BHs disabled, though it is not
11111da177e4SLinus Torvalds 	 * strictly necessary here. */
11121da177e4SLinus Torvalds 	local_bh_disable();
11131da177e4SLinus Torvalds 	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
11141da177e4SLinus Torvalds 		sk->sk_backlog_rcv(sk, skb);
11151da177e4SLinus Torvalds 	local_bh_enable();
11161da177e4SLinus Torvalds 
11171da177e4SLinus Torvalds 	/* Clear memory counter. */
11181da177e4SLinus Torvalds 	tp->ucopy.memory = 0;
11191da177e4SLinus Torvalds }
11201da177e4SLinus Torvalds 
11211da177e4SLinus Torvalds static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
11221da177e4SLinus Torvalds {
11231da177e4SLinus Torvalds 	struct sk_buff *skb;
11241da177e4SLinus Torvalds 	u32 offset;
11251da177e4SLinus Torvalds 
11261da177e4SLinus Torvalds 	skb_queue_walk(&sk->sk_receive_queue, skb) {
11271da177e4SLinus Torvalds 		offset = seq - TCP_SKB_CB(skb)->seq;
11281da177e4SLinus Torvalds 		if (skb->h.th->syn)
11291da177e4SLinus Torvalds 			offset--;
11301da177e4SLinus Torvalds 		if (offset < skb->len || skb->h.th->fin) {
11311da177e4SLinus Torvalds 			*off = offset;
11321da177e4SLinus Torvalds 			return skb;
11331da177e4SLinus Torvalds 		}
11341da177e4SLinus Torvalds 	}
11351da177e4SLinus Torvalds 	return NULL;
11361da177e4SLinus Torvalds }
11371da177e4SLinus Torvalds 
11381da177e4SLinus Torvalds /*
11391da177e4SLinus Torvalds  * This routine provides an alternative to tcp_recvmsg() for routines
11401da177e4SLinus Torvalds  * that would like to handle copying from skbuffs directly in 'sendfile'
11411da177e4SLinus Torvalds  * fashion.
11421da177e4SLinus Torvalds  * Note:
11431da177e4SLinus Torvalds  *	- It is assumed that the socket was locked by the caller.
11441da177e4SLinus Torvalds  *	- The routine does not block.
11451da177e4SLinus Torvalds  *	- At present, there is no support for reading OOB data
11461da177e4SLinus Torvalds  *	  or for 'peeking' the socket using this routine
11471da177e4SLinus Torvalds  *	  (although both would be easy to implement).
11481da177e4SLinus Torvalds  */
11491da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
11501da177e4SLinus Torvalds 		  sk_read_actor_t recv_actor)
11511da177e4SLinus Torvalds {
11521da177e4SLinus Torvalds 	struct sk_buff *skb;
11531da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11541da177e4SLinus Torvalds 	u32 seq = tp->copied_seq;
11551da177e4SLinus Torvalds 	u32 offset;
11561da177e4SLinus Torvalds 	int copied = 0;
11571da177e4SLinus Torvalds 
11581da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
11591da177e4SLinus Torvalds 		return -ENOTCONN;
11601da177e4SLinus Torvalds 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
11611da177e4SLinus Torvalds 		if (offset < skb->len) {
11621da177e4SLinus Torvalds 			size_t used, len;
11631da177e4SLinus Torvalds 
11641da177e4SLinus Torvalds 			len = skb->len - offset;
11651da177e4SLinus Torvalds 			/* Stop reading if we hit a patch of urgent data */
11661da177e4SLinus Torvalds 			if (tp->urg_data) {
11671da177e4SLinus Torvalds 				u32 urg_offset = tp->urg_seq - seq;
11681da177e4SLinus Torvalds 				if (urg_offset < len)
11691da177e4SLinus Torvalds 					len = urg_offset;
11701da177e4SLinus Torvalds 				if (!len)
11711da177e4SLinus Torvalds 					break;
11721da177e4SLinus Torvalds 			}
11731da177e4SLinus Torvalds 			used = recv_actor(desc, skb, offset, len);
11741da177e4SLinus Torvalds 			if (used <= len) {
11751da177e4SLinus Torvalds 				seq += used;
11761da177e4SLinus Torvalds 				copied += used;
11771da177e4SLinus Torvalds 				offset += used;
11781da177e4SLinus Torvalds 			}
11791da177e4SLinus Torvalds 			if (offset != skb->len)
11801da177e4SLinus Torvalds 				break;
11811da177e4SLinus Torvalds 		}
11821da177e4SLinus Torvalds 		if (skb->h.th->fin) {
11831da177e4SLinus Torvalds 			sk_eat_skb(sk, skb);
11841da177e4SLinus Torvalds 			++seq;
11851da177e4SLinus Torvalds 			break;
11861da177e4SLinus Torvalds 		}
11871da177e4SLinus Torvalds 		sk_eat_skb(sk, skb);
11881da177e4SLinus Torvalds 		if (!desc->count)
11891da177e4SLinus Torvalds 			break;
11901da177e4SLinus Torvalds 	}
11911da177e4SLinus Torvalds 	tp->copied_seq = seq;
11921da177e4SLinus Torvalds 
11931da177e4SLinus Torvalds 	tcp_rcv_space_adjust(sk);
11941da177e4SLinus Torvalds 
11951da177e4SLinus Torvalds 	/* Clean up data we have read: This will do ACK frames. */
11961da177e4SLinus Torvalds 	if (copied)
11971da177e4SLinus Torvalds 		cleanup_rbuf(sk, copied);
11981da177e4SLinus Torvalds 	return copied;
11991da177e4SLinus Torvalds }
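
/*
 * A sketch of a recv_actor callback for tcp_read_sock() (the names below
 * are invented for illustration): it copies skb payload into a buffer hung
 * off desc->arg.data and lets tcp_read_sock() stop once desc->count bytes
 * have been consumed.  Assumes read_descriptor_t carries the count/arg
 * fields of contemporaneous kernels; buffer setup lives elsewhere.
 */
struct copy_buf {
	char	*base;		/* destination buffer */
	size_t	copied;		/* bytes copied so far */
};

static int copy_to_buf_actor(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	struct copy_buf *cb = desc->arg.data;
	size_t want = len < desc->count ? len : desc->count;

	if (skb_copy_bits(skb, offset, cb->base + cb->copied, want))
		return 0;	/* copy failed, consume nothing */

	cb->copied += want;
	desc->count -= want;	/* tcp_read_sock() stops when this hits zero */
	return want;
}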
12001da177e4SLinus Torvalds 
12011da177e4SLinus Torvalds /*
12021da177e4SLinus Torvalds  *	This routine copies from a sock struct into the user buffer.
12031da177e4SLinus Torvalds  *
12041da177e4SLinus Torvalds  *	Technical note: since 2.3 we work on a _locked_ socket, so
12051da177e4SLinus Torvalds  *	tricks with *seq access order and skb->users are not required.
12061da177e4SLinus Torvalds  *	The code could probably be improved even further.
12071da177e4SLinus Torvalds  */
12081da177e4SLinus Torvalds 
12091da177e4SLinus Torvalds int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
12101da177e4SLinus Torvalds 		size_t len, int nonblock, int flags, int *addr_len)
12111da177e4SLinus Torvalds {
12121da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
12131da177e4SLinus Torvalds 	int copied = 0;
12141da177e4SLinus Torvalds 	u32 peek_seq;
12151da177e4SLinus Torvalds 	u32 *seq;
12161da177e4SLinus Torvalds 	unsigned long used;
12171da177e4SLinus Torvalds 	int err;
12181da177e4SLinus Torvalds 	int target;		/* Read at least this many bytes */
12191da177e4SLinus Torvalds 	long timeo;
12201da177e4SLinus Torvalds 	struct task_struct *user_recv = NULL;
12211da177e4SLinus Torvalds 
12221da177e4SLinus Torvalds 	lock_sock(sk);
12231da177e4SLinus Torvalds 
12241da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
12251da177e4SLinus Torvalds 
12261da177e4SLinus Torvalds 	err = -ENOTCONN;
12271da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
12281da177e4SLinus Torvalds 		goto out;
12291da177e4SLinus Torvalds 
12301da177e4SLinus Torvalds 	timeo = sock_rcvtimeo(sk, nonblock);
12311da177e4SLinus Torvalds 
12321da177e4SLinus Torvalds 	/* Urgent data needs to be handled specially. */
12331da177e4SLinus Torvalds 	if (flags & MSG_OOB)
12341da177e4SLinus Torvalds 		goto recv_urg;
12351da177e4SLinus Torvalds 
12361da177e4SLinus Torvalds 	seq = &tp->copied_seq;
12371da177e4SLinus Torvalds 	if (flags & MSG_PEEK) {
12381da177e4SLinus Torvalds 		peek_seq = tp->copied_seq;
12391da177e4SLinus Torvalds 		seq = &peek_seq;
12401da177e4SLinus Torvalds 	}
12411da177e4SLinus Torvalds 
12421da177e4SLinus Torvalds 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
12431da177e4SLinus Torvalds 
12441da177e4SLinus Torvalds 	do {
12451da177e4SLinus Torvalds 		struct sk_buff *skb;
12461da177e4SLinus Torvalds 		u32 offset;
12471da177e4SLinus Torvalds 
12481da177e4SLinus Torvalds 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
12491da177e4SLinus Torvalds 		if (tp->urg_data && tp->urg_seq == *seq) {
12501da177e4SLinus Torvalds 			if (copied)
12511da177e4SLinus Torvalds 				break;
12521da177e4SLinus Torvalds 			if (signal_pending(current)) {
12531da177e4SLinus Torvalds 				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
12541da177e4SLinus Torvalds 				break;
12551da177e4SLinus Torvalds 			}
12561da177e4SLinus Torvalds 		}
12571da177e4SLinus Torvalds 
12581da177e4SLinus Torvalds 		/* Next get a buffer. */
12591da177e4SLinus Torvalds 
12601da177e4SLinus Torvalds 		skb = skb_peek(&sk->sk_receive_queue);
12611da177e4SLinus Torvalds 		do {
12621da177e4SLinus Torvalds 			if (!skb)
12631da177e4SLinus Torvalds 				break;
12641da177e4SLinus Torvalds 
12651da177e4SLinus Torvalds 			/* Now that we have two receive queues this
12661da177e4SLinus Torvalds 			 * shouldn't happen.
12671da177e4SLinus Torvalds 			 */
12681da177e4SLinus Torvalds 			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
12691da177e4SLinus Torvalds 				printk(KERN_INFO "recvmsg bug: copied %X "
12701da177e4SLinus Torvalds 				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
12711da177e4SLinus Torvalds 				break;
12721da177e4SLinus Torvalds 			}
12731da177e4SLinus Torvalds 			offset = *seq - TCP_SKB_CB(skb)->seq;
12741da177e4SLinus Torvalds 			if (skb->h.th->syn)
12751da177e4SLinus Torvalds 				offset--;
12761da177e4SLinus Torvalds 			if (offset < skb->len)
12771da177e4SLinus Torvalds 				goto found_ok_skb;
12781da177e4SLinus Torvalds 			if (skb->h.th->fin)
12791da177e4SLinus Torvalds 				goto found_fin_ok;
12801da177e4SLinus Torvalds 			BUG_TRAP(flags & MSG_PEEK);
12811da177e4SLinus Torvalds 			skb = skb->next;
12821da177e4SLinus Torvalds 		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
12831da177e4SLinus Torvalds 
12841da177e4SLinus Torvalds 		/* Well, if we have backlog, try to process it now. */
12851da177e4SLinus Torvalds 
12861da177e4SLinus Torvalds 		if (copied >= target && !sk->sk_backlog.tail)
12871da177e4SLinus Torvalds 			break;
12881da177e4SLinus Torvalds 
12891da177e4SLinus Torvalds 		if (copied) {
12901da177e4SLinus Torvalds 			if (sk->sk_err ||
12911da177e4SLinus Torvalds 			    sk->sk_state == TCP_CLOSE ||
12921da177e4SLinus Torvalds 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
12931da177e4SLinus Torvalds 			    !timeo ||
12941da177e4SLinus Torvalds 			    signal_pending(current) ||
12951da177e4SLinus Torvalds 			    (flags & MSG_PEEK))
12961da177e4SLinus Torvalds 				break;
12971da177e4SLinus Torvalds 		} else {
12981da177e4SLinus Torvalds 			if (sock_flag(sk, SOCK_DONE))
12991da177e4SLinus Torvalds 				break;
13001da177e4SLinus Torvalds 
13011da177e4SLinus Torvalds 			if (sk->sk_err) {
13021da177e4SLinus Torvalds 				copied = sock_error(sk);
13031da177e4SLinus Torvalds 				break;
13041da177e4SLinus Torvalds 			}
13051da177e4SLinus Torvalds 
13061da177e4SLinus Torvalds 			if (sk->sk_shutdown & RCV_SHUTDOWN)
13071da177e4SLinus Torvalds 				break;
13081da177e4SLinus Torvalds 
13091da177e4SLinus Torvalds 			if (sk->sk_state == TCP_CLOSE) {
13101da177e4SLinus Torvalds 				if (!sock_flag(sk, SOCK_DONE)) {
13111da177e4SLinus Torvalds 					/* This occurs when user tries to read
13121da177e4SLinus Torvalds 					 * from a never-connected socket.
13131da177e4SLinus Torvalds 					 */
13141da177e4SLinus Torvalds 					copied = -ENOTCONN;
13151da177e4SLinus Torvalds 					break;
13161da177e4SLinus Torvalds 				}
13171da177e4SLinus Torvalds 				break;
13181da177e4SLinus Torvalds 			}
13191da177e4SLinus Torvalds 
13201da177e4SLinus Torvalds 			if (!timeo) {
13211da177e4SLinus Torvalds 				copied = -EAGAIN;
13221da177e4SLinus Torvalds 				break;
13231da177e4SLinus Torvalds 			}
13241da177e4SLinus Torvalds 
13251da177e4SLinus Torvalds 			if (signal_pending(current)) {
13261da177e4SLinus Torvalds 				copied = sock_intr_errno(timeo);
13271da177e4SLinus Torvalds 				break;
13281da177e4SLinus Torvalds 			}
13291da177e4SLinus Torvalds 		}
13301da177e4SLinus Torvalds 
13311da177e4SLinus Torvalds 		cleanup_rbuf(sk, copied);
13321da177e4SLinus Torvalds 
13337df55125SDavid S. Miller 		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
13341da177e4SLinus Torvalds 			/* Install new reader */
13351da177e4SLinus Torvalds 			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
13361da177e4SLinus Torvalds 				user_recv = current;
13371da177e4SLinus Torvalds 				tp->ucopy.task = user_recv;
13381da177e4SLinus Torvalds 				tp->ucopy.iov = msg->msg_iov;
13391da177e4SLinus Torvalds 			}
13401da177e4SLinus Torvalds 
13411da177e4SLinus Torvalds 			tp->ucopy.len = len;
13421da177e4SLinus Torvalds 
13431da177e4SLinus Torvalds 			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
13441da177e4SLinus Torvalds 				 (flags & (MSG_PEEK | MSG_TRUNC)));
13451da177e4SLinus Torvalds 
13461da177e4SLinus Torvalds 			/* Ugly... If the prequeue is not empty, we have to
13471da177e4SLinus Torvalds 			 * process it before releasing the socket, otherwise
13481da177e4SLinus Torvalds 			 * ordering will be broken on the second iteration.
13491da177e4SLinus Torvalds 			 * A more elegant solution is required!!!
13501da177e4SLinus Torvalds 			 *
13511da177e4SLinus Torvalds 			 * Look: we have the following (pseudo)queues:
13521da177e4SLinus Torvalds 			 *
13531da177e4SLinus Torvalds 			 * 1. packets in flight
13541da177e4SLinus Torvalds 			 * 2. backlog
13551da177e4SLinus Torvalds 			 * 3. prequeue
13561da177e4SLinus Torvalds 			 * 4. receive_queue
13571da177e4SLinus Torvalds 			 *
13581da177e4SLinus Torvalds 			 * Each queue can be processed only if the next ones
13591da177e4SLinus Torvalds 			 * are empty. At this point we have empty receive_queue.
13601da177e4SLinus Torvalds 			 * But the prequeue _can_ be non-empty after the 2nd
13611da177e4SLinus Torvalds 			 * iteration, when we jumped to the start of the loop
13621da177e4SLinus Torvalds 			 * because backlog processing added something to the
13631da177e4SLinus Torvalds 			 * receive_queue. We cannot release_sock(), because the
13641da177e4SLinus Torvalds 			 * backlog contains packets that arrived _after_ the prequeued ones.
13651da177e4SLinus Torvalds 			 *
13661da177e4SLinus Torvalds 			 * In short, the algorithm is clear --- process all
13671da177e4SLinus Torvalds 			 * the queues in order. We could do it more directly,
13681da177e4SLinus Torvalds 			 * requeueing packets from the backlog to the prequeue
13691da177e4SLinus Torvalds 			 * if it is not empty. That is more elegant, but eats
13701da177e4SLinus Torvalds 			 * cycles, unfortunately.
13711da177e4SLinus Torvalds 			 */
1372b03efcfbSDavid S. Miller 			if (!skb_queue_empty(&tp->ucopy.prequeue))
13731da177e4SLinus Torvalds 				goto do_prequeue;
13741da177e4SLinus Torvalds 
13751da177e4SLinus Torvalds 			/* __ Set realtime policy in scheduler __ */
13761da177e4SLinus Torvalds 		}
13771da177e4SLinus Torvalds 
13781da177e4SLinus Torvalds 		if (copied >= target) {
13791da177e4SLinus Torvalds 			/* Do not sleep, just process backlog. */
13801da177e4SLinus Torvalds 			release_sock(sk);
13811da177e4SLinus Torvalds 			lock_sock(sk);
13821da177e4SLinus Torvalds 		} else
13831da177e4SLinus Torvalds 			sk_wait_data(sk, &timeo);
13841da177e4SLinus Torvalds 
13851da177e4SLinus Torvalds 		if (user_recv) {
13861da177e4SLinus Torvalds 			int chunk;
13871da177e4SLinus Torvalds 
13881da177e4SLinus Torvalds 			/* __ Restore normal policy in scheduler __ */
13891da177e4SLinus Torvalds 
13901da177e4SLinus Torvalds 			if ((chunk = len - tp->ucopy.len) != 0) {
13911da177e4SLinus Torvalds 				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
13921da177e4SLinus Torvalds 				len -= chunk;
13931da177e4SLinus Torvalds 				copied += chunk;
13941da177e4SLinus Torvalds 			}
13951da177e4SLinus Torvalds 
13961da177e4SLinus Torvalds 			if (tp->rcv_nxt == tp->copied_seq &&
1397b03efcfbSDavid S. Miller 			    !skb_queue_empty(&tp->ucopy.prequeue)) {
13981da177e4SLinus Torvalds do_prequeue:
13991da177e4SLinus Torvalds 				tcp_prequeue_process(sk);
14001da177e4SLinus Torvalds 
14011da177e4SLinus Torvalds 				if ((chunk = len - tp->ucopy.len) != 0) {
14021da177e4SLinus Torvalds 					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
14031da177e4SLinus Torvalds 					len -= chunk;
14041da177e4SLinus Torvalds 					copied += chunk;
14051da177e4SLinus Torvalds 				}
14061da177e4SLinus Torvalds 			}
14071da177e4SLinus Torvalds 		}
14081da177e4SLinus Torvalds 		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
14091da177e4SLinus Torvalds 			if (net_ratelimit())
14101da177e4SLinus Torvalds 				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
14111da177e4SLinus Torvalds 				       current->comm, current->pid);
14121da177e4SLinus Torvalds 			peek_seq = tp->copied_seq;
14131da177e4SLinus Torvalds 		}
14141da177e4SLinus Torvalds 		continue;
14151da177e4SLinus Torvalds 
14161da177e4SLinus Torvalds 	found_ok_skb:
14171da177e4SLinus Torvalds 		/* Ok so how much can we use? */
14181da177e4SLinus Torvalds 		used = skb->len - offset;
14191da177e4SLinus Torvalds 		if (len < used)
14201da177e4SLinus Torvalds 			used = len;
14211da177e4SLinus Torvalds 
14221da177e4SLinus Torvalds 		/* Do we have urgent data here? */
14231da177e4SLinus Torvalds 		if (tp->urg_data) {
14241da177e4SLinus Torvalds 			u32 urg_offset = tp->urg_seq - *seq;
14251da177e4SLinus Torvalds 			if (urg_offset < used) {
14261da177e4SLinus Torvalds 				if (!urg_offset) {
14271da177e4SLinus Torvalds 					if (!sock_flag(sk, SOCK_URGINLINE)) {
14281da177e4SLinus Torvalds 						++*seq;
14291da177e4SLinus Torvalds 						offset++;
14301da177e4SLinus Torvalds 						used--;
14311da177e4SLinus Torvalds 						if (!used)
14321da177e4SLinus Torvalds 							goto skip_copy;
14331da177e4SLinus Torvalds 					}
14341da177e4SLinus Torvalds 				} else
14351da177e4SLinus Torvalds 					used = urg_offset;
14361da177e4SLinus Torvalds 			}
14371da177e4SLinus Torvalds 		}
14381da177e4SLinus Torvalds 
14391da177e4SLinus Torvalds 		if (!(flags & MSG_TRUNC)) {
14401da177e4SLinus Torvalds 			err = skb_copy_datagram_iovec(skb, offset,
14411da177e4SLinus Torvalds 						      msg->msg_iov, used);
14421da177e4SLinus Torvalds 			if (err) {
14431da177e4SLinus Torvalds 				/* Exception. Bailout! */
14441da177e4SLinus Torvalds 				if (!copied)
14451da177e4SLinus Torvalds 					copied = -EFAULT;
14461da177e4SLinus Torvalds 				break;
14471da177e4SLinus Torvalds 			}
14481da177e4SLinus Torvalds 		}
14491da177e4SLinus Torvalds 
14501da177e4SLinus Torvalds 		*seq += used;
14511da177e4SLinus Torvalds 		copied += used;
14521da177e4SLinus Torvalds 		len -= used;
14531da177e4SLinus Torvalds 
14541da177e4SLinus Torvalds 		tcp_rcv_space_adjust(sk);
14551da177e4SLinus Torvalds 
14561da177e4SLinus Torvalds skip_copy:
14571da177e4SLinus Torvalds 		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
14581da177e4SLinus Torvalds 			tp->urg_data = 0;
14591da177e4SLinus Torvalds 			tcp_fast_path_check(sk, tp);
14601da177e4SLinus Torvalds 		}
14611da177e4SLinus Torvalds 		if (used + offset < skb->len)
14621da177e4SLinus Torvalds 			continue;
14631da177e4SLinus Torvalds 
14641da177e4SLinus Torvalds 		if (skb->h.th->fin)
14651da177e4SLinus Torvalds 			goto found_fin_ok;
14661da177e4SLinus Torvalds 		if (!(flags & MSG_PEEK))
14671da177e4SLinus Torvalds 			sk_eat_skb(sk, skb);
14681da177e4SLinus Torvalds 		continue;
14691da177e4SLinus Torvalds 
14701da177e4SLinus Torvalds 	found_fin_ok:
14711da177e4SLinus Torvalds 		/* Process the FIN. */
14721da177e4SLinus Torvalds 		++*seq;
14731da177e4SLinus Torvalds 		if (!(flags & MSG_PEEK))
14741da177e4SLinus Torvalds 			sk_eat_skb(sk, skb);
14751da177e4SLinus Torvalds 		break;
14761da177e4SLinus Torvalds 	} while (len > 0);
14771da177e4SLinus Torvalds 
14781da177e4SLinus Torvalds 	if (user_recv) {
1479b03efcfbSDavid S. Miller 		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
14801da177e4SLinus Torvalds 			int chunk;
14811da177e4SLinus Torvalds 
14821da177e4SLinus Torvalds 			tp->ucopy.len = copied > 0 ? len : 0;
14831da177e4SLinus Torvalds 
14841da177e4SLinus Torvalds 			tcp_prequeue_process(sk);
14851da177e4SLinus Torvalds 
14861da177e4SLinus Torvalds 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
14871da177e4SLinus Torvalds 				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
14881da177e4SLinus Torvalds 				len -= chunk;
14891da177e4SLinus Torvalds 				copied += chunk;
14901da177e4SLinus Torvalds 			}
14911da177e4SLinus Torvalds 		}
14921da177e4SLinus Torvalds 
14931da177e4SLinus Torvalds 		tp->ucopy.task = NULL;
14941da177e4SLinus Torvalds 		tp->ucopy.len = 0;
14951da177e4SLinus Torvalds 	}
14961da177e4SLinus Torvalds 
14971da177e4SLinus Torvalds 	/* According to UNIX98, msg_name/msg_namelen are ignored
14981da177e4SLinus Torvalds 	 * on a connected socket. I was just happy when I found this 8) --ANK
14991da177e4SLinus Torvalds 	 */
15001da177e4SLinus Torvalds 
15011da177e4SLinus Torvalds 	/* Clean up data we have read: This will do ACK frames. */
15021da177e4SLinus Torvalds 	cleanup_rbuf(sk, copied);
15031da177e4SLinus Torvalds 
15041da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
15051da177e4SLinus Torvalds 	release_sock(sk);
15061da177e4SLinus Torvalds 	return copied;
15071da177e4SLinus Torvalds 
15081da177e4SLinus Torvalds out:
15091da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
15101da177e4SLinus Torvalds 	release_sock(sk);
15111da177e4SLinus Torvalds 	return err;
15121da177e4SLinus Torvalds 
15131da177e4SLinus Torvalds recv_urg:
15141da177e4SLinus Torvalds 	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
15151da177e4SLinus Torvalds 	goto out;
15161da177e4SLinus Torvalds }
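
/*
 * Userspace sketch of the "target" logic above: without MSG_WAITALL,
 * recv() may return as soon as SO_RCVLOWAT bytes (1 by default) are
 * queued; with MSG_WAITALL the target becomes the full request, so the
 * call returns short only on EOF, an error or a caught signal.  The
 * connected TCP socket "fd" is assumed to exist already.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>

static ssize_t read_exactly(int fd, void *buf, size_t len)
{
	ssize_t n = recv(fd, buf, len, MSG_WAITALL);

	if (n >= 0 && (size_t)n < len)
		fprintf(stderr, "short read: EOF or interrupted\n");
	return n;
}

static void set_low_watermark(int fd, int bytes)
{
	/* Raise the default low-watermark so a plain recv() waits for more. */
	if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &bytes, sizeof(bytes)) < 0)
		perror("setsockopt(SO_RCVLOWAT)");
}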
15171da177e4SLinus Torvalds 
15181da177e4SLinus Torvalds /*
15191da177e4SLinus Torvalds  *	State processing on a close. This implements the state shift for
15201da177e4SLinus Torvalds  *	sending our FIN frame. Note that we only send a FIN for some
15211da177e4SLinus Torvalds  *	states. A shutdown() may have already sent the FIN, or we may be
15221da177e4SLinus Torvalds  *	closed.
15231da177e4SLinus Torvalds  */
15241da177e4SLinus Torvalds 
15251da177e4SLinus Torvalds static unsigned char new_state[16] = {
15261da177e4SLinus Torvalds   /* current state:        new state:      action:	*/
15271da177e4SLinus Torvalds   /* (Invalid)		*/ TCP_CLOSE,
15281da177e4SLinus Torvalds   /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
15291da177e4SLinus Torvalds   /* TCP_SYN_SENT	*/ TCP_CLOSE,
15301da177e4SLinus Torvalds   /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
15311da177e4SLinus Torvalds   /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
15321da177e4SLinus Torvalds   /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
15331da177e4SLinus Torvalds   /* TCP_TIME_WAIT	*/ TCP_CLOSE,
15341da177e4SLinus Torvalds   /* TCP_CLOSE		*/ TCP_CLOSE,
15351da177e4SLinus Torvalds   /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
15361da177e4SLinus Torvalds   /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
15371da177e4SLinus Torvalds   /* TCP_LISTEN		*/ TCP_CLOSE,
15381da177e4SLinus Torvalds   /* TCP_CLOSING	*/ TCP_CLOSING,
15391da177e4SLinus Torvalds };
15401da177e4SLinus Torvalds 
15411da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk)
15421da177e4SLinus Torvalds {
15431da177e4SLinus Torvalds 	int next = (int)new_state[sk->sk_state];
15441da177e4SLinus Torvalds 	int ns = next & TCP_STATE_MASK;
15451da177e4SLinus Torvalds 
15461da177e4SLinus Torvalds 	tcp_set_state(sk, ns);
15471da177e4SLinus Torvalds 
15481da177e4SLinus Torvalds 	return next & TCP_ACTION_FIN;
15491da177e4SLinus Torvalds }
15501da177e4SLinus Torvalds 
15511da177e4SLinus Torvalds /*
15521da177e4SLinus Torvalds  *	Shutdown the sending side of a connection. Much like close except
15531da177e4SLinus Torvalds  *	that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
15541da177e4SLinus Torvalds  */
15551da177e4SLinus Torvalds 
15561da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how)
15571da177e4SLinus Torvalds {
15581da177e4SLinus Torvalds 	/*	We need to grab some memory, and put together a FIN,
15591da177e4SLinus Torvalds 	 *	and then put it into the queue to be sent.
15601da177e4SLinus Torvalds 	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
15611da177e4SLinus Torvalds 	 */
15621da177e4SLinus Torvalds 	if (!(how & SEND_SHUTDOWN))
15631da177e4SLinus Torvalds 		return;
15641da177e4SLinus Torvalds 
15651da177e4SLinus Torvalds 	/* If we've already sent a FIN, or it's a closed state, skip this. */
15661da177e4SLinus Torvalds 	if ((1 << sk->sk_state) &
15671da177e4SLinus Torvalds 	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
15681da177e4SLinus Torvalds 	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
15691da177e4SLinus Torvalds 		/* Clear out any half completed packets.  FIN if needed. */
15701da177e4SLinus Torvalds 		if (tcp_close_state(sk))
15711da177e4SLinus Torvalds 			tcp_send_fin(sk);
15721da177e4SLinus Torvalds 	}
15731da177e4SLinus Torvalds }
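
/*
 * Userspace view of the half-close implemented above: shutdown(SHUT_WR)
 * queues a FIN but leaves the receive side open, so the peer's remaining
 * data can still be drained before close().  A sketch; "fd" is a connected
 * TCP socket created elsewhere.
 */
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

static void half_close_and_drain(int fd)
{
	char buf[4096];
	ssize_t n;

	if (shutdown(fd, SHUT_WR) < 0)		/* send FIN, keep receiving */
		perror("shutdown(SHUT_WR)");

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		;				/* consume what the peer still sends */

	close(fd);
}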
15741da177e4SLinus Torvalds 
15751da177e4SLinus Torvalds /*
15761da177e4SLinus Torvalds  * At this point, there should be no process reference to this
15771da177e4SLinus Torvalds  * socket, and thus no user references at all.  Therefore we
15781da177e4SLinus Torvalds  * can assume the socket waitqueue is inactive and nobody will
15791da177e4SLinus Torvalds  * try to jump onto it.
15801da177e4SLinus Torvalds  */
15811da177e4SLinus Torvalds void tcp_destroy_sock(struct sock *sk)
15821da177e4SLinus Torvalds {
15831da177e4SLinus Torvalds 	BUG_TRAP(sk->sk_state == TCP_CLOSE);
15841da177e4SLinus Torvalds 	BUG_TRAP(sock_flag(sk, SOCK_DEAD));
15851da177e4SLinus Torvalds 
15861da177e4SLinus Torvalds 	/* It cannot be in hash table! */
15871da177e4SLinus Torvalds 	BUG_TRAP(sk_unhashed(sk));
15881da177e4SLinus Torvalds 
15891da177e4SLinus Torvalds 	/* If it has a non-zero inet_sk(sk)->num, it must be bound. */
15901da177e4SLinus Torvalds 	BUG_TRAP(!inet_sk(sk)->num || tcp_sk(sk)->bind_hash);
15911da177e4SLinus Torvalds 
15921da177e4SLinus Torvalds 	sk->sk_prot->destroy(sk);
15931da177e4SLinus Torvalds 
15941da177e4SLinus Torvalds 	sk_stream_kill_queues(sk);
15951da177e4SLinus Torvalds 
15961da177e4SLinus Torvalds 	xfrm_sk_free_policy(sk);
15971da177e4SLinus Torvalds 
15981da177e4SLinus Torvalds #ifdef INET_REFCNT_DEBUG
15991da177e4SLinus Torvalds 	if (atomic_read(&sk->sk_refcnt) != 1) {
16001da177e4SLinus Torvalds 		printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n",
16011da177e4SLinus Torvalds 		       sk, atomic_read(&sk->sk_refcnt));
16021da177e4SLinus Torvalds 	}
16031da177e4SLinus Torvalds #endif
16041da177e4SLinus Torvalds 
16051da177e4SLinus Torvalds 	atomic_dec(&tcp_orphan_count);
16061da177e4SLinus Torvalds 	sock_put(sk);
16071da177e4SLinus Torvalds }
16081da177e4SLinus Torvalds 
16091da177e4SLinus Torvalds void tcp_close(struct sock *sk, long timeout)
16101da177e4SLinus Torvalds {
16111da177e4SLinus Torvalds 	struct sk_buff *skb;
16121da177e4SLinus Torvalds 	int data_was_unread = 0;
16131da177e4SLinus Torvalds 
16141da177e4SLinus Torvalds 	lock_sock(sk);
16151da177e4SLinus Torvalds 	sk->sk_shutdown = SHUTDOWN_MASK;
16161da177e4SLinus Torvalds 
16171da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN) {
16181da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
16191da177e4SLinus Torvalds 
16201da177e4SLinus Torvalds 		/* Special case. */
16211da177e4SLinus Torvalds 		tcp_listen_stop(sk);
16221da177e4SLinus Torvalds 
16231da177e4SLinus Torvalds 		goto adjudge_to_death;
16241da177e4SLinus Torvalds 	}
16251da177e4SLinus Torvalds 
16261da177e4SLinus Torvalds 	/*  We need to flush the recv. buffs.  We do this only on the
16271da177e4SLinus Torvalds 	 *  descriptor close, not protocol-sourced closes, because the
16281da177e4SLinus Torvalds 	 *  reader process may not have drained the data yet!
16291da177e4SLinus Torvalds 	 */
16301da177e4SLinus Torvalds 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
16311da177e4SLinus Torvalds 		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
16321da177e4SLinus Torvalds 			  skb->h.th->fin;
16331da177e4SLinus Torvalds 		data_was_unread += len;
16341da177e4SLinus Torvalds 		__kfree_skb(skb);
16351da177e4SLinus Torvalds 	}
16361da177e4SLinus Torvalds 
16371da177e4SLinus Torvalds 	sk_stream_mem_reclaim(sk);
16381da177e4SLinus Torvalds 
16391da177e4SLinus Torvalds 	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
16401da177e4SLinus Torvalds 	 * 3.10, we send a RST here because data was lost.  To
16411da177e4SLinus Torvalds 	 * witness the awful effects of the old behavior of always
16421da177e4SLinus Torvalds 	 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
16431da177e4SLinus Torvalds 	 * a bulk GET in an FTP client, suspend the process, wait
16441da177e4SLinus Torvalds 	 * for the client to advertise a zero window, then kill -9
16451da177e4SLinus Torvalds 	 * the FTP client, wheee...  Note: timeout is always zero
16461da177e4SLinus Torvalds 	 * in such a case.
16471da177e4SLinus Torvalds 	 */
16481da177e4SLinus Torvalds 	if (data_was_unread) {
16491da177e4SLinus Torvalds 		/* Unread data was tossed, zap the connection. */
16501da177e4SLinus Torvalds 		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
16511da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
16521da177e4SLinus Torvalds 		tcp_send_active_reset(sk, GFP_KERNEL);
16531da177e4SLinus Torvalds 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
16541da177e4SLinus Torvalds 		/* Check zero linger _after_ checking for unread data. */
16551da177e4SLinus Torvalds 		sk->sk_prot->disconnect(sk, 0);
16561da177e4SLinus Torvalds 		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
16571da177e4SLinus Torvalds 	} else if (tcp_close_state(sk)) {
16581da177e4SLinus Torvalds 		/* We FIN if the application ate all the data before
16591da177e4SLinus Torvalds 		 * zapping the connection.
16601da177e4SLinus Torvalds 		 */
16611da177e4SLinus Torvalds 
16621da177e4SLinus Torvalds 		/* RED-PEN. Formally speaking, we have broken TCP state
16631da177e4SLinus Torvalds 		 * machine. State transitions:
16641da177e4SLinus Torvalds 		 *
16651da177e4SLinus Torvalds 		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
16661da177e4SLinus Torvalds 		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
16671da177e4SLinus Torvalds 		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
16681da177e4SLinus Torvalds 		 *
16691da177e4SLinus Torvalds 		 * are legal only when FIN has been sent (i.e. in window),
16701da177e4SLinus Torvalds 		 * rather than queued out of window. Purists blame.
16711da177e4SLinus Torvalds 		 *
16721da177e4SLinus Torvalds 		 * F.e. "RFC state" is ESTABLISHED,
16731da177e4SLinus Torvalds 		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
16741da177e4SLinus Torvalds 		 *
16751da177e4SLinus Torvalds 		 * The visible deviations are that we sometimes
16761da177e4SLinus Torvalds 		 * enter the time-wait state when it is not really required
16771da177e4SLinus Torvalds 		 * (harmless), and do not send active resets when they are
16781da177e4SLinus Torvalds 		 * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
16791da177e4SLinus Torvalds 		 * they look like CLOSING or LAST_ACK to Linux).
16801da177e4SLinus Torvalds 		 * Probably, I missed some more small holes.
16811da177e4SLinus Torvalds 		 * 						--ANK
16821da177e4SLinus Torvalds 		 */
16831da177e4SLinus Torvalds 		tcp_send_fin(sk);
16841da177e4SLinus Torvalds 	}
16851da177e4SLinus Torvalds 
16861da177e4SLinus Torvalds 	sk_stream_wait_close(sk, timeout);
16871da177e4SLinus Torvalds 
16881da177e4SLinus Torvalds adjudge_to_death:
16891da177e4SLinus Torvalds 	/* This is the last release_sock() in this socket's life. It will remove the backlog. */
16901da177e4SLinus Torvalds 	release_sock(sk);
16911da177e4SLinus Torvalds 
16921da177e4SLinus Torvalds 
16931da177e4SLinus Torvalds 	/* Now socket is owned by kernel and we acquire BH lock
16941da177e4SLinus Torvalds 	   to finish close. No need to check for user refs.
16951da177e4SLinus Torvalds 	 */
16961da177e4SLinus Torvalds 	local_bh_disable();
16971da177e4SLinus Torvalds 	bh_lock_sock(sk);
16981da177e4SLinus Torvalds 	BUG_TRAP(!sock_owned_by_user(sk));
16991da177e4SLinus Torvalds 
17001da177e4SLinus Torvalds 	sock_hold(sk);
17011da177e4SLinus Torvalds 	sock_orphan(sk);
17021da177e4SLinus Torvalds 
17031da177e4SLinus Torvalds 	/*	This is a (useful) BSD violation of the RFC. There is a
17041da177e4SLinus Torvalds 	 *	problem with TCP as specified in that the other end could
17051da177e4SLinus Torvalds 	 *	keep a socket open forever with no application left this end.
17061da177e4SLinus Torvalds 	 *	We use a 3 minute timeout (about the same as BSD) then kill
17071da177e4SLinus Torvalds 	 *	our end. If they send after that then tough - BUT: long enough
17081da177e4SLinus Torvalds 	 *	that we won't repeat the old "4*rto = almost no time - whoops,
17091da177e4SLinus Torvalds 	 *	reset" mistake.
17101da177e4SLinus Torvalds 	 *
17111da177e4SLinus Torvalds 	 *	Nope, it was not mistake. It is really desired behaviour
17121da177e4SLinus Torvalds 	 *	f.e. on http servers, when such sockets are useless, but
17131da177e4SLinus Torvalds 	 *	consume significant resources. Let's do it with special
17141da177e4SLinus Torvalds 	 *	linger2	option.					--ANK
17151da177e4SLinus Torvalds 	 */
17161da177e4SLinus Torvalds 
17171da177e4SLinus Torvalds 	if (sk->sk_state == TCP_FIN_WAIT2) {
17181da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
17191da177e4SLinus Torvalds 		if (tp->linger2 < 0) {
17201da177e4SLinus Torvalds 			tcp_set_state(sk, TCP_CLOSE);
17211da177e4SLinus Torvalds 			tcp_send_active_reset(sk, GFP_ATOMIC);
17221da177e4SLinus Torvalds 			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
17231da177e4SLinus Torvalds 		} else {
17241da177e4SLinus Torvalds 			int tmo = tcp_fin_time(tp);
17251da177e4SLinus Torvalds 
17261da177e4SLinus Torvalds 			if (tmo > TCP_TIMEWAIT_LEN) {
17271da177e4SLinus Torvalds 				tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
17281da177e4SLinus Torvalds 			} else {
17291da177e4SLinus Torvalds 				atomic_inc(&tcp_orphan_count);
17301da177e4SLinus Torvalds 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
17311da177e4SLinus Torvalds 				goto out;
17321da177e4SLinus Torvalds 			}
17331da177e4SLinus Torvalds 		}
17341da177e4SLinus Torvalds 	}
17351da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
17361da177e4SLinus Torvalds 		sk_stream_mem_reclaim(sk);
17371da177e4SLinus Torvalds 		if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
17381da177e4SLinus Torvalds 		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
17391da177e4SLinus Torvalds 		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
17401da177e4SLinus Torvalds 			if (net_ratelimit())
17411da177e4SLinus Torvalds 				printk(KERN_INFO "TCP: too many orphaned "
17421da177e4SLinus Torvalds 				       "sockets\n");
17431da177e4SLinus Torvalds 			tcp_set_state(sk, TCP_CLOSE);
17441da177e4SLinus Torvalds 			tcp_send_active_reset(sk, GFP_ATOMIC);
17451da177e4SLinus Torvalds 			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
17461da177e4SLinus Torvalds 		}
17471da177e4SLinus Torvalds 	}
17481da177e4SLinus Torvalds 	atomic_inc(&tcp_orphan_count);
17491da177e4SLinus Torvalds 
17501da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE)
17511da177e4SLinus Torvalds 		tcp_destroy_sock(sk);
17521da177e4SLinus Torvalds 	/* Otherwise, socket is reprieved until protocol close. */
17531da177e4SLinus Torvalds 
17541da177e4SLinus Torvalds out:
17551da177e4SLinus Torvalds 	bh_unlock_sock(sk);
17561da177e4SLinus Torvalds 	local_bh_enable();
17571da177e4SLinus Torvalds 	sock_put(sk);
17581da177e4SLinus Torvalds }
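
/*
 * Two userspace knobs that steer the close paths above (a sketch; the
 * socket "fd" comes from elsewhere): a zero-timeout SO_LINGER takes the
 * "zero linger" branch and aborts with a RST instead of a FIN, while
 * TCP_LINGER2 bounds how long an orphaned socket may sit in FIN_WAIT2.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)
{
	struct linger lg = { 1, 0 };	/* l_onoff = 1, l_linger = 0 */

	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) < 0)
		perror("setsockopt(SO_LINGER)");
	close(fd);			/* sends RST, skips the FIN states */
}

static void limit_fin_wait2(int fd, int seconds)
{
	if (setsockopt(fd, IPPROTO_TCP, TCP_LINGER2,
		       &seconds, sizeof(seconds)) < 0)
		perror("setsockopt(TCP_LINGER2)");
}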
17591da177e4SLinus Torvalds 
17601da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */
17611da177e4SLinus Torvalds 
17621da177e4SLinus Torvalds static inline int tcp_need_reset(int state)
17631da177e4SLinus Torvalds {
17641da177e4SLinus Torvalds 	return (1 << state) &
17651da177e4SLinus Torvalds 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
17661da177e4SLinus Torvalds 		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
17671da177e4SLinus Torvalds }
17681da177e4SLinus Torvalds 
17691da177e4SLinus Torvalds int tcp_disconnect(struct sock *sk, int flags)
17701da177e4SLinus Torvalds {
17711da177e4SLinus Torvalds 	struct inet_sock *inet = inet_sk(sk);
17721da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
17731da177e4SLinus Torvalds 	int err = 0;
17741da177e4SLinus Torvalds 	int old_state = sk->sk_state;
17751da177e4SLinus Torvalds 
17761da177e4SLinus Torvalds 	if (old_state != TCP_CLOSE)
17771da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
17781da177e4SLinus Torvalds 
17791da177e4SLinus Torvalds 	/* ABORT function of RFC793 */
17801da177e4SLinus Torvalds 	if (old_state == TCP_LISTEN) {
17811da177e4SLinus Torvalds 		tcp_listen_stop(sk);
17821da177e4SLinus Torvalds 	} else if (tcp_need_reset(old_state) ||
17831da177e4SLinus Torvalds 		   (tp->snd_nxt != tp->write_seq &&
17841da177e4SLinus Torvalds 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
17851da177e4SLinus Torvalds 		/* The last check adjusts for the discrepancy between Linux
17861da177e4SLinus Torvalds 		 * and RFC states.
17871da177e4SLinus Torvalds 		 */
17881da177e4SLinus Torvalds 		tcp_send_active_reset(sk, gfp_any());
17891da177e4SLinus Torvalds 		sk->sk_err = ECONNRESET;
17901da177e4SLinus Torvalds 	} else if (old_state == TCP_SYN_SENT)
17911da177e4SLinus Torvalds 		sk->sk_err = ECONNRESET;
17921da177e4SLinus Torvalds 
17931da177e4SLinus Torvalds 	tcp_clear_xmit_timers(sk);
17941da177e4SLinus Torvalds 	__skb_queue_purge(&sk->sk_receive_queue);
17951da177e4SLinus Torvalds 	sk_stream_writequeue_purge(sk);
17961da177e4SLinus Torvalds 	__skb_queue_purge(&tp->out_of_order_queue);
17971da177e4SLinus Torvalds 
17981da177e4SLinus Torvalds 	inet->dport = 0;
17991da177e4SLinus Torvalds 
18001da177e4SLinus Torvalds 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
18011da177e4SLinus Torvalds 		inet_reset_saddr(sk);
18021da177e4SLinus Torvalds 
18031da177e4SLinus Torvalds 	sk->sk_shutdown = 0;
18041da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
18051da177e4SLinus Torvalds 	tp->srtt = 0;
18061da177e4SLinus Torvalds 	if ((tp->write_seq += tp->max_window + 2) == 0)
18071da177e4SLinus Torvalds 		tp->write_seq = 1;
18081da177e4SLinus Torvalds 	tp->backoff = 0;
18091da177e4SLinus Torvalds 	tp->snd_cwnd = 2;
18101da177e4SLinus Torvalds 	tp->probes_out = 0;
18111da177e4SLinus Torvalds 	tp->packets_out = 0;
18121da177e4SLinus Torvalds 	tp->snd_ssthresh = 0x7fffffff;
18131da177e4SLinus Torvalds 	tp->snd_cwnd_cnt = 0;
18141da177e4SLinus Torvalds 	tcp_set_ca_state(tp, TCP_CA_Open);
18151da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
18161da177e4SLinus Torvalds 	tcp_delack_init(tp);
18171da177e4SLinus Torvalds 	sk->sk_send_head = NULL;
18181da177e4SLinus Torvalds 	tp->rx_opt.saw_tstamp = 0;
18191da177e4SLinus Torvalds 	tcp_sack_reset(&tp->rx_opt);
18201da177e4SLinus Torvalds 	__sk_dst_reset(sk);
18211da177e4SLinus Torvalds 
18221da177e4SLinus Torvalds 	BUG_TRAP(!inet->num || tp->bind_hash);
18231da177e4SLinus Torvalds 
18241da177e4SLinus Torvalds 	sk->sk_error_report(sk);
18251da177e4SLinus Torvalds 	return err;
18261da177e4SLinus Torvalds }
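
/*
 * Userspace sketch of how tcp_disconnect() is typically reached: connecting
 * an established socket to an address whose family is AF_UNSPEC asks the
 * protocol layer to disconnect (assuming the usual inet_stream_connect()
 * handling of AF_UNSPEC; "fd" is a connected TCP socket from elsewhere).
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void dissolve_connection(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;

	if (connect(fd, &sa, sizeof(sa)) < 0)
		perror("connect(AF_UNSPEC)");
}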
18271da177e4SLinus Torvalds 
18281da177e4SLinus Torvalds /*
18291da177e4SLinus Torvalds  *	Wait for an incoming connection, avoid race
18301da177e4SLinus Torvalds  *	conditions. This must be called with the socket locked.
18311da177e4SLinus Torvalds  */
18321da177e4SLinus Torvalds static int wait_for_connect(struct sock *sk, long timeo)
18331da177e4SLinus Torvalds {
18341da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
18351da177e4SLinus Torvalds 	DEFINE_WAIT(wait);
18361da177e4SLinus Torvalds 	int err;
18371da177e4SLinus Torvalds 
18381da177e4SLinus Torvalds 	/*
18391da177e4SLinus Torvalds 	 * True wake-one mechanism for incoming connections: only
18401da177e4SLinus Torvalds 	 * one process gets woken up, not the 'whole herd'.
18411da177e4SLinus Torvalds 	 * Since we do not 'race & poll' for established sockets
18421da177e4SLinus Torvalds 	 * anymore, the common case will execute the loop only once.
18431da177e4SLinus Torvalds 	 *
18441da177e4SLinus Torvalds 	 * Subtle issue: "add_wait_queue_exclusive()" will be added
18451da177e4SLinus Torvalds 	 * after any current non-exclusive waiters, and we know that
18461da177e4SLinus Torvalds 	 * it will always _stay_ after any new non-exclusive waiters
18471da177e4SLinus Torvalds 	 * because all non-exclusive waiters are added at the
18481da177e4SLinus Torvalds 	 * beginning of the wait-queue. As such, it's ok to "drop"
18491da177e4SLinus Torvalds 	 * our exclusiveness temporarily when we get woken up without
18501da177e4SLinus Torvalds 	 * having to remove and re-insert us on the wait queue.
18511da177e4SLinus Torvalds 	 */
18521da177e4SLinus Torvalds 	for (;;) {
18531da177e4SLinus Torvalds 		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
18541da177e4SLinus Torvalds 					  TASK_INTERRUPTIBLE);
18551da177e4SLinus Torvalds 		release_sock(sk);
18560e87506fSArnaldo Carvalho de Melo 		if (reqsk_queue_empty(&tp->accept_queue))
18571da177e4SLinus Torvalds 			timeo = schedule_timeout(timeo);
18581da177e4SLinus Torvalds 		lock_sock(sk);
18591da177e4SLinus Torvalds 		err = 0;
18600e87506fSArnaldo Carvalho de Melo 		if (!reqsk_queue_empty(&tp->accept_queue))
18611da177e4SLinus Torvalds 			break;
18621da177e4SLinus Torvalds 		err = -EINVAL;
18631da177e4SLinus Torvalds 		if (sk->sk_state != TCP_LISTEN)
18641da177e4SLinus Torvalds 			break;
18651da177e4SLinus Torvalds 		err = sock_intr_errno(timeo);
18661da177e4SLinus Torvalds 		if (signal_pending(current))
18671da177e4SLinus Torvalds 			break;
18681da177e4SLinus Torvalds 		err = -EAGAIN;
18691da177e4SLinus Torvalds 		if (!timeo)
18701da177e4SLinus Torvalds 			break;
18711da177e4SLinus Torvalds 	}
18721da177e4SLinus Torvalds 	finish_wait(sk->sk_sleep, &wait);
18731da177e4SLinus Torvalds 	return err;
18741da177e4SLinus Torvalds }
18751da177e4SLinus Torvalds 
18761da177e4SLinus Torvalds /*
18771da177e4SLinus Torvalds  *	This will accept the next outstanding connection.
18781da177e4SLinus Torvalds  */
18791da177e4SLinus Torvalds 
18801da177e4SLinus Torvalds struct sock *tcp_accept(struct sock *sk, int flags, int *err)
18811da177e4SLinus Torvalds {
18821da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
18831da177e4SLinus Torvalds 	struct sock *newsk;
18841da177e4SLinus Torvalds 	int error;
18851da177e4SLinus Torvalds 
18861da177e4SLinus Torvalds 	lock_sock(sk);
18871da177e4SLinus Torvalds 
18881da177e4SLinus Torvalds 	/* We need to make sure that this socket is listening,
18891da177e4SLinus Torvalds 	 * and that it has something pending.
18901da177e4SLinus Torvalds 	 */
18911da177e4SLinus Torvalds 	error = -EINVAL;
18921da177e4SLinus Torvalds 	if (sk->sk_state != TCP_LISTEN)
18930e87506fSArnaldo Carvalho de Melo 		goto out_err;
18941da177e4SLinus Torvalds 
18951da177e4SLinus Torvalds 	/* Find already established connection */
18960e87506fSArnaldo Carvalho de Melo 	if (reqsk_queue_empty(&tp->accept_queue)) {
18971da177e4SLinus Torvalds 		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
18981da177e4SLinus Torvalds 
18991da177e4SLinus Torvalds 		/* If this is a non-blocking socket, don't sleep. */
19001da177e4SLinus Torvalds 		error = -EAGAIN;
19011da177e4SLinus Torvalds 		if (!timeo)
19020e87506fSArnaldo Carvalho de Melo 			goto out_err;
19031da177e4SLinus Torvalds 
19041da177e4SLinus Torvalds 		error = wait_for_connect(sk, timeo);
19051da177e4SLinus Torvalds 		if (error)
19060e87506fSArnaldo Carvalho de Melo 			goto out_err;
19071da177e4SLinus Torvalds 	}
19081da177e4SLinus Torvalds 
19090e87506fSArnaldo Carvalho de Melo 	newsk = reqsk_queue_get_child(&tp->accept_queue, sk);
19101da177e4SLinus Torvalds 	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
19111da177e4SLinus Torvalds out:
19121da177e4SLinus Torvalds 	release_sock(sk);
19130e87506fSArnaldo Carvalho de Melo 	return newsk;
19140e87506fSArnaldo Carvalho de Melo out_err:
19150e87506fSArnaldo Carvalho de Melo 	newsk = NULL;
19161da177e4SLinus Torvalds 	*err = error;
19170e87506fSArnaldo Carvalho de Melo 	goto out;
19181da177e4SLinus Torvalds }
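
/*
 * Userspace counterpart of the "don't sleep on a non-blocking socket" path
 * above: with O_NONBLOCK set, accept() fails with EAGAIN instead of waiting
 * in wait_for_connect().  A sketch; "listen_fd" is a listening socket made
 * non-blocking elsewhere.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>

static int try_accept(int listen_fd)
{
	int fd = accept(listen_fd, NULL, NULL);

	if (fd < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
		return -1;	/* accept queue empty; poll and retry later */
	if (fd < 0)
		perror("accept");
	return fd;
}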
19191da177e4SLinus Torvalds 
19201da177e4SLinus Torvalds /*
19211da177e4SLinus Torvalds  *	Socket option code for TCP.
19221da177e4SLinus Torvalds  */
19231da177e4SLinus Torvalds int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
19241da177e4SLinus Torvalds 		   int optlen)
19251da177e4SLinus Torvalds {
19261da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
19271da177e4SLinus Torvalds 	int val;
19281da177e4SLinus Torvalds 	int err = 0;
19291da177e4SLinus Torvalds 
19301da177e4SLinus Torvalds 	if (level != SOL_TCP)
19311da177e4SLinus Torvalds 		return tp->af_specific->setsockopt(sk, level, optname,
19321da177e4SLinus Torvalds 						   optval, optlen);
19331da177e4SLinus Torvalds 
19345f8ef48dSStephen Hemminger 	/* This is a string value; all the others are ints. */
19355f8ef48dSStephen Hemminger 	if (optname == TCP_CONGESTION) {
19365f8ef48dSStephen Hemminger 		char name[TCP_CA_NAME_MAX];
19375f8ef48dSStephen Hemminger 
19385f8ef48dSStephen Hemminger 		if (optlen < 1)
19395f8ef48dSStephen Hemminger 			return -EINVAL;
19405f8ef48dSStephen Hemminger 
19415f8ef48dSStephen Hemminger 		val = strncpy_from_user(name, optval,
19425f8ef48dSStephen Hemminger 					min(TCP_CA_NAME_MAX-1, optlen));
19435f8ef48dSStephen Hemminger 		if (val < 0)
19445f8ef48dSStephen Hemminger 			return -EFAULT;
19455f8ef48dSStephen Hemminger 		name[val] = 0;
19465f8ef48dSStephen Hemminger 
19475f8ef48dSStephen Hemminger 		lock_sock(sk);
19485f8ef48dSStephen Hemminger 		err = tcp_set_congestion_control(tp, name);
19495f8ef48dSStephen Hemminger 		release_sock(sk);
19505f8ef48dSStephen Hemminger 		return err;
19515f8ef48dSStephen Hemminger 	}
19525f8ef48dSStephen Hemminger 
19531da177e4SLinus Torvalds 	if (optlen < sizeof(int))
19541da177e4SLinus Torvalds 		return -EINVAL;
19551da177e4SLinus Torvalds 
19561da177e4SLinus Torvalds 	if (get_user(val, (int __user *)optval))
19571da177e4SLinus Torvalds 		return -EFAULT;
19581da177e4SLinus Torvalds 
19591da177e4SLinus Torvalds 	lock_sock(sk);
19601da177e4SLinus Torvalds 
19611da177e4SLinus Torvalds 	switch (optname) {
19621da177e4SLinus Torvalds 	case TCP_MAXSEG:
19631da177e4SLinus Torvalds 		/* Values greater than the interface MTU won't take effect.
19641da177e4SLinus Torvalds 		 * However, at the point when this call is made we typically
19651da177e4SLinus Torvalds 		 * don't yet know which interface is going to be used. */
19661da177e4SLinus Torvalds 		if (val < 8 || val > MAX_TCP_WINDOW) {
19671da177e4SLinus Torvalds 			err = -EINVAL;
19681da177e4SLinus Torvalds 			break;
19691da177e4SLinus Torvalds 		}
19701da177e4SLinus Torvalds 		tp->rx_opt.user_mss = val;
19711da177e4SLinus Torvalds 		break;
19721da177e4SLinus Torvalds 
19731da177e4SLinus Torvalds 	case TCP_NODELAY:
19741da177e4SLinus Torvalds 		if (val) {
19751da177e4SLinus Torvalds 			/* TCP_NODELAY is weaker than TCP_CORK, so setting
19761da177e4SLinus Torvalds 			 * this option on a corked socket is remembered, but
19771da177e4SLinus Torvalds 			 * it is not activated until the cork is cleared.
19781da177e4SLinus Torvalds 			 *
19791da177e4SLinus Torvalds 			 * However, when TCP_NODELAY is set we make
19801da177e4SLinus Torvalds 			 * an explicit push, which overrides even TCP_CORK
19811da177e4SLinus Torvalds 			 * for currently queued segments.
19821da177e4SLinus Torvalds 			 */
19831da177e4SLinus Torvalds 			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
19841da177e4SLinus Torvalds 			tcp_push_pending_frames(sk, tp);
19851da177e4SLinus Torvalds 		} else {
19861da177e4SLinus Torvalds 			tp->nonagle &= ~TCP_NAGLE_OFF;
19871da177e4SLinus Torvalds 		}
19881da177e4SLinus Torvalds 		break;
19891da177e4SLinus Torvalds 
19901da177e4SLinus Torvalds 	case TCP_CORK:
19911da177e4SLinus Torvalds 		/* When set indicates to always queue non-full frames.
19921da177e4SLinus Torvalds 		 * Later the user clears this option and we transmit
19931da177e4SLinus Torvalds 		 * any pending partial frames in the queue.  This is
19941da177e4SLinus Torvalds 		 * meant to be used alongside sendfile() to get properly
19951da177e4SLinus Torvalds 		 * filled frames when the user (for example) must write
19961da177e4SLinus Torvalds 		 * out headers with a write() call first and then use
19971da177e4SLinus Torvalds 		 * sendfile to send out the data parts.
19981da177e4SLinus Torvalds 		 *
19991da177e4SLinus Torvalds 		 * TCP_CORK can be set together with TCP_NODELAY and it is
20001da177e4SLinus Torvalds 		 * stronger than TCP_NODELAY.
20011da177e4SLinus Torvalds 		 */
20021da177e4SLinus Torvalds 		if (val) {
20031da177e4SLinus Torvalds 			tp->nonagle |= TCP_NAGLE_CORK;
20041da177e4SLinus Torvalds 		} else {
20051da177e4SLinus Torvalds 			tp->nonagle &= ~TCP_NAGLE_CORK;
20061da177e4SLinus Torvalds 			if (tp->nonagle&TCP_NAGLE_OFF)
20071da177e4SLinus Torvalds 				tp->nonagle |= TCP_NAGLE_PUSH;
20081da177e4SLinus Torvalds 			tcp_push_pending_frames(sk, tp);
20091da177e4SLinus Torvalds 		}
20101da177e4SLinus Torvalds 		break;
20111da177e4SLinus Torvalds 
20121da177e4SLinus Torvalds 	case TCP_KEEPIDLE:
20131da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_KEEPIDLE)
20141da177e4SLinus Torvalds 			err = -EINVAL;
20151da177e4SLinus Torvalds 		else {
20161da177e4SLinus Torvalds 			tp->keepalive_time = val * HZ;
20171da177e4SLinus Torvalds 			if (sock_flag(sk, SOCK_KEEPOPEN) &&
20181da177e4SLinus Torvalds 			    !((1 << sk->sk_state) &
20191da177e4SLinus Torvalds 			      (TCPF_CLOSE | TCPF_LISTEN))) {
20201da177e4SLinus Torvalds 				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
20211da177e4SLinus Torvalds 				if (tp->keepalive_time > elapsed)
20221da177e4SLinus Torvalds 					elapsed = tp->keepalive_time - elapsed;
20231da177e4SLinus Torvalds 				else
20241da177e4SLinus Torvalds 					elapsed = 0;
20251da177e4SLinus Torvalds 				tcp_reset_keepalive_timer(sk, elapsed);
20261da177e4SLinus Torvalds 			}
20271da177e4SLinus Torvalds 		}
20281da177e4SLinus Torvalds 		break;
20291da177e4SLinus Torvalds 	case TCP_KEEPINTVL:
20301da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_KEEPINTVL)
20311da177e4SLinus Torvalds 			err = -EINVAL;
20321da177e4SLinus Torvalds 		else
20331da177e4SLinus Torvalds 			tp->keepalive_intvl = val * HZ;
20341da177e4SLinus Torvalds 		break;
20351da177e4SLinus Torvalds 	case TCP_KEEPCNT:
20361da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_KEEPCNT)
20371da177e4SLinus Torvalds 			err = -EINVAL;
20381da177e4SLinus Torvalds 		else
20391da177e4SLinus Torvalds 			tp->keepalive_probes = val;
20401da177e4SLinus Torvalds 		break;
20411da177e4SLinus Torvalds 	case TCP_SYNCNT:
20421da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_SYNCNT)
20431da177e4SLinus Torvalds 			err = -EINVAL;
20441da177e4SLinus Torvalds 		else
20451da177e4SLinus Torvalds 			tp->syn_retries = val;
20461da177e4SLinus Torvalds 		break;
20471da177e4SLinus Torvalds 
20481da177e4SLinus Torvalds 	case TCP_LINGER2:
20491da177e4SLinus Torvalds 		if (val < 0)
20501da177e4SLinus Torvalds 			tp->linger2 = -1;
20511da177e4SLinus Torvalds 		else if (val > sysctl_tcp_fin_timeout / HZ)
20521da177e4SLinus Torvalds 			tp->linger2 = 0;
20531da177e4SLinus Torvalds 		else
20541da177e4SLinus Torvalds 			tp->linger2 = val * HZ;
20551da177e4SLinus Torvalds 		break;
20561da177e4SLinus Torvalds 
20571da177e4SLinus Torvalds 	case TCP_DEFER_ACCEPT:
20581da177e4SLinus Torvalds 		tp->defer_accept = 0;
20591da177e4SLinus Torvalds 		if (val > 0) {
20601da177e4SLinus Torvalds 			/* Translate value in seconds to number of
20611da177e4SLinus Torvalds 			 * retransmits */
20621da177e4SLinus Torvalds 			while (tp->defer_accept < 32 &&
20631da177e4SLinus Torvalds 			       val > ((TCP_TIMEOUT_INIT / HZ) <<
20641da177e4SLinus Torvalds 				       tp->defer_accept))
20651da177e4SLinus Torvalds 				tp->defer_accept++;
20661da177e4SLinus Torvalds 			tp->defer_accept++;
20671da177e4SLinus Torvalds 		}
20681da177e4SLinus Torvalds 		break;
20691da177e4SLinus Torvalds 
20701da177e4SLinus Torvalds 	case TCP_WINDOW_CLAMP:
20711da177e4SLinus Torvalds 		if (!val) {
20721da177e4SLinus Torvalds 			if (sk->sk_state != TCP_CLOSE) {
20731da177e4SLinus Torvalds 				err = -EINVAL;
20741da177e4SLinus Torvalds 				break;
20751da177e4SLinus Torvalds 			}
20761da177e4SLinus Torvalds 			tp->window_clamp = 0;
20771da177e4SLinus Torvalds 		} else
20781da177e4SLinus Torvalds 			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
20791da177e4SLinus Torvalds 						SOCK_MIN_RCVBUF / 2 : val;
20801da177e4SLinus Torvalds 		break;
20811da177e4SLinus Torvalds 
20821da177e4SLinus Torvalds 	case TCP_QUICKACK:
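		/* Zero re-enables delayed-ACK ("pingpong") mode.  A non-zero
		 * value disables it and, if an ACK is already scheduled on an
		 * established connection, pushes that ACK out at once; an
		 * even value then restores pingpong mode, making the quickack
		 * effectively one-shot.
		 */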
20831da177e4SLinus Torvalds 		if (!val) {
20841da177e4SLinus Torvalds 			tp->ack.pingpong = 1;
20851da177e4SLinus Torvalds 		} else {
20861da177e4SLinus Torvalds 			tp->ack.pingpong = 0;
20871da177e4SLinus Torvalds 			if ((1 << sk->sk_state) &
20881da177e4SLinus Torvalds 			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
20891da177e4SLinus Torvalds 			    tcp_ack_scheduled(tp)) {
20901da177e4SLinus Torvalds 				tp->ack.pending |= TCP_ACK_PUSHED;
20911da177e4SLinus Torvalds 				cleanup_rbuf(sk, 1);
20921da177e4SLinus Torvalds 				if (!(val & 1))
20931da177e4SLinus Torvalds 					tp->ack.pingpong = 1;
20941da177e4SLinus Torvalds 			}
20951da177e4SLinus Torvalds 		}
20961da177e4SLinus Torvalds 		break;
20971da177e4SLinus Torvalds 
20981da177e4SLinus Torvalds 	default:
20991da177e4SLinus Torvalds 		err = -ENOPROTOOPT;
21001da177e4SLinus Torvalds 		break;
21011da177e4SLinus Torvalds 	}
21021da177e4SLinus Torvalds 	release_sock(sk);
21031da177e4SLinus Torvalds 	return err;
21041da177e4SLinus Torvalds }
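/* Usage sketch (userspace, not part of this file): the options handled above
 * correspond to ordinary setsockopt() calls.  Assuming an already connected
 * socket `fd` and the constants exported through <netinet/tcp.h>, keepalive
 * tuning and disabling Nagle might look roughly like:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	int on = 1, idle = 60, intvl = 10, cnt = 5;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
 *
 * The API takes seconds (bounded by MAX_TCP_KEEPIDLE and friends above); the
 * idle time and interval are stored internally in jiffies (val * HZ), while
 * the probe count is stored as-is.
 */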
21051da177e4SLinus Torvalds 
21061da177e4SLinus Torvalds /* Return information about the state of a TCP endpoint in API format. */
21071da177e4SLinus Torvalds void tcp_get_info(struct sock *sk, struct tcp_info *info)
21081da177e4SLinus Torvalds {
21091da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
21101da177e4SLinus Torvalds 	u32 now = tcp_time_stamp;
21111da177e4SLinus Torvalds 
21121da177e4SLinus Torvalds 	memset(info, 0, sizeof(*info));
21131da177e4SLinus Torvalds 
21141da177e4SLinus Torvalds 	info->tcpi_state = sk->sk_state;
21151da177e4SLinus Torvalds 	info->tcpi_ca_state = tp->ca_state;
21161da177e4SLinus Torvalds 	info->tcpi_retransmits = tp->retransmits;
21171da177e4SLinus Torvalds 	info->tcpi_probes = tp->probes_out;
21181da177e4SLinus Torvalds 	info->tcpi_backoff = tp->backoff;
21191da177e4SLinus Torvalds 
21201da177e4SLinus Torvalds 	if (tp->rx_opt.tstamp_ok)
21211da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
21221da177e4SLinus Torvalds 	if (tp->rx_opt.sack_ok)
21231da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_SACK;
21241da177e4SLinus Torvalds 	if (tp->rx_opt.wscale_ok) {
21251da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_WSCALE;
21261da177e4SLinus Torvalds 		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
21271da177e4SLinus Torvalds 		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
21281da177e4SLinus Torvalds 	}
21291da177e4SLinus Torvalds 
21301da177e4SLinus Torvalds 	if (tp->ecn_flags & TCP_ECN_OK)
21311da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_ECN;
21321da177e4SLinus Torvalds 
21331da177e4SLinus Torvalds 	info->tcpi_rto = jiffies_to_usecs(tp->rto);
21341da177e4SLinus Torvalds 	info->tcpi_ato = jiffies_to_usecs(tp->ack.ato);
2135c1b4a7e6SDavid S. Miller 	info->tcpi_snd_mss = tp->mss_cache;
21361da177e4SLinus Torvalds 	info->tcpi_rcv_mss = tp->ack.rcv_mss;
21371da177e4SLinus Torvalds 
21381da177e4SLinus Torvalds 	info->tcpi_unacked = tp->packets_out;
21391da177e4SLinus Torvalds 	info->tcpi_sacked = tp->sacked_out;
21401da177e4SLinus Torvalds 	info->tcpi_lost = tp->lost_out;
21411da177e4SLinus Torvalds 	info->tcpi_retrans = tp->retrans_out;
21421da177e4SLinus Torvalds 	info->tcpi_fackets = tp->fackets_out;
21431da177e4SLinus Torvalds 
21441da177e4SLinus Torvalds 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
21451da177e4SLinus Torvalds 	info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime);
21461da177e4SLinus Torvalds 	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
21471da177e4SLinus Torvalds 
21481da177e4SLinus Torvalds 	info->tcpi_pmtu = tp->pmtu_cookie;
21491da177e4SLinus Torvalds 	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
21501da177e4SLinus Torvalds 	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
21511da177e4SLinus Torvalds 	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
21521da177e4SLinus Torvalds 	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
21531da177e4SLinus Torvalds 	info->tcpi_snd_cwnd = tp->snd_cwnd;
21541da177e4SLinus Torvalds 	info->tcpi_advmss = tp->advmss;
21551da177e4SLinus Torvalds 	info->tcpi_reordering = tp->reordering;
21561da177e4SLinus Torvalds 
21571da177e4SLinus Torvalds 	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
21581da177e4SLinus Torvalds 	info->tcpi_rcv_space = tp->rcvq_space.space;
21591da177e4SLinus Torvalds 
21601da177e4SLinus Torvalds 	info->tcpi_total_retrans = tp->total_retrans;
21611da177e4SLinus Torvalds }
21621da177e4SLinus Torvalds 
21631da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info);
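/* Usage sketch (userspace, not part of this file): tcp_get_info() backs the
 * TCP_INFO getsockopt below, so an application can sample the fields filled
 * in above.  Assuming a connected socket `fd` and the struct tcp_info made
 * visible through <netinet/tcp.h>:
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
 *		printf("rtt %u us, cwnd %u, total retrans %u\n",
 *		       info.tcpi_rtt, info.tcpi_snd_cwnd,
 *		       info.tcpi_total_retrans);
 *
 * Note the units: tcpi_rto, tcpi_rtt and tcpi_rttvar are in microseconds,
 * while the tcpi_last_* fields are in milliseconds, as converted above.
 */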
21641da177e4SLinus Torvalds 
21651da177e4SLinus Torvalds int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
21661da177e4SLinus Torvalds 		   int __user *optlen)
21671da177e4SLinus Torvalds {
21681da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
21691da177e4SLinus Torvalds 	int val, len;
21701da177e4SLinus Torvalds 
21711da177e4SLinus Torvalds 	if (level != SOL_TCP)
21721da177e4SLinus Torvalds 		return tp->af_specific->getsockopt(sk, level, optname,
21731da177e4SLinus Torvalds 						   optval, optlen);
21741da177e4SLinus Torvalds 
21751da177e4SLinus Torvalds 	if (get_user(len, optlen))
21761da177e4SLinus Torvalds 		return -EFAULT;
21771da177e4SLinus Torvalds 
21781da177e4SLinus Torvalds 	len = min_t(unsigned int, len, sizeof(int));
21791da177e4SLinus Torvalds 
21801da177e4SLinus Torvalds 	if (len < 0)
21811da177e4SLinus Torvalds 		return -EINVAL;
21821da177e4SLinus Torvalds 
21831da177e4SLinus Torvalds 	switch (optname) {
21841da177e4SLinus Torvalds 	case TCP_MAXSEG:
2185c1b4a7e6SDavid S. Miller 		val = tp->mss_cache;
21861da177e4SLinus Torvalds 		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
21871da177e4SLinus Torvalds 			val = tp->rx_opt.user_mss;
21881da177e4SLinus Torvalds 		break;
21891da177e4SLinus Torvalds 	case TCP_NODELAY:
21901da177e4SLinus Torvalds 		val = !!(tp->nonagle & TCP_NAGLE_OFF);
21911da177e4SLinus Torvalds 		break;
21921da177e4SLinus Torvalds 	case TCP_CORK:
21931da177e4SLinus Torvalds 		val = !!(tp->nonagle & TCP_NAGLE_CORK);
21941da177e4SLinus Torvalds 		break;
21951da177e4SLinus Torvalds 	case TCP_KEEPIDLE:
21961da177e4SLinus Torvalds 		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
21971da177e4SLinus Torvalds 		break;
21981da177e4SLinus Torvalds 	case TCP_KEEPINTVL:
21991da177e4SLinus Torvalds 		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
22001da177e4SLinus Torvalds 		break;
22011da177e4SLinus Torvalds 	case TCP_KEEPCNT:
22021da177e4SLinus Torvalds 		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
22031da177e4SLinus Torvalds 		break;
22041da177e4SLinus Torvalds 	case TCP_SYNCNT:
22051da177e4SLinus Torvalds 		val = tp->syn_retries ? : sysctl_tcp_syn_retries;
22061da177e4SLinus Torvalds 		break;
22071da177e4SLinus Torvalds 	case TCP_LINGER2:
22081da177e4SLinus Torvalds 		val = tp->linger2;
22091da177e4SLinus Torvalds 		if (val >= 0)
22101da177e4SLinus Torvalds 			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
22111da177e4SLinus Torvalds 		break;
22121da177e4SLinus Torvalds 	case TCP_DEFER_ACCEPT:
22131da177e4SLinus Torvalds 		val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
22141da177e4SLinus Torvalds 					       (tp->defer_accept - 1));
22151da177e4SLinus Torvalds 		break;
22161da177e4SLinus Torvalds 	case TCP_WINDOW_CLAMP:
22171da177e4SLinus Torvalds 		val = tp->window_clamp;
22181da177e4SLinus Torvalds 		break;
22191da177e4SLinus Torvalds 	case TCP_INFO: {
22201da177e4SLinus Torvalds 		struct tcp_info info;
22211da177e4SLinus Torvalds 
22221da177e4SLinus Torvalds 		if (get_user(len, optlen))
22231da177e4SLinus Torvalds 			return -EFAULT;
22241da177e4SLinus Torvalds 
22251da177e4SLinus Torvalds 		tcp_get_info(sk, &info);
22261da177e4SLinus Torvalds 
22271da177e4SLinus Torvalds 		len = min_t(unsigned int, len, sizeof(info));
22281da177e4SLinus Torvalds 		if (put_user(len, optlen))
22291da177e4SLinus Torvalds 			return -EFAULT;
22301da177e4SLinus Torvalds 		if (copy_to_user(optval, &info, len))
22311da177e4SLinus Torvalds 			return -EFAULT;
22321da177e4SLinus Torvalds 		return 0;
22331da177e4SLinus Torvalds 	}
22341da177e4SLinus Torvalds 	case TCP_QUICKACK:
22351da177e4SLinus Torvalds 		val = !tp->ack.pingpong;
22361da177e4SLinus Torvalds 		break;
22375f8ef48dSStephen Hemminger 
22385f8ef48dSStephen Hemminger 	case TCP_CONGESTION:
22395f8ef48dSStephen Hemminger 		if (get_user(len, optlen))
22405f8ef48dSStephen Hemminger 			return -EFAULT;
22415f8ef48dSStephen Hemminger 		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
22425f8ef48dSStephen Hemminger 		if (put_user(len, optlen))
22435f8ef48dSStephen Hemminger 			return -EFAULT;
22445f8ef48dSStephen Hemminger 		if (copy_to_user(optval, tp->ca_ops->name, len))
22455f8ef48dSStephen Hemminger 			return -EFAULT;
22465f8ef48dSStephen Hemminger 		return 0;
22471da177e4SLinus Torvalds 	default:
22481da177e4SLinus Torvalds 		return -ENOPROTOOPT;
22491da177e4SLinus Torvalds 	}
22501da177e4SLinus Torvalds 
22511da177e4SLinus Torvalds 	if (put_user(len, optlen))
22521da177e4SLinus Torvalds 		return -EFAULT;
22531da177e4SLinus Torvalds 	if (copy_to_user(optval, &val, len))
22541da177e4SLinus Torvalds 		return -EFAULT;
22551da177e4SLinus Torvalds 	return 0;
22561da177e4SLinus Torvalds }
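/* Usage sketch (userspace, not part of this file): the TCP_CONGESTION case
 * above returns the current congestion control algorithm's name as a string
 * of at most TCP_CA_NAME_MAX (16) bytes.  Assuming a socket `fd` and that the
 * TCP_CONGESTION constant (13 in this kernel's headers) is visible to the
 * application, either via <netinet/tcp.h> or a local define:
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	char ca[16];
 *	socklen_t len = sizeof(ca);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, ca, &len) == 0)
 *		printf("congestion control: %.*s\n", (int)len, ca);
 */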
22571da177e4SLinus Torvalds 
22581da177e4SLinus Torvalds 
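/* __skb_cb_too_small_for_tcp() is declared but deliberately never defined.
 * The sizeof() comparison in tcp_init() below is a compile-time constant, so
 * the call is normally eliminated as dead code; if struct tcp_skb_cb ever
 * outgrows skb->cb[], the call survives and the build fails at link time.
 */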
22591da177e4SLinus Torvalds extern void __skb_cb_too_small_for_tcp(int, int);
22605f8ef48dSStephen Hemminger extern struct tcp_congestion_ops tcp_reno;
22611da177e4SLinus Torvalds 
22621da177e4SLinus Torvalds static __initdata unsigned long thash_entries;
22631da177e4SLinus Torvalds static int __init set_thash_entries(char *str)
22641da177e4SLinus Torvalds {
22651da177e4SLinus Torvalds 	if (!str)
22661da177e4SLinus Torvalds 		return 0;
22671da177e4SLinus Torvalds 	thash_entries = simple_strtoul(str, &str, 0);
22681da177e4SLinus Torvalds 	return 1;
22691da177e4SLinus Torvalds }
22701da177e4SLinus Torvalds __setup("thash_entries=", set_thash_entries);
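/* The established hash size can be overridden at boot, e.g. by passing
 * "thash_entries=131072" on the kernel command line; otherwise
 * alloc_large_system_hash() below sizes the table from available memory.
 */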
22711da177e4SLinus Torvalds 
22721da177e4SLinus Torvalds void __init tcp_init(void)
22731da177e4SLinus Torvalds {
22741da177e4SLinus Torvalds 	struct sk_buff *skb = NULL;
22751da177e4SLinus Torvalds 	int order, i;
22761da177e4SLinus Torvalds 
22771da177e4SLinus Torvalds 	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
22781da177e4SLinus Torvalds 		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
22791da177e4SLinus Torvalds 					   sizeof(skb->cb));
22801da177e4SLinus Torvalds 
22811da177e4SLinus Torvalds 	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
22821da177e4SLinus Torvalds 					      sizeof(struct tcp_bind_bucket),
22831da177e4SLinus Torvalds 					      0, SLAB_HWCACHE_ALIGN,
22841da177e4SLinus Torvalds 					      NULL, NULL);
22851da177e4SLinus Torvalds 	if (!tcp_bucket_cachep)
22861da177e4SLinus Torvalds 		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
22871da177e4SLinus Torvalds 
22881da177e4SLinus Torvalds 	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
22891da177e4SLinus Torvalds 						sizeof(struct tcp_tw_bucket),
22901da177e4SLinus Torvalds 						0, SLAB_HWCACHE_ALIGN,
22911da177e4SLinus Torvalds 						NULL, NULL);
22921da177e4SLinus Torvalds 	if (!tcp_timewait_cachep)
22931da177e4SLinus Torvalds 		panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
22941da177e4SLinus Torvalds 
22951da177e4SLinus Torvalds 	/* Size and allocate the main established and bind bucket
22961da177e4SLinus Torvalds 	 * hash tables.
22971da177e4SLinus Torvalds 	 *
22981da177e4SLinus Torvalds 	 * The methodology is similar to that of the buffer cache.
22991da177e4SLinus Torvalds 	 */
23001da177e4SLinus Torvalds 	tcp_ehash = (struct tcp_ehash_bucket *)
23011da177e4SLinus Torvalds 		alloc_large_system_hash("TCP established",
23021da177e4SLinus Torvalds 					sizeof(struct tcp_ehash_bucket),
23031da177e4SLinus Torvalds 					thash_entries,
23041da177e4SLinus Torvalds 					(num_physpages >= 128 * 1024) ?
23051da177e4SLinus Torvalds 						(25 - PAGE_SHIFT) :
23061da177e4SLinus Torvalds 						(27 - PAGE_SHIFT),
23071da177e4SLinus Torvalds 					HASH_HIGHMEM,
23081da177e4SLinus Torvalds 					&tcp_ehash_size,
23091da177e4SLinus Torvalds 					NULL,
23101da177e4SLinus Torvalds 					0);
23111da177e4SLinus Torvalds 	tcp_ehash_size = (1 << tcp_ehash_size) >> 1;
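	/* The allocation above holds two hashes of tcp_ehash_size buckets
	 * each: established sockets live in the first half and TIME-WAIT
	 * sockets in the second, which is why the loop below initializes
	 * tcp_ehash_size << 1 buckets.
	 */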
23121da177e4SLinus Torvalds 	for (i = 0; i < (tcp_ehash_size << 1); i++) {
23131da177e4SLinus Torvalds 		rwlock_init(&tcp_ehash[i].lock);
23141da177e4SLinus Torvalds 		INIT_HLIST_HEAD(&tcp_ehash[i].chain);
23151da177e4SLinus Torvalds 	}
23161da177e4SLinus Torvalds 
23171da177e4SLinus Torvalds 	tcp_bhash = (struct tcp_bind_hashbucket *)
23181da177e4SLinus Torvalds 		alloc_large_system_hash("TCP bind",
23191da177e4SLinus Torvalds 					sizeof(struct tcp_bind_hashbucket),
23201da177e4SLinus Torvalds 					tcp_ehash_size,
23211da177e4SLinus Torvalds 					(num_physpages >= 128 * 1024) ?
23221da177e4SLinus Torvalds 						(25 - PAGE_SHIFT) :
23231da177e4SLinus Torvalds 						(27 - PAGE_SHIFT),
23241da177e4SLinus Torvalds 					HASH_HIGHMEM,
23251da177e4SLinus Torvalds 					&tcp_bhash_size,
23261da177e4SLinus Torvalds 					NULL,
23271da177e4SLinus Torvalds 					64 * 1024);
23281da177e4SLinus Torvalds 	tcp_bhash_size = 1 << tcp_bhash_size;
23291da177e4SLinus Torvalds 	for (i = 0; i < tcp_bhash_size; i++) {
23301da177e4SLinus Torvalds 		spin_lock_init(&tcp_bhash[i].lock);
23311da177e4SLinus Torvalds 		INIT_HLIST_HEAD(&tcp_bhash[i].chain);
23321da177e4SLinus Torvalds 	}
23331da177e4SLinus Torvalds 
23341da177e4SLinus Torvalds 	/* Try to be a bit smarter and adjust defaults depending
23351da177e4SLinus Torvalds 	 * on available memory.
23361da177e4SLinus Torvalds 	 */
23371da177e4SLinus Torvalds 	for (order = 0; ((1 << order) << PAGE_SHIFT) <
23381da177e4SLinus Torvalds 			(tcp_bhash_size * sizeof(struct tcp_bind_hashbucket));
23391da177e4SLinus Torvalds 			order++)
23401da177e4SLinus Torvalds 		;
2341e7626486SAndi Kleen 	if (order >= 4) {
23421da177e4SLinus Torvalds 		sysctl_local_port_range[0] = 32768;
23431da177e4SLinus Torvalds 		sysctl_local_port_range[1] = 61000;
23441da177e4SLinus Torvalds 		sysctl_tcp_max_tw_buckets = 180000;
23451da177e4SLinus Torvalds 		sysctl_tcp_max_orphans = 4096 << (order - 4);
23461da177e4SLinus Torvalds 		sysctl_max_syn_backlog = 1024;
23471da177e4SLinus Torvalds 	} else if (order < 3) {
23481da177e4SLinus Torvalds 		sysctl_local_port_range[0] = 1024 * (3 - order);
23491da177e4SLinus Torvalds 		sysctl_tcp_max_tw_buckets >>= (3 - order);
23501da177e4SLinus Torvalds 		sysctl_tcp_max_orphans >>= (3 - order);
23511da177e4SLinus Torvalds 		sysctl_max_syn_backlog = 128;
23521da177e4SLinus Torvalds 	}
23531da177e4SLinus Torvalds 	tcp_port_rover = sysctl_local_port_range[0] - 1;
23541da177e4SLinus Torvalds 
23551da177e4SLinus Torvalds 	sysctl_tcp_mem[0] =  768 << order;
23561da177e4SLinus Torvalds 	sysctl_tcp_mem[1] = 1024 << order;
23571da177e4SLinus Torvalds 	sysctl_tcp_mem[2] = 1536 << order;
23581da177e4SLinus Torvalds 
23591da177e4SLinus Torvalds 	if (order < 3) {
23601da177e4SLinus Torvalds 		sysctl_tcp_wmem[2] = 64 * 1024;
23611da177e4SLinus Torvalds 		sysctl_tcp_rmem[0] = PAGE_SIZE;
23621da177e4SLinus Torvalds 		sysctl_tcp_rmem[1] = 43689;
23631da177e4SLinus Torvalds 		sysctl_tcp_rmem[2] = 2 * 43689;
23641da177e4SLinus Torvalds 	}
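	/* Worked example: with 4 KiB pages and, say, an 8192-bucket bind
	 * hash of 16-byte buckets (128 KiB), the loop above stops at
	 * order 5, so the local port range becomes 32768-61000,
	 * sysctl_tcp_max_orphans 4096 << 1 = 8192, and sysctl_tcp_mem
	 * {24576, 32768, 49152} pages.
	 */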
23651da177e4SLinus Torvalds 
23661da177e4SLinus Torvalds 	printk(KERN_INFO "TCP: Hash tables configured "
23671da177e4SLinus Torvalds 	       "(established %d bind %d)\n",
23681da177e4SLinus Torvalds 	       tcp_ehash_size << 1, tcp_bhash_size);
2369317a76f9SStephen Hemminger 
2370317a76f9SStephen Hemminger 	tcp_register_congestion_control(&tcp_reno);
23711da177e4SLinus Torvalds }
23721da177e4SLinus Torvalds 
23731da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_accept);
23741da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_close);
23751da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_destroy_sock);
23761da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_disconnect);
23771da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_getsockopt);
23781da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_ioctl);
23791da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_poll);
23801da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_read_sock);
23811da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_recvmsg);
23821da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sendmsg);
23831da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sendpage);
23841da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_setsockopt);
23851da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_shutdown);
23861da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_statistics);
23871da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_timewait_cachep);
2388