xref: /linux/net/ipv4/tcp.c (revision 576a30eb6453439b3c37ba24455ac7090c247b5a)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
131da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
141da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
151da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
161da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
171da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
181da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
191da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
201da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
211da177e4SLinus Torvalds  *
221da177e4SLinus Torvalds  * Fixes:
231da177e4SLinus Torvalds  *		Alan Cox	:	Numerous verify_area() calls
241da177e4SLinus Torvalds  *		Alan Cox	:	Set the ACK bit on a reset
251da177e4SLinus Torvalds  *		Alan Cox	:	Stopped it crashing if it closed while
261da177e4SLinus Torvalds  *					sk->inuse=1 and was trying to connect
271da177e4SLinus Torvalds  *					(tcp_err()).
281da177e4SLinus Torvalds  *		Alan Cox	:	All icmp error handling was broken
291da177e4SLinus Torvalds  *					pointers passed where wrong and the
301da177e4SLinus Torvalds  *					socket was looked up backwards. Nobody
311da177e4SLinus Torvalds  *					tested any icmp error code obviously.
321da177e4SLinus Torvalds  *		Alan Cox	:	tcp_err() now handled properly. It
331da177e4SLinus Torvalds  *					wakes people on errors. poll
341da177e4SLinus Torvalds  *					behaves and the icmp error race
351da177e4SLinus Torvalds  *					has gone by moving it into sock.c
361da177e4SLinus Torvalds  *		Alan Cox	:	tcp_send_reset() fixed to work for
371da177e4SLinus Torvalds  *					everything not just packets for
381da177e4SLinus Torvalds  *					unknown sockets.
391da177e4SLinus Torvalds  *		Alan Cox	:	tcp option processing.
401da177e4SLinus Torvalds  *		Alan Cox	:	Reset tweaked (still not 100%) [Had
411da177e4SLinus Torvalds  *					syn rule wrong]
421da177e4SLinus Torvalds  *		Herp Rosmanith  :	More reset fixes
431da177e4SLinus Torvalds  *		Alan Cox	:	No longer acks invalid rst frames.
441da177e4SLinus Torvalds  *					Acking any kind of RST is right out.
451da177e4SLinus Torvalds  *		Alan Cox	:	Sets an ignore me flag on an rst
461da177e4SLinus Torvalds  *					receive otherwise odd bits of prattle
471da177e4SLinus Torvalds  *					escape still
481da177e4SLinus Torvalds  *		Alan Cox	:	Fixed another acking RST frame bug.
491da177e4SLinus Torvalds  *					Should stop LAN workplace lockups.
501da177e4SLinus Torvalds  *		Alan Cox	: 	Some tidyups using the new skb list
511da177e4SLinus Torvalds  *					facilities
521da177e4SLinus Torvalds  *		Alan Cox	:	sk->keepopen now seems to work
531da177e4SLinus Torvalds  *		Alan Cox	:	Pulls options out correctly on accepts
541da177e4SLinus Torvalds  *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
551da177e4SLinus Torvalds  *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
561da177e4SLinus Torvalds  *					bit to skb ops.
571da177e4SLinus Torvalds  *		Alan Cox	:	Tidied tcp_data to avoid a potential
581da177e4SLinus Torvalds  *					nasty.
591da177e4SLinus Torvalds  *		Alan Cox	:	Added some better commenting, as the
601da177e4SLinus Torvalds  *					tcp is hard to follow
611da177e4SLinus Torvalds  *		Alan Cox	:	Removed incorrect check for 20 * psh
621da177e4SLinus Torvalds  *	Michael O'Reilly	:	ack < copied bug fix.
631da177e4SLinus Torvalds  *	Johannes Stille		:	Misc tcp fixes (not all in yet).
641da177e4SLinus Torvalds  *		Alan Cox	:	FIN with no memory -> CRASH
651da177e4SLinus Torvalds  *		Alan Cox	:	Added socket option proto entries.
661da177e4SLinus Torvalds  *					Also added awareness of them to accept.
671da177e4SLinus Torvalds  *		Alan Cox	:	Added TCP options (SOL_TCP)
681da177e4SLinus Torvalds  *		Alan Cox	:	Switched wakeup calls to callbacks,
691da177e4SLinus Torvalds  *					so the kernel can layer network
701da177e4SLinus Torvalds  *					sockets.
711da177e4SLinus Torvalds  *		Alan Cox	:	Use ip_tos/ip_ttl settings.
721da177e4SLinus Torvalds  *		Alan Cox	:	Handle FIN (more) properly (we hope).
731da177e4SLinus Torvalds  *		Alan Cox	:	RST frames sent on unsynchronised
741da177e4SLinus Torvalds  *					state ack error.
751da177e4SLinus Torvalds  *		Alan Cox	:	Put in missing check for SYN bit.
761da177e4SLinus Torvalds  *		Alan Cox	:	Added tcp_select_window() aka NET2E
771da177e4SLinus Torvalds  *					window non shrink trick.
781da177e4SLinus Torvalds  *		Alan Cox	:	Added a couple of small NET2E timer
791da177e4SLinus Torvalds  *					fixes
801da177e4SLinus Torvalds  *		Charles Hedrick :	TCP fixes
811da177e4SLinus Torvalds  *		Toomas Tamm	:	TCP window fixes
821da177e4SLinus Torvalds  *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
831da177e4SLinus Torvalds  *		Charles Hedrick	:	Rewrote most of it to actually work
841da177e4SLinus Torvalds  *		Linus		:	Rewrote tcp_read() and URG handling
851da177e4SLinus Torvalds  *					completely
861da177e4SLinus Torvalds  *		Gerhard Koerting:	Fixed some missing timer handling
871da177e4SLinus Torvalds  *		Matthew Dillon  :	Reworked TCP machine states as per RFC
881da177e4SLinus Torvalds  *		Gerhard Koerting:	PC/TCP workarounds
891da177e4SLinus Torvalds  *		Adam Caldwell	:	Assorted timer/timing errors
901da177e4SLinus Torvalds  *		Matthew Dillon	:	Fixed another RST bug
911da177e4SLinus Torvalds  *		Alan Cox	:	Move to kernel side addressing changes.
921da177e4SLinus Torvalds  *		Alan Cox	:	Beginning work on TCP fastpathing
931da177e4SLinus Torvalds  *					(not yet usable)
941da177e4SLinus Torvalds  *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
951da177e4SLinus Torvalds  *		Alan Cox	:	TCP fast path debugging
961da177e4SLinus Torvalds  *		Alan Cox	:	Window clamping
971da177e4SLinus Torvalds  *		Michael Riepe	:	Bug in tcp_check()
981da177e4SLinus Torvalds  *		Matt Dillon	:	More TCP improvements and RST bug fixes
991da177e4SLinus Torvalds  *		Matt Dillon	:	Yet more small nasties removed from the
1001da177e4SLinus Torvalds  *					TCP code (Be very nice to this man if
1011da177e4SLinus Torvalds  *					tcp finally works 100%) 8)
1021da177e4SLinus Torvalds  *		Alan Cox	:	BSD accept semantics.
1031da177e4SLinus Torvalds  *		Alan Cox	:	Reset on closedown bug.
1041da177e4SLinus Torvalds  *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
1051da177e4SLinus Torvalds  *		Michael Pall	:	Handle poll() after URG properly in
1061da177e4SLinus Torvalds  *					all cases.
1071da177e4SLinus Torvalds  *		Michael Pall	:	Undo the last fix in tcp_read_urg()
1081da177e4SLinus Torvalds  *					(multi URG PUSH broke rlogin).
1091da177e4SLinus Torvalds  *		Michael Pall	:	Fix the multi URG PUSH problem in
1101da177e4SLinus Torvalds  *					tcp_readable(), poll() after URG
1111da177e4SLinus Torvalds  *					works now.
1121da177e4SLinus Torvalds  *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
1131da177e4SLinus Torvalds  *					BSD api.
1141da177e4SLinus Torvalds  *		Alan Cox	:	Changed the semantics of sk->socket to
1151da177e4SLinus Torvalds  *					fix a race and a signal problem with
1161da177e4SLinus Torvalds  *					accept() and async I/O.
1171da177e4SLinus Torvalds  *		Alan Cox	:	Relaxed the rules on tcp_sendto().
1181da177e4SLinus Torvalds  *		Yury Shevchuk	:	Really fixed accept() blocking problem.
1191da177e4SLinus Torvalds  *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
1201da177e4SLinus Torvalds  *					clients/servers which listen in on
1211da177e4SLinus Torvalds  *					fixed ports.
1221da177e4SLinus Torvalds  *		Alan Cox	:	Cleaned the above up and shrank it to
1231da177e4SLinus Torvalds  *					a sensible code size.
1241da177e4SLinus Torvalds  *		Alan Cox	:	Self connect lockup fix.
1251da177e4SLinus Torvalds  *		Alan Cox	:	No connect to multicast.
1261da177e4SLinus Torvalds  *		Ross Biro	:	Close unaccepted children on master
1271da177e4SLinus Torvalds  *					socket close.
1281da177e4SLinus Torvalds  *		Alan Cox	:	Reset tracing code.
1291da177e4SLinus Torvalds  *		Alan Cox	:	Spurious resets on shutdown.
1301da177e4SLinus Torvalds  *		Alan Cox	:	Giant 15 minute/60 second timer error
1311da177e4SLinus Torvalds  *		Alan Cox	:	Small whoops in polling before an
1321da177e4SLinus Torvalds  *					accept.
1331da177e4SLinus Torvalds  *		Alan Cox	:	Kept the state trace facility since
1341da177e4SLinus Torvalds  *					it's handy for debugging.
1351da177e4SLinus Torvalds  *		Alan Cox	:	More reset handler fixes.
1361da177e4SLinus Torvalds  *		Alan Cox	:	Started rewriting the code based on
1371da177e4SLinus Torvalds  *					the RFC's for other useful protocol
1381da177e4SLinus Torvalds  *					references see: Comer, KA9Q NOS, and
1391da177e4SLinus Torvalds  *					for a reference on the difference
1401da177e4SLinus Torvalds  *					between specifications and how BSD
1411da177e4SLinus Torvalds  *					works see the 4.4lite source.
1421da177e4SLinus Torvalds  *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
1431da177e4SLinus Torvalds  *					close.
1441da177e4SLinus Torvalds  *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
1451da177e4SLinus Torvalds  *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
1461da177e4SLinus Torvalds  *		Alan Cox	:	Reimplemented timers as per the RFC
1471da177e4SLinus Torvalds  *					and using multiple timers for sanity.
1481da177e4SLinus Torvalds  *		Alan Cox	:	Small bug fixes, and a lot of new
1491da177e4SLinus Torvalds  *					comments.
1501da177e4SLinus Torvalds  *		Alan Cox	:	Fixed dual reader crash by locking
1511da177e4SLinus Torvalds  *					the buffers (much like datagram.c)
1521da177e4SLinus Torvalds  *		Alan Cox	:	Fixed stuck sockets in probe. A probe
1531da177e4SLinus Torvalds  *					now gets fed up with retrying without
1541da177e4SLinus Torvalds  *					(even a no-space) answer.
1551da177e4SLinus Torvalds  *		Alan Cox	:	Extracted closing code better
1561da177e4SLinus Torvalds  *		Alan Cox	:	Fixed the closing state machine to
1571da177e4SLinus Torvalds  *					resemble the RFC.
1581da177e4SLinus Torvalds  *		Alan Cox	:	More 'per spec' fixes.
1591da177e4SLinus Torvalds  *		Jorge Cwik	:	Even faster checksumming.
1601da177e4SLinus Torvalds  *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
1611da177e4SLinus Torvalds  *					only frames. At least one pc tcp stack
1621da177e4SLinus Torvalds  *					generates them.
1631da177e4SLinus Torvalds  *		Alan Cox	:	Cache last socket.
1641da177e4SLinus Torvalds  *		Alan Cox	:	Per route irtt.
1651da177e4SLinus Torvalds  *		Matt Day	:	poll()->select() match BSD precisely on error
1661da177e4SLinus Torvalds  *		Alan Cox	:	New buffers
1671da177e4SLinus Torvalds  *		Marc Tamsky	:	Various sk->prot->retransmits and
1681da177e4SLinus Torvalds  *					sk->retransmits misupdating fixed.
1691da177e4SLinus Torvalds  *					Fixed tcp_write_timeout: stuck close,
1701da177e4SLinus Torvalds  *					and TCP syn retries get used now.
1711da177e4SLinus Torvalds  *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
1721da177e4SLinus Torvalds  *					ack if state is TCP_CLOSED.
1731da177e4SLinus Torvalds  *		Alan Cox	:	Look up device on a retransmit - routes may
1741da177e4SLinus Torvalds  *					change. Doesn't yet cope with MSS shrink right
1751da177e4SLinus Torvalds  *					but it's a start!
1761da177e4SLinus Torvalds  *		Marc Tamsky	:	Closing in closing fixes.
1771da177e4SLinus Torvalds  *		Mike Shaver	:	RFC1122 verifications.
1781da177e4SLinus Torvalds  *		Alan Cox	:	rcv_saddr errors.
1791da177e4SLinus Torvalds  *		Alan Cox	:	Block double connect().
1801da177e4SLinus Torvalds  *		Alan Cox	:	Small hooks for enSKIP.
1811da177e4SLinus Torvalds  *		Alexey Kuznetsov:	Path MTU discovery.
1821da177e4SLinus Torvalds  *		Alan Cox	:	Support soft errors.
1831da177e4SLinus Torvalds  *		Alan Cox	:	Fix MTU discovery pathological case
1841da177e4SLinus Torvalds  *					when the remote claims no mtu!
1851da177e4SLinus Torvalds  *		Marc Tamsky	:	TCP_CLOSE fix.
1861da177e4SLinus Torvalds  *		Colin (G3TNE)	:	Send a reset on syn ack replies in
1871da177e4SLinus Torvalds  *					window but wrong (fixes NT lpd problems)
1881da177e4SLinus Torvalds  *		Pedro Roque	:	Better TCP window handling, delayed ack.
1891da177e4SLinus Torvalds  *		Joerg Reuter	:	No modification of locked buffers in
1901da177e4SLinus Torvalds  *					tcp_do_retransmit()
1911da177e4SLinus Torvalds  *		Eric Schenk	:	Changed receiver side silly window
1921da177e4SLinus Torvalds  *					avoidance algorithm to BSD style
1931da177e4SLinus Torvalds  *					algorithm. This doubles throughput
1941da177e4SLinus Torvalds  *					against machines running Solaris,
1951da177e4SLinus Torvalds  *					and seems to result in general
1961da177e4SLinus Torvalds  *					improvement.
1971da177e4SLinus Torvalds  *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
1981da177e4SLinus Torvalds  *	Willy Konynenberg	:	Transparent proxying support.
1991da177e4SLinus Torvalds  *	Mike McLagan		:	Routing by source
2001da177e4SLinus Torvalds  *		Keith Owens	:	Do proper merging with partial SKB's in
2011da177e4SLinus Torvalds  *					tcp_do_sendmsg to avoid burstiness.
2021da177e4SLinus Torvalds  *		Eric Schenk	:	Fix fast close down bug with
2031da177e4SLinus Torvalds  *					shutdown() followed by close().
2041da177e4SLinus Torvalds  *		Andi Kleen 	:	Make poll agree with SIGIO
2051da177e4SLinus Torvalds  *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
2061da177e4SLinus Torvalds  *					lingertime == 0 (RFC 793 ABORT Call)
2071da177e4SLinus Torvalds  *	Hirokazu Takahashi	:	Use copy_from_user() instead of
2081da177e4SLinus Torvalds  *					csum_and_copy_from_user() if possible.
2091da177e4SLinus Torvalds  *
2101da177e4SLinus Torvalds  *		This program is free software; you can redistribute it and/or
2111da177e4SLinus Torvalds  *		modify it under the terms of the GNU General Public License
2121da177e4SLinus Torvalds  *		as published by the Free Software Foundation; either version
2131da177e4SLinus Torvalds  *		2 of the License, or (at your option) any later version.
2141da177e4SLinus Torvalds  *
2151da177e4SLinus Torvalds  * Description of States:
2161da177e4SLinus Torvalds  *
2171da177e4SLinus Torvalds  *	TCP_SYN_SENT		sent a connection request, waiting for ack
2181da177e4SLinus Torvalds  *
2191da177e4SLinus Torvalds  *	TCP_SYN_RECV		received a connection request, sent ack,
2201da177e4SLinus Torvalds  *				waiting for final ack in three-way handshake.
2211da177e4SLinus Torvalds  *
2221da177e4SLinus Torvalds  *	TCP_ESTABLISHED		connection established
2231da177e4SLinus Torvalds  *
2241da177e4SLinus Torvalds  *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
2251da177e4SLinus Torvalds  *				transmission of remaining buffered data
2261da177e4SLinus Torvalds  *
2271da177e4SLinus Torvalds  *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
2281da177e4SLinus Torvalds  *				to shutdown
2291da177e4SLinus Torvalds  *
2301da177e4SLinus Torvalds  *	TCP_CLOSING		both sides have shutdown but we still have
2311da177e4SLinus Torvalds  *				data we have to finish sending
2321da177e4SLinus Torvalds  *
2331da177e4SLinus Torvalds  *	TCP_TIME_WAIT		timeout to catch resent junk before entering
2341da177e4SLinus Torvalds  *				closed, can only be entered from FIN_WAIT2
2351da177e4SLinus Torvalds  *				or CLOSING.  Required because the other end
2361da177e4SLinus Torvalds  *				may not have gotten our last ACK causing it
2371da177e4SLinus Torvalds  *				to retransmit the data packet (which we ignore)
2381da177e4SLinus Torvalds  *
2391da177e4SLinus Torvalds  *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
2401da177e4SLinus Torvalds  *				us to finish writing our data and to shutdown
2411da177e4SLinus Torvalds  *				(we have to close() to move on to LAST_ACK)
2421da177e4SLinus Torvalds  *
2431da177e4SLinus Torvalds  *	TCP_LAST_ACK		our side has shutdown after remote has
2441da177e4SLinus Torvalds  *				shutdown.  There may still be data in our
2451da177e4SLinus Torvalds  *				buffer that we have to finish sending
2461da177e4SLinus Torvalds  *
2471da177e4SLinus Torvalds  *	TCP_CLOSE		socket is finished
2481da177e4SLinus Torvalds  */
2491da177e4SLinus Torvalds 
2501da177e4SLinus Torvalds #include <linux/config.h>
2511da177e4SLinus Torvalds #include <linux/module.h>
2521da177e4SLinus Torvalds #include <linux/types.h>
2531da177e4SLinus Torvalds #include <linux/fcntl.h>
2541da177e4SLinus Torvalds #include <linux/poll.h>
2551da177e4SLinus Torvalds #include <linux/init.h>
2561da177e4SLinus Torvalds #include <linux/smp_lock.h>
2571da177e4SLinus Torvalds #include <linux/fs.h>
2581da177e4SLinus Torvalds #include <linux/random.h>
2591da177e4SLinus Torvalds #include <linux/bootmem.h>
260b8059eadSDavid S. Miller #include <linux/cache.h>
261f4c50d99SHerbert Xu #include <linux/err.h>
2621da177e4SLinus Torvalds 
2631da177e4SLinus Torvalds #include <net/icmp.h>
2641da177e4SLinus Torvalds #include <net/tcp.h>
2651da177e4SLinus Torvalds #include <net/xfrm.h>
2661da177e4SLinus Torvalds #include <net/ip.h>
2671a2449a8SChris Leech #include <net/netdma.h>
2681da177e4SLinus Torvalds 
2691da177e4SLinus Torvalds #include <asm/uaccess.h>
2701da177e4SLinus Torvalds #include <asm/ioctls.h>
2711da177e4SLinus Torvalds 
2721da177e4SLinus Torvalds int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2731da177e4SLinus Torvalds 
274ba89966cSEric Dumazet DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
2751da177e4SLinus Torvalds 
2761da177e4SLinus Torvalds atomic_t tcp_orphan_count = ATOMIC_INIT(0);
2771da177e4SLinus Torvalds 
2780a5578cfSArnaldo Carvalho de Melo EXPORT_SYMBOL_GPL(tcp_orphan_count);
2790a5578cfSArnaldo Carvalho de Melo 
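/*
 * Tunables: sysctl_tcp_mem[] holds the {low, pressure, high} limits, in
 * pages, on memory used by all TCP sockets, while sysctl_tcp_wmem[] and
 * sysctl_tcp_rmem[] hold the per-socket {min, default, max} send and
 * receive buffer sizes in bytes.
 */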
280b8059eadSDavid S. Miller int sysctl_tcp_mem[3] __read_mostly;
281b8059eadSDavid S. Miller int sysctl_tcp_wmem[3] __read_mostly;
282b8059eadSDavid S. Miller int sysctl_tcp_rmem[3] __read_mostly;
2831da177e4SLinus Torvalds 
2841da177e4SLinus Torvalds EXPORT_SYMBOL(sysctl_tcp_mem);
2851da177e4SLinus Torvalds EXPORT_SYMBOL(sysctl_tcp_rmem);
2861da177e4SLinus Torvalds EXPORT_SYMBOL(sysctl_tcp_wmem);
2871da177e4SLinus Torvalds 
2881da177e4SLinus Torvalds atomic_t tcp_memory_allocated;	/* Current allocated memory. */
2891da177e4SLinus Torvalds atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */
2901da177e4SLinus Torvalds 
2911da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_memory_allocated);
2921da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sockets_allocated);
2931da177e4SLinus Torvalds 
2941da177e4SLinus Torvalds /*
2951da177e4SLinus Torvalds  * Pressure flag: try to collapse.
2961da177e4SLinus Torvalds  * Technical note: it is used by multiple contexts non-atomically.
2971da177e4SLinus Torvalds  * All of the sk_stream_mem_schedule() machinery is of this nature: accounting
2981da177e4SLinus Torvalds  * is strict, actions are advisory and have some latency.
2991da177e4SLinus Torvalds  */
3001da177e4SLinus Torvalds int tcp_memory_pressure;
3011da177e4SLinus Torvalds 
3021da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_memory_pressure);
3031da177e4SLinus Torvalds 
3041da177e4SLinus Torvalds void tcp_enter_memory_pressure(void)
3051da177e4SLinus Torvalds {
3061da177e4SLinus Torvalds 	if (!tcp_memory_pressure) {
3071da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
3081da177e4SLinus Torvalds 		tcp_memory_pressure = 1;
3091da177e4SLinus Torvalds 	}
3101da177e4SLinus Torvalds }
3111da177e4SLinus Torvalds 
3121da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_enter_memory_pressure);
3131da177e4SLinus Torvalds 
3141da177e4SLinus Torvalds /*
3151da177e4SLinus Torvalds  *	Wait for a TCP event.
3161da177e4SLinus Torvalds  *
3171da177e4SLinus Torvalds  *	Note that we don't need to lock the socket, as the upper poll layers
3181da177e4SLinus Torvalds  *	take care of normal races (between the test and the event) and we don't
3191da177e4SLinus Torvalds  *	go look at any of the socket buffers directly.
3201da177e4SLinus Torvalds  */
3211da177e4SLinus Torvalds unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
3221da177e4SLinus Torvalds {
3231da177e4SLinus Torvalds 	unsigned int mask;
3241da177e4SLinus Torvalds 	struct sock *sk = sock->sk;
3251da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
3261da177e4SLinus Torvalds 
3271da177e4SLinus Torvalds 	poll_wait(file, sk->sk_sleep, wait);
3281da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
329dc40c7bcSArnaldo Carvalho de Melo 		return inet_csk_listen_poll(sk);
3301da177e4SLinus Torvalds 
3311da177e4SLinus Torvalds 	/* Socket is not locked. We are protected from async events
3321da177e4SLinus Torvalds 	   by the poll logic, and correct handling of state changes
3331da177e4SLinus Torvalds 	   made by other threads is impossible in any case.
3341da177e4SLinus Torvalds 	 */
3351da177e4SLinus Torvalds 
3361da177e4SLinus Torvalds 	mask = 0;
3371da177e4SLinus Torvalds 	if (sk->sk_err)
3381da177e4SLinus Torvalds 		mask = POLLERR;
3391da177e4SLinus Torvalds 
3401da177e4SLinus Torvalds 	/*
3411da177e4SLinus Torvalds 	 * POLLHUP is certainly not done right. But poll() doesn't
3421da177e4SLinus Torvalds 	 * have a notion of HUP in just one direction, and for a
3431da177e4SLinus Torvalds 	 * socket the read side is more interesting.
3441da177e4SLinus Torvalds 	 *
3451da177e4SLinus Torvalds 	 * Some poll() documentation says that POLLHUP is incompatible
3461da177e4SLinus Torvalds 	 * with the POLLOUT/POLLWR flags, so somebody should check all
3471da177e4SLinus Torvalds 	 * this. But be careful: it tends to be safer to return too many
3481da177e4SLinus Torvalds 	 * bits than too few, and you can easily break real applications
3491da177e4SLinus Torvalds 	 * if you don't tell them that something has hung up!
3501da177e4SLinus Torvalds 	 *
3511da177e4SLinus Torvalds 	 * Check-me.
3521da177e4SLinus Torvalds 	 *
3531da177e4SLinus Torvalds 	 * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
3541da177e4SLinus Torvalds 	 * our fs/select.c). It means that after we received EOF,
3551da177e4SLinus Torvalds 	 * poll always returns immediately, making it impossible to poll()
3561da177e4SLinus Torvalds 	 * for write() in state CLOSE_WAIT. One solution is evident --- to set
3571da177e4SLinus Torvalds 	 * POLLHUP if and only if shutdown has been made in both directions.
3581da177e4SLinus Torvalds 	 * Actually, it is interesting to look at how Solaris and DUX
3591da177e4SLinus Torvalds 	 * solve this dilemma. If POLLHUP were maskable, I would prefer
3601da177e4SLinus Torvalds 	 * to set it on SND_SHUTDOWN. BTW the examples given
3611da177e4SLinus Torvalds 	 * in Stevens' books assume exactly this behaviour, which explains
3621da177e4SLinus Torvalds 	 * why POLLHUP is incompatible with POLLOUT.	--ANK
3631da177e4SLinus Torvalds 	 *
3641da177e4SLinus Torvalds 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
3651da177e4SLinus Torvalds 	 * blocking on fresh not-connected or disconnected socket. --ANK
3661da177e4SLinus Torvalds 	 */
3671da177e4SLinus Torvalds 	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
3681da177e4SLinus Torvalds 		mask |= POLLHUP;
3691da177e4SLinus Torvalds 	if (sk->sk_shutdown & RCV_SHUTDOWN)
370f348d70aSDavide Libenzi 		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
3711da177e4SLinus Torvalds 
3721da177e4SLinus Torvalds 	/* Connected? */
3731da177e4SLinus Torvalds 	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
3741da177e4SLinus Torvalds 		/* Potential race condition. If the read of tp below
3751da177e4SLinus Torvalds 		 * escapes above the read of sk->sk_state, we can be spuriously
3761da177e4SLinus Torvalds 		 * woken in SYN_* states. */
3771da177e4SLinus Torvalds 		if ((tp->rcv_nxt != tp->copied_seq) &&
3781da177e4SLinus Torvalds 		    (tp->urg_seq != tp->copied_seq ||
3791da177e4SLinus Torvalds 		     tp->rcv_nxt != tp->copied_seq + 1 ||
3801da177e4SLinus Torvalds 		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
3811da177e4SLinus Torvalds 			mask |= POLLIN | POLLRDNORM;
3821da177e4SLinus Torvalds 
3831da177e4SLinus Torvalds 		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
3841da177e4SLinus Torvalds 			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
3851da177e4SLinus Torvalds 				mask |= POLLOUT | POLLWRNORM;
3861da177e4SLinus Torvalds 			} else {  /* send SIGIO later */
3871da177e4SLinus Torvalds 				set_bit(SOCK_ASYNC_NOSPACE,
3881da177e4SLinus Torvalds 					&sk->sk_socket->flags);
3891da177e4SLinus Torvalds 				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
3901da177e4SLinus Torvalds 
3911da177e4SLinus Torvalds 				/* Race breaker. If space is freed after
3921da177e4SLinus Torvalds 				 * wspace test but before the flags are set,
3931da177e4SLinus Torvalds 				 * IO signal will be lost.
3941da177e4SLinus Torvalds 				 */
3951da177e4SLinus Torvalds 				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
3961da177e4SLinus Torvalds 					mask |= POLLOUT | POLLWRNORM;
3971da177e4SLinus Torvalds 			}
3981da177e4SLinus Torvalds 		}
3991da177e4SLinus Torvalds 
4001da177e4SLinus Torvalds 		if (tp->urg_data & TCP_URG_VALID)
4011da177e4SLinus Torvalds 			mask |= POLLPRI;
4021da177e4SLinus Torvalds 	}
4031da177e4SLinus Torvalds 	return mask;
4041da177e4SLinus Torvalds }
4051da177e4SLinus Torvalds 
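/*
 * tcp_ioctl() implements the TCP socket ioctls:
 *
 *	SIOCINQ (FIONREAD)	bytes that can be read right now, stopping
 *				short of a pending out-of-band byte and not
 *				counting a queued FIN;
 *	SIOCATMARK		nonzero if the next byte to read is the
 *				urgent mark;
 *	SIOCOUTQ		bytes written but not yet acknowledged by
 *				the peer.
 */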
4061da177e4SLinus Torvalds int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
4071da177e4SLinus Torvalds {
4081da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
4091da177e4SLinus Torvalds 	int answ;
4101da177e4SLinus Torvalds 
4111da177e4SLinus Torvalds 	switch (cmd) {
4121da177e4SLinus Torvalds 	case SIOCINQ:
4131da177e4SLinus Torvalds 		if (sk->sk_state == TCP_LISTEN)
4141da177e4SLinus Torvalds 			return -EINVAL;
4151da177e4SLinus Torvalds 
4161da177e4SLinus Torvalds 		lock_sock(sk);
4171da177e4SLinus Torvalds 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
4181da177e4SLinus Torvalds 			answ = 0;
4191da177e4SLinus Torvalds 		else if (sock_flag(sk, SOCK_URGINLINE) ||
4201da177e4SLinus Torvalds 			 !tp->urg_data ||
4211da177e4SLinus Torvalds 			 before(tp->urg_seq, tp->copied_seq) ||
4221da177e4SLinus Torvalds 			 !before(tp->urg_seq, tp->rcv_nxt)) {
4231da177e4SLinus Torvalds 			answ = tp->rcv_nxt - tp->copied_seq;
4241da177e4SLinus Torvalds 
4251da177e4SLinus Torvalds 			/* Subtract 1, if FIN is in queue. */
4261da177e4SLinus Torvalds 			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
4271da177e4SLinus Torvalds 				answ -=
4281da177e4SLinus Torvalds 		       ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
4291da177e4SLinus Torvalds 		} else
4301da177e4SLinus Torvalds 			answ = tp->urg_seq - tp->copied_seq;
4311da177e4SLinus Torvalds 		release_sock(sk);
4321da177e4SLinus Torvalds 		break;
4331da177e4SLinus Torvalds 	case SIOCATMARK:
4341da177e4SLinus Torvalds 		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
4351da177e4SLinus Torvalds 		break;
4361da177e4SLinus Torvalds 	case SIOCOUTQ:
4371da177e4SLinus Torvalds 		if (sk->sk_state == TCP_LISTEN)
4381da177e4SLinus Torvalds 			return -EINVAL;
4391da177e4SLinus Torvalds 
4401da177e4SLinus Torvalds 		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
4411da177e4SLinus Torvalds 			answ = 0;
4421da177e4SLinus Torvalds 		else
4431da177e4SLinus Torvalds 			answ = tp->write_seq - tp->snd_una;
4441da177e4SLinus Torvalds 		break;
4451da177e4SLinus Torvalds 	default:
4461da177e4SLinus Torvalds 		return -ENOIOCTLCMD;
4471da177e4SLinus Torvalds 	}
4481da177e4SLinus Torvalds 
4491da177e4SLinus Torvalds 	return put_user(answ, (int __user *)arg);
4501da177e4SLinus Torvalds }
4511da177e4SLinus Torvalds 
4521da177e4SLinus Torvalds static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
4531da177e4SLinus Torvalds {
4541da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
4551da177e4SLinus Torvalds 	tp->pushed_seq = tp->write_seq;
4561da177e4SLinus Torvalds }
4571da177e4SLinus Torvalds 
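/* A segment must be pushed out (PSH set) once more than half of the
 * peer's largest-ever advertised window has been queued since the last
 * push (tp->pushed_seq, maintained by tcp_mark_push() above).
 */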
4581da177e4SLinus Torvalds static inline int forced_push(struct tcp_sock *tp)
4591da177e4SLinus Torvalds {
4601da177e4SLinus Torvalds 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
4611da177e4SLinus Torvalds }
4621da177e4SLinus Torvalds 
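/* Initialise the TCP control block of a freshly allocated skb, append
 * it to the socket write queue, charge its memory to the socket and,
 * if the queue was empty, make it the new send head.
 */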
4631da177e4SLinus Torvalds static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
4641da177e4SLinus Torvalds 			      struct sk_buff *skb)
4651da177e4SLinus Torvalds {
4661da177e4SLinus Torvalds 	skb->csum = 0;
4671da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->seq = tp->write_seq;
4681da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
4691da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
4701da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->sacked = 0;
4711da177e4SLinus Torvalds 	skb_header_release(skb);
4721da177e4SLinus Torvalds 	__skb_queue_tail(&sk->sk_write_queue, skb);
4731da177e4SLinus Torvalds 	sk_charge_skb(sk, skb);
4741da177e4SLinus Torvalds 	if (!sk->sk_send_head)
4751da177e4SLinus Torvalds 		sk->sk_send_head = skb;
47689ebd197SDavid S. Miller 	if (tp->nonagle & TCP_NAGLE_PUSH)
4771da177e4SLinus Torvalds 		tp->nonagle &= ~TCP_NAGLE_PUSH;
4781da177e4SLinus Torvalds }
4791da177e4SLinus Torvalds 
4801da177e4SLinus Torvalds static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
4811da177e4SLinus Torvalds 				struct sk_buff *skb)
4821da177e4SLinus Torvalds {
4831da177e4SLinus Torvalds 	if (flags & MSG_OOB) {
4841da177e4SLinus Torvalds 		tp->urg_mode = 1;
4851da177e4SLinus Torvalds 		tp->snd_up = tp->write_seq;
4861da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
4871da177e4SLinus Torvalds 	}
4881da177e4SLinus Torvalds }
4891da177e4SLinus Torvalds 
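/* Transmit whatever is pending on the write queue, marking PSH and URG
 * as required and letting MSG_MORE temporarily enforce corking.
 */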
4901da177e4SLinus Torvalds static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
4911da177e4SLinus Torvalds 			    int mss_now, int nonagle)
4921da177e4SLinus Torvalds {
4931da177e4SLinus Torvalds 	if (sk->sk_send_head) {
4941da177e4SLinus Torvalds 		struct sk_buff *skb = sk->sk_write_queue.prev;
4951da177e4SLinus Torvalds 		if (!(flags & MSG_MORE) || forced_push(tp))
4961da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
4971da177e4SLinus Torvalds 		tcp_mark_urg(tp, flags, skb);
4981da177e4SLinus Torvalds 		__tcp_push_pending_frames(sk, tp, mss_now,
4991da177e4SLinus Torvalds 					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
5001da177e4SLinus Torvalds 	}
5011da177e4SLinus Torvalds }
5021da177e4SLinus Torvalds 
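/* Zero-copy send: attach the caller's pages to skb page fragments
 * (coalescing with the last fragment when possible), growing each
 * segment up to size_goal before pushing it out.  Used by
 * tcp_sendpage() below.
 */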
5031da177e4SLinus Torvalds static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
5041da177e4SLinus Torvalds 			 size_t psize, int flags)
5051da177e4SLinus Torvalds {
5061da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
507c1b4a7e6SDavid S. Miller 	int mss_now, size_goal;
5081da177e4SLinus Torvalds 	int err;
5091da177e4SLinus Torvalds 	ssize_t copied;
5101da177e4SLinus Torvalds 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
5111da177e4SLinus Torvalds 
5121da177e4SLinus Torvalds 	/* Wait for a connection to finish. */
5131da177e4SLinus Torvalds 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
5141da177e4SLinus Torvalds 		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
5151da177e4SLinus Torvalds 			goto out_err;
5161da177e4SLinus Torvalds 
5171da177e4SLinus Torvalds 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
5181da177e4SLinus Torvalds 
5191da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
520c1b4a7e6SDavid S. Miller 	size_goal = tp->xmit_size_goal;
5211da177e4SLinus Torvalds 	copied = 0;
5221da177e4SLinus Torvalds 
5231da177e4SLinus Torvalds 	err = -EPIPE;
5241da177e4SLinus Torvalds 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
5251da177e4SLinus Torvalds 		goto do_error;
5261da177e4SLinus Torvalds 
5271da177e4SLinus Torvalds 	while (psize > 0) {
5281da177e4SLinus Torvalds 		struct sk_buff *skb = sk->sk_write_queue.prev;
5291da177e4SLinus Torvalds 		struct page *page = pages[poffset / PAGE_SIZE];
5301da177e4SLinus Torvalds 		int copy, i, can_coalesce;
5311da177e4SLinus Torvalds 		int offset = poffset % PAGE_SIZE;
5321da177e4SLinus Torvalds 		int size = min_t(size_t, psize, PAGE_SIZE - offset);
5331da177e4SLinus Torvalds 
534c1b4a7e6SDavid S. Miller 		if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
5351da177e4SLinus Torvalds new_segment:
5361da177e4SLinus Torvalds 			if (!sk_stream_memory_free(sk))
5371da177e4SLinus Torvalds 				goto wait_for_sndbuf;
5381da177e4SLinus Torvalds 
5391da177e4SLinus Torvalds 			skb = sk_stream_alloc_pskb(sk, 0, 0,
5401da177e4SLinus Torvalds 						   sk->sk_allocation);
5411da177e4SLinus Torvalds 			if (!skb)
5421da177e4SLinus Torvalds 				goto wait_for_memory;
5431da177e4SLinus Torvalds 
5441da177e4SLinus Torvalds 			skb_entail(sk, tp, skb);
545c1b4a7e6SDavid S. Miller 			copy = size_goal;
5461da177e4SLinus Torvalds 		}
5471da177e4SLinus Torvalds 
5481da177e4SLinus Torvalds 		if (copy > size)
5491da177e4SLinus Torvalds 			copy = size;
5501da177e4SLinus Torvalds 
5511da177e4SLinus Torvalds 		i = skb_shinfo(skb)->nr_frags;
5521da177e4SLinus Torvalds 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
5531da177e4SLinus Torvalds 		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
5541da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
5551da177e4SLinus Torvalds 			goto new_segment;
5561da177e4SLinus Torvalds 		}
557d80d99d6SHerbert Xu 		if (!sk_stream_wmem_schedule(sk, copy))
5581da177e4SLinus Torvalds 			goto wait_for_memory;
5591da177e4SLinus Torvalds 
5601da177e4SLinus Torvalds 		if (can_coalesce) {
5611da177e4SLinus Torvalds 			skb_shinfo(skb)->frags[i - 1].size += copy;
5621da177e4SLinus Torvalds 		} else {
5631da177e4SLinus Torvalds 			get_page(page);
5641da177e4SLinus Torvalds 			skb_fill_page_desc(skb, i, page, offset, copy);
5651da177e4SLinus Torvalds 		}
5661da177e4SLinus Torvalds 
5671da177e4SLinus Torvalds 		skb->len += copy;
5681da177e4SLinus Torvalds 		skb->data_len += copy;
5691da177e4SLinus Torvalds 		skb->truesize += copy;
5701da177e4SLinus Torvalds 		sk->sk_wmem_queued += copy;
5711da177e4SLinus Torvalds 		sk->sk_forward_alloc -= copy;
5721da177e4SLinus Torvalds 		skb->ip_summed = CHECKSUM_HW;
5731da177e4SLinus Torvalds 		tp->write_seq += copy;
5741da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq += copy;
5757967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = 0;
5761da177e4SLinus Torvalds 
5771da177e4SLinus Torvalds 		if (!copied)
5781da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
5791da177e4SLinus Torvalds 
5801da177e4SLinus Torvalds 		copied += copy;
5811da177e4SLinus Torvalds 		poffset += copy;
5821da177e4SLinus Torvalds 		if (!(psize -= copy))
5831da177e4SLinus Torvalds 			goto out;
5841da177e4SLinus Torvalds 
585c1b4a7e6SDavid S. Miller 		if (skb->len < mss_now || (flags & MSG_OOB))
5861da177e4SLinus Torvalds 			continue;
5871da177e4SLinus Torvalds 
5881da177e4SLinus Torvalds 		if (forced_push(tp)) {
5891da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
5901da177e4SLinus Torvalds 			__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
5911da177e4SLinus Torvalds 		} else if (skb == sk->sk_send_head)
5921da177e4SLinus Torvalds 			tcp_push_one(sk, mss_now);
5931da177e4SLinus Torvalds 		continue;
5941da177e4SLinus Torvalds 
5951da177e4SLinus Torvalds wait_for_sndbuf:
5961da177e4SLinus Torvalds 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
5971da177e4SLinus Torvalds wait_for_memory:
5981da177e4SLinus Torvalds 		if (copied)
5991da177e4SLinus Torvalds 			tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
6001da177e4SLinus Torvalds 
6011da177e4SLinus Torvalds 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
6021da177e4SLinus Torvalds 			goto do_error;
6031da177e4SLinus Torvalds 
6041da177e4SLinus Torvalds 		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
605c1b4a7e6SDavid S. Miller 		size_goal = tp->xmit_size_goal;
6061da177e4SLinus Torvalds 	}
6071da177e4SLinus Torvalds 
6081da177e4SLinus Torvalds out:
6091da177e4SLinus Torvalds 	if (copied)
6101da177e4SLinus Torvalds 		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
6111da177e4SLinus Torvalds 	return copied;
6121da177e4SLinus Torvalds 
6131da177e4SLinus Torvalds do_error:
6141da177e4SLinus Torvalds 	if (copied)
6151da177e4SLinus Torvalds 		goto out;
6161da177e4SLinus Torvalds out_err:
6171da177e4SLinus Torvalds 	return sk_stream_error(sk, flags, err);
6181da177e4SLinus Torvalds }
6191da177e4SLinus Torvalds 
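/* sendpage() entry point.  Falls back to sock_no_sendpage(), which
 * copies the data, when the route's device cannot do scatter/gather
 * together with checksum offload.
 */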
6201da177e4SLinus Torvalds ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
6211da177e4SLinus Torvalds 		     size_t size, int flags)
6221da177e4SLinus Torvalds {
6231da177e4SLinus Torvalds 	ssize_t res;
6241da177e4SLinus Torvalds 	struct sock *sk = sock->sk;
6251da177e4SLinus Torvalds 
6261da177e4SLinus Torvalds 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
6278648b305SHerbert Xu 	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
6281da177e4SLinus Torvalds 		return sock_no_sendpage(sock, page, offset, size, flags);
6291da177e4SLinus Torvalds 
6301da177e4SLinus Torvalds 	lock_sock(sk);
6311da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
6321da177e4SLinus Torvalds 	res = do_tcp_sendpages(sk, &page, offset, size, flags);
6331da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
6341da177e4SLinus Torvalds 	release_sock(sk);
6351da177e4SLinus Torvalds 	return res;
6361da177e4SLinus Torvalds }
6371da177e4SLinus Torvalds 
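/* Cache of the last partially filled page (and the offset into it)
 * used by tcp_sendmsg(), so that successive small writes can keep
 * filling the same page.
 */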
6381da177e4SLinus Torvalds #define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
6391da177e4SLinus Torvalds #define TCP_OFF(sk)	(sk->sk_sndmsg_off)
6401da177e4SLinus Torvalds 
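/* How much linear (skb head) space to allocate for a new segment:
 * a full MSS when the device cannot do scatter/gather, none when TSO
 * will build the segments, and otherwise at most about a page of head
 * room, with the rest of the MSS going into page fragments.
 */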
6411da177e4SLinus Torvalds static inline int select_size(struct sock *sk, struct tcp_sock *tp)
6421da177e4SLinus Torvalds {
643c1b4a7e6SDavid S. Miller 	int tmp = tp->mss_cache;
6441da177e4SLinus Torvalds 
645b4e26f5eSDavid S. Miller 	if (sk->sk_route_caps & NETIF_F_SG) {
646b4e26f5eSDavid S. Miller 		if (sk->sk_route_caps & NETIF_F_TSO)
647c65f7f00SDavid S. Miller 			tmp = 0;
648b4e26f5eSDavid S. Miller 		else {
649b4e26f5eSDavid S. Miller 			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
650b4e26f5eSDavid S. Miller 
651b4e26f5eSDavid S. Miller 			if (tmp >= pgbreak &&
652b4e26f5eSDavid S. Miller 			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
653b4e26f5eSDavid S. Miller 				tmp = pgbreak;
654b4e26f5eSDavid S. Miller 		}
655b4e26f5eSDavid S. Miller 	}
6561da177e4SLinus Torvalds 
6571da177e4SLinus Torvalds 	return tmp;
6581da177e4SLinus Torvalds }
6591da177e4SLinus Torvalds 
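/* Copy data from user space into the socket write queue.  Data is
 * appended to the tail skb while it has room (first into its linear
 * head, then into page fragments), new segments are allocated as
 * needed, and filled segments are pushed out subject to Nagle and the
 * MSG_MORE/MSG_OOB flags.
 */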
6601da177e4SLinus Torvalds int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
6611da177e4SLinus Torvalds 		size_t size)
6621da177e4SLinus Torvalds {
6631da177e4SLinus Torvalds 	struct iovec *iov;
6641da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
6651da177e4SLinus Torvalds 	struct sk_buff *skb;
6661da177e4SLinus Torvalds 	int iovlen, flags;
667c1b4a7e6SDavid S. Miller 	int mss_now, size_goal;
6681da177e4SLinus Torvalds 	int err, copied;
6691da177e4SLinus Torvalds 	long timeo;
6701da177e4SLinus Torvalds 
6711da177e4SLinus Torvalds 	lock_sock(sk);
6721da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
6731da177e4SLinus Torvalds 
6741da177e4SLinus Torvalds 	flags = msg->msg_flags;
6751da177e4SLinus Torvalds 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
6761da177e4SLinus Torvalds 
6771da177e4SLinus Torvalds 	/* Wait for a connection to finish. */
6781da177e4SLinus Torvalds 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
6791da177e4SLinus Torvalds 		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
6801da177e4SLinus Torvalds 			goto out_err;
6811da177e4SLinus Torvalds 
6821da177e4SLinus Torvalds 	/* This should be in poll */
6831da177e4SLinus Torvalds 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
6841da177e4SLinus Torvalds 
6851da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
686c1b4a7e6SDavid S. Miller 	size_goal = tp->xmit_size_goal;
6871da177e4SLinus Torvalds 
6881da177e4SLinus Torvalds 	/* Ok commence sending. */
6891da177e4SLinus Torvalds 	iovlen = msg->msg_iovlen;
6901da177e4SLinus Torvalds 	iov = msg->msg_iov;
6911da177e4SLinus Torvalds 	copied = 0;
6921da177e4SLinus Torvalds 
6931da177e4SLinus Torvalds 	err = -EPIPE;
6941da177e4SLinus Torvalds 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
6951da177e4SLinus Torvalds 		goto do_error;
6961da177e4SLinus Torvalds 
6971da177e4SLinus Torvalds 	while (--iovlen >= 0) {
6981da177e4SLinus Torvalds 		int seglen = iov->iov_len;
6991da177e4SLinus Torvalds 		unsigned char __user *from = iov->iov_base;
7001da177e4SLinus Torvalds 
7011da177e4SLinus Torvalds 		iov++;
7021da177e4SLinus Torvalds 
7031da177e4SLinus Torvalds 		while (seglen > 0) {
7041da177e4SLinus Torvalds 			int copy;
7051da177e4SLinus Torvalds 
7061da177e4SLinus Torvalds 			skb = sk->sk_write_queue.prev;
7071da177e4SLinus Torvalds 
7081da177e4SLinus Torvalds 			if (!sk->sk_send_head ||
709c1b4a7e6SDavid S. Miller 			    (copy = size_goal - skb->len) <= 0) {
7101da177e4SLinus Torvalds 
7111da177e4SLinus Torvalds new_segment:
7121da177e4SLinus Torvalds 				/* Allocate new segment. If the interface is SG,
7131da177e4SLinus Torvalds 				 * allocate skb fitting to single page.
7141da177e4SLinus Torvalds 				 */
7151da177e4SLinus Torvalds 				if (!sk_stream_memory_free(sk))
7161da177e4SLinus Torvalds 					goto wait_for_sndbuf;
7171da177e4SLinus Torvalds 
7181da177e4SLinus Torvalds 				skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
7191da177e4SLinus Torvalds 							   0, sk->sk_allocation);
7201da177e4SLinus Torvalds 				if (!skb)
7211da177e4SLinus Torvalds 					goto wait_for_memory;
7221da177e4SLinus Torvalds 
7231da177e4SLinus Torvalds 				/*
7241da177e4SLinus Torvalds 				 * Check whether we can use HW checksum.
7251da177e4SLinus Torvalds 				 */
7268648b305SHerbert Xu 				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
7271da177e4SLinus Torvalds 					skb->ip_summed = CHECKSUM_HW;
7281da177e4SLinus Torvalds 
7291da177e4SLinus Torvalds 				skb_entail(sk, tp, skb);
730c1b4a7e6SDavid S. Miller 				copy = size_goal;
7311da177e4SLinus Torvalds 			}
7321da177e4SLinus Torvalds 
7331da177e4SLinus Torvalds 			/* Try to append data to the end of skb. */
7341da177e4SLinus Torvalds 			if (copy > seglen)
7351da177e4SLinus Torvalds 				copy = seglen;
7361da177e4SLinus Torvalds 
7371da177e4SLinus Torvalds 			/* Where to copy to? */
7381da177e4SLinus Torvalds 			if (skb_tailroom(skb) > 0) {
7391da177e4SLinus Torvalds 				/* We have some space in skb head. Superb! */
7401da177e4SLinus Torvalds 				if (copy > skb_tailroom(skb))
7411da177e4SLinus Torvalds 					copy = skb_tailroom(skb);
7421da177e4SLinus Torvalds 				if ((err = skb_add_data(skb, from, copy)) != 0)
7431da177e4SLinus Torvalds 					goto do_fault;
7441da177e4SLinus Torvalds 			} else {
7451da177e4SLinus Torvalds 				int merge = 0;
7461da177e4SLinus Torvalds 				int i = skb_shinfo(skb)->nr_frags;
7471da177e4SLinus Torvalds 				struct page *page = TCP_PAGE(sk);
7481da177e4SLinus Torvalds 				int off = TCP_OFF(sk);
7491da177e4SLinus Torvalds 
7501da177e4SLinus Torvalds 				if (skb_can_coalesce(skb, i, page, off) &&
7511da177e4SLinus Torvalds 				    off != PAGE_SIZE) {
7521da177e4SLinus Torvalds 					/* We can extend the last page
7531da177e4SLinus Torvalds 					 * fragment. */
7541da177e4SLinus Torvalds 					merge = 1;
7551da177e4SLinus Torvalds 				} else if (i == MAX_SKB_FRAGS ||
7561da177e4SLinus Torvalds 					   (!i &&
7571da177e4SLinus Torvalds 					   !(sk->sk_route_caps & NETIF_F_SG))) {
7581da177e4SLinus Torvalds 					/* Need to add new fragment and cannot
7591da177e4SLinus Torvalds 					 * do this because interface is non-SG,
7601da177e4SLinus Torvalds 					 * or because all the page slots are
7611da177e4SLinus Torvalds 					 * busy. */
7621da177e4SLinus Torvalds 					tcp_mark_push(tp, skb);
7631da177e4SLinus Torvalds 					goto new_segment;
7641da177e4SLinus Torvalds 				} else if (page) {
7651da177e4SLinus Torvalds 					if (off == PAGE_SIZE) {
7661da177e4SLinus Torvalds 						put_page(page);
7671da177e4SLinus Torvalds 						TCP_PAGE(sk) = page = NULL;
768fb5f5e6eSHerbert Xu 						off = 0;
7691da177e4SLinus Torvalds 					}
770ef015786SHerbert Xu 				} else
771fb5f5e6eSHerbert Xu 					off = 0;
772ef015786SHerbert Xu 
773ef015786SHerbert Xu 				if (copy > PAGE_SIZE - off)
774ef015786SHerbert Xu 					copy = PAGE_SIZE - off;
775ef015786SHerbert Xu 
776ef015786SHerbert Xu 				if (!sk_stream_wmem_schedule(sk, copy))
777ef015786SHerbert Xu 					goto wait_for_memory;
7781da177e4SLinus Torvalds 
7791da177e4SLinus Torvalds 				if (!page) {
7801da177e4SLinus Torvalds 					/* Allocate new cache page. */
7811da177e4SLinus Torvalds 					if (!(page = sk_stream_alloc_page(sk)))
7821da177e4SLinus Torvalds 						goto wait_for_memory;
7831da177e4SLinus Torvalds 				}
7841da177e4SLinus Torvalds 
7851da177e4SLinus Torvalds 				/* Time to copy data. We are close to
7861da177e4SLinus Torvalds 				 * the end! */
7871da177e4SLinus Torvalds 				err = skb_copy_to_page(sk, from, skb, page,
7881da177e4SLinus Torvalds 						       off, copy);
7891da177e4SLinus Torvalds 				if (err) {
7901da177e4SLinus Torvalds 					/* If this page was new, give it to the
7911da177e4SLinus Torvalds 					 * socket so it does not get leaked.
7921da177e4SLinus Torvalds 					 */
7931da177e4SLinus Torvalds 					if (!TCP_PAGE(sk)) {
7941da177e4SLinus Torvalds 						TCP_PAGE(sk) = page;
7951da177e4SLinus Torvalds 						TCP_OFF(sk) = 0;
7961da177e4SLinus Torvalds 					}
7971da177e4SLinus Torvalds 					goto do_error;
7981da177e4SLinus Torvalds 				}
7991da177e4SLinus Torvalds 
8001da177e4SLinus Torvalds 				/* Update the skb. */
8011da177e4SLinus Torvalds 				if (merge) {
8021da177e4SLinus Torvalds 					skb_shinfo(skb)->frags[i - 1].size +=
8031da177e4SLinus Torvalds 									copy;
8041da177e4SLinus Torvalds 				} else {
8051da177e4SLinus Torvalds 					skb_fill_page_desc(skb, i, page, off, copy);
8061da177e4SLinus Torvalds 					if (TCP_PAGE(sk)) {
8071da177e4SLinus Torvalds 						get_page(page);
8081da177e4SLinus Torvalds 					} else if (off + copy < PAGE_SIZE) {
8091da177e4SLinus Torvalds 						get_page(page);
8101da177e4SLinus Torvalds 						TCP_PAGE(sk) = page;
8111da177e4SLinus Torvalds 					}
8121da177e4SLinus Torvalds 				}
8131da177e4SLinus Torvalds 
8141da177e4SLinus Torvalds 				TCP_OFF(sk) = off + copy;
8151da177e4SLinus Torvalds 			}
8161da177e4SLinus Torvalds 
8171da177e4SLinus Torvalds 			if (!copied)
8181da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
8191da177e4SLinus Torvalds 
8201da177e4SLinus Torvalds 			tp->write_seq += copy;
8211da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->end_seq += copy;
8227967168cSHerbert Xu 			skb_shinfo(skb)->gso_segs = 0;
8231da177e4SLinus Torvalds 
8241da177e4SLinus Torvalds 			from += copy;
8251da177e4SLinus Torvalds 			copied += copy;
8261da177e4SLinus Torvalds 			if ((seglen -= copy) == 0 && iovlen == 0)
8271da177e4SLinus Torvalds 				goto out;
8281da177e4SLinus Torvalds 
829c1b4a7e6SDavid S. Miller 			if (skb->len < mss_now || (flags & MSG_OOB))
8301da177e4SLinus Torvalds 				continue;
8311da177e4SLinus Torvalds 
8321da177e4SLinus Torvalds 			if (forced_push(tp)) {
8331da177e4SLinus Torvalds 				tcp_mark_push(tp, skb);
8341da177e4SLinus Torvalds 				__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
8351da177e4SLinus Torvalds 			} else if (skb == sk->sk_send_head)
8361da177e4SLinus Torvalds 				tcp_push_one(sk, mss_now);
8371da177e4SLinus Torvalds 			continue;
8381da177e4SLinus Torvalds 
8391da177e4SLinus Torvalds wait_for_sndbuf:
8401da177e4SLinus Torvalds 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
8411da177e4SLinus Torvalds wait_for_memory:
8421da177e4SLinus Torvalds 			if (copied)
8431da177e4SLinus Torvalds 				tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
8441da177e4SLinus Torvalds 
8451da177e4SLinus Torvalds 			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
8461da177e4SLinus Torvalds 				goto do_error;
8471da177e4SLinus Torvalds 
8481da177e4SLinus Torvalds 			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
849c1b4a7e6SDavid S. Miller 			size_goal = tp->xmit_size_goal;
8501da177e4SLinus Torvalds 		}
8511da177e4SLinus Torvalds 	}
8521da177e4SLinus Torvalds 
8531da177e4SLinus Torvalds out:
8541da177e4SLinus Torvalds 	if (copied)
8551da177e4SLinus Torvalds 		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
8561da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
8571da177e4SLinus Torvalds 	release_sock(sk);
8581da177e4SLinus Torvalds 	return copied;
8591da177e4SLinus Torvalds 
8601da177e4SLinus Torvalds do_fault:
8611da177e4SLinus Torvalds 	if (!skb->len) {
8621da177e4SLinus Torvalds 		if (sk->sk_send_head == skb)
8631da177e4SLinus Torvalds 			sk->sk_send_head = NULL;
8648728b834SDavid S. Miller 		__skb_unlink(skb, &sk->sk_write_queue);
8651da177e4SLinus Torvalds 		sk_stream_free_skb(sk, skb);
8661da177e4SLinus Torvalds 	}
8671da177e4SLinus Torvalds 
8681da177e4SLinus Torvalds do_error:
8691da177e4SLinus Torvalds 	if (copied)
8701da177e4SLinus Torvalds 		goto out;
8711da177e4SLinus Torvalds out_err:
8721da177e4SLinus Torvalds 	err = sk_stream_error(sk, flags, err);
8731da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
8741da177e4SLinus Torvalds 	release_sock(sk);
8751da177e4SLinus Torvalds 	return err;
8761da177e4SLinus Torvalds }
8771da177e4SLinus Torvalds 
8781da177e4SLinus Torvalds /*
8791da177e4SLinus Torvalds  *	Handle reading urgent data. BSD has very simple semantics for
8801da177e4SLinus Torvalds  *	this, no blocking and very strange errors 8)
8811da177e4SLinus Torvalds  */
8821da177e4SLinus Torvalds 
8831da177e4SLinus Torvalds static int tcp_recv_urg(struct sock *sk, long timeo,
8841da177e4SLinus Torvalds 			struct msghdr *msg, int len, int flags,
8851da177e4SLinus Torvalds 			int *addr_len)
8861da177e4SLinus Torvalds {
8871da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
8881da177e4SLinus Torvalds 
8891da177e4SLinus Torvalds 	/* No URG data to read. */
8901da177e4SLinus Torvalds 	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
8911da177e4SLinus Torvalds 	    tp->urg_data == TCP_URG_READ)
8921da177e4SLinus Torvalds 		return -EINVAL;	/* Yes this is right ! */
8931da177e4SLinus Torvalds 
8941da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
8951da177e4SLinus Torvalds 		return -ENOTCONN;
8961da177e4SLinus Torvalds 
8971da177e4SLinus Torvalds 	if (tp->urg_data & TCP_URG_VALID) {
8981da177e4SLinus Torvalds 		int err = 0;
8991da177e4SLinus Torvalds 		char c = tp->urg_data;
9001da177e4SLinus Torvalds 
9011da177e4SLinus Torvalds 		if (!(flags & MSG_PEEK))
9021da177e4SLinus Torvalds 			tp->urg_data = TCP_URG_READ;
9031da177e4SLinus Torvalds 
9041da177e4SLinus Torvalds 		/* Read urgent data. */
9051da177e4SLinus Torvalds 		msg->msg_flags |= MSG_OOB;
9061da177e4SLinus Torvalds 
9071da177e4SLinus Torvalds 		if (len > 0) {
9081da177e4SLinus Torvalds 			if (!(flags & MSG_TRUNC))
9091da177e4SLinus Torvalds 				err = memcpy_toiovec(msg->msg_iov, &c, 1);
9101da177e4SLinus Torvalds 			len = 1;
9111da177e4SLinus Torvalds 		} else
9121da177e4SLinus Torvalds 			msg->msg_flags |= MSG_TRUNC;
9131da177e4SLinus Torvalds 
9141da177e4SLinus Torvalds 		return err ? -EFAULT : len;
9151da177e4SLinus Torvalds 	}
9161da177e4SLinus Torvalds 
9171da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
9181da177e4SLinus Torvalds 		return 0;
9191da177e4SLinus Torvalds 
9201da177e4SLinus Torvalds 	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
9211da177e4SLinus Torvalds 	 * the available implementations agree in this case:
9221da177e4SLinus Torvalds 	 * this call should never block, independent of the
9231da177e4SLinus Torvalds 	 * blocking state of the socket.
9241da177e4SLinus Torvalds 	 * Mike <pall@rz.uni-karlsruhe.de>
9251da177e4SLinus Torvalds 	 */
9261da177e4SLinus Torvalds 	return -EAGAIN;
9271da177e4SLinus Torvalds }
9281da177e4SLinus Torvalds 
9291da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user,
9301da177e4SLinus Torvalds  * then send an ACK if necessary.  COPIED is the number of bytes
9311da177e4SLinus Torvalds  * tcp_recvmsg has given to the user so far, it speeds up the
9321da177e4SLinus Torvalds  * calculation of whether or not we must ACK for the sake of
9331da177e4SLinus Torvalds  * a window update.
9341da177e4SLinus Torvalds  */
9350e4b4992SChris Leech void tcp_cleanup_rbuf(struct sock *sk, int copied)
9361da177e4SLinus Torvalds {
9371da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
9381da177e4SLinus Torvalds 	int time_to_ack = 0;
9391da177e4SLinus Torvalds 
9401da177e4SLinus Torvalds #if TCP_DEBUG
9411da177e4SLinus Torvalds 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
9421da177e4SLinus Torvalds 
9431da177e4SLinus Torvalds 	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
9441da177e4SLinus Torvalds #endif
9451da177e4SLinus Torvalds 
946463c84b9SArnaldo Carvalho de Melo 	if (inet_csk_ack_scheduled(sk)) {
947463c84b9SArnaldo Carvalho de Melo 		const struct inet_connection_sock *icsk = inet_csk(sk);
9481da177e4SLinus Torvalds 		   /* Delayed ACKs frequently hit locked sockets during bulk
9491da177e4SLinus Torvalds 		    * receive. */
950463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
9511da177e4SLinus Torvalds 		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
952463c84b9SArnaldo Carvalho de Melo 		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
9531da177e4SLinus Torvalds 		    /*
9541da177e4SLinus Torvalds 	     * If this read emptied the read buffer, we send an ACK when
9551da177e4SLinus Torvalds 	     * the connection is not bidirectional, the user has drained
9561da177e4SLinus Torvalds 	     * the receive buffer, and there was a small segment
9571da177e4SLinus Torvalds 	     * in the queue.
9581da177e4SLinus Torvalds 		     */
959463c84b9SArnaldo Carvalho de Melo 		    (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
960463c84b9SArnaldo Carvalho de Melo 		     !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
9611da177e4SLinus Torvalds 			time_to_ack = 1;
9621da177e4SLinus Torvalds 	}
9631da177e4SLinus Torvalds 
9641da177e4SLinus Torvalds 	/* We send an ACK if we can now advertise a non-zero window
9651da177e4SLinus Torvalds 	 * which has been raised "significantly".
9661da177e4SLinus Torvalds 	 *
9671da177e4SLinus Torvalds 	 * Even if the window was raised up to infinity, do not send a window-open
9681da177e4SLinus Torvalds 	 * ACK in states where we will not receive any more data. It is useless.
9691da177e4SLinus Torvalds 	 */
9701da177e4SLinus Torvalds 	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
9711da177e4SLinus Torvalds 		__u32 rcv_window_now = tcp_receive_window(tp);
9721da177e4SLinus Torvalds 
9731da177e4SLinus Torvalds 		/* Optimize, __tcp_select_window() is not cheap. */
9741da177e4SLinus Torvalds 		if (2*rcv_window_now <= tp->window_clamp) {
9751da177e4SLinus Torvalds 			__u32 new_window = __tcp_select_window(sk);
9761da177e4SLinus Torvalds 
9771da177e4SLinus Torvalds 			/* Send ACK now, if this read freed lots of space
9781da177e4SLinus Torvalds 			 * in our buffer. Certainly, new_window is always the new window;
9791da177e4SLinus Torvalds 			 * we can advertise it now if it is not less than the current one.
9801da177e4SLinus Torvalds 			 * "Lots" means "at least twice" here.
9811da177e4SLinus Torvalds 			 */
9821da177e4SLinus Torvalds 			if (new_window && new_window >= 2 * rcv_window_now)
9831da177e4SLinus Torvalds 				time_to_ack = 1;
9841da177e4SLinus Torvalds 		}
9851da177e4SLinus Torvalds 	}
9861da177e4SLinus Torvalds 	if (time_to_ack)
9871da177e4SLinus Torvalds 		tcp_send_ack(sk);
9881da177e4SLinus Torvalds }
9891da177e4SLinus Torvalds 
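/* Feed the segments that were queued on the prequeue (while the user
 * was busy in tcp_recvmsg) to the regular receive path, with BHs
 * disabled, and reset the prequeue memory accounting.
 */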
9901da177e4SLinus Torvalds static void tcp_prequeue_process(struct sock *sk)
9911da177e4SLinus Torvalds {
9921da177e4SLinus Torvalds 	struct sk_buff *skb;
9931da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
9941da177e4SLinus Torvalds 
995b03efcfbSDavid S. Miller 	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
9961da177e4SLinus Torvalds 
9971da177e4SLinus Torvalds 	/* RX process wants to run with disabled BHs, though it is not
9981da177e4SLinus Torvalds 	 * necessary */
9991da177e4SLinus Torvalds 	local_bh_disable();
10001da177e4SLinus Torvalds 	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
10011da177e4SLinus Torvalds 		sk->sk_backlog_rcv(sk, skb);
10021da177e4SLinus Torvalds 	local_bh_enable();
10031da177e4SLinus Torvalds 
10041da177e4SLinus Torvalds 	/* Clear memory counter. */
10051da177e4SLinus Torvalds 	tp->ucopy.memory = 0;
10061da177e4SLinus Torvalds }
10071da177e4SLinus Torvalds 
10081da177e4SLinus Torvalds static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
10091da177e4SLinus Torvalds {
10101da177e4SLinus Torvalds 	struct sk_buff *skb;
10111da177e4SLinus Torvalds 	u32 offset;
10121da177e4SLinus Torvalds 
10131da177e4SLinus Torvalds 	skb_queue_walk(&sk->sk_receive_queue, skb) {
10141da177e4SLinus Torvalds 		offset = seq - TCP_SKB_CB(skb)->seq;
10151da177e4SLinus Torvalds 		if (skb->h.th->syn)
10161da177e4SLinus Torvalds 			offset--;
10171da177e4SLinus Torvalds 		if (offset < skb->len || skb->h.th->fin) {
10181da177e4SLinus Torvalds 			*off = offset;
10191da177e4SLinus Torvalds 			return skb;
10201da177e4SLinus Torvalds 		}
10211da177e4SLinus Torvalds 	}
10221da177e4SLinus Torvalds 	return NULL;
10231da177e4SLinus Torvalds }
10241da177e4SLinus Torvalds 
10251da177e4SLinus Torvalds /*
10261da177e4SLinus Torvalds  * This routine provides an alternative to tcp_recvmsg() for routines
10271da177e4SLinus Torvalds  * that would like to handle copying from skbuffs directly in 'sendfile'
10281da177e4SLinus Torvalds  * fashion.
10291da177e4SLinus Torvalds  * Note:
10301da177e4SLinus Torvalds  *	- It is assumed that the socket was locked by the caller.
10311da177e4SLinus Torvalds  *	- The routine does not block.
10321da177e4SLinus Torvalds  *	- At present, there is no support for reading OOB data
10331da177e4SLinus Torvalds  *	  or for 'peeking' the socket using this routine
10341da177e4SLinus Torvalds  *	  (although both would be easy to implement).
10351da177e4SLinus Torvalds  */
10361da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
10371da177e4SLinus Torvalds 		  sk_read_actor_t recv_actor)
10381da177e4SLinus Torvalds {
10391da177e4SLinus Torvalds 	struct sk_buff *skb;
10401da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
10411da177e4SLinus Torvalds 	u32 seq = tp->copied_seq;
10421da177e4SLinus Torvalds 	u32 offset;
10431da177e4SLinus Torvalds 	int copied = 0;
10441da177e4SLinus Torvalds 
10451da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
10461da177e4SLinus Torvalds 		return -ENOTCONN;
10471da177e4SLinus Torvalds 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
10481da177e4SLinus Torvalds 		if (offset < skb->len) {
10491da177e4SLinus Torvalds 			size_t used, len;
10501da177e4SLinus Torvalds 
10511da177e4SLinus Torvalds 			len = skb->len - offset;
10521da177e4SLinus Torvalds 			/* Stop reading if we hit a patch of urgent data */
10531da177e4SLinus Torvalds 			if (tp->urg_data) {
10541da177e4SLinus Torvalds 				u32 urg_offset = tp->urg_seq - seq;
10551da177e4SLinus Torvalds 				if (urg_offset < len)
10561da177e4SLinus Torvalds 					len = urg_offset;
10571da177e4SLinus Torvalds 				if (!len)
10581da177e4SLinus Torvalds 					break;
10591da177e4SLinus Torvalds 			}
10601da177e4SLinus Torvalds 			used = recv_actor(desc, skb, offset, len);
10611da177e4SLinus Torvalds 			if (used <= len) {
10621da177e4SLinus Torvalds 				seq += used;
10631da177e4SLinus Torvalds 				copied += used;
10641da177e4SLinus Torvalds 				offset += used;
10651da177e4SLinus Torvalds 			}
10661da177e4SLinus Torvalds 			if (offset != skb->len)
10671da177e4SLinus Torvalds 				break;
10681da177e4SLinus Torvalds 		}
10691da177e4SLinus Torvalds 		if (skb->h.th->fin) {
1070624d1164SChris Leech 			sk_eat_skb(sk, skb, 0);
10711da177e4SLinus Torvalds 			++seq;
10721da177e4SLinus Torvalds 			break;
10731da177e4SLinus Torvalds 		}
1074624d1164SChris Leech 		sk_eat_skb(sk, skb, 0);
10751da177e4SLinus Torvalds 		if (!desc->count)
10761da177e4SLinus Torvalds 			break;
10771da177e4SLinus Torvalds 	}
10781da177e4SLinus Torvalds 	tp->copied_seq = seq;
10791da177e4SLinus Torvalds 
10801da177e4SLinus Torvalds 	tcp_rcv_space_adjust(sk);
10811da177e4SLinus Torvalds 
10821da177e4SLinus Torvalds 	/* Clean up the data we have read: this will send ACK frames as needed. */
10831da177e4SLinus Torvalds 	if (copied)
10840e4b4992SChris Leech 		tcp_cleanup_rbuf(sk, copied);
10851da177e4SLinus Torvalds 	return copied;
10861da177e4SLinus Torvalds }
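
/*
 * Editor's sketch (not in the original file): a minimal, hypothetical
 * recv_actor illustrating the calling convention tcp_read_sock() documents
 * above.  It merely counts bytes; desc->count is the caller's remaining
 * quota, and returning less than 'len' stops the walk early.  A caller
 * would do roughly: desc.count = budget; tcp_read_sock(sk, &desc,
 * count_bytes_actor);
 */
static int count_bytes_actor(read_descriptor_t *desc, struct sk_buff *skb,
			     unsigned int offset, size_t len)
{
	size_t used = len;

	if (used > desc->count)		/* never take more than our quota */
		used = desc->count;

	desc->count -= used;
	desc->written += used;		/* running byte total for the caller */
	return used;
}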
10871da177e4SLinus Torvalds 
10881da177e4SLinus Torvalds /*
10891da177e4SLinus Torvalds  *	This routine copies from a sock struct into the user buffer.
10901da177e4SLinus Torvalds  *
10911da177e4SLinus Torvalds  *	Technical note: since 2.3 we work on a _locked_ socket, so
10921da177e4SLinus Torvalds  *	tricks with *seq access order and skb->users are not required.
10931da177e4SLinus Torvalds  *	The code can probably be improved even further.
10941da177e4SLinus Torvalds  */
10951da177e4SLinus Torvalds 
10961da177e4SLinus Torvalds int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
10971da177e4SLinus Torvalds 		size_t len, int nonblock, int flags, int *addr_len)
10981da177e4SLinus Torvalds {
10991da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
11001da177e4SLinus Torvalds 	int copied = 0;
11011da177e4SLinus Torvalds 	u32 peek_seq;
11021da177e4SLinus Torvalds 	u32 *seq;
11031da177e4SLinus Torvalds 	unsigned long used;
11041da177e4SLinus Torvalds 	int err;
11051da177e4SLinus Torvalds 	int target;		/* Read at least this many bytes */
11061da177e4SLinus Torvalds 	long timeo;
11071da177e4SLinus Torvalds 	struct task_struct *user_recv = NULL;
11081a2449a8SChris Leech 	int copied_early = 0;
11091da177e4SLinus Torvalds 
11101da177e4SLinus Torvalds 	lock_sock(sk);
11111da177e4SLinus Torvalds 
11121da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
11131da177e4SLinus Torvalds 
11141da177e4SLinus Torvalds 	err = -ENOTCONN;
11151da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
11161da177e4SLinus Torvalds 		goto out;
11171da177e4SLinus Torvalds 
11181da177e4SLinus Torvalds 	timeo = sock_rcvtimeo(sk, nonblock);
11191da177e4SLinus Torvalds 
11201da177e4SLinus Torvalds 	/* Urgent data needs to be handled specially. */
11211da177e4SLinus Torvalds 	if (flags & MSG_OOB)
11221da177e4SLinus Torvalds 		goto recv_urg;
11231da177e4SLinus Torvalds 
11241da177e4SLinus Torvalds 	seq = &tp->copied_seq;
11251da177e4SLinus Torvalds 	if (flags & MSG_PEEK) {
11261da177e4SLinus Torvalds 		peek_seq = tp->copied_seq;
11271da177e4SLinus Torvalds 		seq = &peek_seq;
11281da177e4SLinus Torvalds 	}
11291da177e4SLinus Torvalds 
11301da177e4SLinus Torvalds 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
11311da177e4SLinus Torvalds 
11321a2449a8SChris Leech #ifdef CONFIG_NET_DMA
11331a2449a8SChris Leech 	tp->ucopy.dma_chan = NULL;
11341a2449a8SChris Leech 	preempt_disable();
11351a2449a8SChris Leech 	if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
11361a2449a8SChris Leech 	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data.net_dma)) {
11371a2449a8SChris Leech 		preempt_enable_no_resched();
11381a2449a8SChris Leech 		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
11391a2449a8SChris Leech 	} else
11401a2449a8SChris Leech 		preempt_enable_no_resched();
11411a2449a8SChris Leech #endif
11421a2449a8SChris Leech 
11431da177e4SLinus Torvalds 	do {
11441da177e4SLinus Torvalds 		struct sk_buff *skb;
11451da177e4SLinus Torvalds 		u32 offset;
11461da177e4SLinus Torvalds 
11471da177e4SLinus Torvalds 		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
11481da177e4SLinus Torvalds 		if (tp->urg_data && tp->urg_seq == *seq) {
11491da177e4SLinus Torvalds 			if (copied)
11501da177e4SLinus Torvalds 				break;
11511da177e4SLinus Torvalds 			if (signal_pending(current)) {
11521da177e4SLinus Torvalds 				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
11531da177e4SLinus Torvalds 				break;
11541da177e4SLinus Torvalds 			}
11551da177e4SLinus Torvalds 		}
11561da177e4SLinus Torvalds 
11571da177e4SLinus Torvalds 		/* Next get a buffer. */
11581da177e4SLinus Torvalds 
11591da177e4SLinus Torvalds 		skb = skb_peek(&sk->sk_receive_queue);
11601da177e4SLinus Torvalds 		do {
11611da177e4SLinus Torvalds 			if (!skb)
11621da177e4SLinus Torvalds 				break;
11631da177e4SLinus Torvalds 
11641da177e4SLinus Torvalds 			/* Now that we have two receive queues this
11651da177e4SLinus Torvalds 			 * shouldn't happen.
11661da177e4SLinus Torvalds 			 */
11671da177e4SLinus Torvalds 			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
11681da177e4SLinus Torvalds 				printk(KERN_INFO "recvmsg bug: copied %X "
11691da177e4SLinus Torvalds 				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
11701da177e4SLinus Torvalds 				break;
11711da177e4SLinus Torvalds 			}
11721da177e4SLinus Torvalds 			offset = *seq - TCP_SKB_CB(skb)->seq;
11731da177e4SLinus Torvalds 			if (skb->h.th->syn)
11741da177e4SLinus Torvalds 				offset--;
11751da177e4SLinus Torvalds 			if (offset < skb->len)
11761da177e4SLinus Torvalds 				goto found_ok_skb;
11771da177e4SLinus Torvalds 			if (skb->h.th->fin)
11781da177e4SLinus Torvalds 				goto found_fin_ok;
11791da177e4SLinus Torvalds 			BUG_TRAP(flags & MSG_PEEK);
11801da177e4SLinus Torvalds 			skb = skb->next;
11811da177e4SLinus Torvalds 		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);
11821da177e4SLinus Torvalds 
11831da177e4SLinus Torvalds 		/* Well, if we have a backlog, try to process it now. */
11841da177e4SLinus Torvalds 
11851da177e4SLinus Torvalds 		if (copied >= target && !sk->sk_backlog.tail)
11861da177e4SLinus Torvalds 			break;
11871da177e4SLinus Torvalds 
11881da177e4SLinus Torvalds 		if (copied) {
11891da177e4SLinus Torvalds 			if (sk->sk_err ||
11901da177e4SLinus Torvalds 			    sk->sk_state == TCP_CLOSE ||
11911da177e4SLinus Torvalds 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
11921da177e4SLinus Torvalds 			    !timeo ||
11931da177e4SLinus Torvalds 			    signal_pending(current) ||
11941da177e4SLinus Torvalds 			    (flags & MSG_PEEK))
11951da177e4SLinus Torvalds 				break;
11961da177e4SLinus Torvalds 		} else {
11971da177e4SLinus Torvalds 			if (sock_flag(sk, SOCK_DONE))
11981da177e4SLinus Torvalds 				break;
11991da177e4SLinus Torvalds 
12001da177e4SLinus Torvalds 			if (sk->sk_err) {
12011da177e4SLinus Torvalds 				copied = sock_error(sk);
12021da177e4SLinus Torvalds 				break;
12031da177e4SLinus Torvalds 			}
12041da177e4SLinus Torvalds 
12051da177e4SLinus Torvalds 			if (sk->sk_shutdown & RCV_SHUTDOWN)
12061da177e4SLinus Torvalds 				break;
12071da177e4SLinus Torvalds 
12081da177e4SLinus Torvalds 			if (sk->sk_state == TCP_CLOSE) {
12091da177e4SLinus Torvalds 				if (!sock_flag(sk, SOCK_DONE)) {
12101da177e4SLinus Torvalds 					/* This occurs when the user tries to read
12111da177e4SLinus Torvalds 					 * from a socket that was never connected.
12121da177e4SLinus Torvalds 					 */
12131da177e4SLinus Torvalds 					copied = -ENOTCONN;
12141da177e4SLinus Torvalds 					break;
12151da177e4SLinus Torvalds 				}
12161da177e4SLinus Torvalds 				break;
12171da177e4SLinus Torvalds 			}
12181da177e4SLinus Torvalds 
12191da177e4SLinus Torvalds 			if (!timeo) {
12201da177e4SLinus Torvalds 				copied = -EAGAIN;
12211da177e4SLinus Torvalds 				break;
12221da177e4SLinus Torvalds 			}
12231da177e4SLinus Torvalds 
12241da177e4SLinus Torvalds 			if (signal_pending(current)) {
12251da177e4SLinus Torvalds 				copied = sock_intr_errno(timeo);
12261da177e4SLinus Torvalds 				break;
12271da177e4SLinus Torvalds 			}
12281da177e4SLinus Torvalds 		}
12291da177e4SLinus Torvalds 
12300e4b4992SChris Leech 		tcp_cleanup_rbuf(sk, copied);
12311da177e4SLinus Torvalds 
12327df55125SDavid S. Miller 		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
12331da177e4SLinus Torvalds 			/* Install new reader */
12341da177e4SLinus Torvalds 			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
12351da177e4SLinus Torvalds 				user_recv = current;
12361da177e4SLinus Torvalds 				tp->ucopy.task = user_recv;
12371da177e4SLinus Torvalds 				tp->ucopy.iov = msg->msg_iov;
12381da177e4SLinus Torvalds 			}
12391da177e4SLinus Torvalds 
12401da177e4SLinus Torvalds 			tp->ucopy.len = len;
12411da177e4SLinus Torvalds 
12421da177e4SLinus Torvalds 			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
12431da177e4SLinus Torvalds 				 (flags & (MSG_PEEK | MSG_TRUNC)));
12441da177e4SLinus Torvalds 
12451da177e4SLinus Torvalds 			/* Ugly... If the prequeue is not empty, we have to
12461da177e4SLinus Torvalds 			 * process it before releasing the socket; otherwise
12471da177e4SLinus Torvalds 			 * ordering will be broken on the second iteration.
12481da177e4SLinus Torvalds 			 * A more elegant solution is required!!!
12491da177e4SLinus Torvalds 			 *
12501da177e4SLinus Torvalds 			 * Look: we have the following (pseudo)queues:
12511da177e4SLinus Torvalds 			 *
12521da177e4SLinus Torvalds 			 * 1. packets in flight
12531da177e4SLinus Torvalds 			 * 2. backlog
12541da177e4SLinus Torvalds 			 * 3. prequeue
12551da177e4SLinus Torvalds 			 * 4. receive_queue
12561da177e4SLinus Torvalds 			 *
12571da177e4SLinus Torvalds 			 * Each queue can be processed only if the following ones
12581da177e4SLinus Torvalds 			 * are empty. At this point the receive_queue is empty, but
12591da177e4SLinus Torvalds 			 * the prequeue _can_ be non-empty on a later iteration,
12601da177e4SLinus Torvalds 			 * after we jumped to the start of the loop because backlog
12611da177e4SLinus Torvalds 			 * processing added something to the receive_queue. We
12621da177e4SLinus Torvalds 			 * cannot release_sock() then, because the backlog contains
12631da177e4SLinus Torvalds 			 * packets that arrived _after_ the prequeued ones.
12641da177e4SLinus Torvalds 			 *
12651da177e4SLinus Torvalds 			 * In short, the algorithm is clear --- process all the
12661da177e4SLinus Torvalds 			 * queues in order. We could do it more directly,
12671da177e4SLinus Torvalds 			 * requeueing packets from the backlog to the prequeue if
12681da177e4SLinus Torvalds 			 * it is not empty; that is more elegant but eats cycles,
12691da177e4SLinus Torvalds 			 * unfortunately.
12701da177e4SLinus Torvalds 			 */
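			/*
			 * Editor's illustrative trace (not part of the
			 * original comment): on a later pass the
			 * receive_queue may hold data that backlog
			 * processing just added, while the prequeue still
			 * holds segments that arrived earlier; draining the
			 * prequeue first (goto do_prequeue below) is what
			 * keeps delivery in sequence.
			 */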
1271b03efcfbSDavid S. Miller 			if (!skb_queue_empty(&tp->ucopy.prequeue))
12721da177e4SLinus Torvalds 				goto do_prequeue;
12731da177e4SLinus Torvalds 
12741da177e4SLinus Torvalds 			/* __ Set realtime policy in scheduler __ */
12751da177e4SLinus Torvalds 		}
12761da177e4SLinus Torvalds 
12771da177e4SLinus Torvalds 		if (copied >= target) {
12781da177e4SLinus Torvalds 			/* Do not sleep, just process backlog. */
12791da177e4SLinus Torvalds 			release_sock(sk);
12801da177e4SLinus Torvalds 			lock_sock(sk);
12811da177e4SLinus Torvalds 		} else
12821da177e4SLinus Torvalds 			sk_wait_data(sk, &timeo);
12831da177e4SLinus Torvalds 
12841a2449a8SChris Leech #ifdef CONFIG_NET_DMA
12851a2449a8SChris Leech 		tp->ucopy.wakeup = 0;
12861a2449a8SChris Leech #endif
12871a2449a8SChris Leech 
12881da177e4SLinus Torvalds 		if (user_recv) {
12891da177e4SLinus Torvalds 			int chunk;
12901da177e4SLinus Torvalds 
12911da177e4SLinus Torvalds 			/* __ Restore normal policy in scheduler __ */
12921da177e4SLinus Torvalds 
12931da177e4SLinus Torvalds 			if ((chunk = len - tp->ucopy.len) != 0) {
12941da177e4SLinus Torvalds 				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
12951da177e4SLinus Torvalds 				len -= chunk;
12961da177e4SLinus Torvalds 				copied += chunk;
12971da177e4SLinus Torvalds 			}
12981da177e4SLinus Torvalds 
12991da177e4SLinus Torvalds 			if (tp->rcv_nxt == tp->copied_seq &&
1300b03efcfbSDavid S. Miller 			    !skb_queue_empty(&tp->ucopy.prequeue)) {
13011da177e4SLinus Torvalds do_prequeue:
13021da177e4SLinus Torvalds 				tcp_prequeue_process(sk);
13031da177e4SLinus Torvalds 
13041da177e4SLinus Torvalds 				if ((chunk = len - tp->ucopy.len) != 0) {
13051da177e4SLinus Torvalds 					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
13061da177e4SLinus Torvalds 					len -= chunk;
13071da177e4SLinus Torvalds 					copied += chunk;
13081da177e4SLinus Torvalds 				}
13091da177e4SLinus Torvalds 			}
13101da177e4SLinus Torvalds 		}
13111da177e4SLinus Torvalds 		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
13121da177e4SLinus Torvalds 			if (net_ratelimit())
13131da177e4SLinus Torvalds 				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
13141da177e4SLinus Torvalds 				       current->comm, current->pid);
13151da177e4SLinus Torvalds 			peek_seq = tp->copied_seq;
13161da177e4SLinus Torvalds 		}
13171da177e4SLinus Torvalds 		continue;
13181da177e4SLinus Torvalds 
13191da177e4SLinus Torvalds 	found_ok_skb:
13201da177e4SLinus Torvalds 		/* Ok so how much can we use? */
13211da177e4SLinus Torvalds 		used = skb->len - offset;
13221da177e4SLinus Torvalds 		if (len < used)
13231da177e4SLinus Torvalds 			used = len;
13241da177e4SLinus Torvalds 
13251da177e4SLinus Torvalds 		/* Do we have urgent data here? */
13261da177e4SLinus Torvalds 		if (tp->urg_data) {
13271da177e4SLinus Torvalds 			u32 urg_offset = tp->urg_seq - *seq;
13281da177e4SLinus Torvalds 			if (urg_offset < used) {
13291da177e4SLinus Torvalds 				if (!urg_offset) {
13301da177e4SLinus Torvalds 					if (!sock_flag(sk, SOCK_URGINLINE)) {
13311da177e4SLinus Torvalds 						++*seq;
13321da177e4SLinus Torvalds 						offset++;
13331da177e4SLinus Torvalds 						used--;
13341da177e4SLinus Torvalds 						if (!used)
13351da177e4SLinus Torvalds 							goto skip_copy;
13361da177e4SLinus Torvalds 					}
13371da177e4SLinus Torvalds 				} else
13381da177e4SLinus Torvalds 					used = urg_offset;
13391da177e4SLinus Torvalds 			}
13401da177e4SLinus Torvalds 		}
13411da177e4SLinus Torvalds 
13421da177e4SLinus Torvalds 		if (!(flags & MSG_TRUNC)) {
13431a2449a8SChris Leech #ifdef CONFIG_NET_DMA
13441a2449a8SChris Leech 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
13451a2449a8SChris Leech 				tp->ucopy.dma_chan = get_softnet_dma();
13461a2449a8SChris Leech 
13471a2449a8SChris Leech 			if (tp->ucopy.dma_chan) {
13481a2449a8SChris Leech 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
13491a2449a8SChris Leech 					tp->ucopy.dma_chan, skb, offset,
13501a2449a8SChris Leech 					msg->msg_iov, used,
13511a2449a8SChris Leech 					tp->ucopy.pinned_list);
13521a2449a8SChris Leech 
13531a2449a8SChris Leech 				if (tp->ucopy.dma_cookie < 0) {
13541a2449a8SChris Leech 
13551a2449a8SChris Leech 					printk(KERN_ALERT "dma_cookie < 0\n");
13561a2449a8SChris Leech 
13571a2449a8SChris Leech 					/* Exception. Bailout! */
13581a2449a8SChris Leech 					if (!copied)
13591a2449a8SChris Leech 						copied = -EFAULT;
13601a2449a8SChris Leech 					break;
13611a2449a8SChris Leech 				}
13621a2449a8SChris Leech 				if ((offset + used) == skb->len)
13631a2449a8SChris Leech 					copied_early = 1;
13641a2449a8SChris Leech 
13651a2449a8SChris Leech 			} else
13661a2449a8SChris Leech #endif
13671a2449a8SChris Leech 			{
13681da177e4SLinus Torvalds 				err = skb_copy_datagram_iovec(skb, offset,
13691da177e4SLinus Torvalds 						msg->msg_iov, used);
13701da177e4SLinus Torvalds 				if (err) {
13711da177e4SLinus Torvalds 					/* Exception. Bailout! */
13721da177e4SLinus Torvalds 					if (!copied)
13731da177e4SLinus Torvalds 						copied = -EFAULT;
13741da177e4SLinus Torvalds 					break;
13751da177e4SLinus Torvalds 				}
13761da177e4SLinus Torvalds 			}
13771a2449a8SChris Leech 		}
13781da177e4SLinus Torvalds 
13791da177e4SLinus Torvalds 		*seq += used;
13801da177e4SLinus Torvalds 		copied += used;
13811da177e4SLinus Torvalds 		len -= used;
13821da177e4SLinus Torvalds 
13831da177e4SLinus Torvalds 		tcp_rcv_space_adjust(sk);
13841da177e4SLinus Torvalds 
13851da177e4SLinus Torvalds skip_copy:
13861da177e4SLinus Torvalds 		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
13871da177e4SLinus Torvalds 			tp->urg_data = 0;
13881da177e4SLinus Torvalds 			tcp_fast_path_check(sk, tp);
13891da177e4SLinus Torvalds 		}
13901da177e4SLinus Torvalds 		if (used + offset < skb->len)
13911da177e4SLinus Torvalds 			continue;
13921da177e4SLinus Torvalds 
13931da177e4SLinus Torvalds 		if (skb->h.th->fin)
13941da177e4SLinus Torvalds 			goto found_fin_ok;
13951a2449a8SChris Leech 		if (!(flags & MSG_PEEK)) {
13961a2449a8SChris Leech 			sk_eat_skb(sk, skb, copied_early);
13971a2449a8SChris Leech 			copied_early = 0;
13981a2449a8SChris Leech 		}
13991da177e4SLinus Torvalds 		continue;
14001da177e4SLinus Torvalds 
14011da177e4SLinus Torvalds 	found_fin_ok:
14021da177e4SLinus Torvalds 		/* Process the FIN. */
14031da177e4SLinus Torvalds 		++*seq;
14041a2449a8SChris Leech 		if (!(flags & MSG_PEEK)) {
14051a2449a8SChris Leech 			sk_eat_skb(sk, skb, copied_early);
14061a2449a8SChris Leech 			copied_early = 0;
14071a2449a8SChris Leech 		}
14081da177e4SLinus Torvalds 		break;
14091da177e4SLinus Torvalds 	} while (len > 0);
14101da177e4SLinus Torvalds 
14111da177e4SLinus Torvalds 	if (user_recv) {
1412b03efcfbSDavid S. Miller 		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
14131da177e4SLinus Torvalds 			int chunk;
14141da177e4SLinus Torvalds 
14151da177e4SLinus Torvalds 			tp->ucopy.len = copied > 0 ? len : 0;
14161da177e4SLinus Torvalds 
14171da177e4SLinus Torvalds 			tcp_prequeue_process(sk);
14181da177e4SLinus Torvalds 
14191da177e4SLinus Torvalds 			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
14201da177e4SLinus Torvalds 				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
14211da177e4SLinus Torvalds 				len -= chunk;
14221da177e4SLinus Torvalds 				copied += chunk;
14231da177e4SLinus Torvalds 			}
14241da177e4SLinus Torvalds 		}
14251da177e4SLinus Torvalds 
14261da177e4SLinus Torvalds 		tp->ucopy.task = NULL;
14271da177e4SLinus Torvalds 		tp->ucopy.len = 0;
14281da177e4SLinus Torvalds 	}
14291da177e4SLinus Torvalds 
14301a2449a8SChris Leech #ifdef CONFIG_NET_DMA
14311a2449a8SChris Leech 	if (tp->ucopy.dma_chan) {
14321a2449a8SChris Leech 		struct sk_buff *skb;
14331a2449a8SChris Leech 		dma_cookie_t done, used;
14341a2449a8SChris Leech 
14351a2449a8SChris Leech 		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
14361a2449a8SChris Leech 
14371a2449a8SChris Leech 		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
14381a2449a8SChris Leech 		                                 tp->ucopy.dma_cookie, &done,
14391a2449a8SChris Leech 		                                 &used) == DMA_IN_PROGRESS) {
14401a2449a8SChris Leech 			/* do partial cleanup of sk_async_wait_queue */
14411a2449a8SChris Leech 			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
14421a2449a8SChris Leech 			       (dma_async_is_complete(skb->dma_cookie, done,
14431a2449a8SChris Leech 			                              used) == DMA_SUCCESS)) {
14441a2449a8SChris Leech 				__skb_dequeue(&sk->sk_async_wait_queue);
14451a2449a8SChris Leech 				kfree_skb(skb);
14461a2449a8SChris Leech 			}
14471a2449a8SChris Leech 		}
14481a2449a8SChris Leech 
14491a2449a8SChris Leech 		/* Safe to free early-copied skbs now */
14501a2449a8SChris Leech 		__skb_queue_purge(&sk->sk_async_wait_queue);
14511a2449a8SChris Leech 		dma_chan_put(tp->ucopy.dma_chan);
14521a2449a8SChris Leech 		tp->ucopy.dma_chan = NULL;
14531a2449a8SChris Leech 	}
14541a2449a8SChris Leech 	if (tp->ucopy.pinned_list) {
14551a2449a8SChris Leech 		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
14561a2449a8SChris Leech 		tp->ucopy.pinned_list = NULL;
14571a2449a8SChris Leech 	}
14581a2449a8SChris Leech #endif
14591a2449a8SChris Leech 
14601da177e4SLinus Torvalds 	/* According to UNIX98, msg_name/msg_namelen are ignored
14611da177e4SLinus Torvalds 	 * on a connected socket. I was just happy when I found this 8) --ANK
14621da177e4SLinus Torvalds 	 */
14631da177e4SLinus Torvalds 
14641da177e4SLinus Torvalds 	/* Clean up the data we have read: this will send ACK frames as needed. */
14650e4b4992SChris Leech 	tcp_cleanup_rbuf(sk, copied);
14661da177e4SLinus Torvalds 
14671da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
14681da177e4SLinus Torvalds 	release_sock(sk);
14691da177e4SLinus Torvalds 	return copied;
14701da177e4SLinus Torvalds 
14711da177e4SLinus Torvalds out:
14721da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
14731da177e4SLinus Torvalds 	release_sock(sk);
14741da177e4SLinus Torvalds 	return err;
14751da177e4SLinus Torvalds 
14761da177e4SLinus Torvalds recv_urg:
14771da177e4SLinus Torvalds 	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
14781da177e4SLinus Torvalds 	goto out;
14791da177e4SLinus Torvalds }
14801da177e4SLinus Torvalds 
14811da177e4SLinus Torvalds /*
14821da177e4SLinus Torvalds  *	State processing on a close. This implements the state shift for
14831da177e4SLinus Torvalds  *	sending our FIN frame. Note that we only send a FIN for some
14841da177e4SLinus Torvalds  *	states. A shutdown() may have already sent the FIN, or we may be
14851da177e4SLinus Torvalds  *	closed.
14861da177e4SLinus Torvalds  */
14871da177e4SLinus Torvalds 
14889b5b5cffSArjan van de Ven static const unsigned char new_state[16] = {
14891da177e4SLinus Torvalds   /* current state:        new state:      action:	*/
14901da177e4SLinus Torvalds   /* (Invalid)		*/ TCP_CLOSE,
14911da177e4SLinus Torvalds   /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
14921da177e4SLinus Torvalds   /* TCP_SYN_SENT	*/ TCP_CLOSE,
14931da177e4SLinus Torvalds   /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
14941da177e4SLinus Torvalds   /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
14951da177e4SLinus Torvalds   /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
14961da177e4SLinus Torvalds   /* TCP_TIME_WAIT	*/ TCP_CLOSE,
14971da177e4SLinus Torvalds   /* TCP_CLOSE		*/ TCP_CLOSE,
14981da177e4SLinus Torvalds   /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
14991da177e4SLinus Torvalds   /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
15001da177e4SLinus Torvalds   /* TCP_LISTEN		*/ TCP_CLOSE,
15011da177e4SLinus Torvalds   /* TCP_CLOSING	*/ TCP_CLOSING,
15021da177e4SLinus Torvalds };
15031da177e4SLinus Torvalds 
15041da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk)
15051da177e4SLinus Torvalds {
15061da177e4SLinus Torvalds 	int next = (int)new_state[sk->sk_state];
15071da177e4SLinus Torvalds 	int ns = next & TCP_STATE_MASK;
15081da177e4SLinus Torvalds 
15091da177e4SLinus Torvalds 	tcp_set_state(sk, ns);
15101da177e4SLinus Torvalds 
15111da177e4SLinus Torvalds 	return next & TCP_ACTION_FIN;
15121da177e4SLinus Torvalds }
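
/*
 * Editor's illustrative note (not part of the original source): new_state[]
 * packs the follow-on state in the low bits and TCP_ACTION_FIN in a flag
 * bit, so one table lookup answers both "which state next?" and "must a FIN
 * be sent?".  For example, with sk->sk_state == TCP_ESTABLISHED the entry
 * is TCP_FIN_WAIT1 | TCP_ACTION_FIN, so tcp_close_state() moves the socket
 * to FIN_WAIT1 and returns non-zero, telling the caller to emit a FIN.
 */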
15131da177e4SLinus Torvalds 
15141da177e4SLinus Torvalds /*
15151da177e4SLinus Torvalds  *	Shutdown the sending side of a connection. Much like close except
15161da177e4SLinus Torvalds  *	that we don't shut down the receiving side or mark the socket SOCK_DEAD.
15171da177e4SLinus Torvalds  */
15181da177e4SLinus Torvalds 
15191da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how)
15201da177e4SLinus Torvalds {
15211da177e4SLinus Torvalds 	/*	We need to grab some memory, and put together a FIN,
15221da177e4SLinus Torvalds 	 *	and then put it into the queue to be sent.
15231da177e4SLinus Torvalds 	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
15241da177e4SLinus Torvalds 	 */
15251da177e4SLinus Torvalds 	if (!(how & SEND_SHUTDOWN))
15261da177e4SLinus Torvalds 		return;
15271da177e4SLinus Torvalds 
15281da177e4SLinus Torvalds 	/* If we've already sent a FIN, or it's a closed state, skip this. */
15291da177e4SLinus Torvalds 	if ((1 << sk->sk_state) &
15301da177e4SLinus Torvalds 	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
15311da177e4SLinus Torvalds 	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
15321da177e4SLinus Torvalds 		/* Clear out any half completed packets.  FIN if needed. */
15331da177e4SLinus Torvalds 		if (tcp_close_state(sk))
15341da177e4SLinus Torvalds 			tcp_send_fin(sk);
15351da177e4SLinus Torvalds 	}
15361da177e4SLinus Torvalds }
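
/*
 * Editor's sketch (not part of the original source): from user space this
 * path is typically reached via shutdown(2) with SHUT_WR, which the socket
 * layer turns into SEND_SHUTDOWN before calling tcp_shutdown() above.  A
 * hypothetical half-close, assuming 'fd' is a connected TCP socket and
 * consume() is the application's own routine:
 *
 *	write(fd, req, req_len);
 *	shutdown(fd, SHUT_WR);		// we send our FIN, reading still works
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);
 */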
15371da177e4SLinus Torvalds 
15381da177e4SLinus Torvalds void tcp_close(struct sock *sk, long timeout)
15391da177e4SLinus Torvalds {
15401da177e4SLinus Torvalds 	struct sk_buff *skb;
15411da177e4SLinus Torvalds 	int data_was_unread = 0;
154275c2d907SHerbert Xu 	int state;
15431da177e4SLinus Torvalds 
15441da177e4SLinus Torvalds 	lock_sock(sk);
15451da177e4SLinus Torvalds 	sk->sk_shutdown = SHUTDOWN_MASK;
15461da177e4SLinus Torvalds 
15471da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN) {
15481da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
15491da177e4SLinus Torvalds 
15501da177e4SLinus Torvalds 		/* Special case. */
15510a5578cfSArnaldo Carvalho de Melo 		inet_csk_listen_stop(sk);
15521da177e4SLinus Torvalds 
15531da177e4SLinus Torvalds 		goto adjudge_to_death;
15541da177e4SLinus Torvalds 	}
15551da177e4SLinus Torvalds 
15561da177e4SLinus Torvalds 	/*  We need to flush the recv. buffs.  We do this only on the
15571da177e4SLinus Torvalds 	 *  descriptor close, not protocol-sourced closes, because the
15581da177e4SLinus Torvalds 	 *  reader process may not have drained the data yet!
15591da177e4SLinus Torvalds 	 */
15601da177e4SLinus Torvalds 	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
15611da177e4SLinus Torvalds 		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
15621da177e4SLinus Torvalds 			  skb->h.th->fin;
15631da177e4SLinus Torvalds 		data_was_unread += len;
15641da177e4SLinus Torvalds 		__kfree_skb(skb);
15651da177e4SLinus Torvalds 	}
15661da177e4SLinus Torvalds 
15671da177e4SLinus Torvalds 	sk_stream_mem_reclaim(sk);
15681da177e4SLinus Torvalds 
15691da177e4SLinus Torvalds 	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
15701da177e4SLinus Torvalds 	 * 3.10, we send a RST here because data was lost.  To
15711da177e4SLinus Torvalds 	 * witness the awful effects of the old behavior of always
15721da177e4SLinus Torvalds 	 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
15731da177e4SLinus Torvalds 	 * a bulk GET in an FTP client, suspend the process, wait
15741da177e4SLinus Torvalds 	 * for the client to advertise a zero window, then kill -9
15751da177e4SLinus Torvalds 	 * the FTP client, wheee...  Note: timeout is always zero
15761da177e4SLinus Torvalds 	 * in such a case.
15771da177e4SLinus Torvalds 	 */
15781da177e4SLinus Torvalds 	if (data_was_unread) {
15791da177e4SLinus Torvalds 		/* Unread data was tossed, zap the connection. */
15801da177e4SLinus Torvalds 		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
15811da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
15821da177e4SLinus Torvalds 		tcp_send_active_reset(sk, GFP_KERNEL);
15831da177e4SLinus Torvalds 	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
15841da177e4SLinus Torvalds 		/* Check zero linger _after_ checking for unread data. */
15851da177e4SLinus Torvalds 		sk->sk_prot->disconnect(sk, 0);
15861da177e4SLinus Torvalds 		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
15871da177e4SLinus Torvalds 	} else if (tcp_close_state(sk)) {
15881da177e4SLinus Torvalds 		/* We FIN if the application ate all the data before
15891da177e4SLinus Torvalds 		 * zapping the connection.
15901da177e4SLinus Torvalds 		 */
15911da177e4SLinus Torvalds 
15921da177e4SLinus Torvalds 		/* RED-PEN. Formally speaking, we have broken TCP state
15931da177e4SLinus Torvalds 		 * machine. State transitions:
15941da177e4SLinus Torvalds 		 *
15951da177e4SLinus Torvalds 		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
15961da177e4SLinus Torvalds 		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
15971da177e4SLinus Torvalds 		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
15981da177e4SLinus Torvalds 		 *
15991da177e4SLinus Torvalds 		 * are legal only when the FIN has actually been sent (i.e. is in
16001da177e4SLinus Torvalds 		 * window), rather than queued out of window. Purists would complain.
16011da177e4SLinus Torvalds 		 *
16021da177e4SLinus Torvalds 		 * E.g. the "RFC state" is ESTABLISHED if the Linux state is
16031da177e4SLinus Torvalds 		 * FIN-WAIT-1 but the FIN has still not been sent.
16041da177e4SLinus Torvalds 		 *
16051da177e4SLinus Torvalds 		 * The visible deviations are that we sometimes enter the time-wait
16061da177e4SLinus Torvalds 		 * state when it is not really required (harmless), and do not send
16071da177e4SLinus Torvalds 		 * active resets when the specs require them (TCP_ESTABLISHED,
16081da177e4SLinus Torvalds 		 * TCP_CLOSE_WAIT, when they look like CLOSING or LAST_ACK to
16091da177e4SLinus Torvalds 		 * Linux).
16101da177e4SLinus Torvalds 		 * Probably I missed some more small holes.
16111da177e4SLinus Torvalds 		 * 						--ANK
16121da177e4SLinus Torvalds 		 */
16131da177e4SLinus Torvalds 		tcp_send_fin(sk);
16141da177e4SLinus Torvalds 	}
16151da177e4SLinus Torvalds 
16161da177e4SLinus Torvalds 	sk_stream_wait_close(sk, timeout);
16171da177e4SLinus Torvalds 
16181da177e4SLinus Torvalds adjudge_to_death:
161975c2d907SHerbert Xu 	state = sk->sk_state;
162075c2d907SHerbert Xu 	sock_hold(sk);
162175c2d907SHerbert Xu 	sock_orphan(sk);
162275c2d907SHerbert Xu 	atomic_inc(sk->sk_prot->orphan_count);
162375c2d907SHerbert Xu 
16241da177e4SLinus Torvalds 	/* This is the last release_sock() in this socket's life. It will drain the backlog. */
16251da177e4SLinus Torvalds 	release_sock(sk);
16261da177e4SLinus Torvalds 
16271da177e4SLinus Torvalds 
16281da177e4SLinus Torvalds 	/* Now the socket is owned by the kernel and we acquire the BH lock
16291da177e4SLinus Torvalds 	 * to finish the close. No need to check for user refs.
16301da177e4SLinus Torvalds 	 */
16311da177e4SLinus Torvalds 	local_bh_disable();
16321da177e4SLinus Torvalds 	bh_lock_sock(sk);
16331da177e4SLinus Torvalds 	BUG_TRAP(!sock_owned_by_user(sk));
16341da177e4SLinus Torvalds 
163575c2d907SHerbert Xu 	/* Have we already been destroyed by a softirq or backlog? */
163675c2d907SHerbert Xu 	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
163775c2d907SHerbert Xu 		goto out;
16381da177e4SLinus Torvalds 
16391da177e4SLinus Torvalds 	/*	This is a (useful) BSD-style violation of the RFC. There is a
16401da177e4SLinus Torvalds 	 *	problem with TCP as specified, in that the other end could
16411da177e4SLinus Torvalds 	 *	keep a socket open forever with no application left at this end.
16421da177e4SLinus Torvalds 	 *	We use a 3 minute timeout (about the same as BSD) then kill
16431da177e4SLinus Torvalds 	 *	our end. If they send after that then tough - BUT it is long
16441da177e4SLinus Torvalds 	 *	enough that we won't repeat the old "4*rto = almost no time,
16451da177e4SLinus Torvalds 	 *	whoops, reset" mistake.
16461da177e4SLinus Torvalds 	 *
16471da177e4SLinus Torvalds 	 *	Nope, it was not a mistake. It is really the desired behaviour,
16481da177e4SLinus Torvalds 	 *	e.g. on HTTP servers, where such sockets are useless but
16491da177e4SLinus Torvalds 	 *	consume significant resources. Let's handle it with the special
16501da177e4SLinus Torvalds 	 *	linger2	option.					--ANK
16511da177e4SLinus Torvalds 	 */
16521da177e4SLinus Torvalds 
16531da177e4SLinus Torvalds 	if (sk->sk_state == TCP_FIN_WAIT2) {
16541da177e4SLinus Torvalds 		struct tcp_sock *tp = tcp_sk(sk);
16551da177e4SLinus Torvalds 		if (tp->linger2 < 0) {
16561da177e4SLinus Torvalds 			tcp_set_state(sk, TCP_CLOSE);
16571da177e4SLinus Torvalds 			tcp_send_active_reset(sk, GFP_ATOMIC);
16581da177e4SLinus Torvalds 			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
16591da177e4SLinus Torvalds 		} else {
1660463c84b9SArnaldo Carvalho de Melo 			const int tmo = tcp_fin_time(sk);
16611da177e4SLinus Torvalds 
16621da177e4SLinus Torvalds 			if (tmo > TCP_TIMEWAIT_LEN) {
1663463c84b9SArnaldo Carvalho de Melo 				inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk));
16641da177e4SLinus Torvalds 			} else {
16651da177e4SLinus Torvalds 				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
16661da177e4SLinus Torvalds 				goto out;
16671da177e4SLinus Torvalds 			}
16681da177e4SLinus Torvalds 		}
16691da177e4SLinus Torvalds 	}
16701da177e4SLinus Torvalds 	if (sk->sk_state != TCP_CLOSE) {
16711da177e4SLinus Torvalds 		sk_stream_mem_reclaim(sk);
16720a5578cfSArnaldo Carvalho de Melo 		if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
16731da177e4SLinus Torvalds 		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
16741da177e4SLinus Torvalds 		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
16751da177e4SLinus Torvalds 			if (net_ratelimit())
16761da177e4SLinus Torvalds 				printk(KERN_INFO "TCP: too many orphaned "
16771da177e4SLinus Torvalds 				       "sockets\n");
16781da177e4SLinus Torvalds 			tcp_set_state(sk, TCP_CLOSE);
16791da177e4SLinus Torvalds 			tcp_send_active_reset(sk, GFP_ATOMIC);
16801da177e4SLinus Torvalds 			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
16811da177e4SLinus Torvalds 		}
16821da177e4SLinus Torvalds 	}
16831da177e4SLinus Torvalds 
16841da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE)
16850a5578cfSArnaldo Carvalho de Melo 		inet_csk_destroy_sock(sk);
16861da177e4SLinus Torvalds 	/* Otherwise, socket is reprieved until protocol close. */
16871da177e4SLinus Torvalds 
16881da177e4SLinus Torvalds out:
16891da177e4SLinus Torvalds 	bh_unlock_sock(sk);
16901da177e4SLinus Torvalds 	local_bh_enable();
16911da177e4SLinus Torvalds 	sock_put(sk);
16921da177e4SLinus Torvalds }
16931da177e4SLinus Torvalds 
16941da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */
16951da177e4SLinus Torvalds 
16961da177e4SLinus Torvalds static inline int tcp_need_reset(int state)
16971da177e4SLinus Torvalds {
16981da177e4SLinus Torvalds 	return (1 << state) &
16991da177e4SLinus Torvalds 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
17001da177e4SLinus Torvalds 		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
17011da177e4SLinus Torvalds }
17021da177e4SLinus Torvalds 
17031da177e4SLinus Torvalds int tcp_disconnect(struct sock *sk, int flags)
17041da177e4SLinus Torvalds {
17051da177e4SLinus Torvalds 	struct inet_sock *inet = inet_sk(sk);
1706463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
17071da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
17081da177e4SLinus Torvalds 	int err = 0;
17091da177e4SLinus Torvalds 	int old_state = sk->sk_state;
17101da177e4SLinus Torvalds 
17111da177e4SLinus Torvalds 	if (old_state != TCP_CLOSE)
17121da177e4SLinus Torvalds 		tcp_set_state(sk, TCP_CLOSE);
17131da177e4SLinus Torvalds 
17141da177e4SLinus Torvalds 	/* ABORT function of RFC793 */
17151da177e4SLinus Torvalds 	if (old_state == TCP_LISTEN) {
17160a5578cfSArnaldo Carvalho de Melo 		inet_csk_listen_stop(sk);
17171da177e4SLinus Torvalds 	} else if (tcp_need_reset(old_state) ||
17181da177e4SLinus Torvalds 		   (tp->snd_nxt != tp->write_seq &&
17191da177e4SLinus Torvalds 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1720caa20d9aSStephen Hemminger 		/* The last check adjusts for the discrepancy between Linux
17211da177e4SLinus Torvalds 		 * and the RFC with respect to states.
17221da177e4SLinus Torvalds 		 */
17231da177e4SLinus Torvalds 		tcp_send_active_reset(sk, gfp_any());
17241da177e4SLinus Torvalds 		sk->sk_err = ECONNRESET;
17251da177e4SLinus Torvalds 	} else if (old_state == TCP_SYN_SENT)
17261da177e4SLinus Torvalds 		sk->sk_err = ECONNRESET;
17271da177e4SLinus Torvalds 
17281da177e4SLinus Torvalds 	tcp_clear_xmit_timers(sk);
17291da177e4SLinus Torvalds 	__skb_queue_purge(&sk->sk_receive_queue);
17301da177e4SLinus Torvalds 	sk_stream_writequeue_purge(sk);
17311da177e4SLinus Torvalds 	__skb_queue_purge(&tp->out_of_order_queue);
17321a2449a8SChris Leech #ifdef CONFIG_NET_DMA
17331a2449a8SChris Leech 	__skb_queue_purge(&sk->sk_async_wait_queue);
17341a2449a8SChris Leech #endif
17351da177e4SLinus Torvalds 
17361da177e4SLinus Torvalds 	inet->dport = 0;
17371da177e4SLinus Torvalds 
17381da177e4SLinus Torvalds 	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
17391da177e4SLinus Torvalds 		inet_reset_saddr(sk);
17401da177e4SLinus Torvalds 
17411da177e4SLinus Torvalds 	sk->sk_shutdown = 0;
17421da177e4SLinus Torvalds 	sock_reset_flag(sk, SOCK_DONE);
17431da177e4SLinus Torvalds 	tp->srtt = 0;
17441da177e4SLinus Torvalds 	if ((tp->write_seq += tp->max_window + 2) == 0)
17451da177e4SLinus Torvalds 		tp->write_seq = 1;
1746463c84b9SArnaldo Carvalho de Melo 	icsk->icsk_backoff = 0;
17471da177e4SLinus Torvalds 	tp->snd_cwnd = 2;
17486687e988SArnaldo Carvalho de Melo 	icsk->icsk_probes_out = 0;
17491da177e4SLinus Torvalds 	tp->packets_out = 0;
17501da177e4SLinus Torvalds 	tp->snd_ssthresh = 0x7fffffff;
17511da177e4SLinus Torvalds 	tp->snd_cwnd_cnt = 0;
17529772efb9SStephen Hemminger 	tp->bytes_acked = 0;
17536687e988SArnaldo Carvalho de Melo 	tcp_set_ca_state(sk, TCP_CA_Open);
17541da177e4SLinus Torvalds 	tcp_clear_retrans(tp);
1755463c84b9SArnaldo Carvalho de Melo 	inet_csk_delack_init(sk);
17561da177e4SLinus Torvalds 	sk->sk_send_head = NULL;
17571da177e4SLinus Torvalds 	tp->rx_opt.saw_tstamp = 0;
17581da177e4SLinus Torvalds 	tcp_sack_reset(&tp->rx_opt);
17591da177e4SLinus Torvalds 	__sk_dst_reset(sk);
17601da177e4SLinus Torvalds 
1761463c84b9SArnaldo Carvalho de Melo 	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
17621da177e4SLinus Torvalds 
17631da177e4SLinus Torvalds 	sk->sk_error_report(sk);
17641da177e4SLinus Torvalds 	return err;
17651da177e4SLinus Torvalds }
17661da177e4SLinus Torvalds 
17671da177e4SLinus Torvalds /*
17681da177e4SLinus Torvalds  *	Socket option code for TCP.
17691da177e4SLinus Torvalds  */
17703fdadf7dSDmitry Mishin static int do_tcp_setsockopt(struct sock *sk, int level,
17713fdadf7dSDmitry Mishin 		int optname, char __user *optval, int optlen)
17721da177e4SLinus Torvalds {
17731da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1774463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
17751da177e4SLinus Torvalds 	int val;
17761da177e4SLinus Torvalds 	int err = 0;
17771da177e4SLinus Torvalds 
17785f8ef48dSStephen Hemminger 	/* This is a string value; all the others are ints. */
17795f8ef48dSStephen Hemminger 	if (optname == TCP_CONGESTION) {
17805f8ef48dSStephen Hemminger 		char name[TCP_CA_NAME_MAX];
17815f8ef48dSStephen Hemminger 
17825f8ef48dSStephen Hemminger 		if (optlen < 1)
17835f8ef48dSStephen Hemminger 			return -EINVAL;
17845f8ef48dSStephen Hemminger 
17855f8ef48dSStephen Hemminger 		val = strncpy_from_user(name, optval,
17865f8ef48dSStephen Hemminger 					min(TCP_CA_NAME_MAX-1, optlen));
17875f8ef48dSStephen Hemminger 		if (val < 0)
17885f8ef48dSStephen Hemminger 			return -EFAULT;
17895f8ef48dSStephen Hemminger 		name[val] = 0;
17905f8ef48dSStephen Hemminger 
17915f8ef48dSStephen Hemminger 		lock_sock(sk);
17926687e988SArnaldo Carvalho de Melo 		err = tcp_set_congestion_control(sk, name);
17935f8ef48dSStephen Hemminger 		release_sock(sk);
17945f8ef48dSStephen Hemminger 		return err;
17955f8ef48dSStephen Hemminger 	}
17965f8ef48dSStephen Hemminger 
17971da177e4SLinus Torvalds 	if (optlen < sizeof(int))
17981da177e4SLinus Torvalds 		return -EINVAL;
17991da177e4SLinus Torvalds 
18001da177e4SLinus Torvalds 	if (get_user(val, (int __user *)optval))
18011da177e4SLinus Torvalds 		return -EFAULT;
18021da177e4SLinus Torvalds 
18031da177e4SLinus Torvalds 	lock_sock(sk);
18041da177e4SLinus Torvalds 
18051da177e4SLinus Torvalds 	switch (optname) {
18061da177e4SLinus Torvalds 	case TCP_MAXSEG:
18071da177e4SLinus Torvalds 		/* Values greater than the interface MTU won't take effect.
18081da177e4SLinus Torvalds 		 * However, at the point when this call is made we typically
18091da177e4SLinus Torvalds 		 * don't yet know which interface is going to be used. */
18101da177e4SLinus Torvalds 		if (val < 8 || val > MAX_TCP_WINDOW) {
18111da177e4SLinus Torvalds 			err = -EINVAL;
18121da177e4SLinus Torvalds 			break;
18131da177e4SLinus Torvalds 		}
18141da177e4SLinus Torvalds 		tp->rx_opt.user_mss = val;
18151da177e4SLinus Torvalds 		break;
18161da177e4SLinus Torvalds 
18171da177e4SLinus Torvalds 	case TCP_NODELAY:
18181da177e4SLinus Torvalds 		if (val) {
18191da177e4SLinus Torvalds 			/* TCP_NODELAY is weaker than TCP_CORK, so that
18201da177e4SLinus Torvalds 			 * this option on corked socket is remembered, but
18211da177e4SLinus Torvalds 			 * it is not activated until cork is cleared.
18221da177e4SLinus Torvalds 			 *
18231da177e4SLinus Torvalds 			 * However, when TCP_NODELAY is set we make
18241da177e4SLinus Torvalds 			 * an explicit push, which overrides even TCP_CORK
18251da177e4SLinus Torvalds 			 * for currently queued segments.
18261da177e4SLinus Torvalds 			 */
18271da177e4SLinus Torvalds 			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
18281da177e4SLinus Torvalds 			tcp_push_pending_frames(sk, tp);
18291da177e4SLinus Torvalds 		} else {
18301da177e4SLinus Torvalds 			tp->nonagle &= ~TCP_NAGLE_OFF;
18311da177e4SLinus Torvalds 		}
18321da177e4SLinus Torvalds 		break;
18331da177e4SLinus Torvalds 
18341da177e4SLinus Torvalds 	case TCP_CORK:
18351da177e4SLinus Torvalds 		/* When set, this indicates that non-full frames should always be queued.
18361da177e4SLinus Torvalds 		 * Later the user clears this option and we transmit
18371da177e4SLinus Torvalds 		 * any pending partial frames in the queue.  This is
18381da177e4SLinus Torvalds 		 * meant to be used alongside sendfile() to get properly
18391da177e4SLinus Torvalds 		 * filled frames when the user (for example) must write
18401da177e4SLinus Torvalds 		 * out headers with a write() call first and then use
18411da177e4SLinus Torvalds 		 * sendfile to send out the data parts.
18421da177e4SLinus Torvalds 		 *
18431da177e4SLinus Torvalds 		 * TCP_CORK can be set together with TCP_NODELAY and it is
18441da177e4SLinus Torvalds 		 * stronger than TCP_NODELAY.
18451da177e4SLinus Torvalds 		 */
18461da177e4SLinus Torvalds 		if (val) {
18471da177e4SLinus Torvalds 			tp->nonagle |= TCP_NAGLE_CORK;
18481da177e4SLinus Torvalds 		} else {
18491da177e4SLinus Torvalds 			tp->nonagle &= ~TCP_NAGLE_CORK;
18501da177e4SLinus Torvalds 			if (tp->nonagle&TCP_NAGLE_OFF)
18511da177e4SLinus Torvalds 				tp->nonagle |= TCP_NAGLE_PUSH;
18521da177e4SLinus Torvalds 			tcp_push_pending_frames(sk, tp);
18531da177e4SLinus Torvalds 		}
18541da177e4SLinus Torvalds 		break;
18551da177e4SLinus Torvalds 
18561da177e4SLinus Torvalds 	case TCP_KEEPIDLE:
18571da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_KEEPIDLE)
18581da177e4SLinus Torvalds 			err = -EINVAL;
18591da177e4SLinus Torvalds 		else {
18601da177e4SLinus Torvalds 			tp->keepalive_time = val * HZ;
18611da177e4SLinus Torvalds 			if (sock_flag(sk, SOCK_KEEPOPEN) &&
18621da177e4SLinus Torvalds 			    !((1 << sk->sk_state) &
18631da177e4SLinus Torvalds 			      (TCPF_CLOSE | TCPF_LISTEN))) {
18641da177e4SLinus Torvalds 				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
18651da177e4SLinus Torvalds 				if (tp->keepalive_time > elapsed)
18661da177e4SLinus Torvalds 					elapsed = tp->keepalive_time - elapsed;
18671da177e4SLinus Torvalds 				else
18681da177e4SLinus Torvalds 					elapsed = 0;
1869463c84b9SArnaldo Carvalho de Melo 				inet_csk_reset_keepalive_timer(sk, elapsed);
18701da177e4SLinus Torvalds 			}
18711da177e4SLinus Torvalds 		}
18721da177e4SLinus Torvalds 		break;
18731da177e4SLinus Torvalds 	case TCP_KEEPINTVL:
18741da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_KEEPINTVL)
18751da177e4SLinus Torvalds 			err = -EINVAL;
18761da177e4SLinus Torvalds 		else
18771da177e4SLinus Torvalds 			tp->keepalive_intvl = val * HZ;
18781da177e4SLinus Torvalds 		break;
18791da177e4SLinus Torvalds 	case TCP_KEEPCNT:
18801da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_KEEPCNT)
18811da177e4SLinus Torvalds 			err = -EINVAL;
18821da177e4SLinus Torvalds 		else
18831da177e4SLinus Torvalds 			tp->keepalive_probes = val;
18841da177e4SLinus Torvalds 		break;
18851da177e4SLinus Torvalds 	case TCP_SYNCNT:
18861da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_SYNCNT)
18871da177e4SLinus Torvalds 			err = -EINVAL;
18881da177e4SLinus Torvalds 		else
1889463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_syn_retries = val;
18901da177e4SLinus Torvalds 		break;
18911da177e4SLinus Torvalds 
18921da177e4SLinus Torvalds 	case TCP_LINGER2:
18931da177e4SLinus Torvalds 		if (val < 0)
18941da177e4SLinus Torvalds 			tp->linger2 = -1;
18951da177e4SLinus Torvalds 		else if (val > sysctl_tcp_fin_timeout / HZ)
18961da177e4SLinus Torvalds 			tp->linger2 = 0;
18971da177e4SLinus Torvalds 		else
18981da177e4SLinus Torvalds 			tp->linger2 = val * HZ;
18991da177e4SLinus Torvalds 		break;
19001da177e4SLinus Torvalds 
19011da177e4SLinus Torvalds 	case TCP_DEFER_ACCEPT:
1902295f7324SArnaldo Carvalho de Melo 		icsk->icsk_accept_queue.rskq_defer_accept = 0;
19031da177e4SLinus Torvalds 		if (val > 0) {
19041da177e4SLinus Torvalds 			/* Translate the value in seconds to a number of
19051da177e4SLinus Torvalds 			 * retransmits. */
1906295f7324SArnaldo Carvalho de Melo 			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
19071da177e4SLinus Torvalds 			       val > ((TCP_TIMEOUT_INIT / HZ) <<
1908295f7324SArnaldo Carvalho de Melo 				       icsk->icsk_accept_queue.rskq_defer_accept))
1909295f7324SArnaldo Carvalho de Melo 				icsk->icsk_accept_queue.rskq_defer_accept++;
1910295f7324SArnaldo Carvalho de Melo 			icsk->icsk_accept_queue.rskq_defer_accept++;
19111da177e4SLinus Torvalds 		}
19121da177e4SLinus Torvalds 		break;
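		/*
		 * Editor's worked example (not part of the original source),
		 * assuming the usual TCP_TIMEOUT_INIT of 3*HZ: val = 10
		 * seconds walks the loop above through 10 > 3 and 10 > 6 but
		 * stops at 10 > 12, leaving rskq_defer_accept at 2; the final
		 * increment makes it 3.  Reading the option back (see
		 * do_tcp_getsockopt() below) then reports 3 << 2 = 12 seconds.
		 */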
19131da177e4SLinus Torvalds 
19141da177e4SLinus Torvalds 	case TCP_WINDOW_CLAMP:
19151da177e4SLinus Torvalds 		if (!val) {
19161da177e4SLinus Torvalds 			if (sk->sk_state != TCP_CLOSE) {
19171da177e4SLinus Torvalds 				err = -EINVAL;
19181da177e4SLinus Torvalds 				break;
19191da177e4SLinus Torvalds 			}
19201da177e4SLinus Torvalds 			tp->window_clamp = 0;
19211da177e4SLinus Torvalds 		} else
19221da177e4SLinus Torvalds 			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
19231da177e4SLinus Torvalds 						SOCK_MIN_RCVBUF / 2 : val;
19241da177e4SLinus Torvalds 		break;
19251da177e4SLinus Torvalds 
19261da177e4SLinus Torvalds 	case TCP_QUICKACK:
19271da177e4SLinus Torvalds 		if (!val) {
1928463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.pingpong = 1;
19291da177e4SLinus Torvalds 		} else {
1930463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.pingpong = 0;
19311da177e4SLinus Torvalds 			if ((1 << sk->sk_state) &
19321da177e4SLinus Torvalds 			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
1933463c84b9SArnaldo Carvalho de Melo 			    inet_csk_ack_scheduled(sk)) {
1934463c84b9SArnaldo Carvalho de Melo 				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
19350e4b4992SChris Leech 				tcp_cleanup_rbuf(sk, 1);
19361da177e4SLinus Torvalds 				if (!(val & 1))
1937463c84b9SArnaldo Carvalho de Melo 					icsk->icsk_ack.pingpong = 1;
19381da177e4SLinus Torvalds 			}
19391da177e4SLinus Torvalds 		}
19401da177e4SLinus Torvalds 		break;
19411da177e4SLinus Torvalds 
19421da177e4SLinus Torvalds 	default:
19431da177e4SLinus Torvalds 		err = -ENOPROTOOPT;
19441da177e4SLinus Torvalds 		break;
19451da177e4SLinus Torvalds 	};
19461da177e4SLinus Torvalds 	release_sock(sk);
19471da177e4SLinus Torvalds 	return err;
19481da177e4SLinus Torvalds }
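
/*
 * Editor's sketch (not part of the original source): the TCP_CORK comment
 * above describes the classic "headers via write(), body via sendfile()"
 * pattern.  A hypothetical user-space caller, assuming 'fd' is a connected
 * TCP socket and file_fd/file_len describe the payload:
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);			// queued, not sent yet
 *	sendfile(fd, file_fd, NULL, file_len);		// coalesced with header
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));  // flush
 */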
19491da177e4SLinus Torvalds 
19503fdadf7dSDmitry Mishin int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
19513fdadf7dSDmitry Mishin 		   int optlen)
19523fdadf7dSDmitry Mishin {
19533fdadf7dSDmitry Mishin 	struct inet_connection_sock *icsk = inet_csk(sk);
19543fdadf7dSDmitry Mishin 
19553fdadf7dSDmitry Mishin 	if (level != SOL_TCP)
19563fdadf7dSDmitry Mishin 		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
19573fdadf7dSDmitry Mishin 						     optval, optlen);
19583fdadf7dSDmitry Mishin 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
19593fdadf7dSDmitry Mishin }
19603fdadf7dSDmitry Mishin 
19613fdadf7dSDmitry Mishin #ifdef CONFIG_COMPAT
1962543d9cfeSArnaldo Carvalho de Melo int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
1963543d9cfeSArnaldo Carvalho de Melo 			  char __user *optval, int optlen)
19643fdadf7dSDmitry Mishin {
1965dec73ff0SArnaldo Carvalho de Melo 	if (level != SOL_TCP)
1966dec73ff0SArnaldo Carvalho de Melo 		return inet_csk_compat_setsockopt(sk, level, optname,
1967dec73ff0SArnaldo Carvalho de Melo 						  optval, optlen);
19683fdadf7dSDmitry Mishin 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
19693fdadf7dSDmitry Mishin }
1970543d9cfeSArnaldo Carvalho de Melo 
1971543d9cfeSArnaldo Carvalho de Melo EXPORT_SYMBOL(compat_tcp_setsockopt);
19723fdadf7dSDmitry Mishin #endif
19733fdadf7dSDmitry Mishin 
19741da177e4SLinus Torvalds /* Return information about state of tcp endpoint in API format. */
19751da177e4SLinus Torvalds void tcp_get_info(struct sock *sk, struct tcp_info *info)
19761da177e4SLinus Torvalds {
19771da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1978463c84b9SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
19791da177e4SLinus Torvalds 	u32 now = tcp_time_stamp;
19801da177e4SLinus Torvalds 
19811da177e4SLinus Torvalds 	memset(info, 0, sizeof(*info));
19821da177e4SLinus Torvalds 
19831da177e4SLinus Torvalds 	info->tcpi_state = sk->sk_state;
19846687e988SArnaldo Carvalho de Melo 	info->tcpi_ca_state = icsk->icsk_ca_state;
1985463c84b9SArnaldo Carvalho de Melo 	info->tcpi_retransmits = icsk->icsk_retransmits;
19866687e988SArnaldo Carvalho de Melo 	info->tcpi_probes = icsk->icsk_probes_out;
1987463c84b9SArnaldo Carvalho de Melo 	info->tcpi_backoff = icsk->icsk_backoff;
19881da177e4SLinus Torvalds 
19891da177e4SLinus Torvalds 	if (tp->rx_opt.tstamp_ok)
19901da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
19911da177e4SLinus Torvalds 	if (tp->rx_opt.sack_ok)
19921da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_SACK;
19931da177e4SLinus Torvalds 	if (tp->rx_opt.wscale_ok) {
19941da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_WSCALE;
19951da177e4SLinus Torvalds 		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
19961da177e4SLinus Torvalds 		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
19971da177e4SLinus Torvalds 	}
19981da177e4SLinus Torvalds 
19991da177e4SLinus Torvalds 	if (tp->ecn_flags&TCP_ECN_OK)
20001da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_ECN;
20011da177e4SLinus Torvalds 
2002463c84b9SArnaldo Carvalho de Melo 	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2003463c84b9SArnaldo Carvalho de Melo 	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2004c1b4a7e6SDavid S. Miller 	info->tcpi_snd_mss = tp->mss_cache;
2005463c84b9SArnaldo Carvalho de Melo 	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
20061da177e4SLinus Torvalds 
20071da177e4SLinus Torvalds 	info->tcpi_unacked = tp->packets_out;
20081da177e4SLinus Torvalds 	info->tcpi_sacked = tp->sacked_out;
20091da177e4SLinus Torvalds 	info->tcpi_lost = tp->lost_out;
20101da177e4SLinus Torvalds 	info->tcpi_retrans = tp->retrans_out;
20111da177e4SLinus Torvalds 	info->tcpi_fackets = tp->fackets_out;
20121da177e4SLinus Torvalds 
20131da177e4SLinus Torvalds 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2014463c84b9SArnaldo Carvalho de Melo 	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
20151da177e4SLinus Torvalds 	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
20161da177e4SLinus Torvalds 
2017d83d8461SArnaldo Carvalho de Melo 	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
20181da177e4SLinus Torvalds 	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
20191da177e4SLinus Torvalds 	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
20201da177e4SLinus Torvalds 	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
20211da177e4SLinus Torvalds 	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
20221da177e4SLinus Torvalds 	info->tcpi_snd_cwnd = tp->snd_cwnd;
20231da177e4SLinus Torvalds 	info->tcpi_advmss = tp->advmss;
20241da177e4SLinus Torvalds 	info->tcpi_reordering = tp->reordering;
20251da177e4SLinus Torvalds 
20261da177e4SLinus Torvalds 	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
20271da177e4SLinus Torvalds 	info->tcpi_rcv_space = tp->rcvq_space.space;
20281da177e4SLinus Torvalds 
20291da177e4SLinus Torvalds 	info->tcpi_total_retrans = tp->total_retrans;
20301da177e4SLinus Torvalds }
20311da177e4SLinus Torvalds 
20321da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info);
20331da177e4SLinus Torvalds 
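/*
 * Editor's sketch (not in the original file): user space reaches
 * tcp_get_info() through the TCP_INFO case handled below, e.g.:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt=%uus cwnd=%u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */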
20343fdadf7dSDmitry Mishin static int do_tcp_getsockopt(struct sock *sk, int level,
20353fdadf7dSDmitry Mishin 		int optname, char __user *optval, int __user *optlen)
20361da177e4SLinus Torvalds {
2037295f7324SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
20381da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
20391da177e4SLinus Torvalds 	int val, len;
20401da177e4SLinus Torvalds 
20411da177e4SLinus Torvalds 	if (get_user(len, optlen))
20421da177e4SLinus Torvalds 		return -EFAULT;
20431da177e4SLinus Torvalds 
20441da177e4SLinus Torvalds 	len = min_t(unsigned int, len, sizeof(int));
20451da177e4SLinus Torvalds 
20461da177e4SLinus Torvalds 	if (len < 0)
20471da177e4SLinus Torvalds 		return -EINVAL;
20481da177e4SLinus Torvalds 
20491da177e4SLinus Torvalds 	switch (optname) {
20501da177e4SLinus Torvalds 	case TCP_MAXSEG:
2051c1b4a7e6SDavid S. Miller 		val = tp->mss_cache;
20521da177e4SLinus Torvalds 		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
20531da177e4SLinus Torvalds 			val = tp->rx_opt.user_mss;
20541da177e4SLinus Torvalds 		break;
20551da177e4SLinus Torvalds 	case TCP_NODELAY:
20561da177e4SLinus Torvalds 		val = !!(tp->nonagle&TCP_NAGLE_OFF);
20571da177e4SLinus Torvalds 		break;
20581da177e4SLinus Torvalds 	case TCP_CORK:
20591da177e4SLinus Torvalds 		val = !!(tp->nonagle&TCP_NAGLE_CORK);
20601da177e4SLinus Torvalds 		break;
20611da177e4SLinus Torvalds 	case TCP_KEEPIDLE:
20621da177e4SLinus Torvalds 		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
20631da177e4SLinus Torvalds 		break;
20641da177e4SLinus Torvalds 	case TCP_KEEPINTVL:
20651da177e4SLinus Torvalds 		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
20661da177e4SLinus Torvalds 		break;
20671da177e4SLinus Torvalds 	case TCP_KEEPCNT:
20681da177e4SLinus Torvalds 		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
20691da177e4SLinus Torvalds 		break;
20701da177e4SLinus Torvalds 	case TCP_SYNCNT:
2071295f7324SArnaldo Carvalho de Melo 		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
20721da177e4SLinus Torvalds 		break;
20731da177e4SLinus Torvalds 	case TCP_LINGER2:
20741da177e4SLinus Torvalds 		val = tp->linger2;
20751da177e4SLinus Torvalds 		if (val >= 0)
20761da177e4SLinus Torvalds 			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
20771da177e4SLinus Torvalds 		break;
20781da177e4SLinus Torvalds 	case TCP_DEFER_ACCEPT:
2079295f7324SArnaldo Carvalho de Melo 		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2080295f7324SArnaldo Carvalho de Melo 			((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
20811da177e4SLinus Torvalds 		break;
20821da177e4SLinus Torvalds 	case TCP_WINDOW_CLAMP:
20831da177e4SLinus Torvalds 		val = tp->window_clamp;
20841da177e4SLinus Torvalds 		break;
20851da177e4SLinus Torvalds 	case TCP_INFO: {
20861da177e4SLinus Torvalds 		struct tcp_info info;
20871da177e4SLinus Torvalds 
20881da177e4SLinus Torvalds 		if (get_user(len, optlen))
20891da177e4SLinus Torvalds 			return -EFAULT;
20901da177e4SLinus Torvalds 
20911da177e4SLinus Torvalds 		tcp_get_info(sk, &info);
20921da177e4SLinus Torvalds 
20931da177e4SLinus Torvalds 		len = min_t(unsigned int, len, sizeof(info));
20941da177e4SLinus Torvalds 		if (put_user(len, optlen))
20951da177e4SLinus Torvalds 			return -EFAULT;
20961da177e4SLinus Torvalds 		if (copy_to_user(optval, &info, len))
20971da177e4SLinus Torvalds 			return -EFAULT;
20981da177e4SLinus Torvalds 		return 0;
20991da177e4SLinus Torvalds 	}
21001da177e4SLinus Torvalds 	case TCP_QUICKACK:
2101295f7324SArnaldo Carvalho de Melo 		val = !icsk->icsk_ack.pingpong;
21021da177e4SLinus Torvalds 		break;
21035f8ef48dSStephen Hemminger 
21045f8ef48dSStephen Hemminger 	case TCP_CONGESTION:
21055f8ef48dSStephen Hemminger 		if (get_user(len, optlen))
21065f8ef48dSStephen Hemminger 			return -EFAULT;
21075f8ef48dSStephen Hemminger 		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
21085f8ef48dSStephen Hemminger 		if (put_user(len, optlen))
21095f8ef48dSStephen Hemminger 			return -EFAULT;
21106687e988SArnaldo Carvalho de Melo 		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
21115f8ef48dSStephen Hemminger 			return -EFAULT;
21125f8ef48dSStephen Hemminger 		return 0;
21131da177e4SLinus Torvalds 	default:
21141da177e4SLinus Torvalds 		return -ENOPROTOOPT;
21151da177e4SLinus Torvalds 	};
21161da177e4SLinus Torvalds 
21171da177e4SLinus Torvalds 	if (put_user(len, optlen))
21181da177e4SLinus Torvalds 		return -EFAULT;
21191da177e4SLinus Torvalds 	if (copy_to_user(optval, &val, len))
21201da177e4SLinus Torvalds 		return -EFAULT;
21211da177e4SLinus Torvalds 	return 0;
21221da177e4SLinus Torvalds }
21231da177e4SLinus Torvalds 
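/*
 * Editorial note (not part of this file): most cases above fall through
 * to the common put_user()/copy_to_user() tail that returns an integer,
 * while TCP_INFO and TCP_CONGESTION return early because they copy a
 * struct and a string respectively.  A hedged userspace sketch of the
 * string case; print_cc_name() is an illustrative name and the buffer
 * size mirrors the kernel's TCP_CA_NAME_MAX (16).
 */
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_CONGESTION
#define TCP_CONGESTION 13		/* matches include/linux/tcp.h */
#endif

static void print_cc_name(int fd)
{
	char name[16];			/* TCP_CA_NAME_MAX in the kernel */
	socklen_t len = sizeof(name);

	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("congestion control: %.*s\n", (int)len, name);
}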
21243fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
21253fdadf7dSDmitry Mishin 		   int __user *optlen)
21263fdadf7dSDmitry Mishin {
21273fdadf7dSDmitry Mishin 	struct inet_connection_sock *icsk = inet_csk(sk);
21283fdadf7dSDmitry Mishin 
21293fdadf7dSDmitry Mishin 	if (level != SOL_TCP)
21303fdadf7dSDmitry Mishin 		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
21313fdadf7dSDmitry Mishin 						     optval, optlen);
21323fdadf7dSDmitry Mishin 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
21333fdadf7dSDmitry Mishin }
21343fdadf7dSDmitry Mishin 
21353fdadf7dSDmitry Mishin #ifdef CONFIG_COMPAT
2136543d9cfeSArnaldo Carvalho de Melo int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2137543d9cfeSArnaldo Carvalho de Melo 			  char __user *optval, int __user *optlen)
21383fdadf7dSDmitry Mishin {
2139dec73ff0SArnaldo Carvalho de Melo 	if (level != SOL_TCP)
2140dec73ff0SArnaldo Carvalho de Melo 		return inet_csk_compat_getsockopt(sk, level, optname,
2141dec73ff0SArnaldo Carvalho de Melo 						  optval, optlen);
21423fdadf7dSDmitry Mishin 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
21433fdadf7dSDmitry Mishin }
2144543d9cfeSArnaldo Carvalho de Melo 
2145543d9cfeSArnaldo Carvalho de Melo EXPORT_SYMBOL(compat_tcp_getsockopt);
21463fdadf7dSDmitry Mishin #endif
21471da177e4SLinus Torvalds 
2148*576a30ebSHerbert Xu struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2149f4c50d99SHerbert Xu {
2150f4c50d99SHerbert Xu 	struct sk_buff *segs = ERR_PTR(-EINVAL);
2151f4c50d99SHerbert Xu 	struct tcphdr *th;
2152f4c50d99SHerbert Xu 	unsigned thlen;
2153f4c50d99SHerbert Xu 	unsigned int seq;
2154f4c50d99SHerbert Xu 	unsigned int delta;
2155f4c50d99SHerbert Xu 	unsigned int oldlen;
2156f4c50d99SHerbert Xu 	unsigned int len;
2157f4c50d99SHerbert Xu 
2158f4c50d99SHerbert Xu 	if (!pskb_may_pull(skb, sizeof(*th)))
2159f4c50d99SHerbert Xu 		goto out;
2160f4c50d99SHerbert Xu 
2161f4c50d99SHerbert Xu 	th = skb->h.th;
2162f4c50d99SHerbert Xu 	thlen = th->doff * 4;
2163f4c50d99SHerbert Xu 	if (thlen < sizeof(*th))
2164f4c50d99SHerbert Xu 		goto out;
2165f4c50d99SHerbert Xu 
2166f4c50d99SHerbert Xu 	if (!pskb_may_pull(skb, thlen))
2167f4c50d99SHerbert Xu 		goto out;
2168f4c50d99SHerbert Xu 
2169*576a30ebSHerbert Xu 	segs = NULL;
2170*576a30ebSHerbert Xu 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
2171*576a30ebSHerbert Xu 		goto out;
2172*576a30ebSHerbert Xu 
21730718bcc0SHerbert Xu 	oldlen = (u16)~skb->len;
2174f4c50d99SHerbert Xu 	__skb_pull(skb, thlen);
2175f4c50d99SHerbert Xu 
2176*576a30ebSHerbert Xu 	segs = skb_segment(skb, features);
2177f4c50d99SHerbert Xu 	if (IS_ERR(segs))
2178f4c50d99SHerbert Xu 		goto out;
2179f4c50d99SHerbert Xu 
2180f4c50d99SHerbert Xu 	len = skb_shinfo(skb)->gso_size;
21810718bcc0SHerbert Xu 	delta = htonl(oldlen + (thlen + len));
2182f4c50d99SHerbert Xu 
2183f4c50d99SHerbert Xu 	skb = segs;
2184f4c50d99SHerbert Xu 	th = skb->h.th;
2185f4c50d99SHerbert Xu 	seq = ntohl(th->seq);
2186f4c50d99SHerbert Xu 
2187f4c50d99SHerbert Xu 	do {
2188f4c50d99SHerbert Xu 		th->fin = th->psh = 0;
2189f4c50d99SHerbert Xu 
21900718bcc0SHerbert Xu 		th->check = ~csum_fold(th->check + delta);
21910718bcc0SHerbert Xu 		if (skb->ip_summed != CHECKSUM_HW)
21920718bcc0SHerbert Xu 			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
21930718bcc0SHerbert Xu 							   skb->csum));
2194f4c50d99SHerbert Xu 
2195f4c50d99SHerbert Xu 		seq += len;
2196f4c50d99SHerbert Xu 		skb = skb->next;
2197f4c50d99SHerbert Xu 		th = skb->h.th;
2198f4c50d99SHerbert Xu 
2199f4c50d99SHerbert Xu 		th->seq = htonl(seq);
2200f4c50d99SHerbert Xu 		th->cwr = 0;
2201f4c50d99SHerbert Xu 	} while (skb->next);
2202f4c50d99SHerbert Xu 
22030718bcc0SHerbert Xu 	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
22040718bcc0SHerbert Xu 	th->check = ~csum_fold(th->check + delta);
22050718bcc0SHerbert Xu 	if (skb->ip_summed != CHECKSUM_HW)
22060718bcc0SHerbert Xu 		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
22070718bcc0SHerbert Xu 						   skb->csum));
2208f4c50d99SHerbert Xu 
2209f4c50d99SHerbert Xu out:
2210f4c50d99SHerbert Xu 	return segs;
2211f4c50d99SHerbert Xu }
2212f4c50d99SHerbert Xu 
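/*
 * Editorial note (not part of this file): the two checksum fix-ups in
 * tcp_tso_segment() above are incremental one's-complement updates.  The
 * TCP pseudo-header sum includes the TCP length, so each generated
 * segment must replace the original super-packet length with its own
 * (thlen + gso_size for the full-sized segments, and the TCP header plus
 * whatever payload remains for the final one).  "oldlen = (u16)~skb->len"
 * is the one's-complement negation of the old length, so adding it
 * subtracts that length from the sum.  A standalone model of the same
 * arithmetic on plain integers, assuming the checksum field carries a
 * folded, non-complemented pseudo-header sum as in the hardware-offload
 * path; names are illustrative.
 */
#include <stdint.h>

static uint16_t model_csum_fold(uint32_t sum)	/* like csum_fold() */
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Swap old_len for new_len inside the folded pseudo-header sum "check". */
static uint16_t model_fix_len(uint16_t check, uint16_t old_len,
			      uint16_t new_len)
{
	uint32_t delta = (uint16_t)~old_len + (uint32_t)new_len;

	return (uint16_t)~model_csum_fold((uint32_t)check + delta);
}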
22131da177e4SLinus Torvalds extern void __skb_cb_too_small_for_tcp(int, int);
22145f8ef48dSStephen Hemminger extern struct tcp_congestion_ops tcp_reno;
22151da177e4SLinus Torvalds 
22161da177e4SLinus Torvalds static __initdata unsigned long thash_entries;
22171da177e4SLinus Torvalds static int __init set_thash_entries(char *str)
22181da177e4SLinus Torvalds {
22191da177e4SLinus Torvalds 	if (!str)
22201da177e4SLinus Torvalds 		return 0;
22211da177e4SLinus Torvalds 	thash_entries = simple_strtoul(str, &str, 0);
22221da177e4SLinus Torvalds 	return 1;
22231da177e4SLinus Torvalds }
22241da177e4SLinus Torvalds __setup("thash_entries=", set_thash_entries);
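/*
 * Editorial note (not part of this file): the __setup() above turns
 * "thash_entries" into a boot-time parameter.  For example, booting with
 * "thash_entries=131072" on the kernel command line (the value is an
 * illustrative choice) requests that many entries for the established
 * hash table; the request is handed to alloc_large_system_hash() in
 * tcp_init() below, which still applies its own rounding and limits.
 */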
22251da177e4SLinus Torvalds 
22261da177e4SLinus Torvalds void __init tcp_init(void)
22271da177e4SLinus Torvalds {
22281da177e4SLinus Torvalds 	struct sk_buff *skb = NULL;
22297b4f4b5eSJohn Heffner 	unsigned long limit;
22307b4f4b5eSJohn Heffner 	int order, i, max_share;
22311da177e4SLinus Torvalds 
22321da177e4SLinus Torvalds 	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
22331da177e4SLinus Torvalds 		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
22341da177e4SLinus Torvalds 					   sizeof(skb->cb));
22351da177e4SLinus Torvalds 
22366e04e021SArnaldo Carvalho de Melo 	tcp_hashinfo.bind_bucket_cachep =
22376e04e021SArnaldo Carvalho de Melo 		kmem_cache_create("tcp_bind_bucket",
22386e04e021SArnaldo Carvalho de Melo 				  sizeof(struct inet_bind_bucket), 0,
22396e04e021SArnaldo Carvalho de Melo 				  SLAB_HWCACHE_ALIGN, NULL, NULL);
22406e04e021SArnaldo Carvalho de Melo 	if (!tcp_hashinfo.bind_bucket_cachep)
22411da177e4SLinus Torvalds 		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
22421da177e4SLinus Torvalds 
22431da177e4SLinus Torvalds 	/* Size and allocate the main established and bind bucket
22441da177e4SLinus Torvalds 	 * hash tables.
22451da177e4SLinus Torvalds 	 *
22461da177e4SLinus Torvalds 	 * The methodology is similar to that of the buffer cache.
22471da177e4SLinus Torvalds 	 */
22486e04e021SArnaldo Carvalho de Melo 	tcp_hashinfo.ehash =
22491da177e4SLinus Torvalds 		alloc_large_system_hash("TCP established",
22500f7ff927SArnaldo Carvalho de Melo 					sizeof(struct inet_ehash_bucket),
22511da177e4SLinus Torvalds 					thash_entries,
22521da177e4SLinus Torvalds 					(num_physpages >= 128 * 1024) ?
225318955cfcSMike Stroyan 					13 : 15,
22541da177e4SLinus Torvalds 					HASH_HIGHMEM,
22556e04e021SArnaldo Carvalho de Melo 					&tcp_hashinfo.ehash_size,
22561da177e4SLinus Torvalds 					NULL,
22571da177e4SLinus Torvalds 					0);
22586e04e021SArnaldo Carvalho de Melo 	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
22596e04e021SArnaldo Carvalho de Melo 	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
22606e04e021SArnaldo Carvalho de Melo 		rwlock_init(&tcp_hashinfo.ehash[i].lock);
22616e04e021SArnaldo Carvalho de Melo 		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
22621da177e4SLinus Torvalds 	}
22631da177e4SLinus Torvalds 
22646e04e021SArnaldo Carvalho de Melo 	tcp_hashinfo.bhash =
22651da177e4SLinus Torvalds 		alloc_large_system_hash("TCP bind",
22660f7ff927SArnaldo Carvalho de Melo 					sizeof(struct inet_bind_hashbucket),
22676e04e021SArnaldo Carvalho de Melo 					tcp_hashinfo.ehash_size,
22681da177e4SLinus Torvalds 					(num_physpages >= 128 * 1024) ?
226918955cfcSMike Stroyan 					13 : 15,
22701da177e4SLinus Torvalds 					HASH_HIGHMEM,
22716e04e021SArnaldo Carvalho de Melo 					&tcp_hashinfo.bhash_size,
22721da177e4SLinus Torvalds 					NULL,
22731da177e4SLinus Torvalds 					64 * 1024);
22746e04e021SArnaldo Carvalho de Melo 	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
22756e04e021SArnaldo Carvalho de Melo 	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
22766e04e021SArnaldo Carvalho de Melo 		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
22776e04e021SArnaldo Carvalho de Melo 		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
22781da177e4SLinus Torvalds 	}
22791da177e4SLinus Torvalds 
22801da177e4SLinus Torvalds 	/* Try to be a bit smarter and adjust defaults depending
22811da177e4SLinus Torvalds 	 * on available memory.
22821da177e4SLinus Torvalds 	 */
22831da177e4SLinus Torvalds 	for (order = 0; ((1 << order) << PAGE_SHIFT) <
22846e04e021SArnaldo Carvalho de Melo 			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
22851da177e4SLinus Torvalds 			order++)
22861da177e4SLinus Torvalds 		;
2287e7626486SAndi Kleen 	if (order >= 4) {
22881da177e4SLinus Torvalds 		sysctl_local_port_range[0] = 32768;
22891da177e4SLinus Torvalds 		sysctl_local_port_range[1] = 61000;
2290295ff7edSArnaldo Carvalho de Melo 		tcp_death_row.sysctl_max_tw_buckets = 180000;
22911da177e4SLinus Torvalds 		sysctl_tcp_max_orphans = 4096 << (order - 4);
22921da177e4SLinus Torvalds 		sysctl_max_syn_backlog = 1024;
22931da177e4SLinus Torvalds 	} else if (order < 3) {
22941da177e4SLinus Torvalds 		sysctl_local_port_range[0] = 1024 * (3 - order);
2295295ff7edSArnaldo Carvalho de Melo 		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
22961da177e4SLinus Torvalds 		sysctl_tcp_max_orphans >>= (3 - order);
22971da177e4SLinus Torvalds 		sysctl_max_syn_backlog = 128;
22981da177e4SLinus Torvalds 	}
22991da177e4SLinus Torvalds 
23001da177e4SLinus Torvalds 	sysctl_tcp_mem[0] =  768 << order;
23011da177e4SLinus Torvalds 	sysctl_tcp_mem[1] = 1024 << order;
23021da177e4SLinus Torvalds 	sysctl_tcp_mem[2] = 1536 << order;
23031da177e4SLinus Torvalds 
23047b4f4b5eSJohn Heffner 	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
23057b4f4b5eSJohn Heffner 	max_share = min(4UL*1024*1024, limit);
23067b4f4b5eSJohn Heffner 
23077b4f4b5eSJohn Heffner 	sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
23087b4f4b5eSJohn Heffner 	sysctl_tcp_wmem[1] = 16*1024;
23097b4f4b5eSJohn Heffner 	sysctl_tcp_wmem[2] = max(64*1024, max_share);
23107b4f4b5eSJohn Heffner 
23117b4f4b5eSJohn Heffner 	sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
23127b4f4b5eSJohn Heffner 	sysctl_tcp_rmem[1] = 87380;
23137b4f4b5eSJohn Heffner 	sysctl_tcp_rmem[2] = max(87380, max_share);
23141da177e4SLinus Torvalds 
23151da177e4SLinus Torvalds 	printk(KERN_INFO "TCP: Hash tables configured "
23161da177e4SLinus Torvalds 	       "(established %d bind %d)\n",
23176e04e021SArnaldo Carvalho de Melo 	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
2318317a76f9SStephen Hemminger 
2319317a76f9SStephen Hemminger 	tcp_register_congestion_control(&tcp_reno);
23201da177e4SLinus Torvalds }
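/*
 * Editorial note (not part of this file): a worked example of the
 * sizing in tcp_init() above, assuming the loop settles on order = 4
 * and PAGE_SIZE is 4 KiB (both are assumptions; they depend on the
 * machine): tcp_mem becomes {768, 1024, 1536} << 4 =
 * {12288, 16384, 24576} pages; limit = 16384 << (12 - 7) = 512 KiB;
 * max_share = min(4 MiB, 512 KiB) = 512 KiB; so tcp_wmem[2] =
 * max(64 KiB, 512 KiB) = 512 KiB and tcp_rmem[2] = max(87380, 512 KiB)
 * = 512 KiB.  The resulting defaults are visible at runtime under
 * /proc/sys/net/ipv4/tcp_mem, tcp_rmem and tcp_wmem.
 */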
23211da177e4SLinus Torvalds 
23221da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_close);
23231da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_disconnect);
23241da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_getsockopt);
23251da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_ioctl);
23261da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_poll);
23271da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_read_sock);
23281da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_recvmsg);
23291da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sendmsg);
23301da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sendpage);
23311da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_setsockopt);
23321da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_shutdown);
23331da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_statistics);
2334