xref: /linux/net/ipv4/tcp.c (revision 2b1244a43be97f504494b557a7f7a65fe0d00dba)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Implementation of the Transmission Control Protocol(TCP).
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *		Mark Evans, <evansmp@uhura.aston.ac.uk>
131da177e4SLinus Torvalds  *		Corey Minyard <wf-rch!minyard@relay.EU.net>
141da177e4SLinus Torvalds  *		Florian La Roche, <flla@stud.uni-sb.de>
151da177e4SLinus Torvalds  *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
161da177e4SLinus Torvalds  *		Linus Torvalds, <torvalds@cs.helsinki.fi>
171da177e4SLinus Torvalds  *		Alan Cox, <gw4pts@gw4pts.ampr.org>
181da177e4SLinus Torvalds  *		Matthew Dillon, <dillon@apollo.west.oic.com>
191da177e4SLinus Torvalds  *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
201da177e4SLinus Torvalds  *		Jorge Cwik, <jorge@laser.satlink.net>
211da177e4SLinus Torvalds  *
221da177e4SLinus Torvalds  * Fixes:
231da177e4SLinus Torvalds  *		Alan Cox	:	Numerous verify_area() calls
241da177e4SLinus Torvalds  *		Alan Cox	:	Set the ACK bit on a reset
251da177e4SLinus Torvalds  *		Alan Cox	:	Stopped it crashing if it closed while
261da177e4SLinus Torvalds  *					sk->inuse=1 and was trying to connect
271da177e4SLinus Torvalds  *					(tcp_err()).
281da177e4SLinus Torvalds  *		Alan Cox	:	All icmp error handling was broken
291da177e4SLinus Torvalds  *					pointers passed where wrong and the
301da177e4SLinus Torvalds  *					socket was looked up backwards. Nobody
311da177e4SLinus Torvalds  *					tested any icmp error code obviously.
321da177e4SLinus Torvalds  *		Alan Cox	:	tcp_err() now handled properly. It
331da177e4SLinus Torvalds  *					wakes people on errors. poll
341da177e4SLinus Torvalds  *					behaves and the icmp error race
351da177e4SLinus Torvalds  *					has gone by moving it into sock.c
361da177e4SLinus Torvalds  *		Alan Cox	:	tcp_send_reset() fixed to work for
371da177e4SLinus Torvalds  *					everything not just packets for
381da177e4SLinus Torvalds  *					unknown sockets.
391da177e4SLinus Torvalds  *		Alan Cox	:	tcp option processing.
401da177e4SLinus Torvalds  *		Alan Cox	:	Reset tweaked (still not 100%) [Had
411da177e4SLinus Torvalds  *					syn rule wrong]
421da177e4SLinus Torvalds  *		Herp Rosmanith  :	More reset fixes
431da177e4SLinus Torvalds  *		Alan Cox	:	No longer acks invalid rst frames.
441da177e4SLinus Torvalds  *					Acking any kind of RST is right out.
451da177e4SLinus Torvalds  *		Alan Cox	:	Sets an ignore me flag on an rst
461da177e4SLinus Torvalds  *					receive otherwise odd bits of prattle
471da177e4SLinus Torvalds  *					escape still
481da177e4SLinus Torvalds  *		Alan Cox	:	Fixed another acking RST frame bug.
491da177e4SLinus Torvalds  *					Should stop LAN workplace lockups.
501da177e4SLinus Torvalds  *		Alan Cox	: 	Some tidyups using the new skb list
511da177e4SLinus Torvalds  *					facilities
521da177e4SLinus Torvalds  *		Alan Cox	:	sk->keepopen now seems to work
531da177e4SLinus Torvalds  *		Alan Cox	:	Pulls options out correctly on accepts
541da177e4SLinus Torvalds  *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
551da177e4SLinus Torvalds  *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
561da177e4SLinus Torvalds  *					bit to skb ops.
571da177e4SLinus Torvalds  *		Alan Cox	:	Tidied tcp_data to avoid a potential
581da177e4SLinus Torvalds  *					nasty.
591da177e4SLinus Torvalds  *		Alan Cox	:	Added some better commenting, as the
601da177e4SLinus Torvalds  *					tcp is hard to follow
611da177e4SLinus Torvalds  *		Alan Cox	:	Removed incorrect check for 20 * psh
621da177e4SLinus Torvalds  *	Michael O'Reilly	:	ack < copied bug fix.
631da177e4SLinus Torvalds  *	Johannes Stille		:	Misc tcp fixes (not all in yet).
641da177e4SLinus Torvalds  *		Alan Cox	:	FIN with no memory -> CRASH
651da177e4SLinus Torvalds  *		Alan Cox	:	Added socket option proto entries.
661da177e4SLinus Torvalds  *					Also added awareness of them to accept.
671da177e4SLinus Torvalds  *		Alan Cox	:	Added TCP options (SOL_TCP)
681da177e4SLinus Torvalds  *		Alan Cox	:	Switched wakeup calls to callbacks,
691da177e4SLinus Torvalds  *					so the kernel can layer network
701da177e4SLinus Torvalds  *					sockets.
711da177e4SLinus Torvalds  *		Alan Cox	:	Use ip_tos/ip_ttl settings.
721da177e4SLinus Torvalds  *		Alan Cox	:	Handle FIN (more) properly (we hope).
731da177e4SLinus Torvalds  *		Alan Cox	:	RST frames sent on unsynchronised
741da177e4SLinus Torvalds  *					state ack error.
751da177e4SLinus Torvalds  *		Alan Cox	:	Put in missing check for SYN bit.
761da177e4SLinus Torvalds  *		Alan Cox	:	Added tcp_select_window() aka NET2E
771da177e4SLinus Torvalds  *					window non shrink trick.
781da177e4SLinus Torvalds  *		Alan Cox	:	Added a couple of small NET2E timer
791da177e4SLinus Torvalds  *					fixes
801da177e4SLinus Torvalds  *		Charles Hedrick :	TCP fixes
811da177e4SLinus Torvalds  *		Toomas Tamm	:	TCP window fixes
821da177e4SLinus Torvalds  *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
831da177e4SLinus Torvalds  *		Charles Hedrick	:	Rewrote most of it to actually work
841da177e4SLinus Torvalds  *		Linus		:	Rewrote tcp_read() and URG handling
851da177e4SLinus Torvalds  *					completely
861da177e4SLinus Torvalds  *		Gerhard Koerting:	Fixed some missing timer handling
871da177e4SLinus Torvalds  *		Matthew Dillon  :	Reworked TCP machine states as per RFC
881da177e4SLinus Torvalds  *		Gerhard Koerting:	PC/TCP workarounds
891da177e4SLinus Torvalds  *		Adam Caldwell	:	Assorted timer/timing errors
901da177e4SLinus Torvalds  *		Matthew Dillon	:	Fixed another RST bug
911da177e4SLinus Torvalds  *		Alan Cox	:	Move to kernel side addressing changes.
921da177e4SLinus Torvalds  *		Alan Cox	:	Beginning work on TCP fastpathing
931da177e4SLinus Torvalds  *					(not yet usable)
941da177e4SLinus Torvalds  *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
951da177e4SLinus Torvalds  *		Alan Cox	:	TCP fast path debugging
961da177e4SLinus Torvalds  *		Alan Cox	:	Window clamping
971da177e4SLinus Torvalds  *		Michael Riepe	:	Bug in tcp_check()
981da177e4SLinus Torvalds  *		Matt Dillon	:	More TCP improvements and RST bug fixes
991da177e4SLinus Torvalds  *		Matt Dillon	:	Yet more small nasties remove from the
1001da177e4SLinus Torvalds  *					TCP code (Be very nice to this man if
1011da177e4SLinus Torvalds  *					tcp finally works 100%) 8)
1021da177e4SLinus Torvalds  *		Alan Cox	:	BSD accept semantics.
1031da177e4SLinus Torvalds  *		Alan Cox	:	Reset on closedown bug.
1041da177e4SLinus Torvalds  *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
1051da177e4SLinus Torvalds  *		Michael Pall	:	Handle poll() after URG properly in
1061da177e4SLinus Torvalds  *					all cases.
1071da177e4SLinus Torvalds  *		Michael Pall	:	Undo the last fix in tcp_read_urg()
1081da177e4SLinus Torvalds  *					(multi URG PUSH broke rlogin).
1091da177e4SLinus Torvalds  *		Michael Pall	:	Fix the multi URG PUSH problem in
1101da177e4SLinus Torvalds  *					tcp_readable(), poll() after URG
1111da177e4SLinus Torvalds  *					works now.
1121da177e4SLinus Torvalds  *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
1131da177e4SLinus Torvalds  *					BSD api.
1141da177e4SLinus Torvalds  *		Alan Cox	:	Changed the semantics of sk->socket to
1151da177e4SLinus Torvalds  *					fix a race and a signal problem with
1161da177e4SLinus Torvalds  *					accept() and async I/O.
1171da177e4SLinus Torvalds  *		Alan Cox	:	Relaxed the rules on tcp_sendto().
1181da177e4SLinus Torvalds  *		Yury Shevchuk	:	Really fixed accept() blocking problem.
1191da177e4SLinus Torvalds  *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
1201da177e4SLinus Torvalds  *					clients/servers which listen in on
1211da177e4SLinus Torvalds  *					fixed ports.
1221da177e4SLinus Torvalds  *		Alan Cox	:	Cleaned the above up and shrank it to
1231da177e4SLinus Torvalds  *					a sensible code size.
1241da177e4SLinus Torvalds  *		Alan Cox	:	Self connect lockup fix.
1251da177e4SLinus Torvalds  *		Alan Cox	:	No connect to multicast.
1261da177e4SLinus Torvalds  *		Ross Biro	:	Close unaccepted children on master
1271da177e4SLinus Torvalds  *					socket close.
1281da177e4SLinus Torvalds  *		Alan Cox	:	Reset tracing code.
1291da177e4SLinus Torvalds  *		Alan Cox	:	Spurious resets on shutdown.
1301da177e4SLinus Torvalds  *		Alan Cox	:	Giant 15 minute/60 second timer error
1311da177e4SLinus Torvalds  *		Alan Cox	:	Small whoops in polling before an
1321da177e4SLinus Torvalds  *					accept.
1331da177e4SLinus Torvalds  *		Alan Cox	:	Kept the state trace facility since
1341da177e4SLinus Torvalds  *					it's handy for debugging.
1351da177e4SLinus Torvalds  *		Alan Cox	:	More reset handler fixes.
1361da177e4SLinus Torvalds  *		Alan Cox	:	Started rewriting the code based on
1371da177e4SLinus Torvalds  *					the RFC's for other useful protocol
1381da177e4SLinus Torvalds  *					references see: Comer, KA9Q NOS, and
1391da177e4SLinus Torvalds  *					for a reference on the difference
1401da177e4SLinus Torvalds  *					between specifications and how BSD
1411da177e4SLinus Torvalds  *					works see the 4.4lite source.
1421da177e4SLinus Torvalds  *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
1431da177e4SLinus Torvalds  *					close.
1441da177e4SLinus Torvalds  *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
1451da177e4SLinus Torvalds  *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
1461da177e4SLinus Torvalds  *		Alan Cox	:	Reimplemented timers as per the RFC
1471da177e4SLinus Torvalds  *					and using multiple timers for sanity.
1481da177e4SLinus Torvalds  *		Alan Cox	:	Small bug fixes, and a lot of new
1491da177e4SLinus Torvalds  *					comments.
1501da177e4SLinus Torvalds  *		Alan Cox	:	Fixed dual reader crash by locking
1511da177e4SLinus Torvalds  *					the buffers (much like datagram.c)
1521da177e4SLinus Torvalds  *		Alan Cox	:	Fixed stuck sockets in probe. A probe
1531da177e4SLinus Torvalds  *					now gets fed up of retrying without
1541da177e4SLinus Torvalds  *					(even a no space) answer.
1551da177e4SLinus Torvalds  *		Alan Cox	:	Extracted closing code better
1561da177e4SLinus Torvalds  *		Alan Cox	:	Fixed the closing state machine to
1571da177e4SLinus Torvalds  *					resemble the RFC.
1581da177e4SLinus Torvalds  *		Alan Cox	:	More 'per spec' fixes.
1591da177e4SLinus Torvalds  *		Jorge Cwik	:	Even faster checksumming.
1601da177e4SLinus Torvalds  *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
1611da177e4SLinus Torvalds  *					only frames. At least one pc tcp stack
1621da177e4SLinus Torvalds  *					generates them.
1631da177e4SLinus Torvalds  *		Alan Cox	:	Cache last socket.
1641da177e4SLinus Torvalds  *		Alan Cox	:	Per route irtt.
1651da177e4SLinus Torvalds  *		Matt Day	:	poll()->select() match BSD precisely on error
1661da177e4SLinus Torvalds  *		Alan Cox	:	New buffers
1671da177e4SLinus Torvalds  *		Marc Tamsky	:	Various sk->prot->retransmits and
1681da177e4SLinus Torvalds  *					sk->retransmits misupdating fixed.
1691da177e4SLinus Torvalds  *					Fixed tcp_write_timeout: stuck close,
1701da177e4SLinus Torvalds  *					and TCP syn retries gets used now.
1711da177e4SLinus Torvalds  *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
1721da177e4SLinus Torvalds  *					ack if state is TCP_CLOSED.
1731da177e4SLinus Torvalds  *		Alan Cox	:	Look up device on a retransmit - routes may
1741da177e4SLinus Torvalds  *					change. Doesn't yet cope with MSS shrink right
1751da177e4SLinus Torvalds  *					but it's a start!
1761da177e4SLinus Torvalds  *		Marc Tamsky	:	Closing in closing fixes.
1771da177e4SLinus Torvalds  *		Mike Shaver	:	RFC1122 verifications.
1781da177e4SLinus Torvalds  *		Alan Cox	:	rcv_saddr errors.
1791da177e4SLinus Torvalds  *		Alan Cox	:	Block double connect().
1801da177e4SLinus Torvalds  *		Alan Cox	:	Small hooks for enSKIP.
1811da177e4SLinus Torvalds  *		Alexey Kuznetsov:	Path MTU discovery.
1821da177e4SLinus Torvalds  *		Alan Cox	:	Support soft errors.
1831da177e4SLinus Torvalds  *		Alan Cox	:	Fix MTU discovery pathological case
1841da177e4SLinus Torvalds  *					when the remote claims no mtu!
1851da177e4SLinus Torvalds  *		Marc Tamsky	:	TCP_CLOSE fix.
1861da177e4SLinus Torvalds  *		Colin (G3TNE)	:	Send a reset on syn ack replies in
1871da177e4SLinus Torvalds  *					window but wrong (fixes NT lpd problems)
1881da177e4SLinus Torvalds  *		Pedro Roque	:	Better TCP window handling, delayed ack.
1891da177e4SLinus Torvalds  *		Joerg Reuter	:	No modification of locked buffers in
1901da177e4SLinus Torvalds  *					tcp_do_retransmit()
1911da177e4SLinus Torvalds  *		Eric Schenk	:	Changed receiver side silly window
1921da177e4SLinus Torvalds  *					avoidance algorithm to BSD style
1931da177e4SLinus Torvalds  *					algorithm. This doubles throughput
1941da177e4SLinus Torvalds  *					against machines running Solaris,
1951da177e4SLinus Torvalds  *					and seems to result in general
1961da177e4SLinus Torvalds  *					improvement.
1971da177e4SLinus Torvalds  *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
1981da177e4SLinus Torvalds  *	Willy Konynenberg	:	Transparent proxying support.
1991da177e4SLinus Torvalds  *	Mike McLagan		:	Routing by source
2001da177e4SLinus Torvalds  *		Keith Owens	:	Do proper merging with partial SKB's in
2011da177e4SLinus Torvalds  *					tcp_do_sendmsg to avoid burstiness.
2021da177e4SLinus Torvalds  *		Eric Schenk	:	Fix fast close down bug with
2031da177e4SLinus Torvalds  *					shutdown() followed by close().
2041da177e4SLinus Torvalds  *		Andi Kleen 	:	Make poll agree with SIGIO
2051da177e4SLinus Torvalds  *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
2061da177e4SLinus Torvalds  *					lingertime == 0 (RFC 793 ABORT Call)
2071da177e4SLinus Torvalds  *	Hirokazu Takahashi	:	Use copy_from_user() instead of
2081da177e4SLinus Torvalds  *					csum_and_copy_from_user() if possible.
2091da177e4SLinus Torvalds  *
2101da177e4SLinus Torvalds  *		This program is free software; you can redistribute it and/or
2111da177e4SLinus Torvalds  *		modify it under the terms of the GNU General Public License
2121da177e4SLinus Torvalds  *		as published by the Free Software Foundation; either version
2131da177e4SLinus Torvalds  *		2 of the License, or (at your option) any later version.
2141da177e4SLinus Torvalds  *
2151da177e4SLinus Torvalds  * Description of States:
2161da177e4SLinus Torvalds  *
2171da177e4SLinus Torvalds  *	TCP_SYN_SENT		sent a connection request, waiting for ack
2181da177e4SLinus Torvalds  *
2191da177e4SLinus Torvalds  *	TCP_SYN_RECV		received a connection request, sent ack,
2201da177e4SLinus Torvalds  *				waiting for final ack in three-way handshake.
2211da177e4SLinus Torvalds  *
2221da177e4SLinus Torvalds  *	TCP_ESTABLISHED		connection established
2231da177e4SLinus Torvalds  *
2241da177e4SLinus Torvalds  *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
2251da177e4SLinus Torvalds  *				transmission of remaining buffered data
2261da177e4SLinus Torvalds  *
2271da177e4SLinus Torvalds  *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
2281da177e4SLinus Torvalds  *				to shutdown
2291da177e4SLinus Torvalds  *
2301da177e4SLinus Torvalds  *	TCP_CLOSING		both sides have shutdown but we still have
2311da177e4SLinus Torvalds  *				data we have to finish sending
2321da177e4SLinus Torvalds  *
2331da177e4SLinus Torvalds  *	TCP_TIME_WAIT		timeout to catch resent junk before entering
2341da177e4SLinus Torvalds  *				closed, can only be entered from FIN_WAIT2
2351da177e4SLinus Torvalds  *				or CLOSING.  Required because the other end
2361da177e4SLinus Torvalds  *				may not have gotten our last ACK causing it
2371da177e4SLinus Torvalds  *				to retransmit the data packet (which we ignore)
2381da177e4SLinus Torvalds  *
2391da177e4SLinus Torvalds  *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
2401da177e4SLinus Torvalds  *				us to finish writing our data and to shutdown
2411da177e4SLinus Torvalds  *				(we have to close() to move on to LAST_ACK)
2421da177e4SLinus Torvalds  *
2431da177e4SLinus Torvalds  *	TCP_LAST_ACK		our side has shutdown after remote has
2441da177e4SLinus Torvalds  *				shutdown.  There may still be data in our
2451da177e4SLinus Torvalds  *				buffer that we have to finish sending
2461da177e4SLinus Torvalds  *
2471da177e4SLinus Torvalds  *	TCP_CLOSE		socket is finished
2481da177e4SLinus Torvalds  */
2491da177e4SLinus Torvalds 
2501da177e4SLinus Torvalds #include <linux/module.h>
2511da177e4SLinus Torvalds #include <linux/types.h>
2521da177e4SLinus Torvalds #include <linux/fcntl.h>
2531da177e4SLinus Torvalds #include <linux/poll.h>
2541da177e4SLinus Torvalds #include <linux/init.h>
2551da177e4SLinus Torvalds #include <linux/fs.h>
2561da177e4SLinus Torvalds #include <linux/random.h>
2571da177e4SLinus Torvalds #include <linux/bootmem.h>
258b8059eadSDavid S. Miller #include <linux/cache.h>
259f4c50d99SHerbert Xu #include <linux/err.h>
260cfb6eeb4SYOSHIFUJI Hideaki #include <linux/crypto.h>
2611da177e4SLinus Torvalds 
2621da177e4SLinus Torvalds #include <net/icmp.h>
2631da177e4SLinus Torvalds #include <net/tcp.h>
2641da177e4SLinus Torvalds #include <net/xfrm.h>
2651da177e4SLinus Torvalds #include <net/ip.h>
2661a2449a8SChris Leech #include <net/netdma.h>
2671da177e4SLinus Torvalds 
2681da177e4SLinus Torvalds #include <asm/uaccess.h>
2691da177e4SLinus Torvalds #include <asm/ioctls.h>
2701da177e4SLinus Torvalds 
/* How long an orphaned FIN_WAIT2 socket lingers; tunable via the
 * tcp_fin_timeout sysctl (initialised from TCP_FIN_TIMEOUT).
 */
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

/* Per-CPU TCP MIB counters (SNMP statistics). */
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

/* Count of TCP sockets detached from any process (orphans). */
atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

/* Memory tuning sysctls; three-element {min, pressure/default, max}
 * vectors (units per the ip-sysctl documentation: tcp_mem is presumably
 * in pages, tcp_[rw]mem in bytes -- confirm against sysctl_net_ipv4).
 */
int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the sk_stream_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);
3021da177e4SLinus Torvalds 
3031da177e4SLinus Torvalds void tcp_enter_memory_pressure(void)
3041da177e4SLinus Torvalds {
3051da177e4SLinus Torvalds 	if (!tcp_memory_pressure) {
3061da177e4SLinus Torvalds 		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
3071da177e4SLinus Torvalds 		tcp_memory_pressure = 1;
3081da177e4SLinus Torvalds 	}
3091da177e4SLinus Torvalds }
3101da177e4SLinus Torvalds 
3111da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_enter_memory_pressure);
3121da177e4SLinus Torvalds 
/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 *
 *	Returns a POLL* bitmask describing the socket's current readiness.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	/* A listening socket can only become readable: a queued connection. */
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by another threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awaken
		 * in SYN_* states. */
		/* Readable when unconsumed data is queued -- unless the
		 * single unread byte is urgent data delivered out of line.
		 */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		/* Valid urgent data not yet read past -> exceptional condition. */
		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
4041da177e4SLinus Torvalds 
/*
 *	tcp_ioctl - answer socket-level queries:
 *	  SIOCINQ:    bytes of data available to read (stops at the urgent mark),
 *	  SIOCATMARK: whether the read pointer sits at the urgent mark,
 *	  SIOCOUTQ:   bytes sent but not yet acknowledged by the peer.
 *	Returns 0 with the answer stored through the user pointer @arg,
 *	or a negative errno.
 */
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			/* No out-of-line urgent byte in the way: everything
			 * between copied_seq and rcv_nxt is readable.
			 */
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
		} else
			/* Count only up to (not including) the urgent byte. */
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
4501da177e4SLinus Torvalds 
4511da177e4SLinus Torvalds static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
4521da177e4SLinus Torvalds {
4531da177e4SLinus Torvalds 	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
4541da177e4SLinus Torvalds 	tp->pushed_seq = tp->write_seq;
4551da177e4SLinus Torvalds }
4561da177e4SLinus Torvalds 
4571da177e4SLinus Torvalds static inline int forced_push(struct tcp_sock *tp)
4581da177e4SLinus Torvalds {
4591da177e4SLinus Torvalds 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
4601da177e4SLinus Torvalds }
4611da177e4SLinus Torvalds 
4629e412ba7SIlpo Järvinen static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
4631da177e4SLinus Torvalds {
4649e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
465352d4800SArnaldo Carvalho de Melo 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
466352d4800SArnaldo Carvalho de Melo 
4671da177e4SLinus Torvalds 	skb->csum    = 0;
468352d4800SArnaldo Carvalho de Melo 	tcb->seq     = tcb->end_seq = tp->write_seq;
469352d4800SArnaldo Carvalho de Melo 	tcb->flags   = TCPCB_FLAG_ACK;
470352d4800SArnaldo Carvalho de Melo 	tcb->sacked  = 0;
4711da177e4SLinus Torvalds 	skb_header_release(skb);
472fe067e8aSDavid S. Miller 	tcp_add_write_queue_tail(sk, skb);
4731da177e4SLinus Torvalds 	sk_charge_skb(sk, skb);
47489ebd197SDavid S. Miller 	if (tp->nonagle & TCP_NAGLE_PUSH)
4751da177e4SLinus Torvalds 		tp->nonagle &= ~TCP_NAGLE_PUSH;
4761da177e4SLinus Torvalds }
4771da177e4SLinus Torvalds 
4781da177e4SLinus Torvalds static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
4791da177e4SLinus Torvalds 				struct sk_buff *skb)
4801da177e4SLinus Torvalds {
4811da177e4SLinus Torvalds 	if (flags & MSG_OOB) {
4821da177e4SLinus Torvalds 		tp->urg_mode = 1;
4831da177e4SLinus Torvalds 		tp->snd_up = tp->write_seq;
4841da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
4851da177e4SLinus Torvalds 	}
4861da177e4SLinus Torvalds }
4871da177e4SLinus Torvalds 
4889e412ba7SIlpo Järvinen static inline void tcp_push(struct sock *sk, int flags, int mss_now,
4899e412ba7SIlpo Järvinen 			    int nonagle)
4901da177e4SLinus Torvalds {
4919e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
4929e412ba7SIlpo Järvinen 
493fe067e8aSDavid S. Miller 	if (tcp_send_head(sk)) {
494fe067e8aSDavid S. Miller 		struct sk_buff *skb = tcp_write_queue_tail(sk);
4951da177e4SLinus Torvalds 		if (!(flags & MSG_MORE) || forced_push(tp))
4961da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
4971da177e4SLinus Torvalds 		tcp_mark_urg(tp, flags, skb);
4989e412ba7SIlpo Järvinen 		__tcp_push_pending_frames(sk, mss_now,
4991da177e4SLinus Torvalds 					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
5001da177e4SLinus Torvalds 	}
5011da177e4SLinus Torvalds }
5021da177e4SLinus Torvalds 
/* Zero-copy sendpage worker: append references to the caller's pages to
 * the write queue as skb page fragments (coalescing with the tail frag
 * where possible) and push segments as they fill.
 *
 * @pages:   array of pages holding the payload
 * @poffset: byte offset of the payload from the start of pages[0]
 * @psize:   number of payload bytes to send
 * Returns bytes queued, or a negative errno when nothing was queued.
 * Caller must hold the socket lock.
 */
5031da177e4SLinus Torvalds static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
5041da177e4SLinus Torvalds 			 size_t psize, int flags)
5051da177e4SLinus Torvalds {
5061da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
5071da177e4SLinus Torvalds 	int mss_now, size_goal;
5081da177e4SLinus Torvalds 	int err;
5091da177e4SLinus Torvalds 	ssize_t copied;
5101da177e4SLinus Torvalds 	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
5111da177e4SLinus Torvalds 
5121da177e4SLinus Torvalds 	/* Wait for a connection to finish. */
5131da177e4SLinus Torvalds 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
5141da177e4SLinus Torvalds 		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
5151da177e4SLinus Torvalds 			goto out_err;
5161da177e4SLinus Torvalds 
5171da177e4SLinus Torvalds 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
5181da177e4SLinus Torvalds 
5191da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
520c1b4a7e6SDavid S. Miller 	size_goal = tp->xmit_size_goal;
5211da177e4SLinus Torvalds 	copied = 0;
5221da177e4SLinus Torvalds 
5231da177e4SLinus Torvalds 	err = -EPIPE;
5241da177e4SLinus Torvalds 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
5251da177e4SLinus Torvalds 		goto do_error;
5261da177e4SLinus Torvalds 
5271da177e4SLinus Torvalds 	while (psize > 0) {
528fe067e8aSDavid S. Miller 		struct sk_buff *skb = tcp_write_queue_tail(sk);
5291da177e4SLinus Torvalds 		struct page *page = pages[poffset / PAGE_SIZE];
5301da177e4SLinus Torvalds 		int copy, i, can_coalesce;
5311da177e4SLinus Torvalds 		int offset = poffset % PAGE_SIZE;
5321da177e4SLinus Torvalds 		int size = min_t(size_t, psize, PAGE_SIZE - offset);
5331da177e4SLinus Torvalds 
		/* No sendable tail skb, or it already reached size_goal:
		 * start a fresh (header-only, 0 linear bytes) segment.
		 */
534fe067e8aSDavid S. Miller 		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
5351da177e4SLinus Torvalds new_segment:
5361da177e4SLinus Torvalds 			if (!sk_stream_memory_free(sk))
5371da177e4SLinus Torvalds 				goto wait_for_sndbuf;
5381da177e4SLinus Torvalds 
5391da177e4SLinus Torvalds 			skb = sk_stream_alloc_pskb(sk, 0, 0,
5401da177e4SLinus Torvalds 						   sk->sk_allocation);
5411da177e4SLinus Torvalds 			if (!skb)
5421da177e4SLinus Torvalds 				goto wait_for_memory;
5431da177e4SLinus Torvalds 
5449e412ba7SIlpo Järvinen 			skb_entail(sk, skb);
545c1b4a7e6SDavid S. Miller 			copy = size_goal;
5461da177e4SLinus Torvalds 		}
5471da177e4SLinus Torvalds 
5481da177e4SLinus Torvalds 		if (copy > size)
5491da177e4SLinus Torvalds 			copy = size;
5501da177e4SLinus Torvalds 
		/* Extend the tail fragment if it is contiguous with this
		 * page range; otherwise we need a new frag slot (or a new
		 * segment when all MAX_SKB_FRAGS slots are taken).
		 */
5511da177e4SLinus Torvalds 		i = skb_shinfo(skb)->nr_frags;
5521da177e4SLinus Torvalds 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
5531da177e4SLinus Torvalds 		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
5541da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
5551da177e4SLinus Torvalds 			goto new_segment;
5561da177e4SLinus Torvalds 		}
557d80d99d6SHerbert Xu 		if (!sk_stream_wmem_schedule(sk, copy))
5581da177e4SLinus Torvalds 			goto wait_for_memory;
5591da177e4SLinus Torvalds 
5601da177e4SLinus Torvalds 		if (can_coalesce) {
5611da177e4SLinus Torvalds 			skb_shinfo(skb)->frags[i - 1].size += copy;
5621da177e4SLinus Torvalds 		} else {
			/* Take a reference: the skb now shares the page. */
5631da177e4SLinus Torvalds 			get_page(page);
5641da177e4SLinus Torvalds 			skb_fill_page_desc(skb, i, page, offset, copy);
5651da177e4SLinus Torvalds 		}
5661da177e4SLinus Torvalds 
		/* Account the bytes on the skb and the socket, and advance
		 * the send sequence space.
		 */
5671da177e4SLinus Torvalds 		skb->len += copy;
5681da177e4SLinus Torvalds 		skb->data_len += copy;
5691da177e4SLinus Torvalds 		skb->truesize += copy;
5701da177e4SLinus Torvalds 		sk->sk_wmem_queued += copy;
5711da177e4SLinus Torvalds 		sk->sk_forward_alloc -= copy;
57284fa7933SPatrick McHardy 		skb->ip_summed = CHECKSUM_PARTIAL;
5731da177e4SLinus Torvalds 		tp->write_seq += copy;
5741da177e4SLinus Torvalds 		TCP_SKB_CB(skb)->end_seq += copy;
5757967168cSHerbert Xu 		skb_shinfo(skb)->gso_segs = 0;
5761da177e4SLinus Torvalds 
5771da177e4SLinus Torvalds 		if (!copied)
5781da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
5791da177e4SLinus Torvalds 
5801da177e4SLinus Torvalds 		copied += copy;
5811da177e4SLinus Torvalds 		poffset += copy;
5821da177e4SLinus Torvalds 		if (!(psize -= copy))
5831da177e4SLinus Torvalds 			goto out;
5841da177e4SLinus Torvalds 
		/* Only consider pushing once the skb holds a full MSS. */
585c1b4a7e6SDavid S. Miller 		if (skb->len < mss_now || (flags & MSG_OOB))
5861da177e4SLinus Torvalds 			continue;
5871da177e4SLinus Torvalds 
5881da177e4SLinus Torvalds 		if (forced_push(tp)) {
5891da177e4SLinus Torvalds 			tcp_mark_push(tp, skb);
5909e412ba7SIlpo Järvinen 			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
591fe067e8aSDavid S. Miller 		} else if (skb == tcp_send_head(sk))
5921da177e4SLinus Torvalds 			tcp_push_one(sk, mss_now);
5931da177e4SLinus Torvalds 		continue;
5941da177e4SLinus Torvalds 
5951da177e4SLinus Torvalds wait_for_sndbuf:
5961da177e4SLinus Torvalds 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
5971da177e4SLinus Torvalds wait_for_memory:
		/* Push what we queued so far to free wmem, then sleep. */
5981da177e4SLinus Torvalds 		if (copied)
5999e412ba7SIlpo Järvinen 			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
6001da177e4SLinus Torvalds 
6011da177e4SLinus Torvalds 		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
6021da177e4SLinus Torvalds 			goto do_error;
6031da177e4SLinus Torvalds 
		/* MSS/size goal may have changed while we slept. */
6041da177e4SLinus Torvalds 		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
605c1b4a7e6SDavid S. Miller 		size_goal = tp->xmit_size_goal;
6061da177e4SLinus Torvalds 	}
6071da177e4SLinus Torvalds 
6081da177e4SLinus Torvalds out:
6091da177e4SLinus Torvalds 	if (copied)
6109e412ba7SIlpo Järvinen 		tcp_push(sk, flags, mss_now, tp->nonagle);
6111da177e4SLinus Torvalds 	return copied;
6121da177e4SLinus Torvalds 
6131da177e4SLinus Torvalds do_error:
	/* Partial success still reports the bytes already queued. */
6141da177e4SLinus Torvalds 	if (copied)
6151da177e4SLinus Torvalds 		goto out;
6161da177e4SLinus Torvalds out_err:
6171da177e4SLinus Torvalds 	return sk_stream_error(sk, flags, err);
6181da177e4SLinus Torvalds }
6191da177e4SLinus Torvalds 
/* sendpage() entry point for TCP.
 * Zero-copy needs the device to support scatter/gather and checksum
 * offload (the payload stays in page frags with CHECKSUM_PARTIAL);
 * otherwise fall back to the generic copying sock_no_sendpage() path.
 */
6201da177e4SLinus Torvalds ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
6211da177e4SLinus Torvalds 		     size_t size, int flags)
6221da177e4SLinus Torvalds {
6231da177e4SLinus Torvalds 	ssize_t res;
6241da177e4SLinus Torvalds 	struct sock *sk = sock->sk;
6251da177e4SLinus Torvalds 
6261da177e4SLinus Torvalds 	if (!(sk->sk_route_caps & NETIF_F_SG) ||
6278648b305SHerbert Xu 	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
6281da177e4SLinus Torvalds 		return sock_no_sendpage(sock, page, offset, size, flags);
6291da177e4SLinus Torvalds 
6301da177e4SLinus Torvalds 	lock_sock(sk);
6311da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
6321da177e4SLinus Torvalds 	res = do_tcp_sendpages(sk, &page, offset, size, flags);
6331da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
6341da177e4SLinus Torvalds 	release_sock(sk);
6351da177e4SLinus Torvalds 	return res;
6361da177e4SLinus Torvalds }
6371da177e4SLinus Torvalds 
/* Per-socket scratch page used by tcp_sendmsg() to coalesce small user
 * copies into page fragments; TCP_OFF() is the current fill offset
 * within that page.
 */
6381da177e4SLinus Torvalds #define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
6391da177e4SLinus Torvalds #define TCP_OFF(sk)	(sk->sk_sndmsg_off)
6401da177e4SLinus Torvalds 
/* Choose how many linear (non-paged) bytes to reserve when allocating a
 * new skb in tcp_sendmsg().  Default is a full MSS.  With scatter/gather:
 * GSO-capable paths keep all payload in page frags (0 linear bytes);
 * otherwise the linear area is clamped to what fits in the skb head
 * before the payload would have to spill into frags.
 */
6419e412ba7SIlpo Järvinen static inline int select_size(struct sock *sk)
6421da177e4SLinus Torvalds {
6439e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
644c1b4a7e6SDavid S. Miller 	int tmp = tp->mss_cache;
6451da177e4SLinus Torvalds 
646b4e26f5eSDavid S. Miller 	if (sk->sk_route_caps & NETIF_F_SG) {
647bcd76111SHerbert Xu 		if (sk_can_gso(sk))
648c65f7f00SDavid S. Miller 			tmp = 0;
649b4e26f5eSDavid S. Miller 		else {
			/* Max head payload after reserving MAX_TCP_HEADER. */
650b4e26f5eSDavid S. Miller 			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
651b4e26f5eSDavid S. Miller 
652b4e26f5eSDavid S. Miller 			if (tmp >= pgbreak &&
653b4e26f5eSDavid S. Miller 			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
654b4e26f5eSDavid S. Miller 				tmp = pgbreak;
655b4e26f5eSDavid S. Miller 		}
656b4e26f5eSDavid S. Miller 	}
6571da177e4SLinus Torvalds 
6581da177e4SLinus Torvalds 	return tmp;
6591da177e4SLinus Torvalds }
6601da177e4SLinus Torvalds 
/* sendmsg() entry point for TCP.
 * Copies the user iovec into the write queue: first into tailroom of the
 * tail skb, then into page fragments (using the per-socket scratch page
 * TCP_PAGE/TCP_OFF to batch small copies), allocating new segments as
 * each skb reaches xmit_size_goal.  Segments are pushed when they hold a
 * full MSS, and everything is pushed on exit.
 * Returns bytes accepted, or a negative errno when nothing was queued.
 */
6611da177e4SLinus Torvalds int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
6621da177e4SLinus Torvalds 		size_t size)
6631da177e4SLinus Torvalds {
6641da177e4SLinus Torvalds 	struct iovec *iov;
6651da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
6661da177e4SLinus Torvalds 	struct sk_buff *skb;
6671da177e4SLinus Torvalds 	int iovlen, flags;
668c1b4a7e6SDavid S. Miller 	int mss_now, size_goal;
6691da177e4SLinus Torvalds 	int err, copied;
6701da177e4SLinus Torvalds 	long timeo;
6711da177e4SLinus Torvalds 
6721da177e4SLinus Torvalds 	lock_sock(sk);
6731da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
6741da177e4SLinus Torvalds 
6751da177e4SLinus Torvalds 	flags = msg->msg_flags;
6761da177e4SLinus Torvalds 	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
6771da177e4SLinus Torvalds 
6781da177e4SLinus Torvalds 	/* Wait for a connection to finish. */
6791da177e4SLinus Torvalds 	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
6801da177e4SLinus Torvalds 		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
6811da177e4SLinus Torvalds 			goto out_err;
6821da177e4SLinus Torvalds 
6831da177e4SLinus Torvalds 	/* This should be in poll */
6841da177e4SLinus Torvalds 	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
6851da177e4SLinus Torvalds 
6861da177e4SLinus Torvalds 	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
687c1b4a7e6SDavid S. Miller 	size_goal = tp->xmit_size_goal;
6881da177e4SLinus Torvalds 
6891da177e4SLinus Torvalds 	/* Ok commence sending. */
6901da177e4SLinus Torvalds 	iovlen = msg->msg_iovlen;
6911da177e4SLinus Torvalds 	iov = msg->msg_iov;
6921da177e4SLinus Torvalds 	copied = 0;
6931da177e4SLinus Torvalds 
6941da177e4SLinus Torvalds 	err = -EPIPE;
6951da177e4SLinus Torvalds 	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
6961da177e4SLinus Torvalds 		goto do_error;
6971da177e4SLinus Torvalds 
6981da177e4SLinus Torvalds 	while (--iovlen >= 0) {
6991da177e4SLinus Torvalds 		int seglen = iov->iov_len;
7001da177e4SLinus Torvalds 		unsigned char __user *from = iov->iov_base;
7011da177e4SLinus Torvalds 
7021da177e4SLinus Torvalds 		iov++;
7031da177e4SLinus Torvalds 
7041da177e4SLinus Torvalds 		while (seglen > 0) {
7051da177e4SLinus Torvalds 			int copy;
7061da177e4SLinus Torvalds 
707fe067e8aSDavid S. Miller 			skb = tcp_write_queue_tail(sk);
7081da177e4SLinus Torvalds 
			/* No sendable tail skb, or it reached size_goal:
			 * open a new segment.
			 */
709fe067e8aSDavid S. Miller 			if (!tcp_send_head(sk) ||
710c1b4a7e6SDavid S. Miller 			    (copy = size_goal - skb->len) <= 0) {
7111da177e4SLinus Torvalds 
7121da177e4SLinus Torvalds new_segment:
7131da177e4SLinus Torvalds 				/* Allocate new segment. If the interface is SG,
7141da177e4SLinus Torvalds 				 * allocate skb fitting to single page.
7151da177e4SLinus Torvalds 				 */
7161da177e4SLinus Torvalds 				if (!sk_stream_memory_free(sk))
7171da177e4SLinus Torvalds 					goto wait_for_sndbuf;
7181da177e4SLinus Torvalds 
7199e412ba7SIlpo Järvinen 				skb = sk_stream_alloc_pskb(sk, select_size(sk),
7201da177e4SLinus Torvalds 							   0, sk->sk_allocation);
7211da177e4SLinus Torvalds 				if (!skb)
7221da177e4SLinus Torvalds 					goto wait_for_memory;
7231da177e4SLinus Torvalds 
7241da177e4SLinus Torvalds 				/*
7251da177e4SLinus Torvalds 				 * Check whether we can use HW checksum.
7261da177e4SLinus Torvalds 				 */
7278648b305SHerbert Xu 				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
72884fa7933SPatrick McHardy 					skb->ip_summed = CHECKSUM_PARTIAL;
7291da177e4SLinus Torvalds 
7309e412ba7SIlpo Järvinen 				skb_entail(sk, skb);
731c1b4a7e6SDavid S. Miller 				copy = size_goal;
7321da177e4SLinus Torvalds 			}
7331da177e4SLinus Torvalds 
7341da177e4SLinus Torvalds 			/* Try to append data to the end of skb. */
7351da177e4SLinus Torvalds 			if (copy > seglen)
7361da177e4SLinus Torvalds 				copy = seglen;
7371da177e4SLinus Torvalds 
7381da177e4SLinus Torvalds 			/* Where to copy to? */
7391da177e4SLinus Torvalds 			if (skb_tailroom(skb) > 0) {
7401da177e4SLinus Torvalds 				/* We have some space in skb head. Superb! */
7411da177e4SLinus Torvalds 				if (copy > skb_tailroom(skb))
7421da177e4SLinus Torvalds 					copy = skb_tailroom(skb);
7431da177e4SLinus Torvalds 				if ((err = skb_add_data(skb, from, copy)) != 0)
7441da177e4SLinus Torvalds 					goto do_fault;
7451da177e4SLinus Torvalds 			} else {
				/* No head room: copy into page fragments,
				 * preferring the per-socket scratch page.
				 */
7461da177e4SLinus Torvalds 				int merge = 0;
7471da177e4SLinus Torvalds 				int i = skb_shinfo(skb)->nr_frags;
7481da177e4SLinus Torvalds 				struct page *page = TCP_PAGE(sk);
7491da177e4SLinus Torvalds 				int off = TCP_OFF(sk);
7501da177e4SLinus Torvalds 
7511da177e4SLinus Torvalds 				if (skb_can_coalesce(skb, i, page, off) &&
7521da177e4SLinus Torvalds 				    off != PAGE_SIZE) {
7531da177e4SLinus Torvalds 					/* We can extend the last page
7541da177e4SLinus Torvalds 					 * fragment. */
7551da177e4SLinus Torvalds 					merge = 1;
7561da177e4SLinus Torvalds 				} else if (i == MAX_SKB_FRAGS ||
7571da177e4SLinus Torvalds 					   (!i &&
7581da177e4SLinus Torvalds 					   !(sk->sk_route_caps & NETIF_F_SG))) {
7591da177e4SLinus Torvalds 					/* Need to add new fragment and cannot
7601da177e4SLinus Torvalds 					 * do this because interface is non-SG,
7611da177e4SLinus Torvalds 					 * or because all the page slots are
7621da177e4SLinus Torvalds 					 * busy. */
7631da177e4SLinus Torvalds 					tcp_mark_push(tp, skb);
7641da177e4SLinus Torvalds 					goto new_segment;
7651da177e4SLinus Torvalds 				} else if (page) {
7661da177e4SLinus Torvalds 					if (off == PAGE_SIZE) {
						/* Scratch page full: drop our
						 * ref and allocate a new one
						 * below. */
7671da177e4SLinus Torvalds 						put_page(page);
7681da177e4SLinus Torvalds 						TCP_PAGE(sk) = page = NULL;
769fb5f5e6eSHerbert Xu 						off = 0;
7701da177e4SLinus Torvalds 					}
771ef015786SHerbert Xu 				} else
772fb5f5e6eSHerbert Xu 					off = 0;
773ef015786SHerbert Xu 
774ef015786SHerbert Xu 				if (copy > PAGE_SIZE - off)
775ef015786SHerbert Xu 					copy = PAGE_SIZE - off;
776ef015786SHerbert Xu 
777ef015786SHerbert Xu 				if (!sk_stream_wmem_schedule(sk, copy))
778ef015786SHerbert Xu 					goto wait_for_memory;
7791da177e4SLinus Torvalds 
7801da177e4SLinus Torvalds 				if (!page) {
7811da177e4SLinus Torvalds 					/* Allocate new cache page. */
7821da177e4SLinus Torvalds 					if (!(page = sk_stream_alloc_page(sk)))
7831da177e4SLinus Torvalds 						goto wait_for_memory;
7841da177e4SLinus Torvalds 				}
7851da177e4SLinus Torvalds 
7861da177e4SLinus Torvalds 				/* Time to copy data. We are close to
7871da177e4SLinus Torvalds 				 * the end! */
7881da177e4SLinus Torvalds 				err = skb_copy_to_page(sk, from, skb, page,
7891da177e4SLinus Torvalds 						       off, copy);
7901da177e4SLinus Torvalds 				if (err) {
7911da177e4SLinus Torvalds 					/* If this page was new, give it to the
7921da177e4SLinus Torvalds 					 * socket so it does not get leaked.
7931da177e4SLinus Torvalds 					 */
7941da177e4SLinus Torvalds 					if (!TCP_PAGE(sk)) {
7951da177e4SLinus Torvalds 						TCP_PAGE(sk) = page;
7961da177e4SLinus Torvalds 						TCP_OFF(sk) = 0;
7971da177e4SLinus Torvalds 					}
7981da177e4SLinus Torvalds 					goto do_error;
7991da177e4SLinus Torvalds 				}
8001da177e4SLinus Torvalds 
8011da177e4SLinus Torvalds 				/* Update the skb. */
8021da177e4SLinus Torvalds 				if (merge) {
8031da177e4SLinus Torvalds 					skb_shinfo(skb)->frags[i - 1].size +=
8041da177e4SLinus Torvalds 									copy;
8051da177e4SLinus Torvalds 				} else {
8061da177e4SLinus Torvalds 					skb_fill_page_desc(skb, i, page, off, copy);
8071da177e4SLinus Torvalds 					if (TCP_PAGE(sk)) {
8081da177e4SLinus Torvalds 						get_page(page);
8091da177e4SLinus Torvalds 					} else if (off + copy < PAGE_SIZE) {
						/* Page has room left: keep it
						 * cached as the scratch page. */
8101da177e4SLinus Torvalds 						get_page(page);
8111da177e4SLinus Torvalds 						TCP_PAGE(sk) = page;
8121da177e4SLinus Torvalds 					}
8131da177e4SLinus Torvalds 				}
8141da177e4SLinus Torvalds 
8151da177e4SLinus Torvalds 				TCP_OFF(sk) = off + copy;
8161da177e4SLinus Torvalds 			}
8171da177e4SLinus Torvalds 
8181da177e4SLinus Torvalds 			if (!copied)
8191da177e4SLinus Torvalds 				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
8201da177e4SLinus Torvalds 
8211da177e4SLinus Torvalds 			tp->write_seq += copy;
8221da177e4SLinus Torvalds 			TCP_SKB_CB(skb)->end_seq += copy;
8237967168cSHerbert Xu 			skb_shinfo(skb)->gso_segs = 0;
8241da177e4SLinus Torvalds 
8251da177e4SLinus Torvalds 			from += copy;
8261da177e4SLinus Torvalds 			copied += copy;
8271da177e4SLinus Torvalds 			if ((seglen -= copy) == 0 && iovlen == 0)
8281da177e4SLinus Torvalds 				goto out;
8291da177e4SLinus Torvalds 
			/* Only consider pushing once the skb holds a full MSS. */
830c1b4a7e6SDavid S. Miller 			if (skb->len < mss_now || (flags & MSG_OOB))
8311da177e4SLinus Torvalds 				continue;
8321da177e4SLinus Torvalds 
8331da177e4SLinus Torvalds 			if (forced_push(tp)) {
8341da177e4SLinus Torvalds 				tcp_mark_push(tp, skb);
8359e412ba7SIlpo Järvinen 				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
836fe067e8aSDavid S. Miller 			} else if (skb == tcp_send_head(sk))
8371da177e4SLinus Torvalds 				tcp_push_one(sk, mss_now);
8381da177e4SLinus Torvalds 			continue;
8391da177e4SLinus Torvalds 
8401da177e4SLinus Torvalds wait_for_sndbuf:
8411da177e4SLinus Torvalds 			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
8421da177e4SLinus Torvalds wait_for_memory:
			/* Push what we queued so far to free wmem, then sleep. */
8431da177e4SLinus Torvalds 			if (copied)
8449e412ba7SIlpo Järvinen 				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
8451da177e4SLinus Torvalds 
8461da177e4SLinus Torvalds 			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
8471da177e4SLinus Torvalds 				goto do_error;
8481da177e4SLinus Torvalds 
			/* MSS/size goal may have changed while we slept. */
8491da177e4SLinus Torvalds 			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
850c1b4a7e6SDavid S. Miller 			size_goal = tp->xmit_size_goal;
8511da177e4SLinus Torvalds 		}
8521da177e4SLinus Torvalds 	}
8531da177e4SLinus Torvalds 
8541da177e4SLinus Torvalds out:
8551da177e4SLinus Torvalds 	if (copied)
8569e412ba7SIlpo Järvinen 		tcp_push(sk, flags, mss_now, tp->nonagle);
8571da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
8581da177e4SLinus Torvalds 	release_sock(sk);
8591da177e4SLinus Torvalds 	return copied;
8601da177e4SLinus Torvalds 
8611da177e4SLinus Torvalds do_fault:
	/* Copy fault: if the tail skb is still empty, unlink and free it. */
8621da177e4SLinus Torvalds 	if (!skb->len) {
863fe067e8aSDavid S. Miller 		tcp_unlink_write_queue(skb, sk);
864fe067e8aSDavid S. Miller 		/* It is the one place in all of TCP, except connection
865fe067e8aSDavid S. Miller 		 * reset, where we can be unlinking the send_head.
866fe067e8aSDavid S. Miller 		 */
867fe067e8aSDavid S. Miller 		tcp_check_send_head(sk, skb);
8681da177e4SLinus Torvalds 		sk_stream_free_skb(sk, skb);
8691da177e4SLinus Torvalds 	}
8701da177e4SLinus Torvalds 
8711da177e4SLinus Torvalds do_error:
	/* Partial success still reports the bytes already queued. */
8721da177e4SLinus Torvalds 	if (copied)
8731da177e4SLinus Torvalds 		goto out;
8741da177e4SLinus Torvalds out_err:
8751da177e4SLinus Torvalds 	err = sk_stream_error(sk, flags, err);
8761da177e4SLinus Torvalds 	TCP_CHECK_TIMER(sk);
8771da177e4SLinus Torvalds 	release_sock(sk);
8781da177e4SLinus Torvalds 	return err;
8791da177e4SLinus Torvalds }
8801da177e4SLinus Torvalds 
8811da177e4SLinus Torvalds /*
8821da177e4SLinus Torvalds  *	Handle reading urgent data. BSD has very simple semantics for
8831da177e4SLinus Torvalds  *	this, no blocking and very strange errors 8)
8841da177e4SLinus Torvalds  */
8851da177e4SLinus Torvalds 
/* Read the (single) pending urgent byte for recv(..., MSG_OOB).
 * Returns 1 (byte delivered), 0 on EOF/shutdown, -EINVAL when urgent
 * data is inline, absent or already consumed, -ENOTCONN when never
 * connected, or -EAGAIN when the byte has not arrived yet (never blocks,
 * per the BSD semantics described in the comment above).
 */
8861da177e4SLinus Torvalds static int tcp_recv_urg(struct sock *sk, long timeo,
8871da177e4SLinus Torvalds 			struct msghdr *msg, int len, int flags,
8881da177e4SLinus Torvalds 			int *addr_len)
8891da177e4SLinus Torvalds {
8901da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
8911da177e4SLinus Torvalds 
8921da177e4SLinus Torvalds 	/* No URG data to read. */
8931da177e4SLinus Torvalds 	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
8941da177e4SLinus Torvalds 	    tp->urg_data == TCP_URG_READ)
8951da177e4SLinus Torvalds 		return -EINVAL;	/* Yes this is right ! */
8961da177e4SLinus Torvalds 
8971da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
8981da177e4SLinus Torvalds 		return -ENOTCONN;
8991da177e4SLinus Torvalds 
9001da177e4SLinus Torvalds 	if (tp->urg_data & TCP_URG_VALID) {
9011da177e4SLinus Torvalds 		int err = 0;
		/* The urgent byte lives in the low bits of urg_data. */
9021da177e4SLinus Torvalds 		char c = tp->urg_data;
9031da177e4SLinus Torvalds 
9041da177e4SLinus Torvalds 		if (!(flags & MSG_PEEK))
9051da177e4SLinus Torvalds 			tp->urg_data = TCP_URG_READ;
9061da177e4SLinus Torvalds 
9071da177e4SLinus Torvalds 		/* Read urgent data. */
9081da177e4SLinus Torvalds 		msg->msg_flags |= MSG_OOB;
9091da177e4SLinus Torvalds 
9101da177e4SLinus Torvalds 		if (len > 0) {
9111da177e4SLinus Torvalds 			if (!(flags & MSG_TRUNC))
9121da177e4SLinus Torvalds 				err = memcpy_toiovec(msg->msg_iov, &c, 1);
9131da177e4SLinus Torvalds 			len = 1;
9141da177e4SLinus Torvalds 		} else
9151da177e4SLinus Torvalds 			msg->msg_flags |= MSG_TRUNC;
9161da177e4SLinus Torvalds 
9171da177e4SLinus Torvalds 		return err ? -EFAULT : len;
9181da177e4SLinus Torvalds 	}
9191da177e4SLinus Torvalds 
9201da177e4SLinus Torvalds 	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
9211da177e4SLinus Torvalds 		return 0;
9221da177e4SLinus Torvalds 
9231da177e4SLinus Torvalds 	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
9241da177e4SLinus Torvalds 	 * the available implementations agree in this case:
9251da177e4SLinus Torvalds 	 * this call should never block, independent of the
9261da177e4SLinus Torvalds 	 * blocking state of the socket.
9271da177e4SLinus Torvalds 	 * Mike <pall@rz.uni-karlsruhe.de>
9281da177e4SLinus Torvalds 	 */
9291da177e4SLinus Torvalds 	return -EAGAIN;
9301da177e4SLinus Torvalds }
9311da177e4SLinus Torvalds 
9321da177e4SLinus Torvalds /* Clean up the receive buffer for full frames taken by the user,
9331da177e4SLinus Torvalds  * then send an ACK if necessary.  COPIED is the number of bytes
9341da177e4SLinus Torvalds  * tcp_recvmsg has given to the user so far, it speeds up the
9351da177e4SLinus Torvalds  * calculation of whether or not we must ACK for the sake of
9361da177e4SLinus Torvalds  * a window update.
9371da177e4SLinus Torvalds  */
9380e4b4992SChris Leech void tcp_cleanup_rbuf(struct sock *sk, int copied)
9391da177e4SLinus Torvalds {
9401da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
	/* Set when we decide an immediate ACK is warranted. */
9411da177e4SLinus Torvalds 	int time_to_ack = 0;
9421da177e4SLinus Torvalds 
9431da177e4SLinus Torvalds #if TCP_DEBUG
9441da177e4SLinus Torvalds 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
9451da177e4SLinus Torvalds 
9461da177e4SLinus Torvalds 	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
9471da177e4SLinus Torvalds #endif
9481da177e4SLinus Torvalds 
	/* First reason to ACK now: a delayed ACK is already scheduled and
	 * one of the conditions below says it should not wait longer.
	 */
949463c84b9SArnaldo Carvalho de Melo 	if (inet_csk_ack_scheduled(sk)) {
950463c84b9SArnaldo Carvalho de Melo 		const struct inet_connection_sock *icsk = inet_csk(sk);
9511da177e4SLinus Torvalds 		   /* Delayed ACKs frequently hit locked sockets during bulk
9521da177e4SLinus Torvalds 		    * receive. */
953463c84b9SArnaldo Carvalho de Melo 		if (icsk->icsk_ack.blocked ||
9541da177e4SLinus Torvalds 		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
955463c84b9SArnaldo Carvalho de Melo 		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
9561da177e4SLinus Torvalds 		    /*
9571da177e4SLinus Torvalds 		     * If this read emptied read buffer, we send ACK, if
9581da177e4SLinus Torvalds 		     * connection is not bidirectional, user drained
9591da177e4SLinus Torvalds 		     * receive buffer and there was a small segment
9601da177e4SLinus Torvalds 		     * in queue.
9611da177e4SLinus Torvalds 		     */
9621ef9696cSAlexey Kuznetsov 		    (copied > 0 &&
9631ef9696cSAlexey Kuznetsov 		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
9641ef9696cSAlexey Kuznetsov 		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
9651ef9696cSAlexey Kuznetsov 		       !icsk->icsk_ack.pingpong)) &&
9661ef9696cSAlexey Kuznetsov 		      !atomic_read(&sk->sk_rmem_alloc)))
9671da177e4SLinus Torvalds 			time_to_ack = 1;
9681da177e4SLinus Torvalds 	}
9691da177e4SLinus Torvalds 
9701da177e4SLinus Torvalds 	/* We send an ACK if we can now advertise a non-zero window
9711da177e4SLinus Torvalds 	 * which has been raised "significantly".
9721da177e4SLinus Torvalds 	 *
9731da177e4SLinus Torvalds 	 * Even if window raised up to infinity, do not send window open ACK
9741da177e4SLinus Torvalds 	 * in states, where we will not receive more. It is useless.
9751da177e4SLinus Torvalds 	 */
9761da177e4SLinus Torvalds 	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
9771da177e4SLinus Torvalds 		__u32 rcv_window_now = tcp_receive_window(tp);
9781da177e4SLinus Torvalds 
9791da177e4SLinus Torvalds 		/* Optimize, __tcp_select_window() is not cheap. */
9801da177e4SLinus Torvalds 		if (2*rcv_window_now <= tp->window_clamp) {
9811da177e4SLinus Torvalds 			__u32 new_window = __tcp_select_window(sk);
9821da177e4SLinus Torvalds 
9831da177e4SLinus Torvalds 			/* Send ACK now, if this read freed lots of space
9841da177e4SLinus Torvalds 			 * in our buffer. Certainly, new_window is new window.
9851da177e4SLinus Torvalds 			 * We can advertise it now, if it is not less than current one.
9861da177e4SLinus Torvalds 			 * "Lots" means "at least twice" here.
9871da177e4SLinus Torvalds 			 */
9881da177e4SLinus Torvalds 			if (new_window && new_window >= 2 * rcv_window_now)
9891da177e4SLinus Torvalds 				time_to_ack = 1;
9901da177e4SLinus Torvalds 		}
9911da177e4SLinus Torvalds 	}
9921da177e4SLinus Torvalds 	if (time_to_ack)
9931da177e4SLinus Torvalds 		tcp_send_ack(sk);
9941da177e4SLinus Torvalds }
9951da177e4SLinus Torvalds 
/* Drain the ucopy prequeue: feed each deferred skb through the socket's
 * backlog receive handler, then reset the prequeue memory accounting.
 * Called with the socket locked.
 */
9961da177e4SLinus Torvalds static void tcp_prequeue_process(struct sock *sk)
9971da177e4SLinus Torvalds {
9981da177e4SLinus Torvalds 	struct sk_buff *skb;
9991da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
10001da177e4SLinus Torvalds 
1001b03efcfbSDavid S. Miller 	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
10021da177e4SLinus Torvalds 
10031da177e4SLinus Torvalds 	/* RX process wants to run with disabled BHs, though it is not
10041da177e4SLinus Torvalds 	 * necessary */
10051da177e4SLinus Torvalds 	local_bh_disable();
10061da177e4SLinus Torvalds 	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
10071da177e4SLinus Torvalds 		sk->sk_backlog_rcv(sk, skb);
10081da177e4SLinus Torvalds 	local_bh_enable();
10091da177e4SLinus Torvalds 
10101da177e4SLinus Torvalds 	/* Clear memory counter. */
10111da177e4SLinus Torvalds 	tp->ucopy.memory = 0;
10121da177e4SLinus Torvalds }
10131da177e4SLinus Torvalds 
/* Find the receive-queue skb that contains stream sequence @seq.
 * On success *off is set to the byte offset of @seq within that skb's
 * payload and the skb is returned; NULL if no queued skb covers @seq.
 * A SYN occupies one unit of sequence space before the data, hence the
 * offset adjustment; a FIN-bearing skb matches even at offset == len.
 */
10141da177e4SLinus Torvalds static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
10151da177e4SLinus Torvalds {
10161da177e4SLinus Torvalds 	struct sk_buff *skb;
10171da177e4SLinus Torvalds 	u32 offset;
10181da177e4SLinus Torvalds 
10191da177e4SLinus Torvalds 	skb_queue_walk(&sk->sk_receive_queue, skb) {
10201da177e4SLinus Torvalds 		offset = seq - TCP_SKB_CB(skb)->seq;
1021aa8223c7SArnaldo Carvalho de Melo 		if (tcp_hdr(skb)->syn)
10221da177e4SLinus Torvalds 			offset--;
1023aa8223c7SArnaldo Carvalho de Melo 		if (offset < skb->len || tcp_hdr(skb)->fin) {
10241da177e4SLinus Torvalds 			*off = offset;
10251da177e4SLinus Torvalds 			return skb;
10261da177e4SLinus Torvalds 		}
10271da177e4SLinus Torvalds 	}
10281da177e4SLinus Torvalds 	return NULL;
10291da177e4SLinus Torvalds }
10301da177e4SLinus Torvalds 
10311da177e4SLinus Torvalds /*
10321da177e4SLinus Torvalds  * This routine provides an alternative to tcp_recvmsg() for routines
10331da177e4SLinus Torvalds  * that would like to handle copying from skbuffs directly in 'sendfile'
10341da177e4SLinus Torvalds  * fashion.
10351da177e4SLinus Torvalds  * Note:
10361da177e4SLinus Torvalds  *	- It is assumed that the socket was locked by the caller.
10371da177e4SLinus Torvalds  *	- The routine does not block.
10381da177e4SLinus Torvalds  *	- At present, there is no support for reading OOB data
10391da177e4SLinus Torvalds  *	  or for 'peeking' the socket using this routine
10401da177e4SLinus Torvalds  *	  (although both would be easy to implement).
10411da177e4SLinus Torvalds  */
10421da177e4SLinus Torvalds int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
10431da177e4SLinus Torvalds 		  sk_read_actor_t recv_actor)
10441da177e4SLinus Torvalds {
10451da177e4SLinus Torvalds 	struct sk_buff *skb;
10461da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
10471da177e4SLinus Torvalds 	u32 seq = tp->copied_seq;
10481da177e4SLinus Torvalds 	u32 offset;
10491da177e4SLinus Torvalds 	int copied = 0;
10501da177e4SLinus Torvalds 
10511da177e4SLinus Torvalds 	if (sk->sk_state == TCP_LISTEN)
10521da177e4SLinus Torvalds 		return -ENOTCONN;
	/* Hand each in-order skb's payload to recv_actor until it stops
	 * consuming, we hit urgent data, or the queue runs dry.
	 */
10531da177e4SLinus Torvalds 	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
10541da177e4SLinus Torvalds 		if (offset < skb->len) {
10551da177e4SLinus Torvalds 			size_t used, len;
10561da177e4SLinus Torvalds 
10571da177e4SLinus Torvalds 			len = skb->len - offset;
10581da177e4SLinus Torvalds 			/* Stop reading if we hit a patch of urgent data */
10591da177e4SLinus Torvalds 			if (tp->urg_data) {
10601da177e4SLinus Torvalds 				u32 urg_offset = tp->urg_seq - seq;
10611da177e4SLinus Torvalds 				if (urg_offset < len)
10621da177e4SLinus Torvalds 					len = urg_offset;
10631da177e4SLinus Torvalds 				if (!len)
10641da177e4SLinus Torvalds 					break;
10651da177e4SLinus Torvalds 			}
10661da177e4SLinus Torvalds 			used = recv_actor(desc, skb, offset, len);
			/* Negative "used" is an error from the actor: report
			 * it only if nothing was consumed yet.
			 */
1067ddb61a57SJens Axboe 			if (used < 0) {
1068ddb61a57SJens Axboe 				if (!copied)
1069ddb61a57SJens Axboe 					copied = used;
1070ddb61a57SJens Axboe 				break;
1071ddb61a57SJens Axboe 			} else if (used <= len) {
10721da177e4SLinus Torvalds 				seq += used;
10731da177e4SLinus Torvalds 				copied += used;
10741da177e4SLinus Torvalds 				offset += used;
10751da177e4SLinus Torvalds 			}
			/* Actor consumed less than the whole skb: stop. */
10761da177e4SLinus Torvalds 			if (offset != skb->len)
10771da177e4SLinus Torvalds 				break;
10781da177e4SLinus Torvalds 		}
		/* FIN consumes one sequence unit and ends the stream. */
1079aa8223c7SArnaldo Carvalho de Melo 		if (tcp_hdr(skb)->fin) {
1080624d1164SChris Leech 			sk_eat_skb(sk, skb, 0);
10811da177e4SLinus Torvalds 			++seq;
10821da177e4SLinus Torvalds 			break;
10831da177e4SLinus Torvalds 		}
1084624d1164SChris Leech 		sk_eat_skb(sk, skb, 0);
10851da177e4SLinus Torvalds 		if (!desc->count)
10861da177e4SLinus Torvalds 			break;
10871da177e4SLinus Torvalds 	}
10881da177e4SLinus Torvalds 	tp->copied_seq = seq;
10891da177e4SLinus Torvalds 
10901da177e4SLinus Torvalds 	tcp_rcv_space_adjust(sk);
10911da177e4SLinus Torvalds 
10921da177e4SLinus Torvalds 	/* Clean up data we have read: This will do ACK frames. */
1093ddb61a57SJens Axboe 	if (copied > 0)
10940e4b4992SChris Leech 		tcp_cleanup_rbuf(sk, copied);
10951da177e4SLinus Torvalds 	return copied;
10961da177e4SLinus Torvalds }
10971da177e4SLinus Torvalds 
10981da177e4SLinus Torvalds /*
10991da177e4SLinus Torvalds  *	This routine copies from a sock struct into the user buffer.
11001da177e4SLinus Torvalds  *
11011da177e4SLinus Torvalds  *	Technical note: in 2.3 we work on _locked_ socket, so that
11021da177e4SLinus Torvalds  *	tricks with *seq access order and skb->users are not required.
11031da177e4SLinus Torvalds  *	Probably, code can be easily improved even more.
11041da177e4SLinus Torvalds  */
11051da177e4SLinus Torvalds 
/*
 *	Copy received data from the socket into the user's iovec.
 *
 *	Returns the number of bytes copied, or a negative errno.  Runs
 *	under the socket lock; drops and re-takes it (directly or via
 *	sk_wait_data()) so that backlog/prequeue processing can run.
 */
int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;		/* next sequence number to copy; normally
				 * &tp->copied_seq, or &peek_seq for MSG_PEEK */
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;	/* last skb handed whole to the DMA engine */
	int available = 0;	/* bytes already queued past *seq */
	struct sk_buff *skb;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	/* For MSG_PEEK advance a local copy of copied_seq, so the data
	 * stays on the receive queue for a later real read.
	 */
	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	/* Pin the user pages for an async DMA copy, but only when the data
	 * already queued cannot satisfy the request (available < target),
	 * the request is big enough to amortize the setup cost, and we are
	 * not peeking.  preempt_disable() spans the per-CPU softnet_data
	 * access.
	 */
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
	skb = skb_peek_tail(&sk->sk_receive_queue);
	if (skb)
		available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
	if ((available < target) &&
	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
		preempt_enable_no_resched();
		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
	} else
		preempt_enable_no_resched();
#endif

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			/* A SYN occupies a sequence number but no data. */
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			/* Only a peeking reader may leave a fully consumed
			 * skb on the queue.
			 */
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			/* Partial read: return what we have on any stop
			 * condition rather than sleeping again.
			 */
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			/* Nothing copied yet: map socket conditions to
			 * return codes.
			 */
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		/* ACK data consumed so far before possibly sleeping. */
		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			/* Account data copied straight to the user iovec by
			 * backlog processing while we slept.
			 */
			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			/* Another reader consumed data under our peek;
			 * warn (application-level race) and resynchronize.
			 */
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						/* Skip the urgent byte when
						 * it is not delivered inline.
						 */
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					/* Copy only up to the urgent byte. */
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = get_softnet_dma();

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
				/* Whole payload went to the DMA engine; the
				 * skb must be kept until the copy completes.
				 */
				if ((offset + used) == skb->len)
					copied_early = 1;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
						msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			/* Past the urgent point: re-enable the fast path. */
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		/* Drain any remaining prequeue before unregistering
		 * ourselves as the direct-copy target.
		 */
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	/* Wait for outstanding DMA copies to finish, freeing early-copied
	 * skbs as their cookies complete, then release the channel and the
	 * pinned user pages.
	 */
	if (tp->ucopy.dma_chan) {
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
						 tp->ucopy.dma_cookie, &done,
						 &used) == DMA_IN_PROGRESS) {
			/* do partial cleanup of sk_async_wait_queue */
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}

		/* Safe to free early-copied skbs now */
		__skb_queue_purge(&sk->sk_async_wait_queue);
		dma_chan_put(tp->ucopy.dma_chan);
		tp->ucopy.dma_chan = NULL;
	}
	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}
14941da177e4SLinus Torvalds 
14951da177e4SLinus Torvalds /*
14961da177e4SLinus Torvalds  *	State processing on a close. This implements the state shift for
14971da177e4SLinus Torvalds  *	sending our FIN frame. Note that we only send a FIN for some
14981da177e4SLinus Torvalds  *	states. A shutdown() may have already sent the FIN, or we may be
14991da177e4SLinus Torvalds  *	closed.
15001da177e4SLinus Torvalds  */
15011da177e4SLinus Torvalds 
/* Indexed by the current sk_state.  The low bits (TCP_STATE_MASK) give
 * the state to move to when the socket is closed/shut down, and
 * TCP_ACTION_FIN is OR'ed in when that transition requires a FIN to be
 * transmitted (see tcp_close_state()).
 */
static const unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};
15171da177e4SLinus Torvalds 
15181da177e4SLinus Torvalds static int tcp_close_state(struct sock *sk)
15191da177e4SLinus Torvalds {
15201da177e4SLinus Torvalds 	int next = (int)new_state[sk->sk_state];
15211da177e4SLinus Torvalds 	int ns = next & TCP_STATE_MASK;
15221da177e4SLinus Torvalds 
15231da177e4SLinus Torvalds 	tcp_set_state(sk, ns);
15241da177e4SLinus Torvalds 
15251da177e4SLinus Torvalds 	return next & TCP_ACTION_FIN;
15261da177e4SLinus Torvalds }
15271da177e4SLinus Torvalds 
15281da177e4SLinus Torvalds /*
15291da177e4SLinus Torvalds  *	Shutdown the sending side of a connection. Much like close except
15301da177e4SLinus Torvalds  *	that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
15311da177e4SLinus Torvalds  */
15321da177e4SLinus Torvalds 
15331da177e4SLinus Torvalds void tcp_shutdown(struct sock *sk, int how)
15341da177e4SLinus Torvalds {
15351da177e4SLinus Torvalds 	/*	We need to grab some memory, and put together a FIN,
15361da177e4SLinus Torvalds 	 *	and then put it into the queue to be sent.
15371da177e4SLinus Torvalds 	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
15381da177e4SLinus Torvalds 	 */
15391da177e4SLinus Torvalds 	if (!(how & SEND_SHUTDOWN))
15401da177e4SLinus Torvalds 		return;
15411da177e4SLinus Torvalds 
15421da177e4SLinus Torvalds 	/* If we've already sent a FIN, or it's a closed state, skip this. */
15431da177e4SLinus Torvalds 	if ((1 << sk->sk_state) &
15441da177e4SLinus Torvalds 	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
15451da177e4SLinus Torvalds 	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
15461da177e4SLinus Torvalds 		/* Clear out any half completed packets.  FIN if needed. */
15471da177e4SLinus Torvalds 		if (tcp_close_state(sk))
15481da177e4SLinus Torvalds 			tcp_send_fin(sk);
15491da177e4SLinus Torvalds 	}
15501da177e4SLinus Torvalds }
15511da177e4SLinus Torvalds 
/*
 *	Close a TCP socket: flush unread receive data, send a FIN or RST
 *	as the state machine dictates, then orphan the socket and let the
 *	protocol finish the close asynchronously if necessary.
 *	'timeout' is passed to sk_stream_wait_close() — presumably the
 *	SO_LINGER interval; confirm against the callers.
 */
void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	lock_sock(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*  We need to flush the recv. buffs.  We do this only on the
	 *  descriptor close, not protocol-sourced closes, because the
	 *  reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		/* A FIN occupies a sequence number but carries no data;
		 * do not count it as unread.
		 */
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  tcp_hdr(skb)->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	sk_stream_mem_reclaim(sk);

	/* As outlined in RFC 2525, section 2.17, we send a RST here because
	 * data was lost. To witness the awful effects of the old behavior of
	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
	 * GET in an FTP client, suspend the process, wait for the client to
	 * advertise a zero window, then kill -9 the FTP client, wheee...
	 * Note: timeout is always zero in such a case.
	 */
	if (data_was_unread) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, GFP_KERNEL);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
		NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. "RFC state" is ESTABLISHED,
		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
		 *
		 * The visible declinations are that sometimes
		 * we enter time-wait state, when it is not required really
		 * (harmless), do not send active resets, when they are
		 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
		 * they look as CLOSING or LAST_ACK for Linux)
		 * Probably, I missed some more holelets.
		 * 						--ANK
		 */
		tcp_send_fin(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	/* Snapshot sk_state before orphaning: after release_sock() below,
	 * softirq/backlog processing may move the socket to TCP_CLOSE (and
	 * destroy it) behind our back; checked again under the BH lock.
	 */
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/* It is the last release_sock in its life. It will remove backlog. */
	release_sock(sk);


	/* Now socket is owned by kernel and we acquire BH lock
	   to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		goto out;

	/*	This is a (useful) BSD violating of the RFC. There is a
	 *	problem with TCP as specified in that the other end could
	 *	keep a socket open forever with no application left this end.
	 *	We use a 3 minute timeout (about the same as BSD) then kill
	 *	our end. If they send after that then tough - BUT: long enough
	 *	that we won't make the old 4*rto = almost no time - whoops
	 *	reset mistake.
	 *
	 *	Nope, it was not mistake. It is really desired behaviour
	 *	f.e. on http servers, when such sockets are useless, but
	 *	consume significant resources. Let's do it with special
	 *	linger2	option.					--ANK
	 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			/* Negative linger2: abort instead of waiting. */
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				/* Arm the keepalive timer for the part of
				 * the FIN_WAIT2 timeout that exceeds
				 * TCP_TIMEWAIT_LEN; otherwise go straight
				 * to the timewait substate below.
				 */
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_stream_mem_reclaim(sk);
		/* Abort the connection if the orphan limit is exceeded. */
		if (tcp_too_many_orphans(sk,
				atomic_read(sk->sk_prot->orphan_count))) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many of orphaned "
				       "sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
		}
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
17051da177e4SLinus Torvalds 
17061da177e4SLinus Torvalds /* These states need RST on ABORT according to RFC793 */
17071da177e4SLinus Torvalds 
17081da177e4SLinus Torvalds static inline int tcp_need_reset(int state)
17091da177e4SLinus Torvalds {
17101da177e4SLinus Torvalds 	return (1 << state) &
17111da177e4SLinus Torvalds 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
17121da177e4SLinus Torvalds 		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
17131da177e4SLinus Torvalds }
17141da177e4SLinus Torvalds 
/*
 *	Abort the connection and reset the socket to a clean, unconnected
 *	state so it can be reused.  Caller holds the socket lock.
 *	'flags' is currently unused.  Returns 0 (err is never set here).
 */
int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for discrepancy of Linux wrt. RFC
		 * states
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	/* Flush timers and every queue: receive, write, out-of-order (and
	 * the async DMA completion queue when NET_DMA is configured).
	 */
	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	tcp_write_queue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	inet->dport = 0;

	/* Keep an explicitly bound local address; otherwise forget it. */
	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	/* Reset per-connection state back to fresh-socket defaults. */
	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	/* Advance write_seq past the old window; 0 is explicitly avoided. */
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tp->bytes_acked = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	tcp_init_send_head(sk);
	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
	__sk_dst_reset(sk);

	/* A socket with a bound local port must still own its bind bucket. */
	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
17771da177e4SLinus Torvalds 
17781da177e4SLinus Torvalds /*
17791da177e4SLinus Torvalds  *	Socket option code for TCP.
17801da177e4SLinus Torvalds  */
17813fdadf7dSDmitry Mishin static int do_tcp_setsockopt(struct sock *sk, int level,
17823fdadf7dSDmitry Mishin 		int optname, char __user *optval, int optlen)
17831da177e4SLinus Torvalds {
17841da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1785463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
17861da177e4SLinus Torvalds 	int val;
17871da177e4SLinus Torvalds 	int err = 0;
17881da177e4SLinus Torvalds 
17895f8ef48dSStephen Hemminger 	/* This is a string value all the others are int's */
17905f8ef48dSStephen Hemminger 	if (optname == TCP_CONGESTION) {
17915f8ef48dSStephen Hemminger 		char name[TCP_CA_NAME_MAX];
17925f8ef48dSStephen Hemminger 
17935f8ef48dSStephen Hemminger 		if (optlen < 1)
17945f8ef48dSStephen Hemminger 			return -EINVAL;
17955f8ef48dSStephen Hemminger 
17965f8ef48dSStephen Hemminger 		val = strncpy_from_user(name, optval,
17975f8ef48dSStephen Hemminger 					min(TCP_CA_NAME_MAX-1, optlen));
17985f8ef48dSStephen Hemminger 		if (val < 0)
17995f8ef48dSStephen Hemminger 			return -EFAULT;
18005f8ef48dSStephen Hemminger 		name[val] = 0;
18015f8ef48dSStephen Hemminger 
18025f8ef48dSStephen Hemminger 		lock_sock(sk);
18036687e988SArnaldo Carvalho de Melo 		err = tcp_set_congestion_control(sk, name);
18045f8ef48dSStephen Hemminger 		release_sock(sk);
18055f8ef48dSStephen Hemminger 		return err;
18065f8ef48dSStephen Hemminger 	}
18075f8ef48dSStephen Hemminger 
18081da177e4SLinus Torvalds 	if (optlen < sizeof(int))
18091da177e4SLinus Torvalds 		return -EINVAL;
18101da177e4SLinus Torvalds 
18111da177e4SLinus Torvalds 	if (get_user(val, (int __user *)optval))
18121da177e4SLinus Torvalds 		return -EFAULT;
18131da177e4SLinus Torvalds 
18141da177e4SLinus Torvalds 	lock_sock(sk);
18151da177e4SLinus Torvalds 
18161da177e4SLinus Torvalds 	switch (optname) {
18171da177e4SLinus Torvalds 	case TCP_MAXSEG:
18181da177e4SLinus Torvalds 		/* Values greater than interface MTU won't take effect. However
18191da177e4SLinus Torvalds 		 * at the point when this call is done we typically don't yet
18201da177e4SLinus Torvalds 		 * know which interface is going to be used */
18211da177e4SLinus Torvalds 		if (val < 8 || val > MAX_TCP_WINDOW) {
18221da177e4SLinus Torvalds 			err = -EINVAL;
18231da177e4SLinus Torvalds 			break;
18241da177e4SLinus Torvalds 		}
18251da177e4SLinus Torvalds 		tp->rx_opt.user_mss = val;
18261da177e4SLinus Torvalds 		break;
18271da177e4SLinus Torvalds 
18281da177e4SLinus Torvalds 	case TCP_NODELAY:
18291da177e4SLinus Torvalds 		if (val) {
18301da177e4SLinus Torvalds 			/* TCP_NODELAY is weaker than TCP_CORK, so that
18311da177e4SLinus Torvalds 			 * this option on corked socket is remembered, but
18321da177e4SLinus Torvalds 			 * it is not activated until cork is cleared.
18331da177e4SLinus Torvalds 			 *
18341da177e4SLinus Torvalds 			 * However, when TCP_NODELAY is set we make
18351da177e4SLinus Torvalds 			 * an explicit push, which overrides even TCP_CORK
18361da177e4SLinus Torvalds 			 * for currently queued segments.
18371da177e4SLinus Torvalds 			 */
18381da177e4SLinus Torvalds 			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
18399e412ba7SIlpo Järvinen 			tcp_push_pending_frames(sk);
18401da177e4SLinus Torvalds 		} else {
18411da177e4SLinus Torvalds 			tp->nonagle &= ~TCP_NAGLE_OFF;
18421da177e4SLinus Torvalds 		}
18431da177e4SLinus Torvalds 		break;
18441da177e4SLinus Torvalds 
18451da177e4SLinus Torvalds 	case TCP_CORK:
18461da177e4SLinus Torvalds 		/* When set indicates to always queue non-full frames.
18471da177e4SLinus Torvalds 		 * Later the user clears this option and we transmit
18481da177e4SLinus Torvalds 		 * any pending partial frames in the queue.  This is
18491da177e4SLinus Torvalds 		 * meant to be used alongside sendfile() to get properly
18501da177e4SLinus Torvalds 		 * filled frames when the user (for example) must write
18511da177e4SLinus Torvalds 		 * out headers with a write() call first and then use
18521da177e4SLinus Torvalds 		 * sendfile to send out the data parts.
18531da177e4SLinus Torvalds 		 *
18541da177e4SLinus Torvalds 		 * TCP_CORK can be set together with TCP_NODELAY and it is
18551da177e4SLinus Torvalds 		 * stronger than TCP_NODELAY.
18561da177e4SLinus Torvalds 		 */
18571da177e4SLinus Torvalds 		if (val) {
18581da177e4SLinus Torvalds 			tp->nonagle |= TCP_NAGLE_CORK;
18591da177e4SLinus Torvalds 		} else {
18601da177e4SLinus Torvalds 			tp->nonagle &= ~TCP_NAGLE_CORK;
18611da177e4SLinus Torvalds 			if (tp->nonagle&TCP_NAGLE_OFF)
18621da177e4SLinus Torvalds 				tp->nonagle |= TCP_NAGLE_PUSH;
18639e412ba7SIlpo Järvinen 			tcp_push_pending_frames(sk);
18641da177e4SLinus Torvalds 		}
18651da177e4SLinus Torvalds 		break;
18661da177e4SLinus Torvalds 
18671da177e4SLinus Torvalds 	case TCP_KEEPIDLE:
18681da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_KEEPIDLE)
18691da177e4SLinus Torvalds 			err = -EINVAL;
18701da177e4SLinus Torvalds 		else {
18711da177e4SLinus Torvalds 			tp->keepalive_time = val * HZ;
18721da177e4SLinus Torvalds 			if (sock_flag(sk, SOCK_KEEPOPEN) &&
18731da177e4SLinus Torvalds 			    !((1 << sk->sk_state) &
18741da177e4SLinus Torvalds 			      (TCPF_CLOSE | TCPF_LISTEN))) {
18751da177e4SLinus Torvalds 				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
18761da177e4SLinus Torvalds 				if (tp->keepalive_time > elapsed)
18771da177e4SLinus Torvalds 					elapsed = tp->keepalive_time - elapsed;
18781da177e4SLinus Torvalds 				else
18791da177e4SLinus Torvalds 					elapsed = 0;
1880463c84b9SArnaldo Carvalho de Melo 				inet_csk_reset_keepalive_timer(sk, elapsed);
18811da177e4SLinus Torvalds 			}
18821da177e4SLinus Torvalds 		}
18831da177e4SLinus Torvalds 		break;
18841da177e4SLinus Torvalds 	case TCP_KEEPINTVL:
18851da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_KEEPINTVL)
18861da177e4SLinus Torvalds 			err = -EINVAL;
18871da177e4SLinus Torvalds 		else
18881da177e4SLinus Torvalds 			tp->keepalive_intvl = val * HZ;
18891da177e4SLinus Torvalds 		break;
18901da177e4SLinus Torvalds 	case TCP_KEEPCNT:
18911da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_KEEPCNT)
18921da177e4SLinus Torvalds 			err = -EINVAL;
18931da177e4SLinus Torvalds 		else
18941da177e4SLinus Torvalds 			tp->keepalive_probes = val;
18951da177e4SLinus Torvalds 		break;
18961da177e4SLinus Torvalds 	case TCP_SYNCNT:
18971da177e4SLinus Torvalds 		if (val < 1 || val > MAX_TCP_SYNCNT)
18981da177e4SLinus Torvalds 			err = -EINVAL;
18991da177e4SLinus Torvalds 		else
1900463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_syn_retries = val;
19011da177e4SLinus Torvalds 		break;
19021da177e4SLinus Torvalds 
19031da177e4SLinus Torvalds 	case TCP_LINGER2:
19041da177e4SLinus Torvalds 		if (val < 0)
19051da177e4SLinus Torvalds 			tp->linger2 = -1;
19061da177e4SLinus Torvalds 		else if (val > sysctl_tcp_fin_timeout / HZ)
19071da177e4SLinus Torvalds 			tp->linger2 = 0;
19081da177e4SLinus Torvalds 		else
19091da177e4SLinus Torvalds 			tp->linger2 = val * HZ;
19101da177e4SLinus Torvalds 		break;
19111da177e4SLinus Torvalds 
19121da177e4SLinus Torvalds 	case TCP_DEFER_ACCEPT:
1913295f7324SArnaldo Carvalho de Melo 		icsk->icsk_accept_queue.rskq_defer_accept = 0;
19141da177e4SLinus Torvalds 		if (val > 0) {
19151da177e4SLinus Torvalds 			/* Translate value in seconds to number of
19161da177e4SLinus Torvalds 			 * retransmits */
1917295f7324SArnaldo Carvalho de Melo 			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
19181da177e4SLinus Torvalds 			       val > ((TCP_TIMEOUT_INIT / HZ) <<
1919295f7324SArnaldo Carvalho de Melo 				       icsk->icsk_accept_queue.rskq_defer_accept))
1920295f7324SArnaldo Carvalho de Melo 				icsk->icsk_accept_queue.rskq_defer_accept++;
1921295f7324SArnaldo Carvalho de Melo 			icsk->icsk_accept_queue.rskq_defer_accept++;
19221da177e4SLinus Torvalds 		}
19231da177e4SLinus Torvalds 		break;
19241da177e4SLinus Torvalds 
19251da177e4SLinus Torvalds 	case TCP_WINDOW_CLAMP:
19261da177e4SLinus Torvalds 		if (!val) {
19271da177e4SLinus Torvalds 			if (sk->sk_state != TCP_CLOSE) {
19281da177e4SLinus Torvalds 				err = -EINVAL;
19291da177e4SLinus Torvalds 				break;
19301da177e4SLinus Torvalds 			}
19311da177e4SLinus Torvalds 			tp->window_clamp = 0;
19321da177e4SLinus Torvalds 		} else
19331da177e4SLinus Torvalds 			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
19341da177e4SLinus Torvalds 						SOCK_MIN_RCVBUF / 2 : val;
19351da177e4SLinus Torvalds 		break;
19361da177e4SLinus Torvalds 
19371da177e4SLinus Torvalds 	case TCP_QUICKACK:
19381da177e4SLinus Torvalds 		if (!val) {
1939463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.pingpong = 1;
19401da177e4SLinus Torvalds 		} else {
1941463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.pingpong = 0;
19421da177e4SLinus Torvalds 			if ((1 << sk->sk_state) &
19431da177e4SLinus Torvalds 			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
1944463c84b9SArnaldo Carvalho de Melo 			    inet_csk_ack_scheduled(sk)) {
1945463c84b9SArnaldo Carvalho de Melo 				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
19460e4b4992SChris Leech 				tcp_cleanup_rbuf(sk, 1);
19471da177e4SLinus Torvalds 				if (!(val & 1))
1948463c84b9SArnaldo Carvalho de Melo 					icsk->icsk_ack.pingpong = 1;
19491da177e4SLinus Torvalds 			}
19501da177e4SLinus Torvalds 		}
19511da177e4SLinus Torvalds 		break;
19521da177e4SLinus Torvalds 
1953cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1954cfb6eeb4SYOSHIFUJI Hideaki 	case TCP_MD5SIG:
1955cfb6eeb4SYOSHIFUJI Hideaki 		/* Read the IP->Key mappings from userspace */
1956cfb6eeb4SYOSHIFUJI Hideaki 		err = tp->af_specific->md5_parse(sk, optval, optlen);
1957cfb6eeb4SYOSHIFUJI Hideaki 		break;
1958cfb6eeb4SYOSHIFUJI Hideaki #endif
1959cfb6eeb4SYOSHIFUJI Hideaki 
19601da177e4SLinus Torvalds 	default:
19611da177e4SLinus Torvalds 		err = -ENOPROTOOPT;
19621da177e4SLinus Torvalds 		break;
19633ff50b79SStephen Hemminger 	}
19643ff50b79SStephen Hemminger 
19651da177e4SLinus Torvalds 	release_sock(sk);
19661da177e4SLinus Torvalds 	return err;
19671da177e4SLinus Torvalds }
19681da177e4SLinus Torvalds 
19693fdadf7dSDmitry Mishin int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
19703fdadf7dSDmitry Mishin 		   int optlen)
19713fdadf7dSDmitry Mishin {
19723fdadf7dSDmitry Mishin 	struct inet_connection_sock *icsk = inet_csk(sk);
19733fdadf7dSDmitry Mishin 
19743fdadf7dSDmitry Mishin 	if (level != SOL_TCP)
19753fdadf7dSDmitry Mishin 		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
19763fdadf7dSDmitry Mishin 						     optval, optlen);
19773fdadf7dSDmitry Mishin 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
19783fdadf7dSDmitry Mishin }
19793fdadf7dSDmitry Mishin 
19803fdadf7dSDmitry Mishin #ifdef CONFIG_COMPAT
1981543d9cfeSArnaldo Carvalho de Melo int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
1982543d9cfeSArnaldo Carvalho de Melo 			  char __user *optval, int optlen)
19833fdadf7dSDmitry Mishin {
1984dec73ff0SArnaldo Carvalho de Melo 	if (level != SOL_TCP)
1985dec73ff0SArnaldo Carvalho de Melo 		return inet_csk_compat_setsockopt(sk, level, optname,
1986dec73ff0SArnaldo Carvalho de Melo 						  optval, optlen);
19873fdadf7dSDmitry Mishin 	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
19883fdadf7dSDmitry Mishin }
1989543d9cfeSArnaldo Carvalho de Melo 
1990543d9cfeSArnaldo Carvalho de Melo EXPORT_SYMBOL(compat_tcp_setsockopt);
19913fdadf7dSDmitry Mishin #endif
19923fdadf7dSDmitry Mishin 
19931da177e4SLinus Torvalds /* Return information about state of tcp endpoint in API format. */
19941da177e4SLinus Torvalds void tcp_get_info(struct sock *sk, struct tcp_info *info)
19951da177e4SLinus Torvalds {
19961da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
1997463c84b9SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
19981da177e4SLinus Torvalds 	u32 now = tcp_time_stamp;
19991da177e4SLinus Torvalds 
20001da177e4SLinus Torvalds 	memset(info, 0, sizeof(*info));
20011da177e4SLinus Torvalds 
20021da177e4SLinus Torvalds 	info->tcpi_state = sk->sk_state;
20036687e988SArnaldo Carvalho de Melo 	info->tcpi_ca_state = icsk->icsk_ca_state;
2004463c84b9SArnaldo Carvalho de Melo 	info->tcpi_retransmits = icsk->icsk_retransmits;
20056687e988SArnaldo Carvalho de Melo 	info->tcpi_probes = icsk->icsk_probes_out;
2006463c84b9SArnaldo Carvalho de Melo 	info->tcpi_backoff = icsk->icsk_backoff;
20071da177e4SLinus Torvalds 
20081da177e4SLinus Torvalds 	if (tp->rx_opt.tstamp_ok)
20091da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
20101da177e4SLinus Torvalds 	if (tp->rx_opt.sack_ok)
20111da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_SACK;
20121da177e4SLinus Torvalds 	if (tp->rx_opt.wscale_ok) {
20131da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_WSCALE;
20141da177e4SLinus Torvalds 		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
20151da177e4SLinus Torvalds 		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
20161da177e4SLinus Torvalds 	}
20171da177e4SLinus Torvalds 
20181da177e4SLinus Torvalds 	if (tp->ecn_flags&TCP_ECN_OK)
20191da177e4SLinus Torvalds 		info->tcpi_options |= TCPI_OPT_ECN;
20201da177e4SLinus Torvalds 
2021463c84b9SArnaldo Carvalho de Melo 	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2022463c84b9SArnaldo Carvalho de Melo 	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2023c1b4a7e6SDavid S. Miller 	info->tcpi_snd_mss = tp->mss_cache;
2024463c84b9SArnaldo Carvalho de Melo 	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
20251da177e4SLinus Torvalds 
20261da177e4SLinus Torvalds 	info->tcpi_unacked = tp->packets_out;
20271da177e4SLinus Torvalds 	info->tcpi_sacked = tp->sacked_out;
20281da177e4SLinus Torvalds 	info->tcpi_lost = tp->lost_out;
20291da177e4SLinus Torvalds 	info->tcpi_retrans = tp->retrans_out;
20301da177e4SLinus Torvalds 	info->tcpi_fackets = tp->fackets_out;
20311da177e4SLinus Torvalds 
20321da177e4SLinus Torvalds 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2033463c84b9SArnaldo Carvalho de Melo 	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
20341da177e4SLinus Torvalds 	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
20351da177e4SLinus Torvalds 
2036d83d8461SArnaldo Carvalho de Melo 	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
20371da177e4SLinus Torvalds 	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
20381da177e4SLinus Torvalds 	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
20391da177e4SLinus Torvalds 	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
20401da177e4SLinus Torvalds 	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
20411da177e4SLinus Torvalds 	info->tcpi_snd_cwnd = tp->snd_cwnd;
20421da177e4SLinus Torvalds 	info->tcpi_advmss = tp->advmss;
20431da177e4SLinus Torvalds 	info->tcpi_reordering = tp->reordering;
20441da177e4SLinus Torvalds 
20451da177e4SLinus Torvalds 	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
20461da177e4SLinus Torvalds 	info->tcpi_rcv_space = tp->rcvq_space.space;
20471da177e4SLinus Torvalds 
20481da177e4SLinus Torvalds 	info->tcpi_total_retrans = tp->total_retrans;
20491da177e4SLinus Torvalds }
20501da177e4SLinus Torvalds 
20511da177e4SLinus Torvalds EXPORT_SYMBOL_GPL(tcp_get_info);
20521da177e4SLinus Torvalds 
20533fdadf7dSDmitry Mishin static int do_tcp_getsockopt(struct sock *sk, int level,
20543fdadf7dSDmitry Mishin 		int optname, char __user *optval, int __user *optlen)
20551da177e4SLinus Torvalds {
2056295f7324SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
20571da177e4SLinus Torvalds 	struct tcp_sock *tp = tcp_sk(sk);
20581da177e4SLinus Torvalds 	int val, len;
20591da177e4SLinus Torvalds 
20601da177e4SLinus Torvalds 	if (get_user(len, optlen))
20611da177e4SLinus Torvalds 		return -EFAULT;
20621da177e4SLinus Torvalds 
20631da177e4SLinus Torvalds 	len = min_t(unsigned int, len, sizeof(int));
20641da177e4SLinus Torvalds 
20651da177e4SLinus Torvalds 	if (len < 0)
20661da177e4SLinus Torvalds 		return -EINVAL;
20671da177e4SLinus Torvalds 
20681da177e4SLinus Torvalds 	switch (optname) {
20691da177e4SLinus Torvalds 	case TCP_MAXSEG:
2070c1b4a7e6SDavid S. Miller 		val = tp->mss_cache;
20711da177e4SLinus Torvalds 		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
20721da177e4SLinus Torvalds 			val = tp->rx_opt.user_mss;
20731da177e4SLinus Torvalds 		break;
20741da177e4SLinus Torvalds 	case TCP_NODELAY:
20751da177e4SLinus Torvalds 		val = !!(tp->nonagle&TCP_NAGLE_OFF);
20761da177e4SLinus Torvalds 		break;
20771da177e4SLinus Torvalds 	case TCP_CORK:
20781da177e4SLinus Torvalds 		val = !!(tp->nonagle&TCP_NAGLE_CORK);
20791da177e4SLinus Torvalds 		break;
20801da177e4SLinus Torvalds 	case TCP_KEEPIDLE:
20811da177e4SLinus Torvalds 		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
20821da177e4SLinus Torvalds 		break;
20831da177e4SLinus Torvalds 	case TCP_KEEPINTVL:
20841da177e4SLinus Torvalds 		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
20851da177e4SLinus Torvalds 		break;
20861da177e4SLinus Torvalds 	case TCP_KEEPCNT:
20871da177e4SLinus Torvalds 		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
20881da177e4SLinus Torvalds 		break;
20891da177e4SLinus Torvalds 	case TCP_SYNCNT:
2090295f7324SArnaldo Carvalho de Melo 		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
20911da177e4SLinus Torvalds 		break;
20921da177e4SLinus Torvalds 	case TCP_LINGER2:
20931da177e4SLinus Torvalds 		val = tp->linger2;
20941da177e4SLinus Torvalds 		if (val >= 0)
20951da177e4SLinus Torvalds 			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
20961da177e4SLinus Torvalds 		break;
20971da177e4SLinus Torvalds 	case TCP_DEFER_ACCEPT:
2098295f7324SArnaldo Carvalho de Melo 		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2099295f7324SArnaldo Carvalho de Melo 			((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
21001da177e4SLinus Torvalds 		break;
21011da177e4SLinus Torvalds 	case TCP_WINDOW_CLAMP:
21021da177e4SLinus Torvalds 		val = tp->window_clamp;
21031da177e4SLinus Torvalds 		break;
21041da177e4SLinus Torvalds 	case TCP_INFO: {
21051da177e4SLinus Torvalds 		struct tcp_info info;
21061da177e4SLinus Torvalds 
21071da177e4SLinus Torvalds 		if (get_user(len, optlen))
21081da177e4SLinus Torvalds 			return -EFAULT;
21091da177e4SLinus Torvalds 
21101da177e4SLinus Torvalds 		tcp_get_info(sk, &info);
21111da177e4SLinus Torvalds 
21121da177e4SLinus Torvalds 		len = min_t(unsigned int, len, sizeof(info));
21131da177e4SLinus Torvalds 		if (put_user(len, optlen))
21141da177e4SLinus Torvalds 			return -EFAULT;
21151da177e4SLinus Torvalds 		if (copy_to_user(optval, &info, len))
21161da177e4SLinus Torvalds 			return -EFAULT;
21171da177e4SLinus Torvalds 		return 0;
21181da177e4SLinus Torvalds 	}
21191da177e4SLinus Torvalds 	case TCP_QUICKACK:
2120295f7324SArnaldo Carvalho de Melo 		val = !icsk->icsk_ack.pingpong;
21211da177e4SLinus Torvalds 		break;
21225f8ef48dSStephen Hemminger 
21235f8ef48dSStephen Hemminger 	case TCP_CONGESTION:
21245f8ef48dSStephen Hemminger 		if (get_user(len, optlen))
21255f8ef48dSStephen Hemminger 			return -EFAULT;
21265f8ef48dSStephen Hemminger 		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
21275f8ef48dSStephen Hemminger 		if (put_user(len, optlen))
21285f8ef48dSStephen Hemminger 			return -EFAULT;
21296687e988SArnaldo Carvalho de Melo 		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
21305f8ef48dSStephen Hemminger 			return -EFAULT;
21315f8ef48dSStephen Hemminger 		return 0;
21321da177e4SLinus Torvalds 	default:
21331da177e4SLinus Torvalds 		return -ENOPROTOOPT;
21343ff50b79SStephen Hemminger 	}
21351da177e4SLinus Torvalds 
21361da177e4SLinus Torvalds 	if (put_user(len, optlen))
21371da177e4SLinus Torvalds 		return -EFAULT;
21381da177e4SLinus Torvalds 	if (copy_to_user(optval, &val, len))
21391da177e4SLinus Torvalds 		return -EFAULT;
21401da177e4SLinus Torvalds 	return 0;
21411da177e4SLinus Torvalds }
21421da177e4SLinus Torvalds 
21433fdadf7dSDmitry Mishin int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
21443fdadf7dSDmitry Mishin 		   int __user *optlen)
21453fdadf7dSDmitry Mishin {
21463fdadf7dSDmitry Mishin 	struct inet_connection_sock *icsk = inet_csk(sk);
21473fdadf7dSDmitry Mishin 
21483fdadf7dSDmitry Mishin 	if (level != SOL_TCP)
21493fdadf7dSDmitry Mishin 		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
21503fdadf7dSDmitry Mishin 						     optval, optlen);
21513fdadf7dSDmitry Mishin 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
21523fdadf7dSDmitry Mishin }
21533fdadf7dSDmitry Mishin 
21543fdadf7dSDmitry Mishin #ifdef CONFIG_COMPAT
2155543d9cfeSArnaldo Carvalho de Melo int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2156543d9cfeSArnaldo Carvalho de Melo 			  char __user *optval, int __user *optlen)
21573fdadf7dSDmitry Mishin {
2158dec73ff0SArnaldo Carvalho de Melo 	if (level != SOL_TCP)
2159dec73ff0SArnaldo Carvalho de Melo 		return inet_csk_compat_getsockopt(sk, level, optname,
2160dec73ff0SArnaldo Carvalho de Melo 						  optval, optlen);
21613fdadf7dSDmitry Mishin 	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
21623fdadf7dSDmitry Mishin }
2163543d9cfeSArnaldo Carvalho de Melo 
2164543d9cfeSArnaldo Carvalho de Melo EXPORT_SYMBOL(compat_tcp_getsockopt);
21653fdadf7dSDmitry Mishin #endif
21661da177e4SLinus Torvalds 
2167576a30ebSHerbert Xu struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2168f4c50d99SHerbert Xu {
2169f4c50d99SHerbert Xu 	struct sk_buff *segs = ERR_PTR(-EINVAL);
2170f4c50d99SHerbert Xu 	struct tcphdr *th;
2171f4c50d99SHerbert Xu 	unsigned thlen;
2172f4c50d99SHerbert Xu 	unsigned int seq;
2173d3bc23e7SAl Viro 	__be32 delta;
2174f4c50d99SHerbert Xu 	unsigned int oldlen;
2175f4c50d99SHerbert Xu 	unsigned int len;
2176f4c50d99SHerbert Xu 
2177f4c50d99SHerbert Xu 	if (!pskb_may_pull(skb, sizeof(*th)))
2178f4c50d99SHerbert Xu 		goto out;
2179f4c50d99SHerbert Xu 
2180aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
2181f4c50d99SHerbert Xu 	thlen = th->doff * 4;
2182f4c50d99SHerbert Xu 	if (thlen < sizeof(*th))
2183f4c50d99SHerbert Xu 		goto out;
2184f4c50d99SHerbert Xu 
2185f4c50d99SHerbert Xu 	if (!pskb_may_pull(skb, thlen))
2186f4c50d99SHerbert Xu 		goto out;
2187f4c50d99SHerbert Xu 
21880718bcc0SHerbert Xu 	oldlen = (u16)~skb->len;
2189f4c50d99SHerbert Xu 	__skb_pull(skb, thlen);
2190f4c50d99SHerbert Xu 
21913820c3f3SHerbert Xu 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
21923820c3f3SHerbert Xu 		/* Packet is from an untrusted source, reset gso_segs. */
2193bbcf467dSHerbert Xu 		int type = skb_shinfo(skb)->gso_type;
2194bbcf467dSHerbert Xu 		int mss;
21953820c3f3SHerbert Xu 
2196bbcf467dSHerbert Xu 		if (unlikely(type &
2197bbcf467dSHerbert Xu 			     ~(SKB_GSO_TCPV4 |
2198bbcf467dSHerbert Xu 			       SKB_GSO_DODGY |
2199bbcf467dSHerbert Xu 			       SKB_GSO_TCP_ECN |
2200bbcf467dSHerbert Xu 			       SKB_GSO_TCPV6 |
2201bbcf467dSHerbert Xu 			       0) ||
2202bbcf467dSHerbert Xu 			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2203bbcf467dSHerbert Xu 			goto out;
2204bbcf467dSHerbert Xu 
2205bbcf467dSHerbert Xu 		mss = skb_shinfo(skb)->gso_size;
22063820c3f3SHerbert Xu 		skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
22073820c3f3SHerbert Xu 
22083820c3f3SHerbert Xu 		segs = NULL;
22093820c3f3SHerbert Xu 		goto out;
22103820c3f3SHerbert Xu 	}
22113820c3f3SHerbert Xu 
2212576a30ebSHerbert Xu 	segs = skb_segment(skb, features);
2213f4c50d99SHerbert Xu 	if (IS_ERR(segs))
2214f4c50d99SHerbert Xu 		goto out;
2215f4c50d99SHerbert Xu 
2216f4c50d99SHerbert Xu 	len = skb_shinfo(skb)->gso_size;
22170718bcc0SHerbert Xu 	delta = htonl(oldlen + (thlen + len));
2218f4c50d99SHerbert Xu 
2219f4c50d99SHerbert Xu 	skb = segs;
2220aa8223c7SArnaldo Carvalho de Melo 	th = tcp_hdr(skb);
2221f4c50d99SHerbert Xu 	seq = ntohl(th->seq);
2222f4c50d99SHerbert Xu 
2223f4c50d99SHerbert Xu 	do {
2224f4c50d99SHerbert Xu 		th->fin = th->psh = 0;
2225f4c50d99SHerbert Xu 
2226d3bc23e7SAl Viro 		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2227d3bc23e7SAl Viro 				       (__force u32)delta));
222884fa7933SPatrick McHardy 		if (skb->ip_summed != CHECKSUM_PARTIAL)
22299c70220bSArnaldo Carvalho de Melo 			th->check =
22309c70220bSArnaldo Carvalho de Melo 			     csum_fold(csum_partial(skb_transport_header(skb),
22319c70220bSArnaldo Carvalho de Melo 						    thlen, skb->csum));
2232f4c50d99SHerbert Xu 
2233f4c50d99SHerbert Xu 		seq += len;
2234f4c50d99SHerbert Xu 		skb = skb->next;
2235aa8223c7SArnaldo Carvalho de Melo 		th = tcp_hdr(skb);
2236f4c50d99SHerbert Xu 
2237f4c50d99SHerbert Xu 		th->seq = htonl(seq);
2238f4c50d99SHerbert Xu 		th->cwr = 0;
2239f4c50d99SHerbert Xu 	} while (skb->next);
2240f4c50d99SHerbert Xu 
224127a884dcSArnaldo Carvalho de Melo 	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
22429c70220bSArnaldo Carvalho de Melo 		      skb->data_len);
2243d3bc23e7SAl Viro 	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2244d3bc23e7SAl Viro 				(__force u32)delta));
224584fa7933SPatrick McHardy 	if (skb->ip_summed != CHECKSUM_PARTIAL)
22469c70220bSArnaldo Carvalho de Melo 		th->check = csum_fold(csum_partial(skb_transport_header(skb),
22479c70220bSArnaldo Carvalho de Melo 						   thlen, skb->csum));
2248f4c50d99SHerbert Xu 
2249f4c50d99SHerbert Xu out:
2250f4c50d99SHerbert Xu 	return segs;
2251f4c50d99SHerbert Xu }
2252adcfc7d0SHerbert Xu EXPORT_SYMBOL(tcp_tso_segment);
2253f4c50d99SHerbert Xu 
2254cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
2255cfb6eeb4SYOSHIFUJI Hideaki static unsigned long tcp_md5sig_users;
2256cfb6eeb4SYOSHIFUJI Hideaki static struct tcp_md5sig_pool **tcp_md5sig_pool;
2257cfb6eeb4SYOSHIFUJI Hideaki static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2258cfb6eeb4SYOSHIFUJI Hideaki 
2259cfb6eeb4SYOSHIFUJI Hideaki static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2260cfb6eeb4SYOSHIFUJI Hideaki {
2261cfb6eeb4SYOSHIFUJI Hideaki 	int cpu;
2262cfb6eeb4SYOSHIFUJI Hideaki 	for_each_possible_cpu(cpu) {
2263cfb6eeb4SYOSHIFUJI Hideaki 		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2264cfb6eeb4SYOSHIFUJI Hideaki 		if (p) {
2265cfb6eeb4SYOSHIFUJI Hideaki 			if (p->md5_desc.tfm)
2266cfb6eeb4SYOSHIFUJI Hideaki 				crypto_free_hash(p->md5_desc.tfm);
2267cfb6eeb4SYOSHIFUJI Hideaki 			kfree(p);
2268cfb6eeb4SYOSHIFUJI Hideaki 			p = NULL;
2269cfb6eeb4SYOSHIFUJI Hideaki 		}
2270cfb6eeb4SYOSHIFUJI Hideaki 	}
2271cfb6eeb4SYOSHIFUJI Hideaki 	free_percpu(pool);
2272cfb6eeb4SYOSHIFUJI Hideaki }
2273cfb6eeb4SYOSHIFUJI Hideaki 
2274cfb6eeb4SYOSHIFUJI Hideaki void tcp_free_md5sig_pool(void)
2275cfb6eeb4SYOSHIFUJI Hideaki {
2276cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_pool **pool = NULL;
2277cfb6eeb4SYOSHIFUJI Hideaki 
22782c4f6219SDavid S. Miller 	spin_lock_bh(&tcp_md5sig_pool_lock);
2279cfb6eeb4SYOSHIFUJI Hideaki 	if (--tcp_md5sig_users == 0) {
2280cfb6eeb4SYOSHIFUJI Hideaki 		pool = tcp_md5sig_pool;
2281cfb6eeb4SYOSHIFUJI Hideaki 		tcp_md5sig_pool = NULL;
2282cfb6eeb4SYOSHIFUJI Hideaki 	}
22832c4f6219SDavid S. Miller 	spin_unlock_bh(&tcp_md5sig_pool_lock);
2284cfb6eeb4SYOSHIFUJI Hideaki 	if (pool)
2285cfb6eeb4SYOSHIFUJI Hideaki 		__tcp_free_md5sig_pool(pool);
2286cfb6eeb4SYOSHIFUJI Hideaki }
2287cfb6eeb4SYOSHIFUJI Hideaki 
2288cfb6eeb4SYOSHIFUJI Hideaki EXPORT_SYMBOL(tcp_free_md5sig_pool);
2289cfb6eeb4SYOSHIFUJI Hideaki 
2290f5b99bcdSAdrian Bunk static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
2291cfb6eeb4SYOSHIFUJI Hideaki {
2292cfb6eeb4SYOSHIFUJI Hideaki 	int cpu;
2293cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_pool **pool;
2294cfb6eeb4SYOSHIFUJI Hideaki 
2295cfb6eeb4SYOSHIFUJI Hideaki 	pool = alloc_percpu(struct tcp_md5sig_pool *);
2296cfb6eeb4SYOSHIFUJI Hideaki 	if (!pool)
2297cfb6eeb4SYOSHIFUJI Hideaki 		return NULL;
2298cfb6eeb4SYOSHIFUJI Hideaki 
2299cfb6eeb4SYOSHIFUJI Hideaki 	for_each_possible_cpu(cpu) {
2300cfb6eeb4SYOSHIFUJI Hideaki 		struct tcp_md5sig_pool *p;
2301cfb6eeb4SYOSHIFUJI Hideaki 		struct crypto_hash *hash;
2302cfb6eeb4SYOSHIFUJI Hideaki 
2303cfb6eeb4SYOSHIFUJI Hideaki 		p = kzalloc(sizeof(*p), GFP_KERNEL);
2304cfb6eeb4SYOSHIFUJI Hideaki 		if (!p)
2305cfb6eeb4SYOSHIFUJI Hideaki 			goto out_free;
2306cfb6eeb4SYOSHIFUJI Hideaki 		*per_cpu_ptr(pool, cpu) = p;
2307cfb6eeb4SYOSHIFUJI Hideaki 
2308cfb6eeb4SYOSHIFUJI Hideaki 		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2309cfb6eeb4SYOSHIFUJI Hideaki 		if (!hash || IS_ERR(hash))
2310cfb6eeb4SYOSHIFUJI Hideaki 			goto out_free;
2311cfb6eeb4SYOSHIFUJI Hideaki 
2312cfb6eeb4SYOSHIFUJI Hideaki 		p->md5_desc.tfm = hash;
2313cfb6eeb4SYOSHIFUJI Hideaki 	}
2314cfb6eeb4SYOSHIFUJI Hideaki 	return pool;
2315cfb6eeb4SYOSHIFUJI Hideaki out_free:
2316cfb6eeb4SYOSHIFUJI Hideaki 	__tcp_free_md5sig_pool(pool);
2317cfb6eeb4SYOSHIFUJI Hideaki 	return NULL;
2318cfb6eeb4SYOSHIFUJI Hideaki }
2319cfb6eeb4SYOSHIFUJI Hideaki 
2320cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2321cfb6eeb4SYOSHIFUJI Hideaki {
2322cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_pool **pool;
2323cfb6eeb4SYOSHIFUJI Hideaki 	int alloc = 0;
2324cfb6eeb4SYOSHIFUJI Hideaki 
2325cfb6eeb4SYOSHIFUJI Hideaki retry:
23262c4f6219SDavid S. Miller 	spin_lock_bh(&tcp_md5sig_pool_lock);
2327cfb6eeb4SYOSHIFUJI Hideaki 	pool = tcp_md5sig_pool;
2328cfb6eeb4SYOSHIFUJI Hideaki 	if (tcp_md5sig_users++ == 0) {
2329cfb6eeb4SYOSHIFUJI Hideaki 		alloc = 1;
23302c4f6219SDavid S. Miller 		spin_unlock_bh(&tcp_md5sig_pool_lock);
2331cfb6eeb4SYOSHIFUJI Hideaki 	} else if (!pool) {
2332cfb6eeb4SYOSHIFUJI Hideaki 		tcp_md5sig_users--;
23332c4f6219SDavid S. Miller 		spin_unlock_bh(&tcp_md5sig_pool_lock);
2334cfb6eeb4SYOSHIFUJI Hideaki 		cpu_relax();
2335cfb6eeb4SYOSHIFUJI Hideaki 		goto retry;
2336cfb6eeb4SYOSHIFUJI Hideaki 	} else
23372c4f6219SDavid S. Miller 		spin_unlock_bh(&tcp_md5sig_pool_lock);
2338cfb6eeb4SYOSHIFUJI Hideaki 
2339cfb6eeb4SYOSHIFUJI Hideaki 	if (alloc) {
2340cfb6eeb4SYOSHIFUJI Hideaki 		/* we cannot hold spinlock here because this may sleep. */
2341cfb6eeb4SYOSHIFUJI Hideaki 		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
23422c4f6219SDavid S. Miller 		spin_lock_bh(&tcp_md5sig_pool_lock);
2343cfb6eeb4SYOSHIFUJI Hideaki 		if (!p) {
2344cfb6eeb4SYOSHIFUJI Hideaki 			tcp_md5sig_users--;
23452c4f6219SDavid S. Miller 			spin_unlock_bh(&tcp_md5sig_pool_lock);
2346cfb6eeb4SYOSHIFUJI Hideaki 			return NULL;
2347cfb6eeb4SYOSHIFUJI Hideaki 		}
2348cfb6eeb4SYOSHIFUJI Hideaki 		pool = tcp_md5sig_pool;
2349cfb6eeb4SYOSHIFUJI Hideaki 		if (pool) {
2350cfb6eeb4SYOSHIFUJI Hideaki 			/* oops, it has already been assigned. */
23512c4f6219SDavid S. Miller 			spin_unlock_bh(&tcp_md5sig_pool_lock);
2352cfb6eeb4SYOSHIFUJI Hideaki 			__tcp_free_md5sig_pool(p);
2353cfb6eeb4SYOSHIFUJI Hideaki 		} else {
2354cfb6eeb4SYOSHIFUJI Hideaki 			tcp_md5sig_pool = pool = p;
23552c4f6219SDavid S. Miller 			spin_unlock_bh(&tcp_md5sig_pool_lock);
2356cfb6eeb4SYOSHIFUJI Hideaki 		}
2357cfb6eeb4SYOSHIFUJI Hideaki 	}
2358cfb6eeb4SYOSHIFUJI Hideaki 	return pool;
2359cfb6eeb4SYOSHIFUJI Hideaki }
2360cfb6eeb4SYOSHIFUJI Hideaki 
2361cfb6eeb4SYOSHIFUJI Hideaki EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2362cfb6eeb4SYOSHIFUJI Hideaki 
2363cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2364cfb6eeb4SYOSHIFUJI Hideaki {
2365cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp_md5sig_pool **p;
23662c4f6219SDavid S. Miller 	spin_lock_bh(&tcp_md5sig_pool_lock);
2367cfb6eeb4SYOSHIFUJI Hideaki 	p = tcp_md5sig_pool;
2368cfb6eeb4SYOSHIFUJI Hideaki 	if (p)
2369cfb6eeb4SYOSHIFUJI Hideaki 		tcp_md5sig_users++;
23702c4f6219SDavid S. Miller 	spin_unlock_bh(&tcp_md5sig_pool_lock);
2371cfb6eeb4SYOSHIFUJI Hideaki 	return (p ? *per_cpu_ptr(p, cpu) : NULL);
2372cfb6eeb4SYOSHIFUJI Hideaki }
2373cfb6eeb4SYOSHIFUJI Hideaki 
2374cfb6eeb4SYOSHIFUJI Hideaki EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2375cfb6eeb4SYOSHIFUJI Hideaki 
23766931ba7cSDavid S. Miller void __tcp_put_md5sig_pool(void)
23776931ba7cSDavid S. Miller {
23786931ba7cSDavid S. Miller 	tcp_free_md5sig_pool();
2379cfb6eeb4SYOSHIFUJI Hideaki }
2380cfb6eeb4SYOSHIFUJI Hideaki 
2381cfb6eeb4SYOSHIFUJI Hideaki EXPORT_SYMBOL(__tcp_put_md5sig_pool);
2382cfb6eeb4SYOSHIFUJI Hideaki #endif
2383cfb6eeb4SYOSHIFUJI Hideaki 
23844ac02babSAndi Kleen void tcp_done(struct sock *sk)
23854ac02babSAndi Kleen {
23864ac02babSAndi Kleen 	if(sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
23874ac02babSAndi Kleen 		TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
23884ac02babSAndi Kleen 
23894ac02babSAndi Kleen 	tcp_set_state(sk, TCP_CLOSE);
23904ac02babSAndi Kleen 	tcp_clear_xmit_timers(sk);
23914ac02babSAndi Kleen 
23924ac02babSAndi Kleen 	sk->sk_shutdown = SHUTDOWN_MASK;
23934ac02babSAndi Kleen 
23944ac02babSAndi Kleen 	if (!sock_flag(sk, SOCK_DEAD))
23954ac02babSAndi Kleen 		sk->sk_state_change(sk);
23964ac02babSAndi Kleen 	else
23974ac02babSAndi Kleen 		inet_csk_destroy_sock(sk);
23984ac02babSAndi Kleen }
23994ac02babSAndi Kleen EXPORT_SYMBOL_GPL(tcp_done);
24004ac02babSAndi Kleen 
24011da177e4SLinus Torvalds extern void __skb_cb_too_small_for_tcp(int, int);
24025f8ef48dSStephen Hemminger extern struct tcp_congestion_ops tcp_reno;
24031da177e4SLinus Torvalds 
24041da177e4SLinus Torvalds static __initdata unsigned long thash_entries;
24051da177e4SLinus Torvalds static int __init set_thash_entries(char *str)
24061da177e4SLinus Torvalds {
24071da177e4SLinus Torvalds 	if (!str)
24081da177e4SLinus Torvalds 		return 0;
24091da177e4SLinus Torvalds 	thash_entries = simple_strtoul(str, &str, 0);
24101da177e4SLinus Torvalds 	return 1;
24111da177e4SLinus Torvalds }
24121da177e4SLinus Torvalds __setup("thash_entries=", set_thash_entries);
24131da177e4SLinus Torvalds 
/* Boot-time TCP initialization: sanity-check tcp_skb_cb fits in skb->cb[],
 * create the bind-bucket slab cache, allocate and initialize the
 * established/bind hash tables, derive memory-pressure and per-socket
 * buffer sysctl defaults from available RAM, and register Reno as the
 * default congestion control. */
void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long limit;
	int order, i, max_share;

	/* Compile/boot-time check: tcp_skb_cb must fit into skb->cb[];
	 * __skb_cb_too_small_for_tcp() is deliberately undefined so the
	 * build fails if it is ever referenced. */
	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.ehash_size,
					NULL,
					0);
	/* alloc_large_system_hash() stored log2 of the table size;
	 * convert it to an entry count. */
	tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
	for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
		rwlock_init(&tcp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
	}

	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	/* Same log2 -> entry-count conversion as for ehash above. */
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	/* order = smallest page order whose size covers the bind hash
	 * table; used below as a proxy for machine size. */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
			order++)
		;
	if (order >= 4) {
		/* Large machine: raise TIME-WAIT, orphan, and SYN-backlog
		 * limits accordingly. */
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		/* Small machine: scale limits down from the defaults. */
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}

	/* Set the pressure threshold to be a fraction of global memory that
	 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
	 * memory, with a floor of 128 pages.
	 */
	limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
	limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
	limit = max(limit, 128UL);
	/* tcp_mem: [0] = low watermark (3/4 of limit), [1] = pressure,
	 * [2] = hard limit. */
	sysctl_tcp_mem[0] = limit / 4 * 3;
	sysctl_tcp_mem[1] = limit;
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;

	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);		/* cap at 4 MB */

	sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;	/* default rcv space: ~3 MSS-sized segments */
	sysctl_tcp_rmem[2] = max(87380, max_share);

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);

	/* Reno is the fallback congestion control; always available. */
	tcp_register_congestion_control(&tcp_reno);
}
25131da177e4SLinus Torvalds 
25141da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_close);
25151da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_disconnect);
25161da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_getsockopt);
25171da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_ioctl);
25181da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_poll);
25191da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_read_sock);
25201da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_recvmsg);
25211da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sendmsg);
25221da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_sendpage);
25231da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_setsockopt);
25241da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_shutdown);
25251da177e4SLinus Torvalds EXPORT_SYMBOL(tcp_statistics);
2526