xref: /linux/include/net/tcp.h (revision 57dde7f70de34d4251f291c9eac7ad920aaf56b2)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Definitions for the TCP module.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	@(#)tcp.h	1.0.5	05/23/93
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  *		This program is free software; you can redistribute it and/or
141da177e4SLinus Torvalds  *		modify it under the terms of the GNU General Public License
151da177e4SLinus Torvalds  *		as published by the Free Software Foundation; either version
161da177e4SLinus Torvalds  *		2 of the License, or (at your option) any later version.
171da177e4SLinus Torvalds  */
181da177e4SLinus Torvalds #ifndef _TCP_H
191da177e4SLinus Torvalds #define _TCP_H
201da177e4SLinus Torvalds 
211da177e4SLinus Torvalds #define FASTRETRANS_DEBUG 1
221da177e4SLinus Torvalds 
231da177e4SLinus Torvalds #include <linux/list.h>
241da177e4SLinus Torvalds #include <linux/tcp.h>
25187f1882SPaul Gortmaker #include <linux/bug.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
271da177e4SLinus Torvalds #include <linux/cache.h>
281da177e4SLinus Torvalds #include <linux/percpu.h>
29fb286bb2SHerbert Xu #include <linux/skbuff.h>
30c6aefafbSGlenn Griffin #include <linux/cryptohash.h>
31435cf559SWilliam Allen Simpson #include <linux/kref.h>
32740b0f18SEric Dumazet #include <linux/ktime.h>
333f421baaSArnaldo Carvalho de Melo 
343f421baaSArnaldo Carvalho de Melo #include <net/inet_connection_sock.h>
35295ff7edSArnaldo Carvalho de Melo #include <net/inet_timewait_sock.h>
3677d8bf9cSArnaldo Carvalho de Melo #include <net/inet_hashtables.h>
371da177e4SLinus Torvalds #include <net/checksum.h>
382e6599cbSArnaldo Carvalho de Melo #include <net/request_sock.h>
391da177e4SLinus Torvalds #include <net/sock.h>
401da177e4SLinus Torvalds #include <net/snmp.h>
411da177e4SLinus Torvalds #include <net/ip.h>
42c752f073SArnaldo Carvalho de Melo #include <net/tcp_states.h>
43bdf1ee5dSIlpo Järvinen #include <net/inet_ecn.h>
440c266898SSatoru SATOH #include <net/dst.h>
45c752f073SArnaldo Carvalho de Melo 
461da177e4SLinus Torvalds #include <linux/seq_file.h>
47180d8cd9SGlauber Costa #include <linux/memcontrol.h>
481da177e4SLinus Torvalds 
490f7ff927SArnaldo Carvalho de Melo extern struct inet_hashinfo tcp_hashinfo;
501da177e4SLinus Torvalds 
51dd24c001SEric Dumazet extern struct percpu_counter tcp_orphan_count;
525c9f3023SJoe Perches void tcp_time_wait(struct sock *sk, int state, int timeo);
531da177e4SLinus Torvalds 
541da177e4SLinus Torvalds #define MAX_TCP_HEADER	(128 + MAX_HEADER)
5533ad798cSAdam Langley #define MAX_TCP_OPTION_SPACE 40
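/* Illustrative note (not part of the original header): the 40 bytes of
 * option space follow from the TCP data offset field, which allows a
 * header of at most 15 * 4 = 60 bytes; subtracting the 20-byte fixed
 * header leaves 40 bytes for options.
 */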
561da177e4SLinus Torvalds 
571da177e4SLinus Torvalds /*
581da177e4SLinus Torvalds  * Never offer a window over 32767 without using window scaling. Some
591da177e4SLinus Torvalds  * poor stacks do signed 16bit maths!
601da177e4SLinus Torvalds  */
611da177e4SLinus Torvalds #define MAX_TCP_WINDOW		32767U
621da177e4SLinus Torvalds 
631da177e4SLinus Torvalds /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
641da177e4SLinus Torvalds #define TCP_MIN_MSS		88U
651da177e4SLinus Torvalds 
665d424d5aSJohn Heffner /* The least MTU to use for probing */
67dcd8fb85SFan Du #define TCP_BASE_MSS		1024
685d424d5aSJohn Heffner 
6905cbc0dbSFan Du /* Probing interval, defaults to 10 minutes as per RFC 4821 */
7005cbc0dbSFan Du #define TCP_PROBE_INTERVAL	600
7105cbc0dbSFan Du 
726b58e0a5SFan Du /* Stop MTU probing once the probe search range narrows below this many bytes */
736b58e0a5SFan Du #define TCP_PROBE_THRESHOLD	8
746b58e0a5SFan Du 
751da177e4SLinus Torvalds /* After receiving this amount of duplicate ACKs fast retransmit starts. */
761da177e4SLinus Torvalds #define TCP_FASTRETRANS_THRESH 3
771da177e4SLinus Torvalds 
781da177e4SLinus Torvalds /* Maximal number of ACKs sent quickly to accelerate slow-start. */
791da177e4SLinus Torvalds #define TCP_MAX_QUICKACKS	16U
801da177e4SLinus Torvalds 
811da177e4SLinus Torvalds /* urg_data states */
821da177e4SLinus Torvalds #define TCP_URG_VALID	0x0100
831da177e4SLinus Torvalds #define TCP_URG_NOTYET	0x0200
841da177e4SLinus Torvalds #define TCP_URG_READ	0x0400
851da177e4SLinus Torvalds 
861da177e4SLinus Torvalds #define TCP_RETR1	3	/*
871da177e4SLinus Torvalds 				 * This is how many retries it does before it
881da177e4SLinus Torvalds 				 * tries to figure out if the gateway is
891da177e4SLinus Torvalds 				 * down. Minimal RFC value is 3; it corresponds
901da177e4SLinus Torvalds 				 * to ~3sec-8min depending on RTO.
911da177e4SLinus Torvalds 				 */
921da177e4SLinus Torvalds 
931da177e4SLinus Torvalds #define TCP_RETR2	15	/*
941da177e4SLinus Torvalds 				 * This should take at least
951da177e4SLinus Torvalds 				 * 90 minutes to time out.
961da177e4SLinus Torvalds 				 * RFC1122 says that the limit should be
				 * at least 100 sec.
971da177e4SLinus Torvalds 				 * 15 is ~13-30min depending on RTO.
981da177e4SLinus Torvalds 				 */
991da177e4SLinus Torvalds 
1006c9ff979SAlex Bergmann #define TCP_SYN_RETRIES	 6	/* This is how many retries are done
1016c9ff979SAlex Bergmann 				 * when actively opening a connection.
1026c9ff979SAlex Bergmann 				 * RFC1122 says the minimum retry MUST
1036c9ff979SAlex Bergmann 				 * be at least 180secs.  Nevertheless
1046c9ff979SAlex Bergmann 				 * this value corresponds to
1056c9ff979SAlex Bergmann 				 * 63secs of retransmission with the
1066c9ff979SAlex Bergmann 				 * current initial RTO.
1076c9ff979SAlex Bergmann 				 */
1081da177e4SLinus Torvalds 
1096c9ff979SAlex Bergmann #define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
1106c9ff979SAlex Bergmann 				 * when passively opening a connection.
1116c9ff979SAlex Bergmann 				 * This corresponds to 31secs of
1126c9ff979SAlex Bergmann 				 * retransmission with the current
1136c9ff979SAlex Bergmann 				 * initial RTO.
1146c9ff979SAlex Bergmann 				 */
1151da177e4SLinus Torvalds 
1161da177e4SLinus Torvalds #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
1171da177e4SLinus Torvalds 				  * state, about 60 seconds	*/
1181da177e4SLinus Torvalds #define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
1191da177e4SLinus Torvalds                                  /* BSD style FIN_WAIT2 deadlock breaker.
1201da177e4SLinus Torvalds 				  * It used to be 3min, new value is 60sec,
1211da177e4SLinus Torvalds 				  * to combine FIN-WAIT-2 timeout with
1221da177e4SLinus Torvalds 				  * TIME-WAIT timer.
1231da177e4SLinus Torvalds 				  */
1241da177e4SLinus Torvalds 
1251da177e4SLinus Torvalds #define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
1261da177e4SLinus Torvalds #if HZ >= 100
1271da177e4SLinus Torvalds #define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
1281da177e4SLinus Torvalds #define TCP_ATO_MIN	((unsigned)(HZ/25))
1291da177e4SLinus Torvalds #else
1301da177e4SLinus Torvalds #define TCP_DELACK_MIN	4U
1311da177e4SLinus Torvalds #define TCP_ATO_MIN	4U
1321da177e4SLinus Torvalds #endif
1331da177e4SLinus Torvalds #define TCP_RTO_MAX	((unsigned)(120*HZ))
1341da177e4SLinus Torvalds #define TCP_RTO_MIN	((unsigned)(HZ/5))
135fd4f2ceaSEric Dumazet #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
1369ad7c049SJerry Chu #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
1379ad7c049SJerry Chu 						 * used as a fallback RTO for the
1389ad7c049SJerry Chu 						 * initial data transmission if no
1399ad7c049SJerry Chu 						 * valid RTT sample has been acquired,
1409ad7c049SJerry Chu 						 * most likely due to retrans in 3WHS.
1419ad7c049SJerry Chu 						 */
1421da177e4SLinus Torvalds 
1431da177e4SLinus Torvalds #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
1441da177e4SLinus Torvalds 					                 * for local resources.
1451da177e4SLinus Torvalds 					                 */
146*57dde7f7SYuchung Cheng #define TCP_REO_TIMEOUT_MIN	(2000) /* Min RACK reordering timeout in usec */
1471da177e4SLinus Torvalds 
1481da177e4SLinus Torvalds #define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
1491da177e4SLinus Torvalds #define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
1501da177e4SLinus Torvalds #define TCP_KEEPALIVE_INTVL	(75*HZ)
1511da177e4SLinus Torvalds 
1521da177e4SLinus Torvalds #define MAX_TCP_KEEPIDLE	32767
1531da177e4SLinus Torvalds #define MAX_TCP_KEEPINTVL	32767
1541da177e4SLinus Torvalds #define MAX_TCP_KEEPCNT		127
1551da177e4SLinus Torvalds #define MAX_TCP_SYNCNT		127
1561da177e4SLinus Torvalds 
1571da177e4SLinus Torvalds #define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
1581da177e4SLinus Torvalds 
1591da177e4SLinus Torvalds #define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
1601da177e4SLinus Torvalds #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
1611da177e4SLinus Torvalds 					 * after this time. It should be equal to
1621da177e4SLinus Torvalds 					 * (or greater than) TCP_TIMEWAIT_LEN
1631da177e4SLinus Torvalds 					 * to provide reliability equal to that
1641da177e4SLinus Torvalds 					 * provided by the timewait state.
1651da177e4SLinus Torvalds 					 */
1661da177e4SLinus Torvalds #define TCP_PAWS_WINDOW	1		/* Replay window for per-host
1671da177e4SLinus Torvalds 					 * timestamps. It must be less than
1681da177e4SLinus Torvalds 					 * minimal timewait lifetime.
1691da177e4SLinus Torvalds 					 */
1701da177e4SLinus Torvalds /*
1711da177e4SLinus Torvalds  *	TCP option
1721da177e4SLinus Torvalds  */
1731da177e4SLinus Torvalds 
1741da177e4SLinus Torvalds #define TCPOPT_NOP		1	/* Padding */
1751da177e4SLinus Torvalds #define TCPOPT_EOL		0	/* End of options */
1761da177e4SLinus Torvalds #define TCPOPT_MSS		2	/* Segment size negotiating */
1771da177e4SLinus Torvalds #define TCPOPT_WINDOW		3	/* Window scaling */
1781da177e4SLinus Torvalds #define TCPOPT_SACK_PERM        4       /* SACK Permitted */
1791da177e4SLinus Torvalds #define TCPOPT_SACK             5       /* SACK Block */
1801da177e4SLinus Torvalds #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
181cfb6eeb4SYOSHIFUJI Hideaki #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
1827f9b838bSDaniel Lee #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
1832100c8d2SYuchung Cheng #define TCPOPT_EXP		254	/* Experimental */
1842100c8d2SYuchung Cheng /* Magic number placed after the option kind/length so the TCP
1852100c8d2SYuchung Cheng  * experimental option can be shared. See draft-ietf-tcpm-experimental-options-00.txt
1862100c8d2SYuchung Cheng  */
1872100c8d2SYuchung Cheng #define TCPOPT_FASTOPEN_MAGIC	0xF989
1881da177e4SLinus Torvalds 
1891da177e4SLinus Torvalds /*
1901da177e4SLinus Torvalds  *     TCP option lengths
1911da177e4SLinus Torvalds  */
1921da177e4SLinus Torvalds 
1931da177e4SLinus Torvalds #define TCPOLEN_MSS            4
1941da177e4SLinus Torvalds #define TCPOLEN_WINDOW         3
1951da177e4SLinus Torvalds #define TCPOLEN_SACK_PERM      2
1961da177e4SLinus Torvalds #define TCPOLEN_TIMESTAMP      10
197cfb6eeb4SYOSHIFUJI Hideaki #define TCPOLEN_MD5SIG         18
1987f9b838bSDaniel Lee #define TCPOLEN_FASTOPEN_BASE  2
1992100c8d2SYuchung Cheng #define TCPOLEN_EXP_FASTOPEN_BASE  4
2001da177e4SLinus Torvalds 
2011da177e4SLinus Torvalds /* But this is what stacks really send out. */
2021da177e4SLinus Torvalds #define TCPOLEN_TSTAMP_ALIGNED		12
2031da177e4SLinus Torvalds #define TCPOLEN_WSCALE_ALIGNED		4
2041da177e4SLinus Torvalds #define TCPOLEN_SACKPERM_ALIGNED	4
2051da177e4SLinus Torvalds #define TCPOLEN_SACK_BASE		2
2061da177e4SLinus Torvalds #define TCPOLEN_SACK_BASE_ALIGNED	4
2071da177e4SLinus Torvalds #define TCPOLEN_SACK_PERBLOCK		8
208cfb6eeb4SYOSHIFUJI Hideaki #define TCPOLEN_MD5SIG_ALIGNED		20
20933ad798cSAdam Langley #define TCPOLEN_MSS_ALIGNED		4
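/* Illustrative example (not part of the original header): the timestamp
 * option is 10 bytes (kind, length, two 32-bit timestamps); padding it to
 * a 32-bit boundary with two NOPs gives TCPOLEN_TSTAMP_ALIGNED == 12.
 * The 2-byte SACK-permitted option is likewise padded up to 4.
 */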
2101da177e4SLinus Torvalds 
2111da177e4SLinus Torvalds /* Flags in tp->nonagle */
2121da177e4SLinus Torvalds #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
2131da177e4SLinus Torvalds #define TCP_NAGLE_CORK		2	/* Socket is corked	    */
214caa20d9aSStephen Hemminger #define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
2151da177e4SLinus Torvalds 
21636e31b0aSAndreas Petlund /* TCP thin-stream limits */
21736e31b0aSAndreas Petlund #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
21836e31b0aSAndreas Petlund 
21921603fc4SJörg Thalheim /* TCP initial congestion window as per RFC 6928 */
220442b9635SDavid S. Miller #define TCP_INIT_CWND		10
221442b9635SDavid S. Miller 
222cf60af03SYuchung Cheng /* Bit Flags for sysctl_tcp_fastopen */
223cf60af03SYuchung Cheng #define	TFO_CLIENT_ENABLE	1
22410467163SJerry Chu #define	TFO_SERVER_ENABLE	2
22567da22d2SYuchung Cheng #define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
226cf60af03SYuchung Cheng 
22710467163SJerry Chu /* Accept SYN data w/o any cookie option */
22810467163SJerry Chu #define	TFO_SERVER_COOKIE_NOT_REQD	0x200
22910467163SJerry Chu 
23010467163SJerry Chu /* Force enable TFO on all listeners, i.e., not requiring the
231cebc5cbaSYuchung Cheng  * TCP_FASTOPEN socket option.
23210467163SJerry Chu  */
23310467163SJerry Chu #define	TFO_SERVER_WO_SOCKOPT1	0x400
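/* Illustrative example (not part of the original header): these bits are
 * OR-ed into the net.ipv4.tcp_fastopen sysctl, e.g. a value of 0x3
 * (TFO_CLIENT_ENABLE | TFO_SERVER_ENABLE) enables Fast Open for both
 * outgoing connections and listening sockets.
 */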
23410467163SJerry Chu 
235295ff7edSArnaldo Carvalho de Melo 
2361da177e4SLinus Torvalds /* sysctl variables for tcp */
2371da177e4SLinus Torvalds extern int sysctl_tcp_timestamps;
2381da177e4SLinus Torvalds extern int sysctl_tcp_window_scaling;
2391da177e4SLinus Torvalds extern int sysctl_tcp_sack;
2402100c8d2SYuchung Cheng extern int sysctl_tcp_fastopen;
2411da177e4SLinus Torvalds extern int sysctl_tcp_retrans_collapse;
2421da177e4SLinus Torvalds extern int sysctl_tcp_stdurg;
2431da177e4SLinus Torvalds extern int sysctl_tcp_rfc1337;
2441da177e4SLinus Torvalds extern int sysctl_tcp_abort_on_overflow;
2451da177e4SLinus Torvalds extern int sysctl_tcp_max_orphans;
2461da177e4SLinus Torvalds extern int sysctl_tcp_fack;
2471da177e4SLinus Torvalds extern int sysctl_tcp_reordering;
248dca145ffSEric Dumazet extern int sysctl_tcp_max_reordering;
2491da177e4SLinus Torvalds extern int sysctl_tcp_dsack;
250a4fe34bfSEric W. Biederman extern long sysctl_tcp_mem[3];
2511da177e4SLinus Torvalds extern int sysctl_tcp_wmem[3];
2521da177e4SLinus Torvalds extern int sysctl_tcp_rmem[3];
2531da177e4SLinus Torvalds extern int sysctl_tcp_app_win;
2541da177e4SLinus Torvalds extern int sysctl_tcp_adv_win_scale;
2551da177e4SLinus Torvalds extern int sysctl_tcp_frto;
2561da177e4SLinus Torvalds extern int sysctl_tcp_low_latency;
2571da177e4SLinus Torvalds extern int sysctl_tcp_nometrics_save;
2581da177e4SLinus Torvalds extern int sysctl_tcp_moderate_rcvbuf;
2591da177e4SLinus Torvalds extern int sysctl_tcp_tso_win_divisor;
26015d99e02SRick Jones extern int sysctl_tcp_workaround_signed_windows;
26135089bb2SDavid S. Miller extern int sysctl_tcp_slow_start_after_idle;
26236e31b0aSAndreas Petlund extern int sysctl_tcp_thin_linear_timeouts;
2637e380175SAndreas Petlund extern int sysctl_tcp_thin_dupack;
264eed530b6SYuchung Cheng extern int sysctl_tcp_early_retrans;
26546d3ceabSEric Dumazet extern int sysctl_tcp_limit_output_bytes;
266282f23c6SEric Dumazet extern int sysctl_tcp_challenge_ack_limit;
26795bd09ebSEric Dumazet extern int sysctl_tcp_min_tso_segs;
268f6722583SYuchung Cheng extern int sysctl_tcp_min_rtt_wlen;
269f54b3111SEric Dumazet extern int sysctl_tcp_autocorking;
270032ee423SNeal Cardwell extern int sysctl_tcp_invalid_ratelimit;
27143e122b0SEric Dumazet extern int sysctl_tcp_pacing_ss_ratio;
27243e122b0SEric Dumazet extern int sysctl_tcp_pacing_ca_ratio;
2731da177e4SLinus Torvalds 
2748d987e5cSEric Dumazet extern atomic_long_t tcp_memory_allocated;
2751748376bSEric Dumazet extern struct percpu_counter tcp_sockets_allocated;
2761da177e4SLinus Torvalds extern int tcp_memory_pressure;
2771da177e4SLinus Torvalds 
278b8da51ebSEric Dumazet /* optimized version of sk_under_memory_pressure() for TCP sockets */
279b8da51ebSEric Dumazet static inline bool tcp_under_memory_pressure(const struct sock *sk)
280b8da51ebSEric Dumazet {
281baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
282baac50bbSJohannes Weiner 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
283e805605cSJohannes Weiner 		return true;
284b8da51ebSEric Dumazet 
285b8da51ebSEric Dumazet 	return tcp_memory_pressure;
286b8da51ebSEric Dumazet }
2871da177e4SLinus Torvalds /*
2881da177e4SLinus Torvalds  * The next routines deal with comparing 32 bit unsigned ints
2891da177e4SLinus Torvalds  * and worry about wraparound (automatic with unsigned arithmetic).
2901da177e4SLinus Torvalds  */
2911da177e4SLinus Torvalds 
292a2a385d6SEric Dumazet static inline bool before(__u32 seq1, __u32 seq2)
2931da177e4SLinus Torvalds {
2940d630cc0SGerrit Renker         return (__s32)(seq1-seq2) < 0;
2951da177e4SLinus Torvalds }
2969a036b9cSGerrit Renker #define after(seq2, seq1) 	before(seq1, seq2)
2971da177e4SLinus Torvalds 
2981da177e4SLinus Torvalds /* is s2<=s1<=s3 ? */
299a2a385d6SEric Dumazet static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
3001da177e4SLinus Torvalds {
3011da177e4SLinus Torvalds 	return seq3 - seq2 >= seq1 - seq2;
3021da177e4SLinus Torvalds }
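/* Illustrative example (not part of the original header): the signed
 * subtraction makes these helpers robust across sequence-number wrap,
 * e.g. before(0xfffffff0, 0x10) is true because
 * (__s32)(0xfffffff0 - 0x10) == -32, so a segment sent just before the
 * wrap still compares as earlier than one sent just after it.
 */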
3031da177e4SLinus Torvalds 
304efcdbf24SArun Sharma static inline bool tcp_out_of_memory(struct sock *sk)
305efcdbf24SArun Sharma {
306efcdbf24SArun Sharma 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
307efcdbf24SArun Sharma 	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
308efcdbf24SArun Sharma 		return true;
309efcdbf24SArun Sharma 	return false;
310efcdbf24SArun Sharma }
311efcdbf24SArun Sharma 
312a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size);
313a6c5ea4cSEric Dumazet 
314ad1af0feSDavid S. Miller static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
315e4fd5da3SPavel Emelianov {
316ad1af0feSDavid S. Miller 	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
317ad1af0feSDavid S. Miller 	int orphans = percpu_counter_read_positive(ocp);
318ad1af0feSDavid S. Miller 
319ad1af0feSDavid S. Miller 	if (orphans << shift > sysctl_tcp_max_orphans) {
320ad1af0feSDavid S. Miller 		orphans = percpu_counter_sum_positive(ocp);
321ad1af0feSDavid S. Miller 		if (orphans << shift > sysctl_tcp_max_orphans)
322ad1af0feSDavid S. Miller 			return true;
323ad1af0feSDavid S. Miller 	}
324ad1af0feSDavid S. Miller 	return false;
325e4fd5da3SPavel Emelianov }
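/* Descriptive note (not part of the original header): the shift argument
 * lets callers weight each orphan as 2^shift sockets, and the cheap
 * percpu_counter_read_positive() estimate is only re-checked with the
 * exact (more expensive) percpu_counter_sum_positive() when the estimate
 * already exceeds sysctl_tcp_max_orphans.
 */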
3261da177e4SLinus Torvalds 
3275c9f3023SJoe Perches bool tcp_check_oom(struct sock *sk, int shift);
328efcdbf24SArun Sharma 
329a0f82f64SFlorian Westphal 
3301da177e4SLinus Torvalds extern struct proto tcp_prot;
3311da177e4SLinus Torvalds 
33257ef42d5SPavel Emelyanov #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
33313415e46SEric Dumazet #define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
33457ef42d5SPavel Emelyanov #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
335aa2ea058STom Herbert #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
3361da177e4SLinus Torvalds 
3375c9f3023SJoe Perches void tcp_tasklet_init(void);
33846d3ceabSEric Dumazet 
3395c9f3023SJoe Perches void tcp_v4_err(struct sk_buff *skb, u32);
3401da177e4SLinus Torvalds 
3415c9f3023SJoe Perches void tcp_shutdown(struct sock *sk, int how);
3421da177e4SLinus Torvalds 
3435c9f3023SJoe Perches void tcp_v4_early_demux(struct sk_buff *skb);
3445c9f3023SJoe Perches int tcp_v4_rcv(struct sk_buff *skb);
3451da177e4SLinus Torvalds 
3465c9f3023SJoe Perches int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
3471b784140SYing Xue int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
3485c9f3023SJoe Perches int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
3495c9f3023SJoe Perches 		 int flags);
3505c9f3023SJoe Perches void tcp_release_cb(struct sock *sk);
3515c9f3023SJoe Perches void tcp_wfree(struct sk_buff *skb);
3525c9f3023SJoe Perches void tcp_write_timer_handler(struct sock *sk);
3535c9f3023SJoe Perches void tcp_delack_timer_handler(struct sock *sk);
3545c9f3023SJoe Perches int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
35572ab4a86SEric Dumazet int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
3565c9f3023SJoe Perches void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
357cf533ea5SEric Dumazet 			 const struct tcphdr *th, unsigned int len);
3585c9f3023SJoe Perches void tcp_rcv_space_adjust(struct sock *sk);
3595c9f3023SJoe Perches int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
3605c9f3023SJoe Perches void tcp_twsk_destructor(struct sock *sk);
3615c9f3023SJoe Perches ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
36253d3176bSChangli Gao 			struct pipe_inode_info *pipe, size_t len,
36353d3176bSChangli Gao 			unsigned int flags);
3649c55e01cSJens Axboe 
365463c84b9SArnaldo Carvalho de Melo static inline void tcp_dec_quickack_mode(struct sock *sk,
366463c84b9SArnaldo Carvalho de Melo 					 const unsigned int pkts)
3671da177e4SLinus Torvalds {
368463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
369fc6415bcSDavid S. Miller 
370463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.quick) {
371463c84b9SArnaldo Carvalho de Melo 		if (pkts >= icsk->icsk_ack.quick) {
372463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.quick = 0;
3731da177e4SLinus Torvalds 			/* Leaving quickack mode we deflate ATO. */
374463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.ato   = TCP_ATO_MIN;
375fc6415bcSDavid S. Miller 		} else
376463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.quick -= pkts;
3771da177e4SLinus Torvalds 	}
3781da177e4SLinus Torvalds }
3791da177e4SLinus Torvalds 
380bdf1ee5dSIlpo Järvinen #define	TCP_ECN_OK		1
381bdf1ee5dSIlpo Järvinen #define	TCP_ECN_QUEUE_CWR	2
382bdf1ee5dSIlpo Järvinen #define	TCP_ECN_DEMAND_CWR	4
3837a269ffaSEric Dumazet #define	TCP_ECN_SEEN		8
384bdf1ee5dSIlpo Järvinen 
385fd2c3ef7SEric Dumazet enum tcp_tw_status {
3861da177e4SLinus Torvalds 	TCP_TW_SUCCESS = 0,
3871da177e4SLinus Torvalds 	TCP_TW_RST = 1,
3881da177e4SLinus Torvalds 	TCP_TW_ACK = 2,
3891da177e4SLinus Torvalds 	TCP_TW_SYN = 3
3901da177e4SLinus Torvalds };
3911da177e4SLinus Torvalds 
3921da177e4SLinus Torvalds 
3935c9f3023SJoe Perches enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
3941da177e4SLinus Torvalds 					      struct sk_buff *skb,
3958feaf0c0SArnaldo Carvalho de Melo 					      const struct tcphdr *th);
3965c9f3023SJoe Perches struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
39752452c54SEric Dumazet 			   struct request_sock *req, bool fastopen);
3985c9f3023SJoe Perches int tcp_child_process(struct sock *parent, struct sock *child,
3991da177e4SLinus Torvalds 		      struct sk_buff *skb);
4005ae344c9SNeal Cardwell void tcp_enter_loss(struct sock *sk);
401*57dde7f7SYuchung Cheng void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
4025c9f3023SJoe Perches void tcp_clear_retrans(struct tcp_sock *tp);
4035c9f3023SJoe Perches void tcp_update_metrics(struct sock *sk);
4045c9f3023SJoe Perches void tcp_init_metrics(struct sock *sk);
4055c9f3023SJoe Perches void tcp_metrics_init(void);
4065c9f3023SJoe Perches bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
407a26552afSHannes Frederic Sowa 			bool paws_check, bool timestamps);
4085c9f3023SJoe Perches bool tcp_remember_stamp(struct sock *sk);
4095c9f3023SJoe Perches bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
4105c9f3023SJoe Perches void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
4115c9f3023SJoe Perches void tcp_disable_fack(struct tcp_sock *tp);
4125c9f3023SJoe Perches void tcp_close(struct sock *sk, long timeout);
4135c9f3023SJoe Perches void tcp_init_sock(struct sock *sk);
4145c9f3023SJoe Perches unsigned int tcp_poll(struct file *file, struct socket *sock,
41553d3176bSChangli Gao 		      struct poll_table_struct *wait);
4165c9f3023SJoe Perches int tcp_getsockopt(struct sock *sk, int level, int optname,
4173fdadf7dSDmitry Mishin 		   char __user *optval, int __user *optlen);
4185c9f3023SJoe Perches int tcp_setsockopt(struct sock *sk, int level, int optname,
41953d3176bSChangli Gao 		   char __user *optval, unsigned int optlen);
4205c9f3023SJoe Perches int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
42153d3176bSChangli Gao 			  char __user *optval, int __user *optlen);
4225c9f3023SJoe Perches int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
423b7058842SDavid S. Miller 			  char __user *optval, unsigned int optlen);
4245c9f3023SJoe Perches void tcp_set_keepalive(struct sock *sk, int val);
42542cb80a2SEric Dumazet void tcp_syn_ack_timeout(const struct request_sock *req);
4261b784140SYing Xue int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
4271b784140SYing Xue 		int flags, int *addr_len);
4285c9f3023SJoe Perches void tcp_parse_options(const struct sk_buff *skb,
4291a2c6181SChristoph Paasch 		       struct tcp_options_received *opt_rx,
4302100c8d2SYuchung Cheng 		       int estab, struct tcp_fastopen_cookie *foc);
4315c9f3023SJoe Perches const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
4327d5d5525SYOSHIFUJI Hideaki 
4331da177e4SLinus Torvalds /*
4341da177e4SLinus Torvalds  *	TCP v4 functions exported for the inet6 API
4351da177e4SLinus Torvalds  */
4361da177e4SLinus Torvalds 
4375c9f3023SJoe Perches void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
4384fab9071SNeal Cardwell void tcp_v4_mtu_reduced(struct sock *sk);
4399cf74903SEric Dumazet void tcp_req_err(struct sock *sk, u32 seq, bool abort);
4405c9f3023SJoe Perches int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
441c28c6f04SEric Dumazet struct sock *tcp_create_openreq_child(const struct sock *sk,
44260236fddSArnaldo Carvalho de Melo 				      struct request_sock *req,
4431da177e4SLinus Torvalds 				      struct sk_buff *skb);
44481164413SDaniel Borkmann void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
4450c27171eSEric Dumazet struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
44660236fddSArnaldo Carvalho de Melo 				  struct request_sock *req,
4475e0724d0SEric Dumazet 				  struct dst_entry *dst,
4485e0724d0SEric Dumazet 				  struct request_sock *req_unhash,
4495e0724d0SEric Dumazet 				  bool *own_req);
4505c9f3023SJoe Perches int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
4515c9f3023SJoe Perches int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
4525c9f3023SJoe Perches int tcp_connect(struct sock *sk);
453b3d05147SEric Dumazet enum tcp_synack_type {
454b3d05147SEric Dumazet 	TCP_SYNACK_NORMAL,
455b3d05147SEric Dumazet 	TCP_SYNACK_FASTOPEN,
456b3d05147SEric Dumazet 	TCP_SYNACK_COOKIE,
457b3d05147SEric Dumazet };
4585d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
459e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
460ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
461b3d05147SEric Dumazet 				enum tcp_synack_type synack_type);
4625c9f3023SJoe Perches int tcp_disconnect(struct sock *sk, int flags);
4631da177e4SLinus Torvalds 
464370816aeSPavel Emelyanov void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
465292e8d8cSPavel Emelyanov int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
46663d02d15SEric Dumazet void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
4671da177e4SLinus Torvalds 
4681da177e4SLinus Torvalds /* From syncookies.c */
469b80c0e78SEric Dumazet struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
470b80c0e78SEric Dumazet 				 struct request_sock *req,
471b80c0e78SEric Dumazet 				 struct dst_entry *dst);
4725c9f3023SJoe Perches int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
4730198230bSPatrick McHardy 		      u32 cookie);
474461b74c3SCong Wang struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
475e05c82d3SEric Dumazet #ifdef CONFIG_SYN_COOKIES
4768c27bd75SFlorian Westphal 
47763262315SEric Dumazet /* Syncookies use a monotonic timer which increments every 60 seconds.
4788c27bd75SFlorian Westphal  * This counter is used both as a hash input and partially encoded into
4798c27bd75SFlorian Westphal  * the cookie value.  A cookie is only validated further if the delta
4808c27bd75SFlorian Westphal  * between the current counter value and the encoded one is less than this,
48163262315SEric Dumazet  * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
4828c27bd75SFlorian Westphal  * the counter advances immediately after a cookie is generated).
4838c27bd75SFlorian Westphal  */
4848c27bd75SFlorian Westphal #define MAX_SYNCOOKIE_AGE	2
485264ea103SEric Dumazet #define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
486264ea103SEric Dumazet #define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
487264ea103SEric Dumazet 
488264ea103SEric Dumazet /* syncookies: remember time of last synqueue overflow,
489264ea103SEric Dumazet  * but do not dirty this field too often (once per second is enough).
4903f684b4bSEric Dumazet  * It is racy as we do not hold a lock, but the race is very minor.
491264ea103SEric Dumazet  */
4923f684b4bSEric Dumazet static inline void tcp_synq_overflow(const struct sock *sk)
493264ea103SEric Dumazet {
494264ea103SEric Dumazet 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
495264ea103SEric Dumazet 	unsigned long now = jiffies;
496264ea103SEric Dumazet 
497264ea103SEric Dumazet 	if (time_after(now, last_overflow + HZ))
498264ea103SEric Dumazet 		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
499264ea103SEric Dumazet }
500264ea103SEric Dumazet 
501264ea103SEric Dumazet /* syncookies: no recent synqueue overflow on this listening socket? */
502264ea103SEric Dumazet static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
503264ea103SEric Dumazet {
504264ea103SEric Dumazet 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
505264ea103SEric Dumazet 
506264ea103SEric Dumazet 	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
507264ea103SEric Dumazet }
5088c27bd75SFlorian Westphal 
5098c27bd75SFlorian Westphal static inline u32 tcp_cookie_time(void)
5108c27bd75SFlorian Westphal {
51163262315SEric Dumazet 	u64 val = get_jiffies_64();
51263262315SEric Dumazet 
513264ea103SEric Dumazet 	do_div(val, TCP_SYNCOOKIE_PERIOD);
51463262315SEric Dumazet 	return val;
5158c27bd75SFlorian Westphal }
5168c27bd75SFlorian Westphal 
5175c9f3023SJoe Perches u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
5185c9f3023SJoe Perches 			      u16 *mssp);
5193f684b4bSEric Dumazet __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
5205c9f3023SJoe Perches __u32 cookie_init_timestamp(struct request_sock *req);
521f1673381SFlorian Westphal bool cookie_timestamp_decode(struct tcp_options_received *opt);
522f1673381SFlorian Westphal bool cookie_ecn_ok(const struct tcp_options_received *opt,
523f7b3bec6SFlorian Westphal 		   const struct net *net, const struct dst_entry *dst);
5244dfc2817SFlorian Westphal 
525c6aefafbSGlenn Griffin /* From net/ipv6/syncookies.c */
5265c9f3023SJoe Perches int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
52781eb6a14SPatrick McHardy 		      u32 cookie);
5285c9f3023SJoe Perches struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
529f1673381SFlorian Westphal 
5305c9f3023SJoe Perches u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
53181eb6a14SPatrick McHardy 			      const struct tcphdr *th, u16 *mssp);
5323f684b4bSEric Dumazet __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
533e05c82d3SEric Dumazet #endif
5341da177e4SLinus Torvalds /* tcp_output.c */
5351da177e4SLinus Torvalds 
5361b3878caSNeal Cardwell u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
5371b3878caSNeal Cardwell 		     int min_tso_segs);
5385c9f3023SJoe Perches void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
5399e412ba7SIlpo Järvinen 			       int nonagle);
5405c9f3023SJoe Perches bool tcp_may_send_now(struct sock *sk);
54110d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
54210d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
5435c9f3023SJoe Perches void tcp_retransmit_timer(struct sock *sk);
5445c9f3023SJoe Perches void tcp_xmit_retransmit_queue(struct sock *);
5455c9f3023SJoe Perches void tcp_simple_retransmit(struct sock *);
546*57dde7f7SYuchung Cheng void tcp_enter_recovery(struct sock *sk, bool ece_ack);
5475c9f3023SJoe Perches int tcp_trim_head(struct sock *, struct sk_buff *, u32);
5486cc55e09SOctavian Purdila int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
5491da177e4SLinus Torvalds 
5505c9f3023SJoe Perches void tcp_send_probe0(struct sock *);
5515c9f3023SJoe Perches void tcp_send_partial(struct sock *);
552e520af48SEric Dumazet int tcp_write_wakeup(struct sock *, int mib);
5535c9f3023SJoe Perches void tcp_send_fin(struct sock *sk);
5545c9f3023SJoe Perches void tcp_send_active_reset(struct sock *sk, gfp_t priority);
5555c9f3023SJoe Perches int tcp_send_synack(struct sock *);
5565c9f3023SJoe Perches void tcp_push_one(struct sock *, unsigned int mss_now);
5575c9f3023SJoe Perches void tcp_send_ack(struct sock *sk);
5585c9f3023SJoe Perches void tcp_send_delayed_ack(struct sock *sk);
5595c9f3023SJoe Perches void tcp_send_loss_probe(struct sock *sk);
5605c9f3023SJoe Perches bool tcp_schedule_loss_probe(struct sock *sk);
561cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
562cfea5a68SMartin KaFai Lau 			     const struct sk_buff *next_skb);
5631da177e4SLinus Torvalds 
564a762a980SDavid S. Miller /* tcp_input.c */
5655c9f3023SJoe Perches void tcp_resume_early_retransmit(struct sock *sk);
5665c9f3023SJoe Perches void tcp_rearm_rto(struct sock *sk);
5670f1c28aeSYuchung Cheng void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
5685c9f3023SJoe Perches void tcp_reset(struct sock *sk);
5694f41b1c5SYuchung Cheng void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
570e3e17b77SEric Dumazet void tcp_fin(struct sock *sk);
571a762a980SDavid S. Miller 
5721da177e4SLinus Torvalds /* tcp_timer.c */
5735c9f3023SJoe Perches void tcp_init_xmit_timers(struct sock *);
574463c84b9SArnaldo Carvalho de Melo static inline void tcp_clear_xmit_timers(struct sock *sk)
575463c84b9SArnaldo Carvalho de Melo {
576463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timers(sk);
577463c84b9SArnaldo Carvalho de Melo }
5781da177e4SLinus Torvalds 
5795c9f3023SJoe Perches unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
5805c9f3023SJoe Perches unsigned int tcp_current_mss(struct sock *sk);
5810c54b85fSIlpo Järvinen 
5820c54b85fSIlpo Järvinen /* Bound MSS / TSO packet size to half of the window */
5830c54b85fSIlpo Järvinen static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
5840c54b85fSIlpo Järvinen {
58501f83d69SAlexey Kuznetsov 	int cutoff;
58601f83d69SAlexey Kuznetsov 
58701f83d69SAlexey Kuznetsov 	/* When peer uses tiny windows, there is no use in packetizing
58801f83d69SAlexey Kuznetsov 	 * to sub-MSS pieces for the sake of SWS or making sure there
58901f83d69SAlexey Kuznetsov 	 * are enough packets in the pipe for fast recovery.
59001f83d69SAlexey Kuznetsov 	 *
59101f83d69SAlexey Kuznetsov 	 * On the other hand, for extremely large MSS devices, handling
59201f83d69SAlexey Kuznetsov 	 * smaller than MSS windows in this way does make sense.
59301f83d69SAlexey Kuznetsov 	 */
5942631b79fSSeymour, Shane M 	if (tp->max_window > TCP_MSS_DEFAULT)
59501f83d69SAlexey Kuznetsov 		cutoff = (tp->max_window >> 1);
59601f83d69SAlexey Kuznetsov 	else
59701f83d69SAlexey Kuznetsov 		cutoff = tp->max_window;
59801f83d69SAlexey Kuznetsov 
59901f83d69SAlexey Kuznetsov 	if (cutoff && pktsize > cutoff)
60001f83d69SAlexey Kuznetsov 		return max_t(int, cutoff, 68U - tp->tcp_header_len);
6010c54b85fSIlpo Järvinen 	else
6020c54b85fSIlpo Järvinen 		return pktsize;
6030c54b85fSIlpo Järvinen }
6041da177e4SLinus Torvalds 
60517b085eaSArnaldo Carvalho de Melo /* tcp.c */
6060df48c26SEric Dumazet void tcp_get_info(struct sock *, struct tcp_info *);
6071da177e4SLinus Torvalds 
6081da177e4SLinus Torvalds /* Read 'sendfile()'-style from a TCP socket */
6095c9f3023SJoe Perches int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
6101da177e4SLinus Torvalds 		  sk_read_actor_t recv_actor);
6111da177e4SLinus Torvalds 
6125c9f3023SJoe Perches void tcp_initialize_rcv_mss(struct sock *sk);
6131da177e4SLinus Torvalds 
6145c9f3023SJoe Perches int tcp_mtu_to_mss(struct sock *sk, int pmtu);
6155c9f3023SJoe Perches int tcp_mss_to_mtu(struct sock *sk, int mss);
6165c9f3023SJoe Perches void tcp_mtup_init(struct sock *sk);
6175c9f3023SJoe Perches void tcp_init_buffer_space(struct sock *sk);
6185d424d5aSJohn Heffner 
619f1ecd5d9SDamian Lukowski static inline void tcp_bound_rto(const struct sock *sk)
620f1ecd5d9SDamian Lukowski {
621f1ecd5d9SDamian Lukowski 	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
622f1ecd5d9SDamian Lukowski 		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
623f1ecd5d9SDamian Lukowski }
624f1ecd5d9SDamian Lukowski 
625f1ecd5d9SDamian Lukowski static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
626f1ecd5d9SDamian Lukowski {
627740b0f18SEric Dumazet 	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
628f1ecd5d9SDamian Lukowski }
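/* Illustrative note (not part of the original header): tp->srtt_us keeps
 * SRTT scaled by 8, so (srtt_us >> 3) recovers SRTT in usec; the variance
 * term is maintained pre-scaled by the estimator in tcp_input.c, so the
 * sum approximates the RFC 6298 RTO = SRTT + 4*RTTVAR, later clamped by
 * tcp_bound_rto() to TCP_RTO_MAX.
 */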
629f1ecd5d9SDamian Lukowski 
63040efc6faSStephen Hemminger static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
6311da177e4SLinus Torvalds {
6321da177e4SLinus Torvalds 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
6331da177e4SLinus Torvalds 			       ntohl(TCP_FLAG_ACK) |
6341da177e4SLinus Torvalds 			       snd_wnd);
6351da177e4SLinus Torvalds }
6361da177e4SLinus Torvalds 
63740efc6faSStephen Hemminger static inline void tcp_fast_path_on(struct tcp_sock *tp)
6381da177e4SLinus Torvalds {
6391da177e4SLinus Torvalds 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
6401da177e4SLinus Torvalds }
6411da177e4SLinus Torvalds 
6429e412ba7SIlpo Järvinen static inline void tcp_fast_path_check(struct sock *sk)
6431da177e4SLinus Torvalds {
6449e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
6459e412ba7SIlpo Järvinen 
6469f5afeaeSYaogong Wang 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
6471da177e4SLinus Torvalds 	    tp->rcv_wnd &&
6481da177e4SLinus Torvalds 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
6491da177e4SLinus Torvalds 	    !tp->urg_data)
6501da177e4SLinus Torvalds 		tcp_fast_path_on(tp);
6511da177e4SLinus Torvalds }
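/* Descriptive note (not part of the original header): pred_flags is the
 * receiver's precomputed copy of the 4th 32-bit word of the expected TCP
 * header (data offset, ACK flag, window); the header-prediction fast path
 * in tcp_rcv_established() compares it against incoming segments, and
 * tcp_fast_path_check() only re-arms it once the out-of-order queue is
 * empty, a receive window is offered, the receive buffer is not exhausted
 * and no urgent data is pending.
 */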
6521da177e4SLinus Torvalds 
6530c266898SSatoru SATOH /* Compute the actual rto_min value */
6540c266898SSatoru SATOH static inline u32 tcp_rto_min(struct sock *sk)
6550c266898SSatoru SATOH {
656cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
6570c266898SSatoru SATOH 	u32 rto_min = TCP_RTO_MIN;
6580c266898SSatoru SATOH 
6590c266898SSatoru SATOH 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
6600c266898SSatoru SATOH 		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
6610c266898SSatoru SATOH 	return rto_min;
6620c266898SSatoru SATOH }
6630c266898SSatoru SATOH 
664740b0f18SEric Dumazet static inline u32 tcp_rto_min_us(struct sock *sk)
665740b0f18SEric Dumazet {
666740b0f18SEric Dumazet 	return jiffies_to_usecs(tcp_rto_min(sk));
667740b0f18SEric Dumazet }
668740b0f18SEric Dumazet 
66981164413SDaniel Borkmann static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
67081164413SDaniel Borkmann {
67181164413SDaniel Borkmann 	return dst_metric_locked(dst, RTAX_CC_ALGO);
67281164413SDaniel Borkmann }
67381164413SDaniel Borkmann 
674f6722583SYuchung Cheng /* Minimum RTT in usec. ~0 means not available. */
675f6722583SYuchung Cheng static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
676f6722583SYuchung Cheng {
67764033892SNeal Cardwell 	return minmax_get(&tp->rtt_min);
678f6722583SYuchung Cheng }
679f6722583SYuchung Cheng 
6801da177e4SLinus Torvalds /* Compute the actual receive window we are currently advertising.
6811da177e4SLinus Torvalds  * Rcv_nxt can be after the window if our peer pushes more data
6821da177e4SLinus Torvalds  * than the offered window.
6831da177e4SLinus Torvalds  */
68440efc6faSStephen Hemminger static inline u32 tcp_receive_window(const struct tcp_sock *tp)
6851da177e4SLinus Torvalds {
6861da177e4SLinus Torvalds 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
6871da177e4SLinus Torvalds 
6881da177e4SLinus Torvalds 	if (win < 0)
6891da177e4SLinus Torvalds 		win = 0;
6901da177e4SLinus Torvalds 	return (u32) win;
6911da177e4SLinus Torvalds }
6921da177e4SLinus Torvalds 
6931da177e4SLinus Torvalds /* Choose a new window, without checks for shrinking, and without
6941da177e4SLinus Torvalds  * scaling applied to the result.  The caller does these things
6951da177e4SLinus Torvalds  * if necessary.  This is a "raw" window selection.
6961da177e4SLinus Torvalds  */
6975c9f3023SJoe Perches u32 __tcp_select_window(struct sock *sk);
6981da177e4SLinus Torvalds 
699ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk);
700ee995283SPavel Emelyanov 
7011da177e4SLinus Torvalds /* TCP timestamps are only 32 bits; this causes a slight
7021da177e4SLinus Torvalds  * complication on 64-bit systems since we store a snapshot
70331f34269SStephen Hemminger  * of jiffies in the buffer control blocks below.  We decided
70431f34269SStephen Hemminger  * to use only the low 32-bits of jiffies and hide the ugly
7051da177e4SLinus Torvalds  * casts with the following macro.
7061da177e4SLinus Torvalds  */
7071da177e4SLinus Torvalds #define tcp_time_stamp		((__u32)(jiffies))
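/* Illustrative arithmetic (not part of the original header): with
 * HZ == 1000 the low 32 bits of jiffies advance once per millisecond and
 * wrap after 2^32 ms, i.e. roughly every 49.7 days; TCP_PAWS_24DAYS above
 * is about half of that wrap period.
 */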
7081da177e4SLinus Torvalds 
7097faee5c0SEric Dumazet static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
7107faee5c0SEric Dumazet {
7117faee5c0SEric Dumazet 	return skb->skb_mstamp.stamp_jiffies;
7127faee5c0SEric Dumazet }
7137faee5c0SEric Dumazet 
7147faee5c0SEric Dumazet 
715a3433f35SChangli Gao #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
716a3433f35SChangli Gao 
717a3433f35SChangli Gao #define TCPHDR_FIN 0x01
718a3433f35SChangli Gao #define TCPHDR_SYN 0x02
719a3433f35SChangli Gao #define TCPHDR_RST 0x04
720a3433f35SChangli Gao #define TCPHDR_PSH 0x08
721a3433f35SChangli Gao #define TCPHDR_ACK 0x10
722a3433f35SChangli Gao #define TCPHDR_URG 0x20
723a3433f35SChangli Gao #define TCPHDR_ECE 0x40
724a3433f35SChangli Gao #define TCPHDR_CWR 0x80
725a3433f35SChangli Gao 
72649213555SDaniel Borkmann #define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
72749213555SDaniel Borkmann 
728caa20d9aSStephen Hemminger /* This is what the send packet queuing engine uses to pass
729f86586faSEric Dumazet  * TCP per-packet control information to the transmission code.
730f86586faSEric Dumazet  * We also store the host-order sequence numbers in here.
731f86586faSEric Dumazet  * This is 44 bytes if IPV6 is enabled.
732f86586faSEric Dumazet  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
7331da177e4SLinus Torvalds  */
7341da177e4SLinus Torvalds struct tcp_skb_cb {
7351da177e4SLinus Torvalds 	__u32		seq;		/* Starting sequence number	*/
7361da177e4SLinus Torvalds 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
737cd7d8498SEric Dumazet 	union {
738cd7d8498SEric Dumazet 		/* Note : tcp_tw_isn is used in input path only
739cd7d8498SEric Dumazet 		 *	  (isn chosen by tcp_timewait_state_process())
740cd7d8498SEric Dumazet 		 *
741f69ad292SEric Dumazet 		 * 	  tcp_gso_segs/size are used in write queue only,
742f69ad292SEric Dumazet 		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
743cd7d8498SEric Dumazet 		 */
744cd7d8498SEric Dumazet 		__u32		tcp_tw_isn;
745f69ad292SEric Dumazet 		struct {
746f69ad292SEric Dumazet 			u16	tcp_gso_segs;
747f69ad292SEric Dumazet 			u16	tcp_gso_size;
748f69ad292SEric Dumazet 		};
749cd7d8498SEric Dumazet 	};
7504de075e0SEric Dumazet 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
751f4f9f6e7SNeal Cardwell 
7521da177e4SLinus Torvalds 	__u8		sacked;		/* State flags for SACK/FACK.	*/
7531da177e4SLinus Torvalds #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
7541da177e4SLinus Torvalds #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
7551da177e4SLinus Torvalds #define TCPCB_LOST		0x04	/* SKB is lost			*/
7561da177e4SLinus Torvalds #define TCPCB_TAGBITS		0x07	/* All tag bits			*/
7579d186cacSAndrey Vagin #define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
7581da177e4SLinus Torvalds #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
7599d186cacSAndrey Vagin #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
7609d186cacSAndrey Vagin 				TCPCB_REPAIRED)
7611da177e4SLinus Torvalds 
762f4f9f6e7SNeal Cardwell 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
7636b084928SSoheil Hassas Yeganeh 	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
764c134ecb8SMartin KaFai Lau 			eor:1,		/* Is skb MSG_EOR marked? */
765c134ecb8SMartin KaFai Lau 			unused:6;
7661da177e4SLinus Torvalds 	__u32		ack_seq;	/* Sequence number ACK'd	*/
767971f10ecSEric Dumazet 	union {
768b75803d5SLawrence Brakmo 		struct {
769b9f64820SYuchung Cheng 			/* There is space for up to 24 bytes */
770d7722e85SSoheil Hassas Yeganeh 			__u32 in_flight:30,/* Bytes in flight at transmit */
771d7722e85SSoheil Hassas Yeganeh 			      is_app_limited:1, /* cwnd not fully used? */
772d7722e85SSoheil Hassas Yeganeh 			      unused:1;
773b9f64820SYuchung Cheng 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
774b9f64820SYuchung Cheng 			__u32 delivered;
775b9f64820SYuchung Cheng 			/* start of send pipeline phase */
776b9f64820SYuchung Cheng 			struct skb_mstamp first_tx_mstamp;
777b9f64820SYuchung Cheng 			/* when we reached the "delivered" count */
778b9f64820SYuchung Cheng 			struct skb_mstamp delivered_mstamp;
779b75803d5SLawrence Brakmo 		} tx;   /* only used for outgoing skbs */
780b75803d5SLawrence Brakmo 		union {
781971f10ecSEric Dumazet 			struct inet_skb_parm	h4;
782971f10ecSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
783971f10ecSEric Dumazet 			struct inet6_skb_parm	h6;
784971f10ecSEric Dumazet #endif
785b75803d5SLawrence Brakmo 		} header;	/* For incoming skbs */
786b75803d5SLawrence Brakmo 	};
7871da177e4SLinus Torvalds };
7881da177e4SLinus Torvalds 
7891da177e4SLinus Torvalds #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
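/* Descriptive note (not part of the original header): the cast above
 * overlays struct tcp_skb_cb on the 48-byte skb->cb[] scratch area, which
 * is why the comment before the struct asks that growth be coordinated
 * with skbuff.h; net/ipv4/tcp.c guards this at build time with a check
 * along the lines of
 *	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(((struct sk_buff *)NULL)->cb));
 */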
7901da177e4SLinus Torvalds 
791870c3151SEric Dumazet 
792815afe17SEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
793870c3151SEric Dumazet /* This is the variant of inet6_iif() that must be used by TCP,
794870c3151SEric Dumazet  * as TCP moves IP6CB into a different location in skb->cb[]
795870c3151SEric Dumazet  */
796870c3151SEric Dumazet static inline int tcp_v6_iif(const struct sk_buff *skb)
797870c3151SEric Dumazet {
798a04a480dSDavid Ahern 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
79974b20582SDavid Ahern 
80074b20582SDavid Ahern 	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
801870c3151SEric Dumazet }
802815afe17SEric Dumazet #endif
803870c3151SEric Dumazet 
804a04a480dSDavid Ahern /* TCP_SKB_CB reference means this can not be used from early demux */
805a04a480dSDavid Ahern static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
806a04a480dSDavid Ahern {
807a04a480dSDavid Ahern #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
808a04a480dSDavid Ahern 	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
809da96786eSDavid Ahern 	    skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
810a04a480dSDavid Ahern 		return true;
811a04a480dSDavid Ahern #endif
812a04a480dSDavid Ahern 	return false;
813a04a480dSDavid Ahern }
814a04a480dSDavid Ahern 
8151da177e4SLinus Torvalds /* Due to TSO, an SKB can be composed of multiple actual
8161da177e4SLinus Torvalds  * packets.  To keep these tracked properly, we use this.
8171da177e4SLinus Torvalds  */
8181da177e4SLinus Torvalds static inline int tcp_skb_pcount(const struct sk_buff *skb)
8191da177e4SLinus Torvalds {
820cd7d8498SEric Dumazet 	return TCP_SKB_CB(skb)->tcp_gso_segs;
821cd7d8498SEric Dumazet }
822cd7d8498SEric Dumazet 
823cd7d8498SEric Dumazet static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
824cd7d8498SEric Dumazet {
825cd7d8498SEric Dumazet 	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
826cd7d8498SEric Dumazet }
827cd7d8498SEric Dumazet 
828cd7d8498SEric Dumazet static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
829cd7d8498SEric Dumazet {
830cd7d8498SEric Dumazet 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
8311da177e4SLinus Torvalds }
8321da177e4SLinus Torvalds 
833f69ad292SEric Dumazet /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
8341da177e4SLinus Torvalds static inline int tcp_skb_mss(const struct sk_buff *skb)
8351da177e4SLinus Torvalds {
836f69ad292SEric Dumazet 	return TCP_SKB_CB(skb)->tcp_gso_size;
8371da177e4SLinus Torvalds }
8381da177e4SLinus Torvalds 
839c134ecb8SMartin KaFai Lau static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
840c134ecb8SMartin KaFai Lau {
841c134ecb8SMartin KaFai Lau 	return likely(!TCP_SKB_CB(skb)->eor);
842c134ecb8SMartin KaFai Lau }
843c134ecb8SMartin KaFai Lau 
844317a76f9SStephen Hemminger /* Events passed to congestion control interface */
845317a76f9SStephen Hemminger enum tcp_ca_event {
846317a76f9SStephen Hemminger 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
847317a76f9SStephen Hemminger 	CA_EVENT_CWND_RESTART,	/* congestion window restart */
848317a76f9SStephen Hemminger 	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
849317a76f9SStephen Hemminger 	CA_EVENT_LOSS,		/* loss timeout */
8509890092eSFlorian Westphal 	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
8519890092eSFlorian Westphal 	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
8529890092eSFlorian Westphal 	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
8539890092eSFlorian Westphal 	CA_EVENT_NON_DELAYED_ACK,
8547354c8c3SFlorian Westphal };
8557354c8c3SFlorian Westphal 
8569890092eSFlorian Westphal /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
8577354c8c3SFlorian Westphal enum tcp_ca_ack_event_flags {
8589890092eSFlorian Westphal 	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
8599890092eSFlorian Westphal 	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
8609890092eSFlorian Westphal 	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
861317a76f9SStephen Hemminger };
862317a76f9SStephen Hemminger 
863317a76f9SStephen Hemminger /*
864317a76f9SStephen Hemminger  * Interface for adding new TCP congestion control handlers
865317a76f9SStephen Hemminger  */
866317a76f9SStephen Hemminger #define TCP_CA_NAME_MAX	16
8673ff825b2SStephen Hemminger #define TCP_CA_MAX	128
8683ff825b2SStephen Hemminger #define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
8693ff825b2SStephen Hemminger 
870c5c6a8abSDaniel Borkmann #define TCP_CA_UNSPEC	0
871c5c6a8abSDaniel Borkmann 
87230e502a3SDaniel Borkmann /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
873164891aaSStephen Hemminger #define TCP_CONG_NON_RESTRICTED 0x1
87430e502a3SDaniel Borkmann /* Requires ECN/ECT set on all packets */
87530e502a3SDaniel Borkmann #define TCP_CONG_NEEDS_ECN	0x2
876164891aaSStephen Hemminger 
87764f40ff5SEric Dumazet union tcp_cc_info;
87864f40ff5SEric Dumazet 
879756ee172SLawrence Brakmo struct ack_sample {
880756ee172SLawrence Brakmo 	u32 pkts_acked;
881756ee172SLawrence Brakmo 	s32 rtt_us;
8826f094b9eSLawrence Brakmo 	u32 in_flight;
883756ee172SLawrence Brakmo };
884756ee172SLawrence Brakmo 
885b9f64820SYuchung Cheng /* A rate sample measures the number of (original/retransmitted) data
886b9f64820SYuchung Cheng  * packets delivered "delivered" over an interval of time "interval_us".
887b9f64820SYuchung Cheng  * The tcp_rate.c code fills in the rate sample, and congestion
888b9f64820SYuchung Cheng  * control modules that define a cong_control function to run at the end
889b9f64820SYuchung Cheng  * of ACK processing can optionally choose to consult this sample when
890b9f64820SYuchung Cheng  * setting cwnd and pacing rate.
891b9f64820SYuchung Cheng  * A sample is invalid if "delivered" or "interval_us" is negative.
892b9f64820SYuchung Cheng  */
893b9f64820SYuchung Cheng struct rate_sample {
894b9f64820SYuchung Cheng 	struct	skb_mstamp prior_mstamp; /* starting timestamp for interval */
895b9f64820SYuchung Cheng 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
896b9f64820SYuchung Cheng 	s32  delivered;		/* number of packets delivered over interval */
897b9f64820SYuchung Cheng 	long interval_us;	/* time for tp->delivered to incr "delivered" */
898b9f64820SYuchung Cheng 	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
899b9f64820SYuchung Cheng 	int  losses;		/* number of packets marked lost upon ACK */
900b9f64820SYuchung Cheng 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
901b9f64820SYuchung Cheng 	u32  prior_in_flight;	/* in flight before this ACK */
902d7722e85SSoheil Hassas Yeganeh 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
903b9f64820SYuchung Cheng 	bool is_retrans;	/* is sample from retransmission? */
904b9f64820SYuchung Cheng };
905b9f64820SYuchung Cheng 
906317a76f9SStephen Hemminger struct tcp_congestion_ops {
907317a76f9SStephen Hemminger 	struct list_head	list;
908c5c6a8abSDaniel Borkmann 	u32 key;
909c5c6a8abSDaniel Borkmann 	u32 flags;
910317a76f9SStephen Hemminger 
911317a76f9SStephen Hemminger 	/* initialize private data (optional) */
9126687e988SArnaldo Carvalho de Melo 	void (*init)(struct sock *sk);
913317a76f9SStephen Hemminger 	/* cleanup private data  (optional) */
9146687e988SArnaldo Carvalho de Melo 	void (*release)(struct sock *sk);
915317a76f9SStephen Hemminger 
916317a76f9SStephen Hemminger 	/* return slow start threshold (required) */
9176687e988SArnaldo Carvalho de Melo 	u32 (*ssthresh)(struct sock *sk);
918317a76f9SStephen Hemminger 	/* do new cwnd calculation (required) */
91924901551SEric Dumazet 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
920317a76f9SStephen Hemminger 	/* call before changing ca_state (optional) */
9216687e988SArnaldo Carvalho de Melo 	void (*set_state)(struct sock *sk, u8 new_state);
922317a76f9SStephen Hemminger 	/* call when cwnd event occurs (optional) */
9236687e988SArnaldo Carvalho de Melo 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
9247354c8c3SFlorian Westphal 	/* call when ack arrives (optional) */
9257354c8c3SFlorian Westphal 	void (*in_ack_event)(struct sock *sk, u32 flags);
926317a76f9SStephen Hemminger 	/* new value of cwnd after loss (optional) */
9276687e988SArnaldo Carvalho de Melo 	u32  (*undo_cwnd)(struct sock *sk);
928317a76f9SStephen Hemminger 	/* hook for packet ack accounting (optional) */
929756ee172SLawrence Brakmo 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
930ed6e7268SNeal Cardwell 	/* suggest number of segments for each skb to transmit (optional) */
931ed6e7268SNeal Cardwell 	u32 (*tso_segs_goal)(struct sock *sk);
93277bfc174SYuchung Cheng 	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
93377bfc174SYuchung Cheng 	u32 (*sndbuf_expand)(struct sock *sk);
934c0402760SYuchung Cheng 	/* call when packets are delivered to update cwnd and pacing rate,
935c0402760SYuchung Cheng 	 * after all the ca_state processing. (optional)
936c0402760SYuchung Cheng 	 */
937c0402760SYuchung Cheng 	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
93873c1f4a0SArnaldo Carvalho de Melo 	/* get info for inet_diag (optional) */
93964f40ff5SEric Dumazet 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
94064f40ff5SEric Dumazet 			   union tcp_cc_info *info);
941317a76f9SStephen Hemminger 
942317a76f9SStephen Hemminger 	char 		name[TCP_CA_NAME_MAX];
943317a76f9SStephen Hemminger 	struct module 	*owner;
944317a76f9SStephen Hemminger };
945317a76f9SStephen Hemminger 
9465c9f3023SJoe Perches int tcp_register_congestion_control(struct tcp_congestion_ops *type);
9475c9f3023SJoe Perches void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
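/* Illustrative sketch (not part of the original header): a minimal
 * congestion control module only has to provide the hooks marked
 * "required" above and register itself. The example_* names below are
 * hypothetical; tcp_reno_cong_avoid(), tcp_reno_undo_cwnd() and
 * tcp_register_congestion_control() are the helpers declared in this
 * header.
 */
#if 0	/* example only, never compiled */
static u32 example_ssthresh(struct sock *sk)
{
	/* Halve cwnd on loss, but never go below 2 segments (Reno-style). */
	return max(tcp_sk(sk)->snd_cwnd >> 1U, 2U);
}

static struct tcp_congestion_ops example_cc __read_mostly = {
	.ssthresh	= example_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.owner		= THIS_MODULE,
	.name		= "example",
};

static int __init example_cc_register(void)
{
	/* Make the module selectable via net.ipv4.tcp_congestion_control. */
	return tcp_register_congestion_control(&example_cc);
}
module_init(example_cc_register);
#endif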
948317a76f9SStephen Hemminger 
94955d8694fSFlorian Westphal void tcp_assign_congestion_control(struct sock *sk);
9505c9f3023SJoe Perches void tcp_init_congestion_control(struct sock *sk);
9515c9f3023SJoe Perches void tcp_cleanup_congestion_control(struct sock *sk);
9525c9f3023SJoe Perches int tcp_set_default_congestion_control(const char *name);
9535c9f3023SJoe Perches void tcp_get_default_congestion_control(char *name);
9545c9f3023SJoe Perches void tcp_get_available_congestion_control(char *buf, size_t len);
9555c9f3023SJoe Perches void tcp_get_allowed_congestion_control(char *buf, size_t len);
9565c9f3023SJoe Perches int tcp_set_allowed_congestion_control(char *allowed);
9575c9f3023SJoe Perches int tcp_set_congestion_control(struct sock *sk, const char *name);
958e73ebb08SNeal Cardwell u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
959e73ebb08SNeal Cardwell void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
960317a76f9SStephen Hemminger 
9615c9f3023SJoe Perches u32 tcp_reno_ssthresh(struct sock *sk);
962e9799183SFlorian Westphal u32 tcp_reno_undo_cwnd(struct sock *sk);
96324901551SEric Dumazet void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
964a8acfbacSDavid S. Miller extern struct tcp_congestion_ops tcp_reno;
965317a76f9SStephen Hemminger 
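/*
 * Illustrative sketch, not part of this header: a minimal congestion control
 * module wiring up the required tcp_congestion_ops hooks and registering
 * itself.  It simply reuses the Reno helpers declared above; the names
 * "cc_example", tcp_cc_example and the init/exit functions are made up.
 * A real module would also include <linux/module.h> and carry
 * MODULE_LICENSE() and friends.
 */
static struct tcp_congestion_ops tcp_cc_example __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* optional */
	.owner		= THIS_MODULE,
	.name		= "cc_example",
};

static int __init tcp_cc_example_register(void)
{
	/* Makes "cc_example" selectable via the net.ipv4.tcp_congestion_control
	 * sysctl and the TCP_CONGESTION socket option.
	 */
	return tcp_register_congestion_control(&tcp_cc_example);
}

static void __exit tcp_cc_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_cc_example);
}

module_init(tcp_cc_example_register);
module_exit(tcp_cc_example_unregister);
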
966c5c6a8abSDaniel Borkmann struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
967c3a8d947SDaniel Borkmann u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
968ea697639SDaniel Borkmann #ifdef CONFIG_INET
969c5c6a8abSDaniel Borkmann char *tcp_ca_get_name_by_key(u32 key, char *buffer);
970ea697639SDaniel Borkmann #else
971ea697639SDaniel Borkmann static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
972ea697639SDaniel Borkmann {
973ea697639SDaniel Borkmann 	return NULL;
974ea697639SDaniel Borkmann }
975ea697639SDaniel Borkmann #endif
976c5c6a8abSDaniel Borkmann 
97730e502a3SDaniel Borkmann static inline bool tcp_ca_needs_ecn(const struct sock *sk)
97830e502a3SDaniel Borkmann {
97930e502a3SDaniel Borkmann 	const struct inet_connection_sock *icsk = inet_csk(sk);
98030e502a3SDaniel Borkmann 
98130e502a3SDaniel Borkmann 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
98230e502a3SDaniel Borkmann }
98330e502a3SDaniel Borkmann 
9846687e988SArnaldo Carvalho de Melo static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
985317a76f9SStephen Hemminger {
9866687e988SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
9876687e988SArnaldo Carvalho de Melo 
9886687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_ops->set_state)
9896687e988SArnaldo Carvalho de Melo 		icsk->icsk_ca_ops->set_state(sk, ca_state);
9906687e988SArnaldo Carvalho de Melo 	icsk->icsk_ca_state = ca_state;
991317a76f9SStephen Hemminger }
992317a76f9SStephen Hemminger 
9936687e988SArnaldo Carvalho de Melo static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
994317a76f9SStephen Hemminger {
9956687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
9966687e988SArnaldo Carvalho de Melo 
9976687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_ops->cwnd_event)
9986687e988SArnaldo Carvalho de Melo 		icsk->icsk_ca_ops->cwnd_event(sk, event);
999317a76f9SStephen Hemminger }
1000317a76f9SStephen Hemminger 
1001b9f64820SYuchung Cheng /* From tcp_rate.c */
1002b9f64820SYuchung Cheng void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1003b9f64820SYuchung Cheng void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1004b9f64820SYuchung Cheng 			    struct rate_sample *rs);
1005b9f64820SYuchung Cheng void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1006b9f64820SYuchung Cheng 		  struct skb_mstamp *now, struct rate_sample *rs);
1007d7722e85SSoheil Hassas Yeganeh void tcp_rate_check_app_limited(struct sock *sk);
1008b9f64820SYuchung Cheng 
1009e60402d0SIlpo Järvinen /* These functions determine how the current flow behaves with respect to SACK
1010e60402d0SIlpo Järvinen  * handling. SACK is negotiated with the peer, and therefore it can vary
1011e60402d0SIlpo Järvinen  * between different flows.
1012e60402d0SIlpo Järvinen  *
1013e60402d0SIlpo Järvinen  * tcp_is_sack - SACK enabled
1014e60402d0SIlpo Järvinen  * tcp_is_reno - No SACK
1015e60402d0SIlpo Järvinen  * tcp_is_fack - FACK enabled, implies SACK enabled
1016e60402d0SIlpo Järvinen  */
1017e60402d0SIlpo Järvinen static inline int tcp_is_sack(const struct tcp_sock *tp)
1018e60402d0SIlpo Järvinen {
1019e60402d0SIlpo Järvinen 	return tp->rx_opt.sack_ok;
1020e60402d0SIlpo Järvinen }
1021e60402d0SIlpo Järvinen 
1022a2a385d6SEric Dumazet static inline bool tcp_is_reno(const struct tcp_sock *tp)
1023e60402d0SIlpo Järvinen {
1024e60402d0SIlpo Järvinen 	return !tcp_is_sack(tp);
1025e60402d0SIlpo Järvinen }
1026e60402d0SIlpo Järvinen 
1027a2a385d6SEric Dumazet static inline bool tcp_is_fack(const struct tcp_sock *tp)
1028e60402d0SIlpo Järvinen {
1029ab56222aSVijay Subramanian 	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
1030e60402d0SIlpo Järvinen }
1031e60402d0SIlpo Järvinen 
1032e60402d0SIlpo Järvinen static inline void tcp_enable_fack(struct tcp_sock *tp)
1033e60402d0SIlpo Järvinen {
1034ab56222aSVijay Subramanian 	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
1035e60402d0SIlpo Järvinen }
1036e60402d0SIlpo Järvinen 
1037eed530b6SYuchung Cheng /* TCP early-retransmit (ER) is similar to but more conservative than
1038eed530b6SYuchung Cheng  * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
1039eed530b6SYuchung Cheng  */
1040eed530b6SYuchung Cheng static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
1041eed530b6SYuchung Cheng {
10421043e25fSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
10431043e25fSNikolay Borisov 
1044eed530b6SYuchung Cheng 	tp->do_early_retrans = sysctl_tcp_early_retrans &&
10456ba8a3b1SNandita Dukkipati 		sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
10461043e25fSNikolay Borisov 		net->ipv4.sysctl_tcp_reordering == 3;
1047eed530b6SYuchung Cheng }
1048eed530b6SYuchung Cheng 
1049eed530b6SYuchung Cheng static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
1050eed530b6SYuchung Cheng {
1051eed530b6SYuchung Cheng 	tp->do_early_retrans = 0;
1052eed530b6SYuchung Cheng }
1053eed530b6SYuchung Cheng 
105483ae4088SIlpo Järvinen static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
105583ae4088SIlpo Järvinen {
105683ae4088SIlpo Järvinen 	return tp->sacked_out + tp->lost_out;
105783ae4088SIlpo Järvinen }
105883ae4088SIlpo Järvinen 
10591da177e4SLinus Torvalds /* This determines how many packets are "in the network" to the best
10601da177e4SLinus Torvalds  * of our knowledge.  In many cases it is conservative, but where
10611da177e4SLinus Torvalds  * detailed information is available from the receiver (via SACK
10621da177e4SLinus Torvalds  * blocks etc.) we can make more aggressive calculations.
10631da177e4SLinus Torvalds  *
10641da177e4SLinus Torvalds  * Use this for decisions involving congestion control; use just
10651da177e4SLinus Torvalds  * tp->packets_out to determine whether the send queue is empty or not.
10661da177e4SLinus Torvalds  *
10671da177e4SLinus Torvalds  * Read this equation as:
10681da177e4SLinus Torvalds  *
10691da177e4SLinus Torvalds  *	"Packets sent once on transmission queue" MINUS
10701da177e4SLinus Torvalds  *	"Packets left network, but not honestly ACKed yet" PLUS
10711da177e4SLinus Torvalds  *	"Packets fast retransmitted"
10721da177e4SLinus Torvalds  */
107340efc6faSStephen Hemminger static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
10741da177e4SLinus Torvalds {
107583ae4088SIlpo Järvinen 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
10761da177e4SLinus Torvalds }
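
/*
 * Worked example (illustrative numbers only): with packets_out = 10,
 * sacked_out = 3, lost_out = 2 and retrans_out = 1 the estimate is
 * 10 - (3 + 2) + 1 = 6 packets still believed to be in the network.
 */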
10771da177e4SLinus Torvalds 
10780b6a05c1SIlpo Järvinen #define TCP_INFINITE_SSTHRESH	0x7fffffff
10790b6a05c1SIlpo Järvinen 
1080071d5080SYuchung Cheng static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1081071d5080SYuchung Cheng {
108276174004SYuchung Cheng 	return tp->snd_cwnd < tp->snd_ssthresh;
1083071d5080SYuchung Cheng }
1084071d5080SYuchung Cheng 
10850b6a05c1SIlpo Järvinen static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
10860b6a05c1SIlpo Järvinen {
10870b6a05c1SIlpo Järvinen 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
10880b6a05c1SIlpo Järvinen }
10890b6a05c1SIlpo Järvinen 
1090684bad11SYuchung Cheng static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1091684bad11SYuchung Cheng {
1092684bad11SYuchung Cheng 	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1093684bad11SYuchung Cheng 	       (1 << inet_csk(sk)->icsk_ca_state);
1094684bad11SYuchung Cheng }
1095684bad11SYuchung Cheng 
10961da177e4SLinus Torvalds /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1097684bad11SYuchung Cheng  * The exception is the cwnd reduction phase, when cwnd is decreasing towards
10981da177e4SLinus Torvalds  * ssthresh.
10991da177e4SLinus Torvalds  */
11006687e988SArnaldo Carvalho de Melo static inline __u32 tcp_current_ssthresh(const struct sock *sk)
11011da177e4SLinus Torvalds {
11026687e988SArnaldo Carvalho de Melo 	const struct tcp_sock *tp = tcp_sk(sk);
1103cf533ea5SEric Dumazet 
1104684bad11SYuchung Cheng 	if (tcp_in_cwnd_reduction(sk))
11051da177e4SLinus Torvalds 		return tp->snd_ssthresh;
11061da177e4SLinus Torvalds 	else
11071da177e4SLinus Torvalds 		return max(tp->snd_ssthresh,
11081da177e4SLinus Torvalds 			   ((tp->snd_cwnd >> 1) +
11091da177e4SLinus Torvalds 			    (tp->snd_cwnd >> 2)));
11101da177e4SLinus Torvalds }
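
/*
 * Worked example (illustrative numbers only): with snd_cwnd = 20,
 * (20 >> 1) + (20 >> 2) = 10 + 5 = 15 (three quarters of cwnd), so outside
 * of cwnd reduction we return max(snd_ssthresh, 15).
 */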
11111da177e4SLinus Torvalds 
1112b9c4595bSIlpo Järvinen /* Use a define here intentionally so the WARN_ON location is shown at the caller */
1113b9c4595bSIlpo Järvinen #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
11141da177e4SLinus Torvalds 
11155ee2c941SChristoph Paasch void tcp_enter_cwr(struct sock *sk);
11165c9f3023SJoe Perches __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
11171da177e4SLinus Torvalds 
11186b5a5c0dSNeal Cardwell /* The maximum number of MSS worth of available cwnd for which TSO defers
11196b5a5c0dSNeal Cardwell  * sending when sysctl_tcp_tso_win_divisor is not in use.
11206b5a5c0dSNeal Cardwell  */
11216b5a5c0dSNeal Cardwell static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
11226b5a5c0dSNeal Cardwell {
11236b5a5c0dSNeal Cardwell 	return 3;
11246b5a5c0dSNeal Cardwell }
11256b5a5c0dSNeal Cardwell 
112690840defSIlpo Järvinen /* Returns end sequence number of the receiver's advertised window */
112790840defSIlpo Järvinen static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
112890840defSIlpo Järvinen {
112990840defSIlpo Järvinen 	return tp->snd_una + tp->snd_wnd;
113090840defSIlpo Järvinen }
1131e114a710SEric Dumazet 
1132e114a710SEric Dumazet /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1133e114a710SEric Dumazet  * flexible approach. The RFC suggests cwnd should not be raised unless
1134ca8a2263SNeal Cardwell  * it was fully used previously. And that's exactly what we do in
1135ca8a2263SNeal Cardwell  * congestion avoidance mode. But in slow start we allow cwnd to grow
1136ca8a2263SNeal Cardwell  * as long as the application has used half the cwnd.
1137e114a710SEric Dumazet  * Example:
1138e114a710SEric Dumazet  *    cwnd is 10 (IW10), but application sends 9 frames.
1139e114a710SEric Dumazet  *    We allow cwnd to reach 18 when all frames are ACKed.
1140e114a710SEric Dumazet  * This check is safe because it's as aggressive as slow start, which already
1141e114a710SEric Dumazet  * risks 100% overshoot. The advantage is that we discourage applications from
1142e114a710SEric Dumazet  * sending more filler packets or data just to artificially blow up the cwnd
1143e114a710SEric Dumazet  * usage, and we allow application-limited processes to probe bw more aggressively.
1144e114a710SEric Dumazet  */
114524901551SEric Dumazet static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1146e114a710SEric Dumazet {
1147e114a710SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1148e114a710SEric Dumazet 
1149ca8a2263SNeal Cardwell 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1150071d5080SYuchung Cheng 	if (tcp_in_slow_start(tp))
1151ca8a2263SNeal Cardwell 		return tp->snd_cwnd < 2 * tp->max_packets_out;
1152ca8a2263SNeal Cardwell 
1153ca8a2263SNeal Cardwell 	return tp->is_cwnd_limited;
1154e114a710SEric Dumazet }
1155f4805edeSStephen Hemminger 
115621c8fe99SEric Dumazet /* Something is really bad: we could not queue an additional packet
115721c8fe99SEric Dumazet  * because the qdisc is full or the receiver sent a zero window.
115821c8fe99SEric Dumazet  * We do not want to add fuel to the fire, or abort too early,
115921c8fe99SEric Dumazet  * so make sure the timer we arm now is at least 200ms in the future,
116021c8fe99SEric Dumazet  * regardless of the current icsk_rto value (as it could be ~2ms).
116121c8fe99SEric Dumazet  */
116221c8fe99SEric Dumazet static inline unsigned long tcp_probe0_base(const struct sock *sk)
116321c8fe99SEric Dumazet {
116421c8fe99SEric Dumazet 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
116521c8fe99SEric Dumazet }
116621c8fe99SEric Dumazet 
116721c8fe99SEric Dumazet /* Variant of inet_csk_rto_backoff() used for zero window probes */
116821c8fe99SEric Dumazet static inline unsigned long tcp_probe0_when(const struct sock *sk,
116921c8fe99SEric Dumazet 					    unsigned long max_when)
117021c8fe99SEric Dumazet {
117121c8fe99SEric Dumazet 	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
117221c8fe99SEric Dumazet 
117321c8fe99SEric Dumazet 	return (unsigned long)min_t(u64, when, max_when);
117421c8fe99SEric Dumazet }
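
/*
 * Worked example (illustrative numbers only): with tcp_probe0_base() at
 * 200ms and icsk_backoff = 3, when = 200ms << 3 = 1.6s, which is then
 * clamped to max_when by the min_t() above.
 */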
117521c8fe99SEric Dumazet 
11769e412ba7SIlpo Järvinen static inline void tcp_check_probe_timer(struct sock *sk)
11771da177e4SLinus Torvalds {
117821c8fe99SEric Dumazet 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
11793f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
118021c8fe99SEric Dumazet 					  tcp_probe0_base(sk), TCP_RTO_MAX);
11811da177e4SLinus Torvalds }
11821da177e4SLinus Torvalds 
1183ee7537b6SHantzis Fotis static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
11841da177e4SLinus Torvalds {
11851da177e4SLinus Torvalds 	tp->snd_wl1 = seq;
11861da177e4SLinus Torvalds }
11871da177e4SLinus Torvalds 
1188ee7537b6SHantzis Fotis static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
11891da177e4SLinus Torvalds {
11901da177e4SLinus Torvalds 	tp->snd_wl1 = seq;
11911da177e4SLinus Torvalds }
11921da177e4SLinus Torvalds 
11931da177e4SLinus Torvalds /*
11941da177e4SLinus Torvalds  * Calculate(/check) TCP checksum
11951da177e4SLinus Torvalds  */
1196ba7808eaSFrederik Deweerdt static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1197ba7808eaSFrederik Deweerdt 				   __be32 daddr, __wsum base)
11981da177e4SLinus Torvalds {
11991da177e4SLinus Torvalds 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
12001da177e4SLinus Torvalds }
12011da177e4SLinus Torvalds 
1202b51655b9SAl Viro static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
12031da177e4SLinus Torvalds {
1204fb286bb2SHerbert Xu 	return __skb_checksum_complete(skb);
12051da177e4SLinus Torvalds }
12061da177e4SLinus Torvalds 
1207a2a385d6SEric Dumazet static inline bool tcp_checksum_complete(struct sk_buff *skb)
12081da177e4SLinus Torvalds {
120960476372SHerbert Xu 	return !skb_csum_unnecessary(skb) &&
12101da177e4SLinus Torvalds 		__tcp_checksum_complete(skb);
12111da177e4SLinus Torvalds }
12121da177e4SLinus Torvalds 
12131da177e4SLinus Torvalds /* Prequeue for VJ style copy to user, combined with checksumming. */
12141da177e4SLinus Torvalds 
121540efc6faSStephen Hemminger static inline void tcp_prequeue_init(struct tcp_sock *tp)
12161da177e4SLinus Torvalds {
12171da177e4SLinus Torvalds 	tp->ucopy.task = NULL;
12181da177e4SLinus Torvalds 	tp->ucopy.len = 0;
12191da177e4SLinus Torvalds 	tp->ucopy.memory = 0;
12201da177e4SLinus Torvalds 	skb_queue_head_init(&tp->ucopy.prequeue);
12211da177e4SLinus Torvalds }
12221da177e4SLinus Torvalds 
12235c9f3023SJoe Perches bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1224c9c33212SEric Dumazet bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1225ac6e7800SEric Dumazet int tcp_filter(struct sock *sk, struct sk_buff *skb);
12261da177e4SLinus Torvalds 
12271da177e4SLinus Torvalds #undef STATE_TRACE
12281da177e4SLinus Torvalds 
12291da177e4SLinus Torvalds #ifdef STATE_TRACE
12301da177e4SLinus Torvalds static const char *statename[]={
12311da177e4SLinus Torvalds 	"Unused","Established","Syn Sent","Syn Recv",
12321da177e4SLinus Torvalds 	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
12331da177e4SLinus Torvalds 	"Close Wait","Last ACK","Listen","Closing"
12341da177e4SLinus Torvalds };
12351da177e4SLinus Torvalds #endif
12365c9f3023SJoe Perches void tcp_set_state(struct sock *sk, int state);
12371da177e4SLinus Torvalds 
12385c9f3023SJoe Perches void tcp_done(struct sock *sk);
12391da177e4SLinus Torvalds 
1240c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err);
1241c1e64e29SLorenzo Colitti 
124240efc6faSStephen Hemminger static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
12431da177e4SLinus Torvalds {
12441da177e4SLinus Torvalds 	rx_opt->dsack = 0;
12451da177e4SLinus Torvalds 	rx_opt->num_sacks = 0;
12461da177e4SLinus Torvalds }
12471da177e4SLinus Torvalds 
12485c9f3023SJoe Perches u32 tcp_default_init_rwnd(u32 mss);
12496f021c62SEric Dumazet void tcp_cwnd_restart(struct sock *sk, s32 delta);
12506f021c62SEric Dumazet 
12516f021c62SEric Dumazet static inline void tcp_slow_start_after_idle_check(struct sock *sk)
12526f021c62SEric Dumazet {
12536f021c62SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
12546f021c62SEric Dumazet 	s32 delta;
12556f021c62SEric Dumazet 
12566f021c62SEric Dumazet 	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
12576f021c62SEric Dumazet 		return;
12586f021c62SEric Dumazet 	delta = tcp_time_stamp - tp->lsndtime;
12596f021c62SEric Dumazet 	if (delta > inet_csk(sk)->icsk_rto)
12606f021c62SEric Dumazet 		tcp_cwnd_restart(sk, delta);
12616f021c62SEric Dumazet }
126285f16525SYuchung Cheng 
12631da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer. */
12645c9f3023SJoe Perches void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
12655c9f3023SJoe Perches 			       __u32 *window_clamp, int wscale_ok,
12665c9f3023SJoe Perches 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
12671da177e4SLinus Torvalds 
12681da177e4SLinus Torvalds static inline int tcp_win_from_space(int space)
12691da177e4SLinus Torvalds {
12701da177e4SLinus Torvalds 	return sysctl_tcp_adv_win_scale <= 0 ?
12711da177e4SLinus Torvalds 		(space >> (-sysctl_tcp_adv_win_scale)) :
12721da177e4SLinus Torvalds 		space - (space >> sysctl_tcp_adv_win_scale);
12731da177e4SLinus Torvalds }
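
/*
 * Worked example (illustrative numbers only): with space = 65536 bytes and
 * sysctl_tcp_adv_win_scale = 1 the advertised window is 65536 - 32768 = 32768;
 * with a scale of 2 it is 65536 - 16384 = 49152.
 */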
12741da177e4SLinus Torvalds 
12751da177e4SLinus Torvalds /* Note: caller must be prepared to deal with negative returns */
12761da177e4SLinus Torvalds static inline int tcp_space(const struct sock *sk)
12771da177e4SLinus Torvalds {
12781da177e4SLinus Torvalds 	return tcp_win_from_space(sk->sk_rcvbuf -
12791da177e4SLinus Torvalds 				  atomic_read(&sk->sk_rmem_alloc));
12801da177e4SLinus Torvalds }
12811da177e4SLinus Torvalds 
12821da177e4SLinus Torvalds static inline int tcp_full_space(const struct sock *sk)
12831da177e4SLinus Torvalds {
12841da177e4SLinus Torvalds 	return tcp_win_from_space(sk->sk_rcvbuf);
12851da177e4SLinus Torvalds }
12861da177e4SLinus Torvalds 
1287843f4a55SYuchung Cheng extern void tcp_openreq_init_rwin(struct request_sock *req,
1288b1964b5fSEric Dumazet 				  const struct sock *sk_listener,
1289b1964b5fSEric Dumazet 				  const struct dst_entry *dst);
1290843f4a55SYuchung Cheng 
12915c9f3023SJoe Perches void tcp_enter_memory_pressure(struct sock *sk);
12921da177e4SLinus Torvalds 
12931da177e4SLinus Torvalds static inline int keepalive_intvl_when(const struct tcp_sock *tp)
12941da177e4SLinus Torvalds {
1295b840d15dSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
1296b840d15dSNikolay Borisov 
1297b840d15dSNikolay Borisov 	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
12981da177e4SLinus Torvalds }
12991da177e4SLinus Torvalds 
13001da177e4SLinus Torvalds static inline int keepalive_time_when(const struct tcp_sock *tp)
13011da177e4SLinus Torvalds {
130213b287e8SNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
130313b287e8SNikolay Borisov 
130413b287e8SNikolay Borisov 	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
13051da177e4SLinus Torvalds }
13061da177e4SLinus Torvalds 
1307df19a626SEric Dumazet static inline int keepalive_probes(const struct tcp_sock *tp)
1308df19a626SEric Dumazet {
13099bd6861bSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
13109bd6861bSNikolay Borisov 
13119bd6861bSNikolay Borisov 	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
1312df19a626SEric Dumazet }
1313df19a626SEric Dumazet 
13146c37e5deSFlavio Leitner static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
13156c37e5deSFlavio Leitner {
13166c37e5deSFlavio Leitner 	const struct inet_connection_sock *icsk = &tp->inet_conn;
13176c37e5deSFlavio Leitner 
13186c37e5deSFlavio Leitner 	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
13196c37e5deSFlavio Leitner 			  tcp_time_stamp - tp->rcv_tstamp);
13206c37e5deSFlavio Leitner }
13216c37e5deSFlavio Leitner 
1322463c84b9SArnaldo Carvalho de Melo static inline int tcp_fin_time(const struct sock *sk)
13231da177e4SLinus Torvalds {
13241e579caaSNikolay Borisov 	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
1325463c84b9SArnaldo Carvalho de Melo 	const int rto = inet_csk(sk)->icsk_rto;
13261da177e4SLinus Torvalds 
1327463c84b9SArnaldo Carvalho de Melo 	if (fin_timeout < (rto << 2) - (rto >> 1))
1328463c84b9SArnaldo Carvalho de Melo 		fin_timeout = (rto << 2) - (rto >> 1);
13291da177e4SLinus Torvalds 
13301da177e4SLinus Torvalds 	return fin_timeout;
13311da177e4SLinus Torvalds }
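
/*
 * Worked example (illustrative numbers only): with linger2 unset and a
 * 60 second fin timeout, an icsk_rto equivalent to 20 seconds yields
 * (rto << 2) - (rto >> 1) = 80 - 10 = 70 seconds, so fin_timeout is raised
 * from 60 to 70 seconds.
 */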
13321da177e4SLinus Torvalds 
1333a2a385d6SEric Dumazet static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1334c887e6d2SIlpo Järvinen 				  int paws_win)
13351da177e4SLinus Torvalds {
1336c887e6d2SIlpo Järvinen 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1337a2a385d6SEric Dumazet 		return true;
1338c887e6d2SIlpo Järvinen 	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1339a2a385d6SEric Dumazet 		return true;
1340bc2ce894SEric Dumazet 	/*
1341bc2ce894SEric Dumazet 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0;
1342bc2ce894SEric Dumazet 	 * the following TCP messages then carry valid values. Ignore a 0 value,
1343bc2ce894SEric Dumazet 	 * or else a 'negative' tsval might cause us to reject their packets.
1344bc2ce894SEric Dumazet 	 */
1345bc2ce894SEric Dumazet 	if (!rx_opt->ts_recent)
1346a2a385d6SEric Dumazet 		return true;
1347a2a385d6SEric Dumazet 	return false;
1348c887e6d2SIlpo Järvinen }
1349c887e6d2SIlpo Järvinen 
1350a2a385d6SEric Dumazet static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1351c887e6d2SIlpo Järvinen 				   int rst)
1352c887e6d2SIlpo Järvinen {
1353c887e6d2SIlpo Järvinen 	if (tcp_paws_check(rx_opt, 0))
1354a2a385d6SEric Dumazet 		return false;
13551da177e4SLinus Torvalds 
13561da177e4SLinus Torvalds 	/* RST segments are not recommended to carry a timestamp,
13571da177e4SLinus Torvalds 	   and, if they do, it is recommended to ignore PAWS because
13581da177e4SLinus Torvalds 	   "their cleanup function should take precedence over timestamps."
13591da177e4SLinus Torvalds 	   Certainly, this is a mistake. It is necessary to understand the reasons
13601da177e4SLinus Torvalds 	   for this constraint in order to relax it: if a peer reboots, its clock may go
13611da177e4SLinus Torvalds 	   out of sync and half-open connections will not be reset.
13621da177e4SLinus Torvalds 	   Actually, the problem would not exist if all
13631da177e4SLinus Torvalds 	   the implementations followed the draft about maintaining the clock
13641da177e4SLinus Torvalds 	   across reboots. Linux-2.2 DOES NOT!
13651da177e4SLinus Torvalds 
13661da177e4SLinus Torvalds 	   However, we can relax the time bounds for RST segments to MSL.
13671da177e4SLinus Torvalds 	 */
13689d729f72SJames Morris 	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1369a2a385d6SEric Dumazet 		return false;
1370a2a385d6SEric Dumazet 	return true;
13711da177e4SLinus Torvalds }
13721da177e4SLinus Torvalds 
13737970ddc8SEric Dumazet bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
13747970ddc8SEric Dumazet 			  int mib_idx, u32 *last_oow_ack_time);
1375032ee423SNeal Cardwell 
1376a9c19329SPavel Emelyanov static inline void tcp_mib_init(struct net *net)
13771da177e4SLinus Torvalds {
13781da177e4SLinus Torvalds 	/* See RFC 2012 */
13796aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
13806aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
13816aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
13826aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
13831da177e4SLinus Torvalds }
13841da177e4SLinus Torvalds 
13856a438bbeSStephen Hemminger /* from STCP */
1386ef9da47cSIlpo Järvinen static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
13870800f170SDavid S. Miller {
13886a438bbeSStephen Hemminger 	tp->lost_skb_hint = NULL;
1389ef9da47cSIlpo Järvinen }
1390ef9da47cSIlpo Järvinen 
1391ef9da47cSIlpo Järvinen static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1392ef9da47cSIlpo Järvinen {
1393ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
13946a438bbeSStephen Hemminger 	tp->retransmit_skb_hint = NULL;
1395b7689205SIlpo Järvinen }
1396b7689205SIlpo Järvinen 
1397a915da9bSEric Dumazet union tcp_md5_addr {
1398a915da9bSEric Dumazet 	struct in_addr  a4;
1399a915da9bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1400a915da9bSEric Dumazet 	struct in6_addr	a6;
1401a915da9bSEric Dumazet #endif
1402a915da9bSEric Dumazet };
1403a915da9bSEric Dumazet 
1404cfb6eeb4SYOSHIFUJI Hideaki /* - key database */
1405cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_key {
1406a915da9bSEric Dumazet 	struct hlist_node	node;
1407cfb6eeb4SYOSHIFUJI Hideaki 	u8			keylen;
1408a915da9bSEric Dumazet 	u8			family; /* AF_INET or AF_INET6 */
1409a915da9bSEric Dumazet 	union tcp_md5_addr	addr;
1410a915da9bSEric Dumazet 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1411a915da9bSEric Dumazet 	struct rcu_head		rcu;
1412cfb6eeb4SYOSHIFUJI Hideaki };
1413cfb6eeb4SYOSHIFUJI Hideaki 
1414cfb6eeb4SYOSHIFUJI Hideaki /* - sock block */
1415cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_info {
1416a915da9bSEric Dumazet 	struct hlist_head	head;
1417a8afca03SEric Dumazet 	struct rcu_head		rcu;
1418cfb6eeb4SYOSHIFUJI Hideaki };
1419cfb6eeb4SYOSHIFUJI Hideaki 
1420cfb6eeb4SYOSHIFUJI Hideaki /* - pseudo header */
1421cfb6eeb4SYOSHIFUJI Hideaki struct tcp4_pseudohdr {
1422cfb6eeb4SYOSHIFUJI Hideaki 	__be32		saddr;
1423cfb6eeb4SYOSHIFUJI Hideaki 	__be32		daddr;
1424cfb6eeb4SYOSHIFUJI Hideaki 	__u8		pad;
1425cfb6eeb4SYOSHIFUJI Hideaki 	__u8		protocol;
1426cfb6eeb4SYOSHIFUJI Hideaki 	__be16		len;
1427cfb6eeb4SYOSHIFUJI Hideaki };
1428cfb6eeb4SYOSHIFUJI Hideaki 
1429cfb6eeb4SYOSHIFUJI Hideaki struct tcp6_pseudohdr {
1430cfb6eeb4SYOSHIFUJI Hideaki 	struct in6_addr	saddr;
1431cfb6eeb4SYOSHIFUJI Hideaki 	struct in6_addr daddr;
1432cfb6eeb4SYOSHIFUJI Hideaki 	__be32		len;
1433cfb6eeb4SYOSHIFUJI Hideaki 	__be32		protocol;	/* including padding */
1434cfb6eeb4SYOSHIFUJI Hideaki };
1435cfb6eeb4SYOSHIFUJI Hideaki 
1436cfb6eeb4SYOSHIFUJI Hideaki union tcp_md5sum_block {
1437cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp4_pseudohdr ip4;
1438dfd56b8bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1439cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp6_pseudohdr ip6;
1440cfb6eeb4SYOSHIFUJI Hideaki #endif
1441cfb6eeb4SYOSHIFUJI Hideaki };
1442cfb6eeb4SYOSHIFUJI Hideaki 
1443cfb6eeb4SYOSHIFUJI Hideaki /* - pool: digest algorithm, hash description and scratch buffer */
1444cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_pool {
1445cf80e0e4SHerbert Xu 	struct ahash_request	*md5_req;
144619689e38SEric Dumazet 	void			*scratch;
1447cfb6eeb4SYOSHIFUJI Hideaki };
1448cfb6eeb4SYOSHIFUJI Hideaki 
1449cfb6eeb4SYOSHIFUJI Hideaki /* - functions */
145039f8e58eSEric Dumazet int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
145139f8e58eSEric Dumazet 			const struct sock *sk, const struct sk_buff *skb);
14525c9f3023SJoe Perches int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
14535c9f3023SJoe Perches 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
14545c9f3023SJoe Perches int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1455a915da9bSEric Dumazet 		   int family);
1456b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1457fd3a154aSEric Dumazet 					 const struct sock *addr_sk);
1458cfb6eeb4SYOSHIFUJI Hideaki 
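/*
 * Illustrative sketch, not part of this header: installing an MD5 key for an
 * IPv4 peer with the helpers above, roughly what the TCP_MD5SIG setsockopt
 * path ends up doing.  The function name and its arguments are made up;
 * the key length is bounded by TCP_MD5SIG_MAXKEYLEN.
 */
static inline int tcp_example_set_md5_key(struct sock *sk, __be32 peer_addr,
					  const u8 *key, u8 keylen)
{
	union tcp_md5_addr addr = { .a4.s_addr = peer_addr };

	return tcp_md5_do_add(sk, &addr, AF_INET, key, keylen, GFP_KERNEL);
}
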
14599501f972SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1460b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
14615c9f3023SJoe Perches 					 const union tcp_md5_addr *addr,
14625c9f3023SJoe Perches 					 int family);
1463a915da9bSEric Dumazet #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
14649501f972SYOSHIFUJI Hideaki #else
1465b83e3debSEric Dumazet static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1466a915da9bSEric Dumazet 					 const union tcp_md5_addr *addr,
1467a915da9bSEric Dumazet 					 int family)
1468a915da9bSEric Dumazet {
1469a915da9bSEric Dumazet 	return NULL;
1470a915da9bSEric Dumazet }
14719501f972SYOSHIFUJI Hideaki #define tcp_twsk_md5_key(twsk)	NULL
14729501f972SYOSHIFUJI Hideaki #endif
14739501f972SYOSHIFUJI Hideaki 
14745c9f3023SJoe Perches bool tcp_alloc_md5sig_pool(void);
1475cfb6eeb4SYOSHIFUJI Hideaki 
14765c9f3023SJoe Perches struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
147771cea17eSEric Dumazet static inline void tcp_put_md5sig_pool(void)
147871cea17eSEric Dumazet {
147971cea17eSEric Dumazet 	local_bh_enable();
148071cea17eSEric Dumazet }
148135790c04SEric Dumazet 
14825c9f3023SJoe Perches int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
148395c96174SEric Dumazet 			  unsigned int header_len);
14845c9f3023SJoe Perches int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1485cf533ea5SEric Dumazet 		     const struct tcp_md5sig_key *key);
1486cfb6eeb4SYOSHIFUJI Hideaki 
148710467163SJerry Chu /* From tcp_fastopen.c */
14885c9f3023SJoe Perches void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
14895c9f3023SJoe Perches 			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
14905c9f3023SJoe Perches 			    unsigned long *last_syn_loss);
14915c9f3023SJoe Perches void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
14922646c831SDaniel Lee 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
14932646c831SDaniel Lee 			    u16 try_exp);
1494783237e8SYuchung Cheng struct tcp_fastopen_request {
1495783237e8SYuchung Cheng 	/* Fast Open cookie. Size 0 means a cookie request */
1496783237e8SYuchung Cheng 	struct tcp_fastopen_cookie	cookie;
1497783237e8SYuchung Cheng 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1498f5ddcbbbSEric Dumazet 	size_t				size;
1499f5ddcbbbSEric Dumazet 	int				copied;	/* queued in tcp_connect() */
1500783237e8SYuchung Cheng };
1501783237e8SYuchung Cheng void tcp_free_fastopen_req(struct tcp_sock *tp);
1502783237e8SYuchung Cheng 
150310467163SJerry Chu extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
150410467163SJerry Chu int tcp_fastopen_reset_cipher(void *key, unsigned int len);
150561d2bcaeSEric Dumazet void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
15067c85af88SEric Dumazet struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
15075b7ed089SYuchung Cheng 			      struct request_sock *req,
1508843f4a55SYuchung Cheng 			      struct tcp_fastopen_cookie *foc,
1509843f4a55SYuchung Cheng 			      struct dst_entry *dst);
1510222e83d2SHannes Frederic Sowa void tcp_fastopen_init_key_once(bool publish);
151110467163SJerry Chu #define TCP_FASTOPEN_KEY_LENGTH 16
151210467163SJerry Chu 
151310467163SJerry Chu /* Fastopen key context */
151410467163SJerry Chu struct tcp_fastopen_context {
15157ae8639cSEric Dumazet 	struct crypto_cipher	*tfm;
151610467163SJerry Chu 	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
151710467163SJerry Chu 	struct rcu_head		rcu;
151810467163SJerry Chu };
151910467163SJerry Chu 
152005b055e8SFrancis Yan /* Latencies incurred by various limits for a sender. They are
152105b055e8SFrancis Yan  * chronograph-like stats that are mutually exclusive.
152205b055e8SFrancis Yan  */
152305b055e8SFrancis Yan enum tcp_chrono {
152405b055e8SFrancis Yan 	TCP_CHRONO_UNSPEC,
152505b055e8SFrancis Yan 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
152605b055e8SFrancis Yan 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
152705b055e8SFrancis Yan 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
152805b055e8SFrancis Yan 	__TCP_CHRONO_MAX,
152905b055e8SFrancis Yan };
153005b055e8SFrancis Yan 
153105b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
153205b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
153305b055e8SFrancis Yan 
1534fe067e8aSDavid S. Miller /* write queue abstraction */
1535fe067e8aSDavid S. Miller static inline void tcp_write_queue_purge(struct sock *sk)
1536fe067e8aSDavid S. Miller {
1537fe067e8aSDavid S. Miller 	struct sk_buff *skb;
1538fe067e8aSDavid S. Miller 
15390f87230dSFrancis Yan 	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1540fe067e8aSDavid S. Miller 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
15413ab224beSHideo Aoki 		sk_wmem_free_skb(sk, skb);
15423ab224beSHideo Aoki 	sk_mem_reclaim(sk);
15438818a9d8SIlpo Järvinen 	tcp_clear_all_retrans_hints(tcp_sk(sk));
1544fe067e8aSDavid S. Miller }
1545fe067e8aSDavid S. Miller 
1546cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1547fe067e8aSDavid S. Miller {
1548cd07a8eaSDavid S. Miller 	return skb_peek(&sk->sk_write_queue);
1549fe067e8aSDavid S. Miller }
1550fe067e8aSDavid S. Miller 
1551cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1552fe067e8aSDavid S. Miller {
1553cd07a8eaSDavid S. Miller 	return skb_peek_tail(&sk->sk_write_queue);
1554fe067e8aSDavid S. Miller }
1555fe067e8aSDavid S. Miller 
1556cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1557cf533ea5SEric Dumazet 						   const struct sk_buff *skb)
1558fe067e8aSDavid S. Miller {
1559cd07a8eaSDavid S. Miller 	return skb_queue_next(&sk->sk_write_queue, skb);
1560fe067e8aSDavid S. Miller }
1561fe067e8aSDavid S. Miller 
1562cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1563cf533ea5SEric Dumazet 						   const struct sk_buff *skb)
1564832d11c5SIlpo Järvinen {
1565832d11c5SIlpo Järvinen 	return skb_queue_prev(&sk->sk_write_queue, skb);
1566832d11c5SIlpo Järvinen }
1567832d11c5SIlpo Järvinen 
1568fe067e8aSDavid S. Miller #define tcp_for_write_queue(skb, sk)					\
1569cd07a8eaSDavid S. Miller 	skb_queue_walk(&(sk)->sk_write_queue, skb)
1570fe067e8aSDavid S. Miller 
1571fe067e8aSDavid S. Miller #define tcp_for_write_queue_from(skb, sk)				\
1572cd07a8eaSDavid S. Miller 	skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1573fe067e8aSDavid S. Miller 
1574234b6860SIlpo Järvinen #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1575cd07a8eaSDavid S. Miller 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1576234b6860SIlpo Järvinen 
1577cf533ea5SEric Dumazet static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1578fe067e8aSDavid S. Miller {
1579fe067e8aSDavid S. Miller 	return sk->sk_send_head;
1580fe067e8aSDavid S. Miller }
1581fe067e8aSDavid S. Miller 
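/*
 * Illustrative sketch, not part of this header: walking the already-sent part
 * of the write queue with the helpers above.  Transmitted skbs sit before
 * tcp_send_head(); everything from the send head onwards is still unsent.
 * The function name is made up for the example.
 */
static inline unsigned int tcp_example_count_sent(struct sock *sk)
{
	struct sk_buff *skb;
	unsigned int count = 0;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))	/* unsent data starts here */
			break;
		count++;			/* skb was already transmitted */
	}
	return count;
}
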
1582cd07a8eaSDavid S. Miller static inline bool tcp_skb_is_last(const struct sock *sk,
1583cd07a8eaSDavid S. Miller 				   const struct sk_buff *skb)
1584cd07a8eaSDavid S. Miller {
1585cd07a8eaSDavid S. Miller 	return skb_queue_is_last(&sk->sk_write_queue, skb);
1586cd07a8eaSDavid S. Miller }
1587cd07a8eaSDavid S. Miller 
1588cf533ea5SEric Dumazet static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1589fe067e8aSDavid S. Miller {
1590cd07a8eaSDavid S. Miller 	if (tcp_skb_is_last(sk, skb))
1591fe067e8aSDavid S. Miller 		sk->sk_send_head = NULL;
1592cd07a8eaSDavid S. Miller 	else
1593cd07a8eaSDavid S. Miller 		sk->sk_send_head = tcp_write_queue_next(sk, skb);
1594fe067e8aSDavid S. Miller }
1595fe067e8aSDavid S. Miller 
1596fe067e8aSDavid S. Miller static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1597fe067e8aSDavid S. Miller {
15980f87230dSFrancis Yan 	if (sk->sk_send_head == skb_unlinked) {
1599fe067e8aSDavid S. Miller 		sk->sk_send_head = NULL;
16000f87230dSFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
16010f87230dSFrancis Yan 	}
1602bb1fcecaSEric Dumazet 	if (tcp_sk(sk)->highest_sack == skb_unlinked)
1603bb1fcecaSEric Dumazet 		tcp_sk(sk)->highest_sack = NULL;
1604fe067e8aSDavid S. Miller }
1605fe067e8aSDavid S. Miller 
1606fe067e8aSDavid S. Miller static inline void tcp_init_send_head(struct sock *sk)
1607fe067e8aSDavid S. Miller {
1608fe067e8aSDavid S. Miller 	sk->sk_send_head = NULL;
1609fe067e8aSDavid S. Miller }
1610fe067e8aSDavid S. Miller 
1611fe067e8aSDavid S. Miller static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1612fe067e8aSDavid S. Miller {
1613fe067e8aSDavid S. Miller 	__skb_queue_tail(&sk->sk_write_queue, skb);
1614fe067e8aSDavid S. Miller }
1615fe067e8aSDavid S. Miller 
1616fe067e8aSDavid S. Miller static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1617fe067e8aSDavid S. Miller {
1618fe067e8aSDavid S. Miller 	__tcp_add_write_queue_tail(sk, skb);
1619fe067e8aSDavid S. Miller 
1620fe067e8aSDavid S. Miller 	/* Queue it, remembering where we must start sending. */
16216859d494SIlpo Järvinen 	if (sk->sk_send_head == NULL) {
1622fe067e8aSDavid S. Miller 		sk->sk_send_head = skb;
16230f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
16246859d494SIlpo Järvinen 
16256859d494SIlpo Järvinen 		if (tcp_sk(sk)->highest_sack == NULL)
16266859d494SIlpo Järvinen 			tcp_sk(sk)->highest_sack = skb;
16276859d494SIlpo Järvinen 	}
1628fe067e8aSDavid S. Miller }
1629fe067e8aSDavid S. Miller 
1630fe067e8aSDavid S. Miller static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1631fe067e8aSDavid S. Miller {
1632fe067e8aSDavid S. Miller 	__skb_queue_head(&sk->sk_write_queue, skb);
1633fe067e8aSDavid S. Miller }
1634fe067e8aSDavid S. Miller 
1635fe067e8aSDavid S. Miller /* Insert buff after skb on the write queue of sk.  */
1636fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1637fe067e8aSDavid S. Miller 						struct sk_buff *buff,
1638fe067e8aSDavid S. Miller 						struct sock *sk)
1639fe067e8aSDavid S. Miller {
16407de6c033SGerrit Renker 	__skb_queue_after(&sk->sk_write_queue, skb, buff);
1641fe067e8aSDavid S. Miller }
1642fe067e8aSDavid S. Miller 
164343f59c89SDavid S. Miller /* Insert new before skb on the write queue of sk.  */
1644fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1645fe067e8aSDavid S. Miller 						  struct sk_buff *skb,
1646fe067e8aSDavid S. Miller 						  struct sock *sk)
1647fe067e8aSDavid S. Miller {
164843f59c89SDavid S. Miller 	__skb_queue_before(&sk->sk_write_queue, skb, new);
16496e421410SIlpo Järvinen 
16506e421410SIlpo Järvinen 	if (sk->sk_send_head == skb)
16516e421410SIlpo Järvinen 		sk->sk_send_head = new;
1652fe067e8aSDavid S. Miller }
1653fe067e8aSDavid S. Miller 
1654fe067e8aSDavid S. Miller static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1655fe067e8aSDavid S. Miller {
1656fe067e8aSDavid S. Miller 	__skb_unlink(skb, &sk->sk_write_queue);
1657fe067e8aSDavid S. Miller }
1658fe067e8aSDavid S. Miller 
1659a2a385d6SEric Dumazet static inline bool tcp_write_queue_empty(struct sock *sk)
1660fe067e8aSDavid S. Miller {
1661fe067e8aSDavid S. Miller 	return skb_queue_empty(&sk->sk_write_queue);
1662fe067e8aSDavid S. Miller }
1663fe067e8aSDavid S. Miller 
166412d50c46SKrishna Kumar static inline void tcp_push_pending_frames(struct sock *sk)
166512d50c46SKrishna Kumar {
166612d50c46SKrishna Kumar 	if (tcp_send_head(sk)) {
166712d50c46SKrishna Kumar 		struct tcp_sock *tp = tcp_sk(sk);
166812d50c46SKrishna Kumar 
166912d50c46SKrishna Kumar 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
167012d50c46SKrishna Kumar 	}
167112d50c46SKrishna Kumar }
167212d50c46SKrishna Kumar 
1673ecb97192SNeal Cardwell /* Start sequence of the skb just after the highest skb with the SACKed
1674ecb97192SNeal Cardwell  * bit set; valid only if sacked_out > 0 or when the caller has ensured
1675ecb97192SNeal Cardwell  * validity itself.
1676a47e5a98SIlpo Järvinen  */
1677a47e5a98SIlpo Järvinen static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1678a47e5a98SIlpo Järvinen {
1679a47e5a98SIlpo Järvinen 	if (!tp->sacked_out)
1680a47e5a98SIlpo Järvinen 		return tp->snd_una;
16816859d494SIlpo Järvinen 
16826859d494SIlpo Järvinen 	if (tp->highest_sack == NULL)
16836859d494SIlpo Järvinen 		return tp->snd_nxt;
16846859d494SIlpo Järvinen 
1685a47e5a98SIlpo Järvinen 	return TCP_SKB_CB(tp->highest_sack)->seq;
1686a47e5a98SIlpo Järvinen }
1687a47e5a98SIlpo Järvinen 
16886859d494SIlpo Järvinen static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
16896859d494SIlpo Järvinen {
16906859d494SIlpo Järvinen 	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
16916859d494SIlpo Järvinen 						tcp_write_queue_next(sk, skb);
16926859d494SIlpo Järvinen }
16936859d494SIlpo Järvinen 
16946859d494SIlpo Järvinen static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
16956859d494SIlpo Järvinen {
16966859d494SIlpo Järvinen 	return tcp_sk(sk)->highest_sack;
16976859d494SIlpo Järvinen }
16986859d494SIlpo Järvinen 
16996859d494SIlpo Järvinen static inline void tcp_highest_sack_reset(struct sock *sk)
17006859d494SIlpo Järvinen {
17016859d494SIlpo Järvinen 	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
17026859d494SIlpo Järvinen }
17036859d494SIlpo Järvinen 
17046859d494SIlpo Järvinen /* Called when old skb is about to be deleted (to be combined with new skb) */
17056859d494SIlpo Järvinen static inline void tcp_highest_sack_combine(struct sock *sk,
17066859d494SIlpo Järvinen 					    struct sk_buff *old,
17076859d494SIlpo Järvinen 					    struct sk_buff *new)
17086859d494SIlpo Järvinen {
17096859d494SIlpo Järvinen 	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
17106859d494SIlpo Järvinen 		tcp_sk(sk)->highest_sack = new;
17116859d494SIlpo Järvinen }
17126859d494SIlpo Järvinen 
1713b1f0a0e9SFlorian Westphal /* This helper checks if socket has IP_TRANSPARENT set */
1714b1f0a0e9SFlorian Westphal static inline bool inet_sk_transparent(const struct sock *sk)
1715b1f0a0e9SFlorian Westphal {
1716b1f0a0e9SFlorian Westphal 	switch (sk->sk_state) {
1717b1f0a0e9SFlorian Westphal 	case TCP_TIME_WAIT:
1718b1f0a0e9SFlorian Westphal 		return inet_twsk(sk)->tw_transparent;
1719b1f0a0e9SFlorian Westphal 	case TCP_NEW_SYN_RECV:
1720b1f0a0e9SFlorian Westphal 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1721b1f0a0e9SFlorian Westphal 	}
1722b1f0a0e9SFlorian Westphal 	return inet_sk(sk)->transparent;
1723b1f0a0e9SFlorian Westphal }
1724b1f0a0e9SFlorian Westphal 
17255aa4b32fSAndreas Petlund /* Determines whether this is a thin stream (which may suffer from
17265aa4b32fSAndreas Petlund  * increased latency). Used to trigger latency-reducing mechanisms.
17275aa4b32fSAndreas Petlund  */
1728a2a385d6SEric Dumazet static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
17295aa4b32fSAndreas Petlund {
17305aa4b32fSAndreas Petlund 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
17315aa4b32fSAndreas Petlund }
17325aa4b32fSAndreas Petlund 
17331da177e4SLinus Torvalds /* /proc */
17341da177e4SLinus Torvalds enum tcp_seq_states {
17351da177e4SLinus Torvalds 	TCP_SEQ_STATE_LISTENING,
17361da177e4SLinus Torvalds 	TCP_SEQ_STATE_ESTABLISHED,
17371da177e4SLinus Torvalds };
17381da177e4SLinus Torvalds 
173973cb88ecSArjan van de Ven int tcp_seq_open(struct inode *inode, struct file *file);
174073cb88ecSArjan van de Ven 
17411da177e4SLinus Torvalds struct tcp_seq_afinfo {
17421da177e4SLinus Torvalds 	char				*name;
17431da177e4SLinus Torvalds 	sa_family_t			family;
174473cb88ecSArjan van de Ven 	const struct file_operations	*seq_fops;
17459427c4b3SDenis V. Lunev 	struct seq_operations		seq_ops;
17461da177e4SLinus Torvalds };
17471da177e4SLinus Torvalds 
17481da177e4SLinus Torvalds struct tcp_iter_state {
1749a4146b1bSDenis V. Lunev 	struct seq_net_private	p;
17501da177e4SLinus Torvalds 	sa_family_t		family;
17511da177e4SLinus Torvalds 	enum tcp_seq_states	state;
17521da177e4SLinus Torvalds 	struct sock		*syn_wait_sk;
1753a7cb5a49SEric W. Biederman 	int			bucket, offset, sbucket, num;
1754a8b690f9STom Herbert 	loff_t			last_pos;
17551da177e4SLinus Torvalds };
17561da177e4SLinus Torvalds 
17575c9f3023SJoe Perches int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
17585c9f3023SJoe Perches void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
17591da177e4SLinus Torvalds 
176020380731SArnaldo Carvalho de Melo extern struct request_sock_ops tcp_request_sock_ops;
1761c6aefafbSGlenn Griffin extern struct request_sock_ops tcp6_request_sock_ops;
176220380731SArnaldo Carvalho de Melo 
17635c9f3023SJoe Perches void tcp_v4_destroy_sock(struct sock *sk);
176420380731SArnaldo Carvalho de Melo 
176528be6e07SEric Dumazet struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1766c8f44affSMichał Mirosław 				netdev_features_t features);
17675c9f3023SJoe Perches struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
17685c9f3023SJoe Perches int tcp_gro_complete(struct sk_buff *skb);
176928850dc7SDaniel Borkmann 
17705c9f3023SJoe Perches void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1771f4c50d99SHerbert Xu 
1772c9bee3b7SEric Dumazet static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1773c9bee3b7SEric Dumazet {
17744979f2d9SNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
17754979f2d9SNikolay Borisov 	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1776c9bee3b7SEric Dumazet }
1777c9bee3b7SEric Dumazet 
1778c9bee3b7SEric Dumazet static inline bool tcp_stream_memory_free(const struct sock *sk)
1779c9bee3b7SEric Dumazet {
1780c9bee3b7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1781c9bee3b7SEric Dumazet 	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1782c9bee3b7SEric Dumazet 
1783c9bee3b7SEric Dumazet 	return notsent_bytes < tcp_notsent_lowat(tp);
1784c9bee3b7SEric Dumazet }
1785c9bee3b7SEric Dumazet 
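/*
 * Worked example (illustrative numbers only): with notsent_lowat set to
 * 16384 bytes via the TCP_NOTSENT_LOWAT socket option and
 * write_seq - snd_nxt = 8192 bytes still unsent, tcp_stream_memory_free()
 * reports the stream as writable.
 */
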
178620380731SArnaldo Carvalho de Melo #ifdef CONFIG_PROC_FS
17875c9f3023SJoe Perches int tcp4_proc_init(void);
17885c9f3023SJoe Perches void tcp4_proc_exit(void);
178920380731SArnaldo Carvalho de Melo #endif
179020380731SArnaldo Carvalho de Melo 
1791ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
17921fb6f159SOctavian Purdila int tcp_conn_request(struct request_sock_ops *rsk_ops,
17931fb6f159SOctavian Purdila 		     const struct tcp_request_sock_ops *af_ops,
17941fb6f159SOctavian Purdila 		     struct sock *sk, struct sk_buff *skb);
17955db92c99SOctavian Purdila 
1796cfb6eeb4SYOSHIFUJI Hideaki /* TCP af-specific functions */
1797cfb6eeb4SYOSHIFUJI Hideaki struct tcp_sock_af_ops {
1798cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1799b83e3debSEric Dumazet 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
1800fd3a154aSEric Dumazet 						const struct sock *addr_sk);
1801cfb6eeb4SYOSHIFUJI Hideaki 	int		(*calc_md5_hash)(char *location,
180239f8e58eSEric Dumazet 					 const struct tcp_md5sig_key *md5,
1803318cf7aaSEric Dumazet 					 const struct sock *sk,
1804318cf7aaSEric Dumazet 					 const struct sk_buff *skb);
1805cfb6eeb4SYOSHIFUJI Hideaki 	int		(*md5_parse)(struct sock *sk,
1806cfb6eeb4SYOSHIFUJI Hideaki 				     char __user *optval,
1807cfb6eeb4SYOSHIFUJI Hideaki 				     int optlen);
1808cfb6eeb4SYOSHIFUJI Hideaki #endif
1809cfb6eeb4SYOSHIFUJI Hideaki };
1810cfb6eeb4SYOSHIFUJI Hideaki 
1811cfb6eeb4SYOSHIFUJI Hideaki struct tcp_request_sock_ops {
18122aec4a29SOctavian Purdila 	u16 mss_clamp;
1813cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1814b83e3debSEric Dumazet 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1815fd3a154aSEric Dumazet 						 const struct sock *addr_sk);
1816e3afe7b7SJohn Dykstra 	int		(*calc_md5_hash) (char *location,
181739f8e58eSEric Dumazet 					  const struct tcp_md5sig_key *md5,
1818318cf7aaSEric Dumazet 					  const struct sock *sk,
1819318cf7aaSEric Dumazet 					  const struct sk_buff *skb);
1820cfb6eeb4SYOSHIFUJI Hideaki #endif
1821b40cf18eSEric Dumazet 	void (*init_req)(struct request_sock *req,
1822b40cf18eSEric Dumazet 			 const struct sock *sk_listener,
182316bea70aSOctavian Purdila 			 struct sk_buff *skb);
1824fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES
18253f684b4bSEric Dumazet 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
1826fb7b37a7SOctavian Purdila 				 __u16 *mss);
1827fb7b37a7SOctavian Purdila #endif
1828f964629eSEric Dumazet 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1829d94e0417SOctavian Purdila 				       const struct request_sock *req,
1830d94e0417SOctavian Purdila 				       bool *strict);
183195a22caeSFlorian Westphal 	__u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff);
18320f935dbeSEric Dumazet 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1833d6274bd8SOctavian Purdila 			   struct flowi *fl, struct request_sock *req,
1834dc6ef6beSEric Dumazet 			   struct tcp_fastopen_cookie *foc,
1835b3d05147SEric Dumazet 			   enum tcp_synack_type synack_type);
1836cfb6eeb4SYOSHIFUJI Hideaki };
1837cfb6eeb4SYOSHIFUJI Hideaki 
1838fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES
1839fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
18403f684b4bSEric Dumazet 					 const struct sock *sk, struct sk_buff *skb,
1841fb7b37a7SOctavian Purdila 					 __u16 *mss)
1842fb7b37a7SOctavian Purdila {
18433f684b4bSEric Dumazet 	tcp_synq_overflow(sk);
184402a1d6e7SEric Dumazet 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
18453f684b4bSEric Dumazet 	return ops->cookie_init_seq(skb, mss);
1846fb7b37a7SOctavian Purdila }
1847fb7b37a7SOctavian Purdila #else
1848fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
18493f684b4bSEric Dumazet 					 const struct sock *sk, struct sk_buff *skb,
1850fb7b37a7SOctavian Purdila 					 __u16 *mss)
1851fb7b37a7SOctavian Purdila {
1852fb7b37a7SOctavian Purdila 	return 0;
1853fb7b37a7SOctavian Purdila }
1854fb7b37a7SOctavian Purdila #endif
1855fb7b37a7SOctavian Purdila 
18565c9f3023SJoe Perches int tcpv4_offload_init(void);
185728850dc7SDaniel Borkmann 
18585c9f3023SJoe Perches void tcp_v4_init(void);
18595c9f3023SJoe Perches void tcp_init(void);
186020380731SArnaldo Carvalho de Melo 
1861659a8ad5SYuchung Cheng /* tcp_recovery.c */
1862659a8ad5SYuchung Cheng 
18634f41b1c5SYuchung Cheng /* Flags to enable various loss recovery features. See below */
18644f41b1c5SYuchung Cheng extern int sysctl_tcp_recovery;
18654f41b1c5SYuchung Cheng 
18664f41b1c5SYuchung Cheng /* Use TCP RACK to detect (some) tail and retransmit losses */
18674f41b1c5SYuchung Cheng #define TCP_RACK_LOST_RETRANS  0x1
18684f41b1c5SYuchung Cheng 
1869deed7be7SYuchung Cheng extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now);
1870deed7be7SYuchung Cheng extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked,
1871deed7be7SYuchung Cheng 			     const struct skb_mstamp *xmit_time,
1872deed7be7SYuchung Cheng 			     const struct skb_mstamp *ack_time);
1873*57dde7f7SYuchung Cheng extern void tcp_rack_reo_timeout(struct sock *sk);
1874659a8ad5SYuchung Cheng 
1875e25f866fSCong Wang /*
1876e25f866fSCong Wang  * Save and compile IPv4 options, and return a pointer to the copy
1877e25f866fSCong Wang  */
1878e25f866fSCong Wang static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
1879e25f866fSCong Wang {
1880e25f866fSCong Wang 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1881e25f866fSCong Wang 	struct ip_options_rcu *dopt = NULL;
1882e25f866fSCong Wang 
1883461b74c3SCong Wang 	if (opt->optlen) {
1884e25f866fSCong Wang 		int opt_size = sizeof(*dopt) + opt->optlen;
1885e25f866fSCong Wang 
1886e25f866fSCong Wang 		dopt = kmalloc(opt_size, GFP_ATOMIC);
1887e25f866fSCong Wang 		if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
1888e25f866fSCong Wang 			kfree(dopt);
1889e25f866fSCong Wang 			dopt = NULL;
1890e25f866fSCong Wang 		}
1891e25f866fSCong Wang 	}
1892e25f866fSCong Wang 	return dopt;
1893e25f866fSCong Wang }
1894e25f866fSCong Wang 
189598781965SEric Dumazet /* locally generated TCP pure ACKs have skb->truesize == 2
189698781965SEric Dumazet  * (check tcp_send_ack() in net/ipv4/tcp_output.c).
189798781965SEric Dumazet  * Checking truesize is much faster than dissecting the packet to find out.
189898781965SEric Dumazet  * (Think of GRE encapsulations, IPv4, IPv6, ...)
189998781965SEric Dumazet  */
190098781965SEric Dumazet static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
190198781965SEric Dumazet {
190298781965SEric Dumazet 	return skb->truesize == 2;
190398781965SEric Dumazet }
190498781965SEric Dumazet 
190598781965SEric Dumazet static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
190698781965SEric Dumazet {
190798781965SEric Dumazet 	skb->truesize = 2;
190898781965SEric Dumazet }
190998781965SEric Dumazet 
1910473bd239STom Herbert static inline int tcp_inq(struct sock *sk)
1911473bd239STom Herbert {
1912473bd239STom Herbert 	struct tcp_sock *tp = tcp_sk(sk);
1913473bd239STom Herbert 	int answ;
1914473bd239STom Herbert 
1915473bd239STom Herbert 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1916473bd239STom Herbert 		answ = 0;
1917473bd239STom Herbert 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
1918473bd239STom Herbert 		   !tp->urg_data ||
1919473bd239STom Herbert 		   before(tp->urg_seq, tp->copied_seq) ||
1920473bd239STom Herbert 		   !before(tp->urg_seq, tp->rcv_nxt)) {
1921473bd239STom Herbert 
1922473bd239STom Herbert 		answ = tp->rcv_nxt - tp->copied_seq;
1923473bd239STom Herbert 
1924473bd239STom Herbert 		/* Subtract 1, if FIN was received */
1925473bd239STom Herbert 		if (answ && sock_flag(sk, SOCK_DONE))
1926473bd239STom Herbert 			answ--;
1927473bd239STom Herbert 	} else {
1928473bd239STom Herbert 		answ = tp->urg_seq - tp->copied_seq;
1929473bd239STom Herbert 	}
1930473bd239STom Herbert 
1931473bd239STom Herbert 	return answ;
1932473bd239STom Herbert }
1933473bd239STom Herbert 
193432035585STom Herbert int tcp_peek_len(struct socket *sock);
193532035585STom Herbert 
1936a44d6eacSMartin KaFai Lau static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1937a44d6eacSMartin KaFai Lau {
1938a44d6eacSMartin KaFai Lau 	u16 segs_in;
1939a44d6eacSMartin KaFai Lau 
1940a44d6eacSMartin KaFai Lau 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1941a44d6eacSMartin KaFai Lau 	tp->segs_in += segs_in;
1942a44d6eacSMartin KaFai Lau 	if (skb->len > tcp_hdrlen(skb))
1943a44d6eacSMartin KaFai Lau 		tp->data_segs_in += segs_in;
1944a44d6eacSMartin KaFai Lau }
1945a44d6eacSMartin KaFai Lau 
19469caad864SEric Dumazet /*
19479caad864SEric Dumazet  * TCP listen path runs lockless.
19489caad864SEric Dumazet  * We forced "struct sock" to be const qualified to make sure
19499caad864SEric Dumazet  * we don't modify one of its fields by mistake.
19509caad864SEric Dumazet  * Here, we increment sk_drops which is an atomic_t, so we can safely
19519caad864SEric Dumazet  * make sock writable again.
19529caad864SEric Dumazet  */
19539caad864SEric Dumazet static inline void tcp_listendrop(const struct sock *sk)
19549caad864SEric Dumazet {
19559caad864SEric Dumazet 	atomic_inc(&((struct sock *)sk)->sk_drops);
195602a1d6e7SEric Dumazet 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
19579caad864SEric Dumazet }
19589caad864SEric Dumazet 
19591da177e4SLinus Torvalds #endif	/* _TCP_H */
1960