xref: /linux/include/net/tcp.h (revision 1b1fc3fddabfb8739ef2c8f04e05a9858b42c1f7)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Definitions for the TCP module.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	@(#)tcp.h	1.0.5	05/23/93
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  *		This program is free software; you can redistribute it and/or
141da177e4SLinus Torvalds  *		modify it under the terms of the GNU General Public License
151da177e4SLinus Torvalds  *		as published by the Free Software Foundation; either version
161da177e4SLinus Torvalds  *		2 of the License, or (at your option) any later version.
171da177e4SLinus Torvalds  */
181da177e4SLinus Torvalds #ifndef _TCP_H
191da177e4SLinus Torvalds #define _TCP_H
201da177e4SLinus Torvalds 
211da177e4SLinus Torvalds #define FASTRETRANS_DEBUG 1
221da177e4SLinus Torvalds 
231da177e4SLinus Torvalds #include <linux/list.h>
241da177e4SLinus Torvalds #include <linux/tcp.h>
25187f1882SPaul Gortmaker #include <linux/bug.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
271da177e4SLinus Torvalds #include <linux/cache.h>
281da177e4SLinus Torvalds #include <linux/percpu.h>
29fb286bb2SHerbert Xu #include <linux/skbuff.h>
30c6aefafbSGlenn Griffin #include <linux/cryptohash.h>
31435cf559SWilliam Allen Simpson #include <linux/kref.h>
32740b0f18SEric Dumazet #include <linux/ktime.h>
333f421baaSArnaldo Carvalho de Melo 
343f421baaSArnaldo Carvalho de Melo #include <net/inet_connection_sock.h>
35295ff7edSArnaldo Carvalho de Melo #include <net/inet_timewait_sock.h>
3677d8bf9cSArnaldo Carvalho de Melo #include <net/inet_hashtables.h>
371da177e4SLinus Torvalds #include <net/checksum.h>
382e6599cbSArnaldo Carvalho de Melo #include <net/request_sock.h>
391da177e4SLinus Torvalds #include <net/sock.h>
401da177e4SLinus Torvalds #include <net/snmp.h>
411da177e4SLinus Torvalds #include <net/ip.h>
42c752f073SArnaldo Carvalho de Melo #include <net/tcp_states.h>
43bdf1ee5dSIlpo Järvinen #include <net/inet_ecn.h>
440c266898SSatoru SATOH #include <net/dst.h>
45c752f073SArnaldo Carvalho de Melo 
461da177e4SLinus Torvalds #include <linux/seq_file.h>
47180d8cd9SGlauber Costa #include <linux/memcontrol.h>
481da177e4SLinus Torvalds 
490f7ff927SArnaldo Carvalho de Melo extern struct inet_hashinfo tcp_hashinfo;
501da177e4SLinus Torvalds 
51dd24c001SEric Dumazet extern struct percpu_counter tcp_orphan_count;
525c9f3023SJoe Perches void tcp_time_wait(struct sock *sk, int state, int timeo);
531da177e4SLinus Torvalds 
541da177e4SLinus Torvalds #define MAX_TCP_HEADER	(128 + MAX_HEADER)
5533ad798cSAdam Langley #define MAX_TCP_OPTION_SPACE 40
561da177e4SLinus Torvalds 
571da177e4SLinus Torvalds /*
581da177e4SLinus Torvalds  * Never offer a window over 32767 without using window scaling. Some
591da177e4SLinus Torvalds  * poor stacks do signed 16bit maths!
601da177e4SLinus Torvalds  */
611da177e4SLinus Torvalds #define MAX_TCP_WINDOW		32767U
621da177e4SLinus Torvalds 
631da177e4SLinus Torvalds /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
641da177e4SLinus Torvalds #define TCP_MIN_MSS		88U
651da177e4SLinus Torvalds 
665d424d5aSJohn Heffner /* The least MTU to use for probing */
67dcd8fb85SFan Du #define TCP_BASE_MSS		1024
685d424d5aSJohn Heffner 
6905cbc0dbSFan Du /* probing interval, defaults to 10 minutes as per RFC 4821 */
7005cbc0dbSFan Du #define TCP_PROBE_INTERVAL	600
7105cbc0dbSFan Du 
726b58e0a5SFan Du /* Size of the MTU search range (bytes) below which MTU probing stops */
736b58e0a5SFan Du #define TCP_PROBE_THRESHOLD	8
746b58e0a5SFan Du 
751da177e4SLinus Torvalds /* After receiving this amount of duplicate ACKs fast retransmit starts. */
761da177e4SLinus Torvalds #define TCP_FASTRETRANS_THRESH 3
771da177e4SLinus Torvalds 
781da177e4SLinus Torvalds /* Maximal number of ACKs sent quickly to accelerate slow-start. */
791da177e4SLinus Torvalds #define TCP_MAX_QUICKACKS	16U
801da177e4SLinus Torvalds 
81589c49cbSGao Feng /* Maximal window scale value according to RFC1323 */
82589c49cbSGao Feng #define TCP_MAX_WSCALE		14U
83589c49cbSGao Feng 
841da177e4SLinus Torvalds /* urg_data states */
851da177e4SLinus Torvalds #define TCP_URG_VALID	0x0100
861da177e4SLinus Torvalds #define TCP_URG_NOTYET	0x0200
871da177e4SLinus Torvalds #define TCP_URG_READ	0x0400
881da177e4SLinus Torvalds 
891da177e4SLinus Torvalds #define TCP_RETR1	3	/*
901da177e4SLinus Torvalds 				 * This is how many retries it does before it
911da177e4SLinus Torvalds 				 * tries to figure out if the gateway is
921da177e4SLinus Torvalds 				 * down. Minimal RFC value is 3; it corresponds
931da177e4SLinus Torvalds 				 * to ~3sec-8min depending on RTO.
941da177e4SLinus Torvalds 				 */
951da177e4SLinus Torvalds 
961da177e4SLinus Torvalds #define TCP_RETR2	15	/*
971da177e4SLinus Torvalds 				 * This should take at least
981da177e4SLinus Torvalds 				 * 90 minutes to time out.
991da177e4SLinus Torvalds 				 * RFC1122 says that the limit is 100 sec.
1001da177e4SLinus Torvalds 				 * 15 is ~13-30min depending on RTO.
1011da177e4SLinus Torvalds 				 */
1021da177e4SLinus Torvalds 
1036c9ff979SAlex Bergmann #define TCP_SYN_RETRIES	 6	/* This is how many retries are done
1046c9ff979SAlex Bergmann 				 * when actively opening a connection.
1056c9ff979SAlex Bergmann 				 * RFC1122 says the minimum retry MUST
1066c9ff979SAlex Bergmann 				 * be at least 180secs.  Nevertheless
1076c9ff979SAlex Bergmann 				 * this value corresponds to
1086c9ff979SAlex Bergmann 				 * 63secs of retransmission with the
1096c9ff979SAlex Bergmann 				 * current initial RTO.
1106c9ff979SAlex Bergmann 				 */
1111da177e4SLinus Torvalds 
1126c9ff979SAlex Bergmann #define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
1136c9ff979SAlex Bergmann 				 * when passively opening a connection.
1146c9ff979SAlex Bergmann 				 * This corresponds to 31secs of
1156c9ff979SAlex Bergmann 				 * retransmission with the current
1166c9ff979SAlex Bergmann 				 * initial RTO.
1176c9ff979SAlex Bergmann 				 */
1181da177e4SLinus Torvalds 
1191da177e4SLinus Torvalds #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
1201da177e4SLinus Torvalds 				  * state, about 60 seconds	*/
1211da177e4SLinus Torvalds #define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
1221da177e4SLinus Torvalds                                  /* BSD style FIN_WAIT2 deadlock breaker.
1231da177e4SLinus Torvalds 				  * It used to be 3min, new value is 60sec,
1241da177e4SLinus Torvalds 				  * to combine FIN-WAIT-2 timeout with
1251da177e4SLinus Torvalds 				  * TIME-WAIT timer.
1261da177e4SLinus Torvalds 				  */
1271da177e4SLinus Torvalds 
1281da177e4SLinus Torvalds #define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
1291da177e4SLinus Torvalds #if HZ >= 100
1301da177e4SLinus Torvalds #define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
1311da177e4SLinus Torvalds #define TCP_ATO_MIN	((unsigned)(HZ/25))
1321da177e4SLinus Torvalds #else
1331da177e4SLinus Torvalds #define TCP_DELACK_MIN	4U
1341da177e4SLinus Torvalds #define TCP_ATO_MIN	4U
1351da177e4SLinus Torvalds #endif
1361da177e4SLinus Torvalds #define TCP_RTO_MAX	((unsigned)(120*HZ))
1371da177e4SLinus Torvalds #define TCP_RTO_MIN	((unsigned)(HZ/5))
138fd4f2ceaSEric Dumazet #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
1399ad7c049SJerry Chu #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
1409ad7c049SJerry Chu 						 * used as a fallback RTO for the
1419ad7c049SJerry Chu 						 * initial data transmission if no
1429ad7c049SJerry Chu 						 * valid RTT sample has been acquired,
1439ad7c049SJerry Chu 						 * most likely due to retrans in 3WHS.
1449ad7c049SJerry Chu 						 */
1451da177e4SLinus Torvalds 
1461da177e4SLinus Torvalds #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
1471da177e4SLinus Torvalds 					                 * for local resources.
1481da177e4SLinus Torvalds 					                 */
14957dde7f7SYuchung Cheng #define TCP_REO_TIMEOUT_MIN	(2000) /* Min RACK reordering timeout in usec */
1501da177e4SLinus Torvalds 
1511da177e4SLinus Torvalds #define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
1521da177e4SLinus Torvalds #define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
1531da177e4SLinus Torvalds #define TCP_KEEPALIVE_INTVL	(75*HZ)
1541da177e4SLinus Torvalds 
1551da177e4SLinus Torvalds #define MAX_TCP_KEEPIDLE	32767
1561da177e4SLinus Torvalds #define MAX_TCP_KEEPINTVL	32767
1571da177e4SLinus Torvalds #define MAX_TCP_KEEPCNT		127
1581da177e4SLinus Torvalds #define MAX_TCP_SYNCNT		127
1591da177e4SLinus Torvalds 
1601da177e4SLinus Torvalds #define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
1611da177e4SLinus Torvalds 
1621da177e4SLinus Torvalds #define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
1631da177e4SLinus Torvalds #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
1641da177e4SLinus Torvalds 					 * after this time. It should be equal to
1651da177e4SLinus Torvalds 					 * (or greater than) TCP_TIMEWAIT_LEN
1661da177e4SLinus Torvalds 					 * to provide reliability equal to that
1671da177e4SLinus Torvalds 					 * provided by the timewait state.
1681da177e4SLinus Torvalds 					 */
1691da177e4SLinus Torvalds #define TCP_PAWS_WINDOW	1		/* Replay window for per-host
1701da177e4SLinus Torvalds 					 * timestamps. It must be less than
1711da177e4SLinus Torvalds 					 * minimal timewait lifetime.
1721da177e4SLinus Torvalds 					 */
1731da177e4SLinus Torvalds /*
1741da177e4SLinus Torvalds  *	TCP option
1751da177e4SLinus Torvalds  */
1761da177e4SLinus Torvalds 
1771da177e4SLinus Torvalds #define TCPOPT_NOP		1	/* Padding */
1781da177e4SLinus Torvalds #define TCPOPT_EOL		0	/* End of options */
1791da177e4SLinus Torvalds #define TCPOPT_MSS		2	/* Segment size negotiating */
1801da177e4SLinus Torvalds #define TCPOPT_WINDOW		3	/* Window scaling */
1811da177e4SLinus Torvalds #define TCPOPT_SACK_PERM        4       /* SACK Permitted */
1821da177e4SLinus Torvalds #define TCPOPT_SACK             5       /* SACK Block */
1831da177e4SLinus Torvalds #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
184cfb6eeb4SYOSHIFUJI Hideaki #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
1857f9b838bSDaniel Lee #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
1862100c8d2SYuchung Cheng #define TCPOPT_EXP		254	/* Experimental */
1872100c8d2SYuchung Cheng /* Magic number placed at the start of the option value so that the TCP
1882100c8d2SYuchung Cheng  * experimental option space can be shared. See draft-ietf-tcpm-experimental-options-00.txt
1892100c8d2SYuchung Cheng  */
1902100c8d2SYuchung Cheng #define TCPOPT_FASTOPEN_MAGIC	0xF989
1911da177e4SLinus Torvalds 
1921da177e4SLinus Torvalds /*
1931da177e4SLinus Torvalds  *     TCP option lengths
1941da177e4SLinus Torvalds  */
1951da177e4SLinus Torvalds 
1961da177e4SLinus Torvalds #define TCPOLEN_MSS            4
1971da177e4SLinus Torvalds #define TCPOLEN_WINDOW         3
1981da177e4SLinus Torvalds #define TCPOLEN_SACK_PERM      2
1991da177e4SLinus Torvalds #define TCPOLEN_TIMESTAMP      10
200cfb6eeb4SYOSHIFUJI Hideaki #define TCPOLEN_MD5SIG         18
2017f9b838bSDaniel Lee #define TCPOLEN_FASTOPEN_BASE  2
2022100c8d2SYuchung Cheng #define TCPOLEN_EXP_FASTOPEN_BASE  4
2031da177e4SLinus Torvalds 
2041da177e4SLinus Torvalds /* But this is what stacks really send out. */
2051da177e4SLinus Torvalds #define TCPOLEN_TSTAMP_ALIGNED		12
2061da177e4SLinus Torvalds #define TCPOLEN_WSCALE_ALIGNED		4
2071da177e4SLinus Torvalds #define TCPOLEN_SACKPERM_ALIGNED	4
2081da177e4SLinus Torvalds #define TCPOLEN_SACK_BASE		2
2091da177e4SLinus Torvalds #define TCPOLEN_SACK_BASE_ALIGNED	4
2101da177e4SLinus Torvalds #define TCPOLEN_SACK_PERBLOCK		8
211cfb6eeb4SYOSHIFUJI Hideaki #define TCPOLEN_MD5SIG_ALIGNED		20
21233ad798cSAdam Langley #define TCPOLEN_MSS_ALIGNED		4
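/* e.g. the timestamp option is emitted as two NOPs followed by kind 8,
 * length 10, i.e. TCPOLEN_TSTAMP_ALIGNED (12) bytes in total, which keeps
 * the option block 32-bit aligned.
 */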
2131da177e4SLinus Torvalds 
2141da177e4SLinus Torvalds /* Flags in tp->nonagle */
2151da177e4SLinus Torvalds #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
2161da177e4SLinus Torvalds #define TCP_NAGLE_CORK		2	/* Socket is corked	    */
217caa20d9aSStephen Hemminger #define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
2181da177e4SLinus Torvalds 
21936e31b0aSAndreas Petlund /* TCP thin-stream limits */
22036e31b0aSAndreas Petlund #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
22136e31b0aSAndreas Petlund 
22221603fc4SJörg Thalheim /* TCP initial congestion window as per rfc6928 */
223442b9635SDavid S. Miller #define TCP_INIT_CWND		10
224442b9635SDavid S. Miller 
225cf60af03SYuchung Cheng /* Bit Flags for sysctl_tcp_fastopen */
226cf60af03SYuchung Cheng #define	TFO_CLIENT_ENABLE	1
22710467163SJerry Chu #define	TFO_SERVER_ENABLE	2
22867da22d2SYuchung Cheng #define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
229cf60af03SYuchung Cheng 
23010467163SJerry Chu /* Accept SYN data w/o any cookie option */
23110467163SJerry Chu #define	TFO_SERVER_COOKIE_NOT_REQD	0x200
23210467163SJerry Chu 
23310467163SJerry Chu /* Force enable TFO on all listeners, i.e., not requiring the
234cebc5cbaSYuchung Cheng  * TCP_FASTOPEN socket option.
23510467163SJerry Chu  */
23610467163SJerry Chu #define	TFO_SERVER_WO_SOCKOPT1	0x400
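/* e.g. sysctl net.ipv4.tcp_fastopen = 3 sets TFO_CLIENT_ENABLE |
 * TFO_SERVER_ENABLE, enabling both the client and server code paths
 * (listeners still opt in with the TCP_FASTOPEN socket option unless
 * TFO_SERVER_WO_SOCKOPT1 is also set).
 */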
23710467163SJerry Chu 
238295ff7edSArnaldo Carvalho de Melo 
2391da177e4SLinus Torvalds /* sysctl variables for tcp */
2401da177e4SLinus Torvalds extern int sysctl_tcp_timestamps;
2411da177e4SLinus Torvalds extern int sysctl_tcp_window_scaling;
2421da177e4SLinus Torvalds extern int sysctl_tcp_sack;
2432100c8d2SYuchung Cheng extern int sysctl_tcp_fastopen;
2441da177e4SLinus Torvalds extern int sysctl_tcp_retrans_collapse;
2451da177e4SLinus Torvalds extern int sysctl_tcp_stdurg;
2461da177e4SLinus Torvalds extern int sysctl_tcp_rfc1337;
2471da177e4SLinus Torvalds extern int sysctl_tcp_abort_on_overflow;
2481da177e4SLinus Torvalds extern int sysctl_tcp_max_orphans;
2491da177e4SLinus Torvalds extern int sysctl_tcp_fack;
2501da177e4SLinus Torvalds extern int sysctl_tcp_reordering;
251dca145ffSEric Dumazet extern int sysctl_tcp_max_reordering;
2521da177e4SLinus Torvalds extern int sysctl_tcp_dsack;
253a4fe34bfSEric W. Biederman extern long sysctl_tcp_mem[3];
2541da177e4SLinus Torvalds extern int sysctl_tcp_wmem[3];
2551da177e4SLinus Torvalds extern int sysctl_tcp_rmem[3];
2561da177e4SLinus Torvalds extern int sysctl_tcp_app_win;
2571da177e4SLinus Torvalds extern int sysctl_tcp_adv_win_scale;
2581da177e4SLinus Torvalds extern int sysctl_tcp_frto;
2591da177e4SLinus Torvalds extern int sysctl_tcp_low_latency;
2601da177e4SLinus Torvalds extern int sysctl_tcp_nometrics_save;
2611da177e4SLinus Torvalds extern int sysctl_tcp_moderate_rcvbuf;
2621da177e4SLinus Torvalds extern int sysctl_tcp_tso_win_divisor;
26315d99e02SRick Jones extern int sysctl_tcp_workaround_signed_windows;
26435089bb2SDavid S. Miller extern int sysctl_tcp_slow_start_after_idle;
26536e31b0aSAndreas Petlund extern int sysctl_tcp_thin_linear_timeouts;
2667e380175SAndreas Petlund extern int sysctl_tcp_thin_dupack;
267eed530b6SYuchung Cheng extern int sysctl_tcp_early_retrans;
268a0370b3fSYuchung Cheng extern int sysctl_tcp_recovery;
269a0370b3fSYuchung Cheng #define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
270a0370b3fSYuchung Cheng 
27146d3ceabSEric Dumazet extern int sysctl_tcp_limit_output_bytes;
272282f23c6SEric Dumazet extern int sysctl_tcp_challenge_ack_limit;
27395bd09ebSEric Dumazet extern int sysctl_tcp_min_tso_segs;
274f6722583SYuchung Cheng extern int sysctl_tcp_min_rtt_wlen;
275f54b3111SEric Dumazet extern int sysctl_tcp_autocorking;
276032ee423SNeal Cardwell extern int sysctl_tcp_invalid_ratelimit;
27743e122b0SEric Dumazet extern int sysctl_tcp_pacing_ss_ratio;
27843e122b0SEric Dumazet extern int sysctl_tcp_pacing_ca_ratio;
2791da177e4SLinus Torvalds 
2808d987e5cSEric Dumazet extern atomic_long_t tcp_memory_allocated;
2811748376bSEric Dumazet extern struct percpu_counter tcp_sockets_allocated;
2821da177e4SLinus Torvalds extern int tcp_memory_pressure;
2831da177e4SLinus Torvalds 
284b8da51ebSEric Dumazet /* optimized version of sk_under_memory_pressure() for TCP sockets */
285b8da51ebSEric Dumazet static inline bool tcp_under_memory_pressure(const struct sock *sk)
286b8da51ebSEric Dumazet {
287baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
288baac50bbSJohannes Weiner 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
289e805605cSJohannes Weiner 		return true;
290b8da51ebSEric Dumazet 
291b8da51ebSEric Dumazet 	return tcp_memory_pressure;
292b8da51ebSEric Dumazet }
2931da177e4SLinus Torvalds /*
2941da177e4SLinus Torvalds  * The next routines deal with comparing 32 bit unsigned ints
2951da177e4SLinus Torvalds  * and worry about wraparound (automatic with unsigned arithmetic).
2961da177e4SLinus Torvalds  */
2971da177e4SLinus Torvalds 
298a2a385d6SEric Dumazet static inline bool before(__u32 seq1, __u32 seq2)
2991da177e4SLinus Torvalds {
3000d630cc0SGerrit Renker         return (__s32)(seq1-seq2) < 0;
3011da177e4SLinus Torvalds }
3029a036b9cSGerrit Renker #define after(seq2, seq1) 	before(seq1, seq2)
3031da177e4SLinus Torvalds 
3041da177e4SLinus Torvalds /* is s2<=s1<=s3 ? */
305a2a385d6SEric Dumazet static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
3061da177e4SLinus Torvalds {
3071da177e4SLinus Torvalds 	return seq3 - seq2 >= seq1 - seq2;
3081da177e4SLinus Torvalds }
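/* A small illustrative sketch (not part of the kernel API) of how the
 * helpers above behave across 32-bit sequence wraparound; the function
 * name is hypothetical and exists only for this example.
 */
static inline bool example_seq_wraparound(void)
{
	__u32 a = 0xfffffff0U;	/* just below the wrap point        */
	__u32 b = 0x00000010U;	/* 0x20 bytes later, after wrapping */

	/* (__s32)(a - b) is negative, so before(a, b) and after(b, a)
	 * both hold even though b is numerically smaller than a, and
	 * between(a + 8, a, b) sees a + 8 inside the wrapped interval.
	 */
	return before(a, b) && after(b, a) && between(a + 8, a, b);
}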
3091da177e4SLinus Torvalds 
310efcdbf24SArun Sharma static inline bool tcp_out_of_memory(struct sock *sk)
311efcdbf24SArun Sharma {
312efcdbf24SArun Sharma 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
313efcdbf24SArun Sharma 	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
314efcdbf24SArun Sharma 		return true;
315efcdbf24SArun Sharma 	return false;
316efcdbf24SArun Sharma }
317efcdbf24SArun Sharma 
318a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size);
319a6c5ea4cSEric Dumazet 
320ad1af0feSDavid S. Miller static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
321e4fd5da3SPavel Emelianov {
322ad1af0feSDavid S. Miller 	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
323ad1af0feSDavid S. Miller 	int orphans = percpu_counter_read_positive(ocp);
324ad1af0feSDavid S. Miller 
325ad1af0feSDavid S. Miller 	if (orphans << shift > sysctl_tcp_max_orphans) {
326ad1af0feSDavid S. Miller 		orphans = percpu_counter_sum_positive(ocp);
327ad1af0feSDavid S. Miller 		if (orphans << shift > sysctl_tcp_max_orphans)
328ad1af0feSDavid S. Miller 			return true;
329ad1af0feSDavid S. Miller 	}
330ad1af0feSDavid S. Miller 	return false;
331e4fd5da3SPavel Emelianov }
3321da177e4SLinus Torvalds 
3335c9f3023SJoe Perches bool tcp_check_oom(struct sock *sk, int shift);
334efcdbf24SArun Sharma 
335a0f82f64SFlorian Westphal 
3361da177e4SLinus Torvalds extern struct proto tcp_prot;
3371da177e4SLinus Torvalds 
33857ef42d5SPavel Emelyanov #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
33913415e46SEric Dumazet #define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
34057ef42d5SPavel Emelyanov #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
341aa2ea058STom Herbert #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
3421da177e4SLinus Torvalds 
3435c9f3023SJoe Perches void tcp_tasklet_init(void);
34446d3ceabSEric Dumazet 
3455c9f3023SJoe Perches void tcp_v4_err(struct sk_buff *skb, u32);
3461da177e4SLinus Torvalds 
3475c9f3023SJoe Perches void tcp_shutdown(struct sock *sk, int how);
3481da177e4SLinus Torvalds 
3495c9f3023SJoe Perches void tcp_v4_early_demux(struct sk_buff *skb);
3505c9f3023SJoe Perches int tcp_v4_rcv(struct sk_buff *skb);
3511da177e4SLinus Torvalds 
3525c9f3023SJoe Perches int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
3531b784140SYing Xue int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
3545c9f3023SJoe Perches int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
3555c9f3023SJoe Perches 		 int flags);
3565c9f3023SJoe Perches void tcp_release_cb(struct sock *sk);
3575c9f3023SJoe Perches void tcp_wfree(struct sk_buff *skb);
3585c9f3023SJoe Perches void tcp_write_timer_handler(struct sock *sk);
3595c9f3023SJoe Perches void tcp_delack_timer_handler(struct sock *sk);
3605c9f3023SJoe Perches int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
36172ab4a86SEric Dumazet int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
3625c9f3023SJoe Perches void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
363cf533ea5SEric Dumazet 			 const struct tcphdr *th, unsigned int len);
3645c9f3023SJoe Perches void tcp_rcv_space_adjust(struct sock *sk);
3655c9f3023SJoe Perches int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
3665c9f3023SJoe Perches void tcp_twsk_destructor(struct sock *sk);
3675c9f3023SJoe Perches ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
36853d3176bSChangli Gao 			struct pipe_inode_info *pipe, size_t len,
36953d3176bSChangli Gao 			unsigned int flags);
3709c55e01cSJens Axboe 
371463c84b9SArnaldo Carvalho de Melo static inline void tcp_dec_quickack_mode(struct sock *sk,
372463c84b9SArnaldo Carvalho de Melo 					 const unsigned int pkts)
3731da177e4SLinus Torvalds {
374463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
375fc6415bcSDavid S. Miller 
376463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.quick) {
377463c84b9SArnaldo Carvalho de Melo 		if (pkts >= icsk->icsk_ack.quick) {
378463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.quick = 0;
3791da177e4SLinus Torvalds 			/* Leaving quickack mode we deflate ATO. */
380463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.ato   = TCP_ATO_MIN;
381fc6415bcSDavid S. Miller 		} else
382463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.quick -= pkts;
3831da177e4SLinus Torvalds 	}
3841da177e4SLinus Torvalds }
3851da177e4SLinus Torvalds 
386bdf1ee5dSIlpo Järvinen #define	TCP_ECN_OK		1
387bdf1ee5dSIlpo Järvinen #define	TCP_ECN_QUEUE_CWR	2
388bdf1ee5dSIlpo Järvinen #define	TCP_ECN_DEMAND_CWR	4
3897a269ffaSEric Dumazet #define	TCP_ECN_SEEN		8
390bdf1ee5dSIlpo Järvinen 
391fd2c3ef7SEric Dumazet enum tcp_tw_status {
3921da177e4SLinus Torvalds 	TCP_TW_SUCCESS = 0,
3931da177e4SLinus Torvalds 	TCP_TW_RST = 1,
3941da177e4SLinus Torvalds 	TCP_TW_ACK = 2,
3951da177e4SLinus Torvalds 	TCP_TW_SYN = 3
3961da177e4SLinus Torvalds };
3971da177e4SLinus Torvalds 
3981da177e4SLinus Torvalds 
3995c9f3023SJoe Perches enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
4001da177e4SLinus Torvalds 					      struct sk_buff *skb,
4018feaf0c0SArnaldo Carvalho de Melo 					      const struct tcphdr *th);
4025c9f3023SJoe Perches struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
40352452c54SEric Dumazet 			   struct request_sock *req, bool fastopen);
4045c9f3023SJoe Perches int tcp_child_process(struct sock *parent, struct sock *child,
4051da177e4SLinus Torvalds 		      struct sk_buff *skb);
4065ae344c9SNeal Cardwell void tcp_enter_loss(struct sock *sk);
40757dde7f7SYuchung Cheng void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
4085c9f3023SJoe Perches void tcp_clear_retrans(struct tcp_sock *tp);
4095c9f3023SJoe Perches void tcp_update_metrics(struct sock *sk);
4105c9f3023SJoe Perches void tcp_init_metrics(struct sock *sk);
4115c9f3023SJoe Perches void tcp_metrics_init(void);
412d82bae12SSoheil Hassas Yeganeh bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
4135c9f3023SJoe Perches void tcp_disable_fack(struct tcp_sock *tp);
4145c9f3023SJoe Perches void tcp_close(struct sock *sk, long timeout);
4155c9f3023SJoe Perches void tcp_init_sock(struct sock *sk);
4165c9f3023SJoe Perches unsigned int tcp_poll(struct file *file, struct socket *sock,
41753d3176bSChangli Gao 		      struct poll_table_struct *wait);
4185c9f3023SJoe Perches int tcp_getsockopt(struct sock *sk, int level, int optname,
4193fdadf7dSDmitry Mishin 		   char __user *optval, int __user *optlen);
4205c9f3023SJoe Perches int tcp_setsockopt(struct sock *sk, int level, int optname,
42153d3176bSChangli Gao 		   char __user *optval, unsigned int optlen);
4225c9f3023SJoe Perches int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
42353d3176bSChangli Gao 			  char __user *optval, int __user *optlen);
4245c9f3023SJoe Perches int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
425b7058842SDavid S. Miller 			  char __user *optval, unsigned int optlen);
4265c9f3023SJoe Perches void tcp_set_keepalive(struct sock *sk, int val);
42742cb80a2SEric Dumazet void tcp_syn_ack_timeout(const struct request_sock *req);
4281b784140SYing Xue int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
4291b784140SYing Xue 		int flags, int *addr_len);
4305c9f3023SJoe Perches void tcp_parse_options(const struct sk_buff *skb,
4311a2c6181SChristoph Paasch 		       struct tcp_options_received *opt_rx,
4322100c8d2SYuchung Cheng 		       int estab, struct tcp_fastopen_cookie *foc);
4335c9f3023SJoe Perches const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
4347d5d5525SYOSHIFUJI Hideaki 
4351da177e4SLinus Torvalds /*
4361da177e4SLinus Torvalds  *	TCP v4 functions exported for the inet6 API
4371da177e4SLinus Torvalds  */
4381da177e4SLinus Torvalds 
4395c9f3023SJoe Perches void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
4404fab9071SNeal Cardwell void tcp_v4_mtu_reduced(struct sock *sk);
4419cf74903SEric Dumazet void tcp_req_err(struct sock *sk, u32 seq, bool abort);
4425c9f3023SJoe Perches int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
443c28c6f04SEric Dumazet struct sock *tcp_create_openreq_child(const struct sock *sk,
44460236fddSArnaldo Carvalho de Melo 				      struct request_sock *req,
4451da177e4SLinus Torvalds 				      struct sk_buff *skb);
44681164413SDaniel Borkmann void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
4470c27171eSEric Dumazet struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
44860236fddSArnaldo Carvalho de Melo 				  struct request_sock *req,
4495e0724d0SEric Dumazet 				  struct dst_entry *dst,
4505e0724d0SEric Dumazet 				  struct request_sock *req_unhash,
4515e0724d0SEric Dumazet 				  bool *own_req);
4525c9f3023SJoe Perches int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
4535c9f3023SJoe Perches int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
4545c9f3023SJoe Perches int tcp_connect(struct sock *sk);
455b3d05147SEric Dumazet enum tcp_synack_type {
456b3d05147SEric Dumazet 	TCP_SYNACK_NORMAL,
457b3d05147SEric Dumazet 	TCP_SYNACK_FASTOPEN,
458b3d05147SEric Dumazet 	TCP_SYNACK_COOKIE,
459b3d05147SEric Dumazet };
4605d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
461e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
462ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
463b3d05147SEric Dumazet 				enum tcp_synack_type synack_type);
4645c9f3023SJoe Perches int tcp_disconnect(struct sock *sk, int flags);
4651da177e4SLinus Torvalds 
466370816aeSPavel Emelyanov void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
467292e8d8cSPavel Emelyanov int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
46863d02d15SEric Dumazet void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
4691da177e4SLinus Torvalds 
4701da177e4SLinus Torvalds /* From syncookies.c */
471b80c0e78SEric Dumazet struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
472b80c0e78SEric Dumazet 				 struct request_sock *req,
47384b114b9SEric Dumazet 				 struct dst_entry *dst, u32 tsoff);
4745c9f3023SJoe Perches int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
4750198230bSPatrick McHardy 		      u32 cookie);
476461b74c3SCong Wang struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
477e05c82d3SEric Dumazet #ifdef CONFIG_SYN_COOKIES
4788c27bd75SFlorian Westphal 
47963262315SEric Dumazet /* Syncookies use a monotonic timer which increments every 60 seconds.
4808c27bd75SFlorian Westphal  * This counter is used both as a hash input and partially encoded into
4818c27bd75SFlorian Westphal  * the cookie value.  A cookie is only validated further if the delta
4828c27bd75SFlorian Westphal  * between the current counter value and the encoded one is less than this,
48363262315SEric Dumazet  * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
4848c27bd75SFlorian Westphal  * the counter advances immediately after a cookie is generated).
4858c27bd75SFlorian Westphal  */
4868c27bd75SFlorian Westphal #define MAX_SYNCOOKIE_AGE	2
487264ea103SEric Dumazet #define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
488264ea103SEric Dumazet #define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
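/* Worked example, assuming HZ == 1000: TCP_SYNCOOKIE_PERIOD is then
 * 60000 jiffies and TCP_SYNCOOKIE_VALID 120000 jiffies, i.e. a cookie
 * is honoured for at most two minutes after the counter value it encodes.
 */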
489264ea103SEric Dumazet 
490264ea103SEric Dumazet /* syncookies: remember the time of the last synqueue overflow,
491264ea103SEric Dumazet  * but do not dirty this field too often (once per second is enough).
4923f684b4bSEric Dumazet  * It is racy as we do not hold a lock, but the race is very minor.
493264ea103SEric Dumazet  */
4943f684b4bSEric Dumazet static inline void tcp_synq_overflow(const struct sock *sk)
495264ea103SEric Dumazet {
496264ea103SEric Dumazet 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
497264ea103SEric Dumazet 	unsigned long now = jiffies;
498264ea103SEric Dumazet 
499264ea103SEric Dumazet 	if (time_after(now, last_overflow + HZ))
500264ea103SEric Dumazet 		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
501264ea103SEric Dumazet }
502264ea103SEric Dumazet 
503264ea103SEric Dumazet /* syncookies: no recent synqueue overflow on this listening socket? */
504264ea103SEric Dumazet static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
505264ea103SEric Dumazet {
506264ea103SEric Dumazet 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
507264ea103SEric Dumazet 
508264ea103SEric Dumazet 	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
509264ea103SEric Dumazet }
5108c27bd75SFlorian Westphal 
5118c27bd75SFlorian Westphal static inline u32 tcp_cookie_time(void)
5128c27bd75SFlorian Westphal {
51363262315SEric Dumazet 	u64 val = get_jiffies_64();
51463262315SEric Dumazet 
515264ea103SEric Dumazet 	do_div(val, TCP_SYNCOOKIE_PERIOD);
51663262315SEric Dumazet 	return val;
5178c27bd75SFlorian Westphal }
5188c27bd75SFlorian Westphal 
5195c9f3023SJoe Perches u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
5205c9f3023SJoe Perches 			      u16 *mssp);
5213f684b4bSEric Dumazet __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
5225c9f3023SJoe Perches __u32 cookie_init_timestamp(struct request_sock *req);
523f1673381SFlorian Westphal bool cookie_timestamp_decode(struct tcp_options_received *opt);
524f1673381SFlorian Westphal bool cookie_ecn_ok(const struct tcp_options_received *opt,
525f7b3bec6SFlorian Westphal 		   const struct net *net, const struct dst_entry *dst);
5264dfc2817SFlorian Westphal 
527c6aefafbSGlenn Griffin /* From net/ipv6/syncookies.c */
5285c9f3023SJoe Perches int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
52981eb6a14SPatrick McHardy 		      u32 cookie);
5305c9f3023SJoe Perches struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
531f1673381SFlorian Westphal 
5325c9f3023SJoe Perches u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
53381eb6a14SPatrick McHardy 			      const struct tcphdr *th, u16 *mssp);
5343f684b4bSEric Dumazet __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
535e05c82d3SEric Dumazet #endif
5361da177e4SLinus Torvalds /* tcp_output.c */
5371da177e4SLinus Torvalds 
5381b3878caSNeal Cardwell u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
5391b3878caSNeal Cardwell 		     int min_tso_segs);
5405c9f3023SJoe Perches void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
5419e412ba7SIlpo Järvinen 			       int nonagle);
5425c9f3023SJoe Perches bool tcp_may_send_now(struct sock *sk);
54310d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
54410d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
5455c9f3023SJoe Perches void tcp_retransmit_timer(struct sock *sk);
5465c9f3023SJoe Perches void tcp_xmit_retransmit_queue(struct sock *);
5475c9f3023SJoe Perches void tcp_simple_retransmit(struct sock *);
54857dde7f7SYuchung Cheng void tcp_enter_recovery(struct sock *sk, bool ece_ack);
5495c9f3023SJoe Perches int tcp_trim_head(struct sock *, struct sk_buff *, u32);
5506cc55e09SOctavian Purdila int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
5511da177e4SLinus Torvalds 
5525c9f3023SJoe Perches void tcp_send_probe0(struct sock *);
5535c9f3023SJoe Perches void tcp_send_partial(struct sock *);
554e520af48SEric Dumazet int tcp_write_wakeup(struct sock *, int mib);
5555c9f3023SJoe Perches void tcp_send_fin(struct sock *sk);
5565c9f3023SJoe Perches void tcp_send_active_reset(struct sock *sk, gfp_t priority);
5575c9f3023SJoe Perches int tcp_send_synack(struct sock *);
5585c9f3023SJoe Perches void tcp_push_one(struct sock *, unsigned int mss_now);
5595c9f3023SJoe Perches void tcp_send_ack(struct sock *sk);
5605c9f3023SJoe Perches void tcp_send_delayed_ack(struct sock *sk);
5615c9f3023SJoe Perches void tcp_send_loss_probe(struct sock *sk);
5625c9f3023SJoe Perches bool tcp_schedule_loss_probe(struct sock *sk);
563cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
564cfea5a68SMartin KaFai Lau 			     const struct sk_buff *next_skb);
5651da177e4SLinus Torvalds 
566a762a980SDavid S. Miller /* tcp_input.c */
5675c9f3023SJoe Perches void tcp_rearm_rto(struct sock *sk);
5680f1c28aeSYuchung Cheng void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
5695c9f3023SJoe Perches void tcp_reset(struct sock *sk);
5704f41b1c5SYuchung Cheng void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
571e3e17b77SEric Dumazet void tcp_fin(struct sock *sk);
572a762a980SDavid S. Miller 
5731da177e4SLinus Torvalds /* tcp_timer.c */
5745c9f3023SJoe Perches void tcp_init_xmit_timers(struct sock *);
575463c84b9SArnaldo Carvalho de Melo static inline void tcp_clear_xmit_timers(struct sock *sk)
576463c84b9SArnaldo Carvalho de Melo {
577463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timers(sk);
578463c84b9SArnaldo Carvalho de Melo }
5791da177e4SLinus Torvalds 
5805c9f3023SJoe Perches unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
5815c9f3023SJoe Perches unsigned int tcp_current_mss(struct sock *sk);
5820c54b85fSIlpo Järvinen 
5830c54b85fSIlpo Järvinen /* Bound MSS / TSO packet size by half of the window */
5840c54b85fSIlpo Järvinen static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
5850c54b85fSIlpo Järvinen {
58601f83d69SAlexey Kuznetsov 	int cutoff;
58701f83d69SAlexey Kuznetsov 
58801f83d69SAlexey Kuznetsov 	/* When peer uses tiny windows, there is no use in packetizing
58901f83d69SAlexey Kuznetsov 	 * to sub-MSS pieces for the sake of SWS or making sure there
59001f83d69SAlexey Kuznetsov 	 * are enough packets in the pipe for fast recovery.
59101f83d69SAlexey Kuznetsov 	 *
59201f83d69SAlexey Kuznetsov 	 * On the other hand, for extremely large MSS devices, handling
59301f83d69SAlexey Kuznetsov 	 * smaller than MSS windows in this way does make sense.
59401f83d69SAlexey Kuznetsov 	 */
5952631b79fSSeymour, Shane M 	if (tp->max_window > TCP_MSS_DEFAULT)
59601f83d69SAlexey Kuznetsov 		cutoff = (tp->max_window >> 1);
59701f83d69SAlexey Kuznetsov 	else
59801f83d69SAlexey Kuznetsov 		cutoff = tp->max_window;
59901f83d69SAlexey Kuznetsov 
60001f83d69SAlexey Kuznetsov 	if (cutoff && pktsize > cutoff)
60101f83d69SAlexey Kuznetsov 		return max_t(int, cutoff, 68U - tp->tcp_header_len);
6020c54b85fSIlpo Järvinen 	else
6030c54b85fSIlpo Järvinen 		return pktsize;
6040c54b85fSIlpo Järvinen }
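/* Worked example for the helper above (illustrative numbers only): with
 * tp->max_window = 65535 (> TCP_MSS_DEFAULT) the cutoff is 32767, so a
 * 64KB TSO packet is bounded to max(32767, 68 - tcp_header_len), while a
 * 1000-byte packet is returned unchanged.
 */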
6051da177e4SLinus Torvalds 
60617b085eaSArnaldo Carvalho de Melo /* tcp.c */
6070df48c26SEric Dumazet void tcp_get_info(struct sock *, struct tcp_info *);
6081da177e4SLinus Torvalds 
6091da177e4SLinus Torvalds /* Read 'sendfile()'-style from a TCP socket */
6105c9f3023SJoe Perches int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
6111da177e4SLinus Torvalds 		  sk_read_actor_t recv_actor);
6121da177e4SLinus Torvalds 
6135c9f3023SJoe Perches void tcp_initialize_rcv_mss(struct sock *sk);
6141da177e4SLinus Torvalds 
6155c9f3023SJoe Perches int tcp_mtu_to_mss(struct sock *sk, int pmtu);
6165c9f3023SJoe Perches int tcp_mss_to_mtu(struct sock *sk, int mss);
6175c9f3023SJoe Perches void tcp_mtup_init(struct sock *sk);
6185c9f3023SJoe Perches void tcp_init_buffer_space(struct sock *sk);
6195d424d5aSJohn Heffner 
620f1ecd5d9SDamian Lukowski static inline void tcp_bound_rto(const struct sock *sk)
621f1ecd5d9SDamian Lukowski {
622f1ecd5d9SDamian Lukowski 	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
623f1ecd5d9SDamian Lukowski 		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
624f1ecd5d9SDamian Lukowski }
625f1ecd5d9SDamian Lukowski 
626f1ecd5d9SDamian Lukowski static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
627f1ecd5d9SDamian Lukowski {
628740b0f18SEric Dumazet 	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
629f1ecd5d9SDamian Lukowski }
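/* Worked example (illustrative numbers): tp->srtt_us holds the smoothed
 * RTT shifted left by 3, so srtt_us = 1600000 is a 200 ms smoothed RTT;
 * with rttvar_us = 50000 the helper above yields usecs_to_jiffies(250000),
 * i.e. a 250 ms RTO before tcp_bound_rto() applies the TCP_RTO_MAX clamp.
 */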
630f1ecd5d9SDamian Lukowski 
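/* pred_flags caches the expected fourth 32-bit word of an incoming TCP
 * header (data offset, ACK flag and window), letting the receive path
 * compare it in a single load for the header-prediction fast path.
 */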
63140efc6faSStephen Hemminger static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
6321da177e4SLinus Torvalds {
6331da177e4SLinus Torvalds 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
6341da177e4SLinus Torvalds 			       ntohl(TCP_FLAG_ACK) |
6351da177e4SLinus Torvalds 			       snd_wnd);
6361da177e4SLinus Torvalds }
6371da177e4SLinus Torvalds 
63840efc6faSStephen Hemminger static inline void tcp_fast_path_on(struct tcp_sock *tp)
6391da177e4SLinus Torvalds {
6401da177e4SLinus Torvalds 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
6411da177e4SLinus Torvalds }
6421da177e4SLinus Torvalds 
6439e412ba7SIlpo Järvinen static inline void tcp_fast_path_check(struct sock *sk)
6441da177e4SLinus Torvalds {
6459e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
6469e412ba7SIlpo Järvinen 
6479f5afeaeSYaogong Wang 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
6481da177e4SLinus Torvalds 	    tp->rcv_wnd &&
6491da177e4SLinus Torvalds 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
6501da177e4SLinus Torvalds 	    !tp->urg_data)
6511da177e4SLinus Torvalds 		tcp_fast_path_on(tp);
6521da177e4SLinus Torvalds }
6531da177e4SLinus Torvalds 
6540c266898SSatoru SATOH /* Compute the actual rto_min value */
6550c266898SSatoru SATOH static inline u32 tcp_rto_min(struct sock *sk)
6560c266898SSatoru SATOH {
657cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
6580c266898SSatoru SATOH 	u32 rto_min = TCP_RTO_MIN;
6590c266898SSatoru SATOH 
6600c266898SSatoru SATOH 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
6610c266898SSatoru SATOH 		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
6620c266898SSatoru SATOH 	return rto_min;
6630c266898SSatoru SATOH }
6640c266898SSatoru SATOH 
665740b0f18SEric Dumazet static inline u32 tcp_rto_min_us(struct sock *sk)
666740b0f18SEric Dumazet {
667740b0f18SEric Dumazet 	return jiffies_to_usecs(tcp_rto_min(sk));
668740b0f18SEric Dumazet }
669740b0f18SEric Dumazet 
67081164413SDaniel Borkmann static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
67181164413SDaniel Borkmann {
67281164413SDaniel Borkmann 	return dst_metric_locked(dst, RTAX_CC_ALGO);
67381164413SDaniel Borkmann }
67481164413SDaniel Borkmann 
675f6722583SYuchung Cheng /* Minimum RTT in usec. ~0 means not available. */
676f6722583SYuchung Cheng static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
677f6722583SYuchung Cheng {
67864033892SNeal Cardwell 	return minmax_get(&tp->rtt_min);
679f6722583SYuchung Cheng }
680f6722583SYuchung Cheng 
6811da177e4SLinus Torvalds /* Compute the actual receive window we are currently advertising.
6831da177e4SLinus Torvalds  * Rcv_nxt can be after the window if our peer pushes more data
6831da177e4SLinus Torvalds  * than the offered window.
6841da177e4SLinus Torvalds  */
68540efc6faSStephen Hemminger static inline u32 tcp_receive_window(const struct tcp_sock *tp)
6861da177e4SLinus Torvalds {
6871da177e4SLinus Torvalds 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
6881da177e4SLinus Torvalds 
6891da177e4SLinus Torvalds 	if (win < 0)
6901da177e4SLinus Torvalds 		win = 0;
6911da177e4SLinus Torvalds 	return (u32) win;
6921da177e4SLinus Torvalds }
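/* Example (illustrative numbers): with rcv_wup = 1000, rcv_wnd = 500 and
 * rcv_nxt = 1600 the peer has sent 100 bytes beyond the offered window,
 * so the signed result is -100 and the advertised window is clamped to 0.
 */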
6931da177e4SLinus Torvalds 
6941da177e4SLinus Torvalds /* Choose a new window, without checks for shrinking, and without
6951da177e4SLinus Torvalds  * scaling applied to the result.  The caller does these things
6961da177e4SLinus Torvalds  * if necessary.  This is a "raw" window selection.
6971da177e4SLinus Torvalds  */
6985c9f3023SJoe Perches u32 __tcp_select_window(struct sock *sk);
6991da177e4SLinus Torvalds 
700ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk);
701ee995283SPavel Emelyanov 
7021da177e4SLinus Torvalds /* TCP timestamps are only 32 bits; this causes a slight
7031da177e4SLinus Torvalds  * complication on 64-bit systems since we store a snapshot
70431f34269SStephen Hemminger  * of jiffies in the buffer control blocks below.  We decided
70531f34269SStephen Hemminger  * to use only the low 32-bits of jiffies and hide the ugly
7061da177e4SLinus Torvalds  * casts with the following macro.
7071da177e4SLinus Torvalds  */
7081da177e4SLinus Torvalds #define tcp_time_stamp		((__u32)(jiffies))
7091da177e4SLinus Torvalds 
7107faee5c0SEric Dumazet static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
7117faee5c0SEric Dumazet {
7127faee5c0SEric Dumazet 	return skb->skb_mstamp.stamp_jiffies;
7137faee5c0SEric Dumazet }
7147faee5c0SEric Dumazet 
7157faee5c0SEric Dumazet 
716a3433f35SChangli Gao #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
717a3433f35SChangli Gao 
718a3433f35SChangli Gao #define TCPHDR_FIN 0x01
719a3433f35SChangli Gao #define TCPHDR_SYN 0x02
720a3433f35SChangli Gao #define TCPHDR_RST 0x04
721a3433f35SChangli Gao #define TCPHDR_PSH 0x08
722a3433f35SChangli Gao #define TCPHDR_ACK 0x10
723a3433f35SChangli Gao #define TCPHDR_URG 0x20
724a3433f35SChangli Gao #define TCPHDR_ECE 0x40
725a3433f35SChangli Gao #define TCPHDR_CWR 0x80
726a3433f35SChangli Gao 
72749213555SDaniel Borkmann #define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
72849213555SDaniel Borkmann 
729caa20d9aSStephen Hemminger /* This is what the send packet queuing engine uses to pass
730f86586faSEric Dumazet  * TCP per-packet control information to the transmission code.
731f86586faSEric Dumazet  * We also store the host-order sequence numbers here.
732f86586faSEric Dumazet  * This is 44 bytes if IPV6 is enabled.
733f86586faSEric Dumazet  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
7341da177e4SLinus Torvalds  */
7351da177e4SLinus Torvalds struct tcp_skb_cb {
7361da177e4SLinus Torvalds 	__u32		seq;		/* Starting sequence number	*/
7371da177e4SLinus Torvalds 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
738cd7d8498SEric Dumazet 	union {
739cd7d8498SEric Dumazet 		/* Note : tcp_tw_isn is used in input path only
740cd7d8498SEric Dumazet 		 *	  (isn chosen by tcp_timewait_state_process())
741cd7d8498SEric Dumazet 		 *
742f69ad292SEric Dumazet 		 * 	  tcp_gso_segs/size are used in write queue only,
743f69ad292SEric Dumazet 		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
744cd7d8498SEric Dumazet 		 */
745cd7d8498SEric Dumazet 		__u32		tcp_tw_isn;
746f69ad292SEric Dumazet 		struct {
747f69ad292SEric Dumazet 			u16	tcp_gso_segs;
748f69ad292SEric Dumazet 			u16	tcp_gso_size;
749f69ad292SEric Dumazet 		};
750cd7d8498SEric Dumazet 	};
7514de075e0SEric Dumazet 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
752f4f9f6e7SNeal Cardwell 
7531da177e4SLinus Torvalds 	__u8		sacked;		/* State flags for SACK/FACK.	*/
7541da177e4SLinus Torvalds #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
7551da177e4SLinus Torvalds #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
7561da177e4SLinus Torvalds #define TCPCB_LOST		0x04	/* SKB is lost			*/
7571da177e4SLinus Torvalds #define TCPCB_TAGBITS		0x07	/* All tag bits			*/
7589d186cacSAndrey Vagin #define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
7591da177e4SLinus Torvalds #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
7609d186cacSAndrey Vagin #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
7619d186cacSAndrey Vagin 				TCPCB_REPAIRED)
7621da177e4SLinus Torvalds 
763f4f9f6e7SNeal Cardwell 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
7646b084928SSoheil Hassas Yeganeh 	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
765c134ecb8SMartin KaFai Lau 			eor:1,		/* Is skb MSG_EOR marked? */
766c134ecb8SMartin KaFai Lau 			unused:6;
7671da177e4SLinus Torvalds 	__u32		ack_seq;	/* Sequence number ACK'd	*/
768971f10ecSEric Dumazet 	union {
769b75803d5SLawrence Brakmo 		struct {
770b9f64820SYuchung Cheng 			/* There is space for up to 24 bytes */
771d7722e85SSoheil Hassas Yeganeh 			__u32 in_flight:30,/* Bytes in flight at transmit */
772d7722e85SSoheil Hassas Yeganeh 			      is_app_limited:1, /* cwnd not fully used? */
773d7722e85SSoheil Hassas Yeganeh 			      unused:1;
774b9f64820SYuchung Cheng 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
775b9f64820SYuchung Cheng 			__u32 delivered;
776b9f64820SYuchung Cheng 			/* start of send pipeline phase */
777b9f64820SYuchung Cheng 			struct skb_mstamp first_tx_mstamp;
778b9f64820SYuchung Cheng 			/* when we reached the "delivered" count */
779b9f64820SYuchung Cheng 			struct skb_mstamp delivered_mstamp;
780b75803d5SLawrence Brakmo 		} tx;   /* only used for outgoing skbs */
781b75803d5SLawrence Brakmo 		union {
782971f10ecSEric Dumazet 			struct inet_skb_parm	h4;
783971f10ecSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
784971f10ecSEric Dumazet 			struct inet6_skb_parm	h6;
785971f10ecSEric Dumazet #endif
786b75803d5SLawrence Brakmo 		} header;	/* For incoming skbs */
787b75803d5SLawrence Brakmo 	};
7881da177e4SLinus Torvalds };
7891da177e4SLinus Torvalds 
7901da177e4SLinus Torvalds #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
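/* Illustrative sketch (not a kernel helper) of reading the per-packet
 * state above; the function name is hypothetical.
 */
static inline u32 example_skb_seq_space(const struct sk_buff *skb)
{
	const struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	/* Sequence space covered by this skb: payload bytes plus one
	 * sequence number each for SYN and FIN, per end_seq above.
	 */
	return tcb->end_seq - tcb->seq;
}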
7911da177e4SLinus Torvalds 
792870c3151SEric Dumazet 
793815afe17SEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
794870c3151SEric Dumazet /* This is the variant of inet6_iif() that must be used by TCP,
795870c3151SEric Dumazet  * as TCP moves IP6CB into a different location in skb->cb[]
796870c3151SEric Dumazet  */
797870c3151SEric Dumazet static inline int tcp_v6_iif(const struct sk_buff *skb)
798870c3151SEric Dumazet {
799a04a480dSDavid Ahern 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
80074b20582SDavid Ahern 
80174b20582SDavid Ahern 	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
802870c3151SEric Dumazet }
803815afe17SEric Dumazet #endif
804870c3151SEric Dumazet 
805a04a480dSDavid Ahern /* TCP_SKB_CB reference means this can not be used from early demux */
806a04a480dSDavid Ahern static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
807a04a480dSDavid Ahern {
808a04a480dSDavid Ahern #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
809a04a480dSDavid Ahern 	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
810da96786eSDavid Ahern 	    skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
811a04a480dSDavid Ahern 		return true;
812a04a480dSDavid Ahern #endif
813a04a480dSDavid Ahern 	return false;
814a04a480dSDavid Ahern }
815a04a480dSDavid Ahern 
8161da177e4SLinus Torvalds /* Due to TSO, an SKB can be composed of multiple actual
8171da177e4SLinus Torvalds  * packets.  To keep these tracked properly, we use this.
8181da177e4SLinus Torvalds  */
8191da177e4SLinus Torvalds static inline int tcp_skb_pcount(const struct sk_buff *skb)
8201da177e4SLinus Torvalds {
821cd7d8498SEric Dumazet 	return TCP_SKB_CB(skb)->tcp_gso_segs;
822cd7d8498SEric Dumazet }
823cd7d8498SEric Dumazet 
824cd7d8498SEric Dumazet static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
825cd7d8498SEric Dumazet {
826cd7d8498SEric Dumazet 	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
827cd7d8498SEric Dumazet }
828cd7d8498SEric Dumazet 
829cd7d8498SEric Dumazet static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
830cd7d8498SEric Dumazet {
831cd7d8498SEric Dumazet 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
8321da177e4SLinus Torvalds }
8331da177e4SLinus Torvalds 
834f69ad292SEric Dumazet /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
8351da177e4SLinus Torvalds static inline int tcp_skb_mss(const struct sk_buff *skb)
8361da177e4SLinus Torvalds {
837f69ad292SEric Dumazet 	return TCP_SKB_CB(skb)->tcp_gso_size;
8381da177e4SLinus Torvalds }
8391da177e4SLinus Torvalds 
840c134ecb8SMartin KaFai Lau static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
841c134ecb8SMartin KaFai Lau {
842c134ecb8SMartin KaFai Lau 	return likely(!TCP_SKB_CB(skb)->eor);
843c134ecb8SMartin KaFai Lau }
844c134ecb8SMartin KaFai Lau 
845317a76f9SStephen Hemminger /* Events passed to congestion control interface */
846317a76f9SStephen Hemminger enum tcp_ca_event {
847317a76f9SStephen Hemminger 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
848317a76f9SStephen Hemminger 	CA_EVENT_CWND_RESTART,	/* congestion window restart */
849317a76f9SStephen Hemminger 	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
850317a76f9SStephen Hemminger 	CA_EVENT_LOSS,		/* loss timeout */
8519890092eSFlorian Westphal 	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
8529890092eSFlorian Westphal 	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
8539890092eSFlorian Westphal 	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
8549890092eSFlorian Westphal 	CA_EVENT_NON_DELAYED_ACK,
8557354c8c3SFlorian Westphal };
8567354c8c3SFlorian Westphal 
8579890092eSFlorian Westphal /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
8587354c8c3SFlorian Westphal enum tcp_ca_ack_event_flags {
8599890092eSFlorian Westphal 	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
8609890092eSFlorian Westphal 	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
8619890092eSFlorian Westphal 	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
862317a76f9SStephen Hemminger };
863317a76f9SStephen Hemminger 
864317a76f9SStephen Hemminger /*
865317a76f9SStephen Hemminger  * Interface for adding new TCP congestion control handlers
866317a76f9SStephen Hemminger  */
867317a76f9SStephen Hemminger #define TCP_CA_NAME_MAX	16
8683ff825b2SStephen Hemminger #define TCP_CA_MAX	128
8693ff825b2SStephen Hemminger #define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
8703ff825b2SStephen Hemminger 
871c5c6a8abSDaniel Borkmann #define TCP_CA_UNSPEC	0
872c5c6a8abSDaniel Borkmann 
87330e502a3SDaniel Borkmann /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
874164891aaSStephen Hemminger #define TCP_CONG_NON_RESTRICTED 0x1
87530e502a3SDaniel Borkmann /* Requires ECN/ECT set on all packets */
87630e502a3SDaniel Borkmann #define TCP_CONG_NEEDS_ECN	0x2
877164891aaSStephen Hemminger 
87864f40ff5SEric Dumazet union tcp_cc_info;
87964f40ff5SEric Dumazet 
880756ee172SLawrence Brakmo struct ack_sample {
881756ee172SLawrence Brakmo 	u32 pkts_acked;
882756ee172SLawrence Brakmo 	s32 rtt_us;
8836f094b9eSLawrence Brakmo 	u32 in_flight;
884756ee172SLawrence Brakmo };
885756ee172SLawrence Brakmo 
886b9f64820SYuchung Cheng /* A rate sample measures the number of (original/retransmitted) data
887b9f64820SYuchung Cheng  * packets delivered "delivered" over an interval of time "interval_us".
888b9f64820SYuchung Cheng  * The tcp_rate.c code fills in the rate sample, and congestion
889b9f64820SYuchung Cheng  * control modules that define a cong_control function to run at the end
890b9f64820SYuchung Cheng  * of ACK processing can optionally choose to consult this sample when
891b9f64820SYuchung Cheng  * setting cwnd and pacing rate.
892b9f64820SYuchung Cheng  * A sample is invalid if "delivered" or "interval_us" is negative.
893b9f64820SYuchung Cheng  */
894b9f64820SYuchung Cheng struct rate_sample {
895b9f64820SYuchung Cheng 	struct	skb_mstamp prior_mstamp; /* starting timestamp for interval */
896b9f64820SYuchung Cheng 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
897b9f64820SYuchung Cheng 	s32  delivered;		/* number of packets delivered over interval */
898b9f64820SYuchung Cheng 	long interval_us;	/* time for tp->delivered to incr "delivered" */
899b9f64820SYuchung Cheng 	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
900b9f64820SYuchung Cheng 	int  losses;		/* number of packets marked lost upon ACK */
901b9f64820SYuchung Cheng 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
902b9f64820SYuchung Cheng 	u32  prior_in_flight;	/* in flight before this ACK */
903d7722e85SSoheil Hassas Yeganeh 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
904b9f64820SYuchung Cheng 	bool is_retrans;	/* is sample from retransmission? */
905b9f64820SYuchung Cheng };
906b9f64820SYuchung Cheng 
907317a76f9SStephen Hemminger struct tcp_congestion_ops {
908317a76f9SStephen Hemminger 	struct list_head	list;
909c5c6a8abSDaniel Borkmann 	u32 key;
910c5c6a8abSDaniel Borkmann 	u32 flags;
911317a76f9SStephen Hemminger 
912317a76f9SStephen Hemminger 	/* initialize private data (optional) */
9136687e988SArnaldo Carvalho de Melo 	void (*init)(struct sock *sk);
914317a76f9SStephen Hemminger 	/* cleanup private data  (optional) */
9156687e988SArnaldo Carvalho de Melo 	void (*release)(struct sock *sk);
916317a76f9SStephen Hemminger 
917317a76f9SStephen Hemminger 	/* return slow start threshold (required) */
9186687e988SArnaldo Carvalho de Melo 	u32 (*ssthresh)(struct sock *sk);
919317a76f9SStephen Hemminger 	/* do new cwnd calculation (required) */
92024901551SEric Dumazet 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
921317a76f9SStephen Hemminger 	/* call before changing ca_state (optional) */
9226687e988SArnaldo Carvalho de Melo 	void (*set_state)(struct sock *sk, u8 new_state);
923317a76f9SStephen Hemminger 	/* call when cwnd event occurs (optional) */
9246687e988SArnaldo Carvalho de Melo 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
9257354c8c3SFlorian Westphal 	/* call when ack arrives (optional) */
9267354c8c3SFlorian Westphal 	void (*in_ack_event)(struct sock *sk, u32 flags);
927317a76f9SStephen Hemminger 	/* new value of cwnd after loss (optional) */
9286687e988SArnaldo Carvalho de Melo 	u32  (*undo_cwnd)(struct sock *sk);
929317a76f9SStephen Hemminger 	/* hook for packet ack accounting (optional) */
930756ee172SLawrence Brakmo 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
931ed6e7268SNeal Cardwell 	/* suggest number of segments for each skb to transmit (optional) */
932ed6e7268SNeal Cardwell 	u32 (*tso_segs_goal)(struct sock *sk);
93377bfc174SYuchung Cheng 	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
93477bfc174SYuchung Cheng 	u32 (*sndbuf_expand)(struct sock *sk);
935c0402760SYuchung Cheng 	/* call when packets are delivered to update cwnd and pacing rate,
936c0402760SYuchung Cheng 	 * after all the ca_state processing. (optional)
937c0402760SYuchung Cheng 	 */
938c0402760SYuchung Cheng 	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
93973c1f4a0SArnaldo Carvalho de Melo 	/* get info for inet_diag (optional) */
94064f40ff5SEric Dumazet 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
94164f40ff5SEric Dumazet 			   union tcp_cc_info *info);
942317a76f9SStephen Hemminger 
943317a76f9SStephen Hemminger 	char 		name[TCP_CA_NAME_MAX];
944317a76f9SStephen Hemminger 	struct module 	*owner;
945317a76f9SStephen Hemminger };
946317a76f9SStephen Hemminger 
9475c9f3023SJoe Perches int tcp_register_congestion_control(struct tcp_congestion_ops *type);
9485c9f3023SJoe Perches void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
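/* A minimal sketch (not part of this header) of a congestion control
 * module built on the ops table above.  It reuses the Reno helpers
 * declared further down in this file; "tcp_example_ops" and
 * "tcp_example_register" are hypothetical names, and <linux/module.h>
 * is assumed to be available for THIS_MODULE and module_init().
 */
static struct tcp_congestion_ops tcp_example_ops __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required */
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.name		= "example",
	.owner		= THIS_MODULE,
};

static int __init tcp_example_register(void)
{
	/* After registration the algorithm can be selected by name,
	 * e.g. via the TCP_CONGESTION socket option.
	 */
	return tcp_register_congestion_control(&tcp_example_ops);
}
module_init(tcp_example_register);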
949317a76f9SStephen Hemminger 
95055d8694fSFlorian Westphal void tcp_assign_congestion_control(struct sock *sk);
9515c9f3023SJoe Perches void tcp_init_congestion_control(struct sock *sk);
9525c9f3023SJoe Perches void tcp_cleanup_congestion_control(struct sock *sk);
9535c9f3023SJoe Perches int tcp_set_default_congestion_control(const char *name);
9545c9f3023SJoe Perches void tcp_get_default_congestion_control(char *name);
9555c9f3023SJoe Perches void tcp_get_available_congestion_control(char *buf, size_t len);
9565c9f3023SJoe Perches void tcp_get_allowed_congestion_control(char *buf, size_t len);
9575c9f3023SJoe Perches int tcp_set_allowed_congestion_control(char *allowed);
9585c9f3023SJoe Perches int tcp_set_congestion_control(struct sock *sk, const char *name);
959e73ebb08SNeal Cardwell u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
960e73ebb08SNeal Cardwell void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
961317a76f9SStephen Hemminger 
9625c9f3023SJoe Perches u32 tcp_reno_ssthresh(struct sock *sk);
963e9799183SFlorian Westphal u32 tcp_reno_undo_cwnd(struct sock *sk);
96424901551SEric Dumazet void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
965a8acfbacSDavid S. Miller extern struct tcp_congestion_ops tcp_reno;
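
/* Illustrative sketch, not part of this header: a minimal congestion control
 * module fills in the required hooks (ssthresh and cong_avoid) and registers
 * itself via tcp_register_congestion_control().  The "tcp_example" names are
 * hypothetical; the Reno helpers declared above are reused for brevity.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static u32 tcp_example_ssthresh(struct sock *sk)
{
	/* halve cwnd on loss, but never go below two segments */
	return max_t(u32, tcp_sk(sk)->snd_cwnd >> 1, 2U);
}

static struct tcp_congestion_ops tcp_example __read_mostly = {
	.ssthresh	= tcp_example_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.name		= "example",
	.owner		= THIS_MODULE,
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}
module_init(tcp_example_register);
#endif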
966317a76f9SStephen Hemminger 
967c5c6a8abSDaniel Borkmann struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
968c3a8d947SDaniel Borkmann u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
969ea697639SDaniel Borkmann #ifdef CONFIG_INET
970c5c6a8abSDaniel Borkmann char *tcp_ca_get_name_by_key(u32 key, char *buffer);
971ea697639SDaniel Borkmann #else
972ea697639SDaniel Borkmann static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
973ea697639SDaniel Borkmann {
974ea697639SDaniel Borkmann 	return NULL;
975ea697639SDaniel Borkmann }
976ea697639SDaniel Borkmann #endif
977c5c6a8abSDaniel Borkmann 
97830e502a3SDaniel Borkmann static inline bool tcp_ca_needs_ecn(const struct sock *sk)
97930e502a3SDaniel Borkmann {
98030e502a3SDaniel Borkmann 	const struct inet_connection_sock *icsk = inet_csk(sk);
98130e502a3SDaniel Borkmann 
98230e502a3SDaniel Borkmann 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
98330e502a3SDaniel Borkmann }
98430e502a3SDaniel Borkmann 
9856687e988SArnaldo Carvalho de Melo static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
986317a76f9SStephen Hemminger {
9876687e988SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
9886687e988SArnaldo Carvalho de Melo 
9896687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_ops->set_state)
9906687e988SArnaldo Carvalho de Melo 		icsk->icsk_ca_ops->set_state(sk, ca_state);
9916687e988SArnaldo Carvalho de Melo 	icsk->icsk_ca_state = ca_state;
992317a76f9SStephen Hemminger }
993317a76f9SStephen Hemminger 
9946687e988SArnaldo Carvalho de Melo static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
995317a76f9SStephen Hemminger {
9966687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
9976687e988SArnaldo Carvalho de Melo 
9986687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_ops->cwnd_event)
9996687e988SArnaldo Carvalho de Melo 		icsk->icsk_ca_ops->cwnd_event(sk, event);
1000317a76f9SStephen Hemminger }
1001317a76f9SStephen Hemminger 
1002b9f64820SYuchung Cheng /* From tcp_rate.c */
1003b9f64820SYuchung Cheng void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1004b9f64820SYuchung Cheng void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1005b9f64820SYuchung Cheng 			    struct rate_sample *rs);
1006b9f64820SYuchung Cheng void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
100788d5c650SEric Dumazet 		  struct rate_sample *rs);
1008d7722e85SSoheil Hassas Yeganeh void tcp_rate_check_app_limited(struct sock *sk);
1009b9f64820SYuchung Cheng 
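/* Rough call order of the rate sampling helpers above (illustrative; see
 * tcp_rate.c for the authoritative flow): tcp_rate_skb_sent() stamps each
 * skb at transmit time, tcp_rate_skb_delivered() folds the skb into the
 * in-progress sample when it is (S)ACKed, and tcp_rate_gen() finalizes the
 * rate_sample handed to the congestion control hooks (e.g. cong_control)
 * for this ACK.
 */
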
1010e60402d0SIlpo Järvinen /* These functions determine how the current flow behaves with respect to SACK
1011e60402d0SIlpo Järvinen  * handling. SACK is negotiated with the peer, and therefore it can vary
1012e60402d0SIlpo Järvinen  * between different flows.
1013e60402d0SIlpo Järvinen  *
1014e60402d0SIlpo Järvinen  * tcp_is_sack - SACK enabled
1015e60402d0SIlpo Järvinen  * tcp_is_reno - No SACK
1016e60402d0SIlpo Järvinen  * tcp_is_fack - FACK enabled, implies SACK enabled
1017e60402d0SIlpo Järvinen  */
1018e60402d0SIlpo Järvinen static inline int tcp_is_sack(const struct tcp_sock *tp)
1019e60402d0SIlpo Järvinen {
1020e60402d0SIlpo Järvinen 	return tp->rx_opt.sack_ok;
1021e60402d0SIlpo Järvinen }
1022e60402d0SIlpo Järvinen 
1023a2a385d6SEric Dumazet static inline bool tcp_is_reno(const struct tcp_sock *tp)
1024e60402d0SIlpo Järvinen {
1025e60402d0SIlpo Järvinen 	return !tcp_is_sack(tp);
1026e60402d0SIlpo Järvinen }
1027e60402d0SIlpo Järvinen 
1028a2a385d6SEric Dumazet static inline bool tcp_is_fack(const struct tcp_sock *tp)
1029e60402d0SIlpo Järvinen {
1030ab56222aSVijay Subramanian 	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
1031e60402d0SIlpo Järvinen }
1032e60402d0SIlpo Järvinen 
1033e60402d0SIlpo Järvinen static inline void tcp_enable_fack(struct tcp_sock *tp)
1034e60402d0SIlpo Järvinen {
1035ab56222aSVijay Subramanian 	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
1036e60402d0SIlpo Järvinen }
1037e60402d0SIlpo Järvinen 
103883ae4088SIlpo Järvinen static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
103983ae4088SIlpo Järvinen {
104083ae4088SIlpo Järvinen 	return tp->sacked_out + tp->lost_out;
104183ae4088SIlpo Järvinen }
104283ae4088SIlpo Järvinen 
10431da177e4SLinus Torvalds /* This determines how many packets are "in the network" to the best
10441da177e4SLinus Torvalds  * of our knowledge.  In many cases it is conservative, but where
10451da177e4SLinus Torvalds  * detailed information is available from the receiver (via SACK
10461da177e4SLinus Torvalds  * blocks etc.) we can make more aggressive calculations.
10471da177e4SLinus Torvalds  *
10481da177e4SLinus Torvalds  * Use this for decisions involving congestion control; use just
10491da177e4SLinus Torvalds  * tp->packets_out to determine whether the send queue is empty.
10501da177e4SLinus Torvalds  *
10511da177e4SLinus Torvalds  * Read this equation as:
10521da177e4SLinus Torvalds  *
10531da177e4SLinus Torvalds  *	"Packets sent once on transmission queue" MINUS
10541da177e4SLinus Torvalds  *	"Packets left network, but not honestly ACKed yet" PLUS
10551da177e4SLinus Torvalds  *	"Packets fast retransmitted"
10561da177e4SLinus Torvalds  */
105740efc6faSStephen Hemminger static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
10581da177e4SLinus Torvalds {
105983ae4088SIlpo Järvinen 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
10601da177e4SLinus Torvalds }
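
/* Worked example (illustrative): with packets_out = 10, sacked_out = 2,
 * lost_out = 1 and retrans_out = 1, the estimate is
 * 10 - (2 + 1) + 1 = 8 packets still in the network.
 */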
10611da177e4SLinus Torvalds 
10620b6a05c1SIlpo Järvinen #define TCP_INFINITE_SSTHRESH	0x7fffffff
10630b6a05c1SIlpo Järvinen 
1064071d5080SYuchung Cheng static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1065071d5080SYuchung Cheng {
106676174004SYuchung Cheng 	return tp->snd_cwnd < tp->snd_ssthresh;
1067071d5080SYuchung Cheng }
1068071d5080SYuchung Cheng 
10690b6a05c1SIlpo Järvinen static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
10700b6a05c1SIlpo Järvinen {
10710b6a05c1SIlpo Järvinen 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
10720b6a05c1SIlpo Järvinen }
10730b6a05c1SIlpo Järvinen 
1074684bad11SYuchung Cheng static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1075684bad11SYuchung Cheng {
1076684bad11SYuchung Cheng 	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1077684bad11SYuchung Cheng 	       (1 << inet_csk(sk)->icsk_ca_state);
1078684bad11SYuchung Cheng }
1079684bad11SYuchung Cheng 
10801da177e4SLinus Torvalds /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1081684bad11SYuchung Cheng  * The exception is cwnd reduction phase, when cwnd is decreasing towards
10821da177e4SLinus Torvalds  * ssthresh.
10831da177e4SLinus Torvalds  */
10846687e988SArnaldo Carvalho de Melo static inline __u32 tcp_current_ssthresh(const struct sock *sk)
10851da177e4SLinus Torvalds {
10866687e988SArnaldo Carvalho de Melo 	const struct tcp_sock *tp = tcp_sk(sk);
1087cf533ea5SEric Dumazet 
1088684bad11SYuchung Cheng 	if (tcp_in_cwnd_reduction(sk))
10891da177e4SLinus Torvalds 		return tp->snd_ssthresh;
10901da177e4SLinus Torvalds 	else
10911da177e4SLinus Torvalds 		return max(tp->snd_ssthresh,
10921da177e4SLinus Torvalds 			   ((tp->snd_cwnd >> 1) +
10931da177e4SLinus Torvalds 			    (tp->snd_cwnd >> 2)));
10941da177e4SLinus Torvalds }
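
/* Note (illustrative): (cwnd >> 1) + (cwnd >> 2) is 3/4 of cwnd, so outside
 * of CWR/Recovery the value returned is at least 75% of the current cwnd;
 * e.g. cwnd = 40 yields max(ssthresh, 20 + 10), i.e. at least 30.
 */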
10951da177e4SLinus Torvalds 
1096b9c4595bSIlpo Järvinen /* Use define here intentionally to get WARN_ON location shown at the caller */
1097b9c4595bSIlpo Järvinen #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
10981da177e4SLinus Torvalds 
10995ee2c941SChristoph Paasch void tcp_enter_cwr(struct sock *sk);
11005c9f3023SJoe Perches __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
11011da177e4SLinus Torvalds 
11026b5a5c0dSNeal Cardwell /* The maximum number of MSS worth of available cwnd for which TSO defers
11036b5a5c0dSNeal Cardwell  * sending, when sysctl_tcp_tso_win_divisor is not in use.
11046b5a5c0dSNeal Cardwell  */
11056b5a5c0dSNeal Cardwell static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
11066b5a5c0dSNeal Cardwell {
11076b5a5c0dSNeal Cardwell 	return 3;
11086b5a5c0dSNeal Cardwell }
11096b5a5c0dSNeal Cardwell 
111090840defSIlpo Järvinen /* Returns end sequence number of the receiver's advertised window */
111190840defSIlpo Järvinen static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
111290840defSIlpo Järvinen {
111390840defSIlpo Järvinen 	return tp->snd_una + tp->snd_wnd;
111490840defSIlpo Järvinen }
1115e114a710SEric Dumazet 
1116e114a710SEric Dumazet /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1117e114a710SEric Dumazet  * flexible approach. The RFC suggests cwnd should not be raised unless
1118ca8a2263SNeal Cardwell  * it was fully used previously. And that's exactly what we do in
1119ca8a2263SNeal Cardwell  * congestion avoidance mode. But in slow start we allow cwnd to grow
1120ca8a2263SNeal Cardwell  * as long as the application has used half the cwnd.
1121e114a710SEric Dumazet  * Example:
1122e114a710SEric Dumazet  *    cwnd is 10 (IW10), but the application sends 9 frames.
1123e114a710SEric Dumazet  *    We allow cwnd to reach 18 when all frames are ACKed.
1124e114a710SEric Dumazet  * This check is safe because it's as aggressive as slow start, which already
1125e114a710SEric Dumazet  * risks 100% overshoot. The advantage is that we discourage applications from
1126e114a710SEric Dumazet  * sending filler packets or extra data to artificially blow up cwnd usage,
1127e114a710SEric Dumazet  * and allow an application-limited process to probe bandwidth more aggressively.
1128e114a710SEric Dumazet  */
112924901551SEric Dumazet static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1130e114a710SEric Dumazet {
1131e114a710SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1132e114a710SEric Dumazet 
1133ca8a2263SNeal Cardwell 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1134071d5080SYuchung Cheng 	if (tcp_in_slow_start(tp))
1135ca8a2263SNeal Cardwell 		return tp->snd_cwnd < 2 * tp->max_packets_out;
1136ca8a2263SNeal Cardwell 
1137ca8a2263SNeal Cardwell 	return tp->is_cwnd_limited;
1138e114a710SEric Dumazet }
1139f4805edeSStephen Hemminger 
114021c8fe99SEric Dumazet /* Something is really bad: we could not queue an additional packet
114121c8fe99SEric Dumazet  * because the qdisc is full or the receiver sent a zero window.
114221c8fe99SEric Dumazet  * We do not want to add fuel to the fire, or abort too early,
114321c8fe99SEric Dumazet  * so make sure the timer we arm now is at least 200ms in the future,
114421c8fe99SEric Dumazet  * regardless of the current icsk_rto value (which could be ~2ms).
114521c8fe99SEric Dumazet  */
114621c8fe99SEric Dumazet static inline unsigned long tcp_probe0_base(const struct sock *sk)
114721c8fe99SEric Dumazet {
114821c8fe99SEric Dumazet 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
114921c8fe99SEric Dumazet }
115021c8fe99SEric Dumazet 
115121c8fe99SEric Dumazet /* Variant of inet_csk_rto_backoff() used for zero window probes */
115221c8fe99SEric Dumazet static inline unsigned long tcp_probe0_when(const struct sock *sk,
115321c8fe99SEric Dumazet 					    unsigned long max_when)
115421c8fe99SEric Dumazet {
115521c8fe99SEric Dumazet 	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
115621c8fe99SEric Dumazet 
115721c8fe99SEric Dumazet 	return (unsigned long)min_t(u64, when, max_when);
115821c8fe99SEric Dumazet }
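
/* Example (illustrative, with TCP_RTO_MIN at its usual ~200ms): if icsk_rto
 * is below that floor and icsk_backoff = 3, tcp_probe0_when() arms the probe
 * timer for 200ms << 3 = 1.6s, clamped to max_when if that is smaller.
 */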
115921c8fe99SEric Dumazet 
11609e412ba7SIlpo Järvinen static inline void tcp_check_probe_timer(struct sock *sk)
11611da177e4SLinus Torvalds {
116221c8fe99SEric Dumazet 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
11633f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
116421c8fe99SEric Dumazet 					  tcp_probe0_base(sk), TCP_RTO_MAX);
11651da177e4SLinus Torvalds }
11661da177e4SLinus Torvalds 
1167ee7537b6SHantzis Fotis static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
11681da177e4SLinus Torvalds {
11691da177e4SLinus Torvalds 	tp->snd_wl1 = seq;
11701da177e4SLinus Torvalds }
11711da177e4SLinus Torvalds 
1172ee7537b6SHantzis Fotis static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
11731da177e4SLinus Torvalds {
11741da177e4SLinus Torvalds 	tp->snd_wl1 = seq;
11751da177e4SLinus Torvalds }
11761da177e4SLinus Torvalds 
11771da177e4SLinus Torvalds /*
11781da177e4SLinus Torvalds  * Calculate(/check) TCP checksum
11791da177e4SLinus Torvalds  */
1180ba7808eaSFrederik Deweerdt static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1181ba7808eaSFrederik Deweerdt 				   __be32 daddr, __wsum base)
11821da177e4SLinus Torvalds {
11831da177e4SLinus Torvalds 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
11841da177e4SLinus Torvalds }
11851da177e4SLinus Torvalds 
1186b51655b9SAl Viro static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
11871da177e4SLinus Torvalds {
1188fb286bb2SHerbert Xu 	return __skb_checksum_complete(skb);
11891da177e4SLinus Torvalds }
11901da177e4SLinus Torvalds 
1191a2a385d6SEric Dumazet static inline bool tcp_checksum_complete(struct sk_buff *skb)
11921da177e4SLinus Torvalds {
119360476372SHerbert Xu 	return !skb_csum_unnecessary(skb) &&
11941da177e4SLinus Torvalds 		__tcp_checksum_complete(skb);
11951da177e4SLinus Torvalds }
11961da177e4SLinus Torvalds 
11971da177e4SLinus Torvalds /* Prequeue for VJ style copy to user, combined with checksumming. */
11981da177e4SLinus Torvalds 
119940efc6faSStephen Hemminger static inline void tcp_prequeue_init(struct tcp_sock *tp)
12001da177e4SLinus Torvalds {
12011da177e4SLinus Torvalds 	tp->ucopy.task = NULL;
12021da177e4SLinus Torvalds 	tp->ucopy.len = 0;
12031da177e4SLinus Torvalds 	tp->ucopy.memory = 0;
12041da177e4SLinus Torvalds 	skb_queue_head_init(&tp->ucopy.prequeue);
12051da177e4SLinus Torvalds }
12061da177e4SLinus Torvalds 
12075c9f3023SJoe Perches bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1208c9c33212SEric Dumazet bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1209ac6e7800SEric Dumazet int tcp_filter(struct sock *sk, struct sk_buff *skb);
12101da177e4SLinus Torvalds 
12111da177e4SLinus Torvalds #undef STATE_TRACE
12121da177e4SLinus Torvalds 
12131da177e4SLinus Torvalds #ifdef STATE_TRACE
12141da177e4SLinus Torvalds static const char *statename[]={
12151da177e4SLinus Torvalds 	"Unused","Established","Syn Sent","Syn Recv",
12161da177e4SLinus Torvalds 	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
12171da177e4SLinus Torvalds 	"Close Wait","Last ACK","Listen","Closing"
12181da177e4SLinus Torvalds };
12191da177e4SLinus Torvalds #endif
12205c9f3023SJoe Perches void tcp_set_state(struct sock *sk, int state);
12211da177e4SLinus Torvalds 
12225c9f3023SJoe Perches void tcp_done(struct sock *sk);
12231da177e4SLinus Torvalds 
1224c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err);
1225c1e64e29SLorenzo Colitti 
122640efc6faSStephen Hemminger static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
12271da177e4SLinus Torvalds {
12281da177e4SLinus Torvalds 	rx_opt->dsack = 0;
12291da177e4SLinus Torvalds 	rx_opt->num_sacks = 0;
12301da177e4SLinus Torvalds }
12311da177e4SLinus Torvalds 
12325c9f3023SJoe Perches u32 tcp_default_init_rwnd(u32 mss);
12336f021c62SEric Dumazet void tcp_cwnd_restart(struct sock *sk, s32 delta);
12346f021c62SEric Dumazet 
12356f021c62SEric Dumazet static inline void tcp_slow_start_after_idle_check(struct sock *sk)
12366f021c62SEric Dumazet {
1237*1b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
12386f021c62SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
12396f021c62SEric Dumazet 	s32 delta;
12406f021c62SEric Dumazet 
1241*1b1fc3fdSWei Wang 	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
1242*1b1fc3fdSWei Wang 	    ca_ops->cong_control)
12436f021c62SEric Dumazet 		return;
12446f021c62SEric Dumazet 	delta = tcp_time_stamp - tp->lsndtime;
12456f021c62SEric Dumazet 	if (delta > inet_csk(sk)->icsk_rto)
12466f021c62SEric Dumazet 		tcp_cwnd_restart(sk, delta);
12476f021c62SEric Dumazet }
124885f16525SYuchung Cheng 
12491da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer. */
12505c9f3023SJoe Perches void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
12515c9f3023SJoe Perches 			       __u32 *window_clamp, int wscale_ok,
12525c9f3023SJoe Perches 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
12531da177e4SLinus Torvalds 
12541da177e4SLinus Torvalds static inline int tcp_win_from_space(int space)
12551da177e4SLinus Torvalds {
1256c4836742SGao Feng 	int tcp_adv_win_scale = sysctl_tcp_adv_win_scale;
1257c4836742SGao Feng 
1258c4836742SGao Feng 	return tcp_adv_win_scale <= 0 ?
1259c4836742SGao Feng 		(space >> (-tcp_adv_win_scale)) :
1260c4836742SGao Feng 		space - (space >> tcp_adv_win_scale);
12611da177e4SLinus Torvalds }
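
/* Example (illustrative): with sysctl_tcp_adv_win_scale = 1 and
 * space = 65536 bytes, the result is 65536 - (65536 >> 1) = 32768 bytes;
 * a scale of -2 would instead yield 65536 >> 2 = 16384 bytes.
 */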
12621da177e4SLinus Torvalds 
12631da177e4SLinus Torvalds /* Note: caller must be prepared to deal with negative returns */
12641da177e4SLinus Torvalds static inline int tcp_space(const struct sock *sk)
12651da177e4SLinus Torvalds {
12661da177e4SLinus Torvalds 	return tcp_win_from_space(sk->sk_rcvbuf -
12671da177e4SLinus Torvalds 				  atomic_read(&sk->sk_rmem_alloc));
12681da177e4SLinus Torvalds }
12691da177e4SLinus Torvalds 
12701da177e4SLinus Torvalds static inline int tcp_full_space(const struct sock *sk)
12711da177e4SLinus Torvalds {
12721da177e4SLinus Torvalds 	return tcp_win_from_space(sk->sk_rcvbuf);
12731da177e4SLinus Torvalds }
12741da177e4SLinus Torvalds 
1275843f4a55SYuchung Cheng extern void tcp_openreq_init_rwin(struct request_sock *req,
1276b1964b5fSEric Dumazet 				  const struct sock *sk_listener,
1277b1964b5fSEric Dumazet 				  const struct dst_entry *dst);
1278843f4a55SYuchung Cheng 
12795c9f3023SJoe Perches void tcp_enter_memory_pressure(struct sock *sk);
12801da177e4SLinus Torvalds 
12811da177e4SLinus Torvalds static inline int keepalive_intvl_when(const struct tcp_sock *tp)
12821da177e4SLinus Torvalds {
1283b840d15dSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
1284b840d15dSNikolay Borisov 
1285b840d15dSNikolay Borisov 	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
12861da177e4SLinus Torvalds }
12871da177e4SLinus Torvalds 
12881da177e4SLinus Torvalds static inline int keepalive_time_when(const struct tcp_sock *tp)
12891da177e4SLinus Torvalds {
129013b287e8SNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
129113b287e8SNikolay Borisov 
129213b287e8SNikolay Borisov 	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
12931da177e4SLinus Torvalds }
12941da177e4SLinus Torvalds 
1295df19a626SEric Dumazet static inline int keepalive_probes(const struct tcp_sock *tp)
1296df19a626SEric Dumazet {
12979bd6861bSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
12989bd6861bSNikolay Borisov 
12999bd6861bSNikolay Borisov 	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
1300df19a626SEric Dumazet }
1301df19a626SEric Dumazet 
13026c37e5deSFlavio Leitner static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
13036c37e5deSFlavio Leitner {
13046c37e5deSFlavio Leitner 	const struct inet_connection_sock *icsk = &tp->inet_conn;
13056c37e5deSFlavio Leitner 
13066c37e5deSFlavio Leitner 	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
13076c37e5deSFlavio Leitner 			  tcp_time_stamp - tp->rcv_tstamp);
13086c37e5deSFlavio Leitner }
13096c37e5deSFlavio Leitner 
1310463c84b9SArnaldo Carvalho de Melo static inline int tcp_fin_time(const struct sock *sk)
13111da177e4SLinus Torvalds {
13121e579caaSNikolay Borisov 	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
1313463c84b9SArnaldo Carvalho de Melo 	const int rto = inet_csk(sk)->icsk_rto;
13141da177e4SLinus Torvalds 
1315463c84b9SArnaldo Carvalho de Melo 	if (fin_timeout < (rto << 2) - (rto >> 1))
1316463c84b9SArnaldo Carvalho de Melo 		fin_timeout = (rto << 2) - (rto >> 1);
13171da177e4SLinus Torvalds 
13181da177e4SLinus Torvalds 	return fin_timeout;
13191da177e4SLinus Torvalds }
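
/* Example (illustrative): with icsk_rto = 200ms the lower bound is
 * (200ms << 2) - (200ms >> 1) = 700ms, so a configured fin_timeout below
 * that is raised to 700ms.
 */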
13201da177e4SLinus Torvalds 
1321a2a385d6SEric Dumazet static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1322c887e6d2SIlpo Järvinen 				  int paws_win)
13231da177e4SLinus Torvalds {
1324c887e6d2SIlpo Järvinen 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1325a2a385d6SEric Dumazet 		return true;
1326c887e6d2SIlpo Järvinen 	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1327a2a385d6SEric Dumazet 		return true;
1328bc2ce894SEric Dumazet 	/*
1329bc2ce894SEric Dumazet 	 * Some OSes send SYN and SYNACK messages with tsval=0 and tsecr=0;
1330bc2ce894SEric Dumazet 	 * subsequent TCP messages then carry valid values. Ignore a 0 value,
1331bc2ce894SEric Dumazet 	 * or else a 'negative' tsval might forbid us from accepting their packets.
1332bc2ce894SEric Dumazet 	 */
1333bc2ce894SEric Dumazet 	if (!rx_opt->ts_recent)
1334a2a385d6SEric Dumazet 		return true;
1335a2a385d6SEric Dumazet 	return false;
1336c887e6d2SIlpo Järvinen }
1337c887e6d2SIlpo Järvinen 
1338a2a385d6SEric Dumazet static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1339c887e6d2SIlpo Järvinen 				   int rst)
1340c887e6d2SIlpo Järvinen {
1341c887e6d2SIlpo Järvinen 	if (tcp_paws_check(rx_opt, 0))
1342a2a385d6SEric Dumazet 		return false;
13431da177e4SLinus Torvalds 
13441da177e4SLinus Torvalds 	/* RST segments are not recommended to carry a timestamp,
13451da177e4SLinus Torvalds 	   and, if they do, it is recommended to ignore PAWS because
13461da177e4SLinus Torvalds 	   "their cleanup function should take precedence over timestamps."
13471da177e4SLinus Torvalds 	   Certainly, it is a mistake. It is necessary to understand the reasons
13481da177e4SLinus Torvalds 	   for this constraint in order to relax it: if the peer reboots, its clock
13491da177e4SLinus Torvalds 	   may go out of sync and half-open connections will not be reset.
13501da177e4SLinus Torvalds 	   Actually, the problem would not exist if all
13511da177e4SLinus Torvalds 	   implementations followed the draft about maintaining the clock
13521da177e4SLinus Torvalds 	   across reboots. Linux-2.2 DOES NOT!
13531da177e4SLinus Torvalds 
13541da177e4SLinus Torvalds 	   However, we can relax the time bounds for RST segments to MSL.
13551da177e4SLinus Torvalds 	 */
13569d729f72SJames Morris 	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1357a2a385d6SEric Dumazet 		return false;
1358a2a385d6SEric Dumazet 	return true;
13591da177e4SLinus Torvalds }
13601da177e4SLinus Torvalds 
13617970ddc8SEric Dumazet bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
13627970ddc8SEric Dumazet 			  int mib_idx, u32 *last_oow_ack_time);
1363032ee423SNeal Cardwell 
1364a9c19329SPavel Emelyanov static inline void tcp_mib_init(struct net *net)
13651da177e4SLinus Torvalds {
13661da177e4SLinus Torvalds 	/* See RFC 2012 */
13676aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
13686aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
13696aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
13706aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
13711da177e4SLinus Torvalds }
13721da177e4SLinus Torvalds 
13736a438bbeSStephen Hemminger /* from STCP */
1374ef9da47cSIlpo Järvinen static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
13750800f170SDavid S. Miller {
13766a438bbeSStephen Hemminger 	tp->lost_skb_hint = NULL;
1377ef9da47cSIlpo Järvinen }
1378ef9da47cSIlpo Järvinen 
1379ef9da47cSIlpo Järvinen static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1380ef9da47cSIlpo Järvinen {
1381ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
13826a438bbeSStephen Hemminger 	tp->retransmit_skb_hint = NULL;
1383b7689205SIlpo Järvinen }
1384b7689205SIlpo Järvinen 
1385a915da9bSEric Dumazet union tcp_md5_addr {
1386a915da9bSEric Dumazet 	struct in_addr  a4;
1387a915da9bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1388a915da9bSEric Dumazet 	struct in6_addr	a6;
1389a915da9bSEric Dumazet #endif
1390a915da9bSEric Dumazet };
1391a915da9bSEric Dumazet 
1392cfb6eeb4SYOSHIFUJI Hideaki /* - key database */
1393cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_key {
1394a915da9bSEric Dumazet 	struct hlist_node	node;
1395cfb6eeb4SYOSHIFUJI Hideaki 	u8			keylen;
1396a915da9bSEric Dumazet 	u8			family; /* AF_INET or AF_INET6 */
1397a915da9bSEric Dumazet 	union tcp_md5_addr	addr;
1398a915da9bSEric Dumazet 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1399a915da9bSEric Dumazet 	struct rcu_head		rcu;
1400cfb6eeb4SYOSHIFUJI Hideaki };
1401cfb6eeb4SYOSHIFUJI Hideaki 
1402cfb6eeb4SYOSHIFUJI Hideaki /* - sock block */
1403cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_info {
1404a915da9bSEric Dumazet 	struct hlist_head	head;
1405a8afca03SEric Dumazet 	struct rcu_head		rcu;
1406cfb6eeb4SYOSHIFUJI Hideaki };
1407cfb6eeb4SYOSHIFUJI Hideaki 
1408cfb6eeb4SYOSHIFUJI Hideaki /* - pseudo header */
1409cfb6eeb4SYOSHIFUJI Hideaki struct tcp4_pseudohdr {
1410cfb6eeb4SYOSHIFUJI Hideaki 	__be32		saddr;
1411cfb6eeb4SYOSHIFUJI Hideaki 	__be32		daddr;
1412cfb6eeb4SYOSHIFUJI Hideaki 	__u8		pad;
1413cfb6eeb4SYOSHIFUJI Hideaki 	__u8		protocol;
1414cfb6eeb4SYOSHIFUJI Hideaki 	__be16		len;
1415cfb6eeb4SYOSHIFUJI Hideaki };
1416cfb6eeb4SYOSHIFUJI Hideaki 
1417cfb6eeb4SYOSHIFUJI Hideaki struct tcp6_pseudohdr {
1418cfb6eeb4SYOSHIFUJI Hideaki 	struct in6_addr	saddr;
1419cfb6eeb4SYOSHIFUJI Hideaki 	struct in6_addr daddr;
1420cfb6eeb4SYOSHIFUJI Hideaki 	__be32		len;
1421cfb6eeb4SYOSHIFUJI Hideaki 	__be32		protocol;	/* including padding */
1422cfb6eeb4SYOSHIFUJI Hideaki };
1423cfb6eeb4SYOSHIFUJI Hideaki 
1424cfb6eeb4SYOSHIFUJI Hideaki union tcp_md5sum_block {
1425cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp4_pseudohdr ip4;
1426dfd56b8bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1427cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp6_pseudohdr ip6;
1428cfb6eeb4SYOSHIFUJI Hideaki #endif
1429cfb6eeb4SYOSHIFUJI Hideaki };
1430cfb6eeb4SYOSHIFUJI Hideaki 
1431cfb6eeb4SYOSHIFUJI Hideaki /* - pool: digest algorithm, hash description and scratch buffer */
1432cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_pool {
1433cf80e0e4SHerbert Xu 	struct ahash_request	*md5_req;
143419689e38SEric Dumazet 	void			*scratch;
1435cfb6eeb4SYOSHIFUJI Hideaki };
1436cfb6eeb4SYOSHIFUJI Hideaki 
1437cfb6eeb4SYOSHIFUJI Hideaki /* - functions */
143839f8e58eSEric Dumazet int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
143939f8e58eSEric Dumazet 			const struct sock *sk, const struct sk_buff *skb);
14405c9f3023SJoe Perches int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
14415c9f3023SJoe Perches 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
14425c9f3023SJoe Perches int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1443a915da9bSEric Dumazet 		   int family);
1444b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1445fd3a154aSEric Dumazet 					 const struct sock *addr_sk);
1446cfb6eeb4SYOSHIFUJI Hideaki 
14479501f972SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1448b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
14495c9f3023SJoe Perches 					 const union tcp_md5_addr *addr,
14505c9f3023SJoe Perches 					 int family);
1451a915da9bSEric Dumazet #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
14529501f972SYOSHIFUJI Hideaki #else
1453b83e3debSEric Dumazet static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1454a915da9bSEric Dumazet 					 const union tcp_md5_addr *addr,
1455a915da9bSEric Dumazet 					 int family)
1456a915da9bSEric Dumazet {
1457a915da9bSEric Dumazet 	return NULL;
1458a915da9bSEric Dumazet }
14599501f972SYOSHIFUJI Hideaki #define tcp_twsk_md5_key(twsk)	NULL
14609501f972SYOSHIFUJI Hideaki #endif
14619501f972SYOSHIFUJI Hideaki 
14625c9f3023SJoe Perches bool tcp_alloc_md5sig_pool(void);
1463cfb6eeb4SYOSHIFUJI Hideaki 
14645c9f3023SJoe Perches struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
146571cea17eSEric Dumazet static inline void tcp_put_md5sig_pool(void)
146671cea17eSEric Dumazet {
146771cea17eSEric Dumazet 	local_bh_enable();
146871cea17eSEric Dumazet }
146935790c04SEric Dumazet 
14705c9f3023SJoe Perches int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
147195c96174SEric Dumazet 			  unsigned int header_len);
14725c9f3023SJoe Perches int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1473cf533ea5SEric Dumazet 		     const struct tcp_md5sig_key *key);
1474cfb6eeb4SYOSHIFUJI Hideaki 
147510467163SJerry Chu /* From tcp_fastopen.c */
14765c9f3023SJoe Perches void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
14775c9f3023SJoe Perches 			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
14785c9f3023SJoe Perches 			    unsigned long *last_syn_loss);
14795c9f3023SJoe Perches void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
14802646c831SDaniel Lee 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
14812646c831SDaniel Lee 			    u16 try_exp);
1482783237e8SYuchung Cheng struct tcp_fastopen_request {
1483783237e8SYuchung Cheng 	/* Fast Open cookie. Size 0 means a cookie request */
1484783237e8SYuchung Cheng 	struct tcp_fastopen_cookie	cookie;
1485783237e8SYuchung Cheng 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1486f5ddcbbbSEric Dumazet 	size_t				size;
1487f5ddcbbbSEric Dumazet 	int				copied;	/* queued in tcp_connect() */
1488783237e8SYuchung Cheng };
1489783237e8SYuchung Cheng void tcp_free_fastopen_req(struct tcp_sock *tp);
1490783237e8SYuchung Cheng 
149110467163SJerry Chu extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
149210467163SJerry Chu int tcp_fastopen_reset_cipher(void *key, unsigned int len);
149361d2bcaeSEric Dumazet void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
14947c85af88SEric Dumazet struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
14955b7ed089SYuchung Cheng 			      struct request_sock *req,
1496843f4a55SYuchung Cheng 			      struct tcp_fastopen_cookie *foc,
1497843f4a55SYuchung Cheng 			      struct dst_entry *dst);
1498222e83d2SHannes Frederic Sowa void tcp_fastopen_init_key_once(bool publish);
1499065263f4SWei Wang bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1500065263f4SWei Wang 			     struct tcp_fastopen_cookie *cookie);
150119f6d3f3SWei Wang bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
150210467163SJerry Chu #define TCP_FASTOPEN_KEY_LENGTH 16
150310467163SJerry Chu 
150410467163SJerry Chu /* Fastopen key context */
150510467163SJerry Chu struct tcp_fastopen_context {
15067ae8639cSEric Dumazet 	struct crypto_cipher	*tfm;
150710467163SJerry Chu 	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
150810467163SJerry Chu 	struct rcu_head		rcu;
150910467163SJerry Chu };
151010467163SJerry Chu 
1511cf1ef3f0SWei Wang extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
151246c2fa39SWei Wang void tcp_fastopen_active_disable(struct sock *sk);
1513cf1ef3f0SWei Wang bool tcp_fastopen_active_should_disable(struct sock *sk);
1514cf1ef3f0SWei Wang void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1515cf1ef3f0SWei Wang void tcp_fastopen_active_timeout_reset(void);
1516cf1ef3f0SWei Wang 
151705b055e8SFrancis Yan /* Latencies incurred by various limits for a sender. They are
151805b055e8SFrancis Yan  * chronograph-like stats that are mutually exclusive.
151905b055e8SFrancis Yan  */
152005b055e8SFrancis Yan enum tcp_chrono {
152105b055e8SFrancis Yan 	TCP_CHRONO_UNSPEC,
152205b055e8SFrancis Yan 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
152305b055e8SFrancis Yan 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
152405b055e8SFrancis Yan 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
152505b055e8SFrancis Yan 	__TCP_CHRONO_MAX,
152605b055e8SFrancis Yan };
152705b055e8SFrancis Yan 
152805b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
152905b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
153005b055e8SFrancis Yan 
1531fe067e8aSDavid S. Miller /* write queue abstraction */
1532fe067e8aSDavid S. Miller static inline void tcp_write_queue_purge(struct sock *sk)
1533fe067e8aSDavid S. Miller {
1534fe067e8aSDavid S. Miller 	struct sk_buff *skb;
1535fe067e8aSDavid S. Miller 
15360f87230dSFrancis Yan 	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1537fe067e8aSDavid S. Miller 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
15383ab224beSHideo Aoki 		sk_wmem_free_skb(sk, skb);
15393ab224beSHideo Aoki 	sk_mem_reclaim(sk);
15408818a9d8SIlpo Järvinen 	tcp_clear_all_retrans_hints(tcp_sk(sk));
1541fe067e8aSDavid S. Miller }
1542fe067e8aSDavid S. Miller 
1543cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1544fe067e8aSDavid S. Miller {
1545cd07a8eaSDavid S. Miller 	return skb_peek(&sk->sk_write_queue);
1546fe067e8aSDavid S. Miller }
1547fe067e8aSDavid S. Miller 
1548cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1549fe067e8aSDavid S. Miller {
1550cd07a8eaSDavid S. Miller 	return skb_peek_tail(&sk->sk_write_queue);
1551fe067e8aSDavid S. Miller }
1552fe067e8aSDavid S. Miller 
1553cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1554cf533ea5SEric Dumazet 						   const struct sk_buff *skb)
1555fe067e8aSDavid S. Miller {
1556cd07a8eaSDavid S. Miller 	return skb_queue_next(&sk->sk_write_queue, skb);
1557fe067e8aSDavid S. Miller }
1558fe067e8aSDavid S. Miller 
1559cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1560cf533ea5SEric Dumazet 						   const struct sk_buff *skb)
1561832d11c5SIlpo Järvinen {
1562832d11c5SIlpo Järvinen 	return skb_queue_prev(&sk->sk_write_queue, skb);
1563832d11c5SIlpo Järvinen }
1564832d11c5SIlpo Järvinen 
1565fe067e8aSDavid S. Miller #define tcp_for_write_queue(skb, sk)					\
1566cd07a8eaSDavid S. Miller 	skb_queue_walk(&(sk)->sk_write_queue, skb)
1567fe067e8aSDavid S. Miller 
1568fe067e8aSDavid S. Miller #define tcp_for_write_queue_from(skb, sk)				\
1569cd07a8eaSDavid S. Miller 	skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1570fe067e8aSDavid S. Miller 
1571234b6860SIlpo Järvinen #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1572cd07a8eaSDavid S. Miller 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1573234b6860SIlpo Järvinen 
1574cf533ea5SEric Dumazet static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1575fe067e8aSDavid S. Miller {
1576fe067e8aSDavid S. Miller 	return sk->sk_send_head;
1577fe067e8aSDavid S. Miller }
1578fe067e8aSDavid S. Miller 
1579cd07a8eaSDavid S. Miller static inline bool tcp_skb_is_last(const struct sock *sk,
1580cd07a8eaSDavid S. Miller 				   const struct sk_buff *skb)
1581cd07a8eaSDavid S. Miller {
1582cd07a8eaSDavid S. Miller 	return skb_queue_is_last(&sk->sk_write_queue, skb);
1583cd07a8eaSDavid S. Miller }
1584cd07a8eaSDavid S. Miller 
1585cf533ea5SEric Dumazet static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1586fe067e8aSDavid S. Miller {
1587cd07a8eaSDavid S. Miller 	if (tcp_skb_is_last(sk, skb))
1588fe067e8aSDavid S. Miller 		sk->sk_send_head = NULL;
1589cd07a8eaSDavid S. Miller 	else
1590cd07a8eaSDavid S. Miller 		sk->sk_send_head = tcp_write_queue_next(sk, skb);
1591fe067e8aSDavid S. Miller }
1592fe067e8aSDavid S. Miller 
1593fe067e8aSDavid S. Miller static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1594fe067e8aSDavid S. Miller {
15950f87230dSFrancis Yan 	if (sk->sk_send_head == skb_unlinked) {
1596fe067e8aSDavid S. Miller 		sk->sk_send_head = NULL;
15970f87230dSFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
15980f87230dSFrancis Yan 	}
1599bb1fcecaSEric Dumazet 	if (tcp_sk(sk)->highest_sack == skb_unlinked)
1600bb1fcecaSEric Dumazet 		tcp_sk(sk)->highest_sack = NULL;
1601fe067e8aSDavid S. Miller }
1602fe067e8aSDavid S. Miller 
1603fe067e8aSDavid S. Miller static inline void tcp_init_send_head(struct sock *sk)
1604fe067e8aSDavid S. Miller {
1605fe067e8aSDavid S. Miller 	sk->sk_send_head = NULL;
1606fe067e8aSDavid S. Miller }
1607fe067e8aSDavid S. Miller 
1608fe067e8aSDavid S. Miller static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1609fe067e8aSDavid S. Miller {
1610fe067e8aSDavid S. Miller 	__skb_queue_tail(&sk->sk_write_queue, skb);
1611fe067e8aSDavid S. Miller }
1612fe067e8aSDavid S. Miller 
1613fe067e8aSDavid S. Miller static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1614fe067e8aSDavid S. Miller {
1615fe067e8aSDavid S. Miller 	__tcp_add_write_queue_tail(sk, skb);
1616fe067e8aSDavid S. Miller 
1617fe067e8aSDavid S. Miller 	/* Queue it, remembering where we must start sending. */
16186859d494SIlpo Järvinen 	if (sk->sk_send_head == NULL) {
1619fe067e8aSDavid S. Miller 		sk->sk_send_head = skb;
16200f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
16216859d494SIlpo Järvinen 
16226859d494SIlpo Järvinen 		if (tcp_sk(sk)->highest_sack == NULL)
16236859d494SIlpo Järvinen 			tcp_sk(sk)->highest_sack = skb;
16246859d494SIlpo Järvinen 	}
1625fe067e8aSDavid S. Miller }
1626fe067e8aSDavid S. Miller 
1627fe067e8aSDavid S. Miller static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1628fe067e8aSDavid S. Miller {
1629fe067e8aSDavid S. Miller 	__skb_queue_head(&sk->sk_write_queue, skb);
1630fe067e8aSDavid S. Miller }
1631fe067e8aSDavid S. Miller 
1632fe067e8aSDavid S. Miller /* Insert buff after skb on the write queue of sk.  */
1633fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1634fe067e8aSDavid S. Miller 						struct sk_buff *buff,
1635fe067e8aSDavid S. Miller 						struct sock *sk)
1636fe067e8aSDavid S. Miller {
16377de6c033SGerrit Renker 	__skb_queue_after(&sk->sk_write_queue, skb, buff);
1638fe067e8aSDavid S. Miller }
1639fe067e8aSDavid S. Miller 
164043f59c89SDavid S. Miller /* Insert new before skb on the write queue of sk.  */
1641fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1642fe067e8aSDavid S. Miller 						  struct sk_buff *skb,
1643fe067e8aSDavid S. Miller 						  struct sock *sk)
1644fe067e8aSDavid S. Miller {
164543f59c89SDavid S. Miller 	__skb_queue_before(&sk->sk_write_queue, skb, new);
16466e421410SIlpo Järvinen 
16476e421410SIlpo Järvinen 	if (sk->sk_send_head == skb)
16486e421410SIlpo Järvinen 		sk->sk_send_head = new;
1649fe067e8aSDavid S. Miller }
1650fe067e8aSDavid S. Miller 
1651fe067e8aSDavid S. Miller static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1652fe067e8aSDavid S. Miller {
1653fe067e8aSDavid S. Miller 	__skb_unlink(skb, &sk->sk_write_queue);
1654fe067e8aSDavid S. Miller }
1655fe067e8aSDavid S. Miller 
1656a2a385d6SEric Dumazet static inline bool tcp_write_queue_empty(struct sock *sk)
1657fe067e8aSDavid S. Miller {
1658fe067e8aSDavid S. Miller 	return skb_queue_empty(&sk->sk_write_queue);
1659fe067e8aSDavid S. Miller }
1660fe067e8aSDavid S. Miller 
166112d50c46SKrishna Kumar static inline void tcp_push_pending_frames(struct sock *sk)
166212d50c46SKrishna Kumar {
166312d50c46SKrishna Kumar 	if (tcp_send_head(sk)) {
166412d50c46SKrishna Kumar 		struct tcp_sock *tp = tcp_sk(sk);
166512d50c46SKrishna Kumar 
166612d50c46SKrishna Kumar 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
166712d50c46SKrishna Kumar 	}
166812d50c46SKrishna Kumar }
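
/* Typical usage sketch (illustrative): a sender path links a newly built skb
 * into the write queue and then pushes whatever is pending:
 *
 *	tcp_add_write_queue_tail(sk, skb);
 *	tcp_push_pending_frames(sk);
 *
 * tcp_add_write_queue_tail() records the first unsent skb in
 * sk->sk_send_head, so tcp_send_head() knows where transmission should resume.
 */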
166912d50c46SKrishna Kumar 
1670ecb97192SNeal Cardwell /* Start sequence of the skb just after the highest skb with SACKed
1671ecb97192SNeal Cardwell  * bit; valid only if sacked_out > 0 or when the caller has itself
1672ecb97192SNeal Cardwell  * ensured validity.
1673a47e5a98SIlpo Järvinen  */
1674a47e5a98SIlpo Järvinen static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1675a47e5a98SIlpo Järvinen {
1676a47e5a98SIlpo Järvinen 	if (!tp->sacked_out)
1677a47e5a98SIlpo Järvinen 		return tp->snd_una;
16786859d494SIlpo Järvinen 
16796859d494SIlpo Järvinen 	if (tp->highest_sack == NULL)
16806859d494SIlpo Järvinen 		return tp->snd_nxt;
16816859d494SIlpo Järvinen 
1682a47e5a98SIlpo Järvinen 	return TCP_SKB_CB(tp->highest_sack)->seq;
1683a47e5a98SIlpo Järvinen }
1684a47e5a98SIlpo Järvinen 
16856859d494SIlpo Järvinen static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
16866859d494SIlpo Järvinen {
16876859d494SIlpo Järvinen 	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
16886859d494SIlpo Järvinen 						tcp_write_queue_next(sk, skb);
16896859d494SIlpo Järvinen }
16906859d494SIlpo Järvinen 
16916859d494SIlpo Järvinen static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
16926859d494SIlpo Järvinen {
16936859d494SIlpo Järvinen 	return tcp_sk(sk)->highest_sack;
16946859d494SIlpo Järvinen }
16956859d494SIlpo Järvinen 
16966859d494SIlpo Järvinen static inline void tcp_highest_sack_reset(struct sock *sk)
16976859d494SIlpo Järvinen {
16986859d494SIlpo Järvinen 	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
16996859d494SIlpo Järvinen }
17006859d494SIlpo Järvinen 
17016859d494SIlpo Järvinen /* Called when old skb is about to be deleted (to be combined with new skb) */
17026859d494SIlpo Järvinen static inline void tcp_highest_sack_combine(struct sock *sk,
17036859d494SIlpo Järvinen 					    struct sk_buff *old,
17046859d494SIlpo Järvinen 					    struct sk_buff *new)
17056859d494SIlpo Järvinen {
17066859d494SIlpo Järvinen 	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
17076859d494SIlpo Järvinen 		tcp_sk(sk)->highest_sack = new;
17086859d494SIlpo Järvinen }
17096859d494SIlpo Järvinen 
1710b1f0a0e9SFlorian Westphal /* This helper checks if socket has IP_TRANSPARENT set */
1711b1f0a0e9SFlorian Westphal static inline bool inet_sk_transparent(const struct sock *sk)
1712b1f0a0e9SFlorian Westphal {
1713b1f0a0e9SFlorian Westphal 	switch (sk->sk_state) {
1714b1f0a0e9SFlorian Westphal 	case TCP_TIME_WAIT:
1715b1f0a0e9SFlorian Westphal 		return inet_twsk(sk)->tw_transparent;
1716b1f0a0e9SFlorian Westphal 	case TCP_NEW_SYN_RECV:
1717b1f0a0e9SFlorian Westphal 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1718b1f0a0e9SFlorian Westphal 	}
1719b1f0a0e9SFlorian Westphal 	return inet_sk(sk)->transparent;
1720b1f0a0e9SFlorian Westphal }
1721b1f0a0e9SFlorian Westphal 
17225aa4b32fSAndreas Petlund /* Determines whether this is a thin stream (which may suffer from
17235aa4b32fSAndreas Petlund  * increased latency). Used to trigger latency-reducing mechanisms.
17245aa4b32fSAndreas Petlund  */
1725a2a385d6SEric Dumazet static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
17265aa4b32fSAndreas Petlund {
17275aa4b32fSAndreas Petlund 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
17285aa4b32fSAndreas Petlund }
17295aa4b32fSAndreas Petlund 
17301da177e4SLinus Torvalds /* /proc */
17311da177e4SLinus Torvalds enum tcp_seq_states {
17321da177e4SLinus Torvalds 	TCP_SEQ_STATE_LISTENING,
17331da177e4SLinus Torvalds 	TCP_SEQ_STATE_ESTABLISHED,
17341da177e4SLinus Torvalds };
17351da177e4SLinus Torvalds 
173673cb88ecSArjan van de Ven int tcp_seq_open(struct inode *inode, struct file *file);
173773cb88ecSArjan van de Ven 
17381da177e4SLinus Torvalds struct tcp_seq_afinfo {
17391da177e4SLinus Torvalds 	char				*name;
17401da177e4SLinus Torvalds 	sa_family_t			family;
174173cb88ecSArjan van de Ven 	const struct file_operations	*seq_fops;
17429427c4b3SDenis V. Lunev 	struct seq_operations		seq_ops;
17431da177e4SLinus Torvalds };
17441da177e4SLinus Torvalds 
17451da177e4SLinus Torvalds struct tcp_iter_state {
1746a4146b1bSDenis V. Lunev 	struct seq_net_private	p;
17471da177e4SLinus Torvalds 	sa_family_t		family;
17481da177e4SLinus Torvalds 	enum tcp_seq_states	state;
17491da177e4SLinus Torvalds 	struct sock		*syn_wait_sk;
1750a7cb5a49SEric W. Biederman 	int			bucket, offset, sbucket, num;
1751a8b690f9STom Herbert 	loff_t			last_pos;
17521da177e4SLinus Torvalds };
17531da177e4SLinus Torvalds 
17545c9f3023SJoe Perches int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
17555c9f3023SJoe Perches void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
17561da177e4SLinus Torvalds 
175720380731SArnaldo Carvalho de Melo extern struct request_sock_ops tcp_request_sock_ops;
1758c6aefafbSGlenn Griffin extern struct request_sock_ops tcp6_request_sock_ops;
175920380731SArnaldo Carvalho de Melo 
17605c9f3023SJoe Perches void tcp_v4_destroy_sock(struct sock *sk);
176120380731SArnaldo Carvalho de Melo 
176228be6e07SEric Dumazet struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1763c8f44affSMichał Mirosław 				netdev_features_t features);
17645c9f3023SJoe Perches struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
17655c9f3023SJoe Perches int tcp_gro_complete(struct sk_buff *skb);
176628850dc7SDaniel Borkmann 
17675c9f3023SJoe Perches void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1768f4c50d99SHerbert Xu 
1769c9bee3b7SEric Dumazet static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1770c9bee3b7SEric Dumazet {
17714979f2d9SNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
17724979f2d9SNikolay Borisov 	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1773c9bee3b7SEric Dumazet }
1774c9bee3b7SEric Dumazet 
1775c9bee3b7SEric Dumazet static inline bool tcp_stream_memory_free(const struct sock *sk)
1776c9bee3b7SEric Dumazet {
1777c9bee3b7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1778c9bee3b7SEric Dumazet 	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1779c9bee3b7SEric Dumazet 
1780c9bee3b7SEric Dumazet 	return notsent_bytes < tcp_notsent_lowat(tp);
1781c9bee3b7SEric Dumazet }
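
/* Example (illustrative): with notsent_lowat set to 16KB, the stream is
 * considered "memory free" (and hence writable for poll purposes) only while
 * fewer than 16KB of written-but-unsent bytes sit between write_seq and
 * snd_nxt.
 */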
1782c9bee3b7SEric Dumazet 
178320380731SArnaldo Carvalho de Melo #ifdef CONFIG_PROC_FS
17845c9f3023SJoe Perches int tcp4_proc_init(void);
17855c9f3023SJoe Perches void tcp4_proc_exit(void);
178620380731SArnaldo Carvalho de Melo #endif
178720380731SArnaldo Carvalho de Melo 
1788ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
17891fb6f159SOctavian Purdila int tcp_conn_request(struct request_sock_ops *rsk_ops,
17901fb6f159SOctavian Purdila 		     const struct tcp_request_sock_ops *af_ops,
17911fb6f159SOctavian Purdila 		     struct sock *sk, struct sk_buff *skb);
17925db92c99SOctavian Purdila 
1793cfb6eeb4SYOSHIFUJI Hideaki /* TCP af-specific functions */
1794cfb6eeb4SYOSHIFUJI Hideaki struct tcp_sock_af_ops {
1795cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1796b83e3debSEric Dumazet 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
1797fd3a154aSEric Dumazet 						const struct sock *addr_sk);
1798cfb6eeb4SYOSHIFUJI Hideaki 	int		(*calc_md5_hash)(char *location,
179939f8e58eSEric Dumazet 					 const struct tcp_md5sig_key *md5,
1800318cf7aaSEric Dumazet 					 const struct sock *sk,
1801318cf7aaSEric Dumazet 					 const struct sk_buff *skb);
1802cfb6eeb4SYOSHIFUJI Hideaki 	int		(*md5_parse)(struct sock *sk,
1803cfb6eeb4SYOSHIFUJI Hideaki 				     char __user *optval,
1804cfb6eeb4SYOSHIFUJI Hideaki 				     int optlen);
1805cfb6eeb4SYOSHIFUJI Hideaki #endif
1806cfb6eeb4SYOSHIFUJI Hideaki };
1807cfb6eeb4SYOSHIFUJI Hideaki 
1808cfb6eeb4SYOSHIFUJI Hideaki struct tcp_request_sock_ops {
18092aec4a29SOctavian Purdila 	u16 mss_clamp;
1810cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1811b83e3debSEric Dumazet 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1812fd3a154aSEric Dumazet 						 const struct sock *addr_sk);
1813e3afe7b7SJohn Dykstra 	int		(*calc_md5_hash) (char *location,
181439f8e58eSEric Dumazet 					  const struct tcp_md5sig_key *md5,
1815318cf7aaSEric Dumazet 					  const struct sock *sk,
1816318cf7aaSEric Dumazet 					  const struct sk_buff *skb);
1817cfb6eeb4SYOSHIFUJI Hideaki #endif
1818b40cf18eSEric Dumazet 	void (*init_req)(struct request_sock *req,
1819b40cf18eSEric Dumazet 			 const struct sock *sk_listener,
182016bea70aSOctavian Purdila 			 struct sk_buff *skb);
1821fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES
18223f684b4bSEric Dumazet 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
1823fb7b37a7SOctavian Purdila 				 __u16 *mss);
1824fb7b37a7SOctavian Purdila #endif
1825f964629eSEric Dumazet 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
18264396e461SSoheil Hassas Yeganeh 				       const struct request_sock *req);
182784b114b9SEric Dumazet 	u32 (*init_seq)(const struct sk_buff *skb);
182884b114b9SEric Dumazet 	u32 (*init_ts_off)(const struct sk_buff *skb);
18290f935dbeSEric Dumazet 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1830d6274bd8SOctavian Purdila 			   struct flowi *fl, struct request_sock *req,
1831dc6ef6beSEric Dumazet 			   struct tcp_fastopen_cookie *foc,
1832b3d05147SEric Dumazet 			   enum tcp_synack_type synack_type);
1833cfb6eeb4SYOSHIFUJI Hideaki };
1834cfb6eeb4SYOSHIFUJI Hideaki 
1835fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES
1836fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
18373f684b4bSEric Dumazet 					 const struct sock *sk, struct sk_buff *skb,
1838fb7b37a7SOctavian Purdila 					 __u16 *mss)
1839fb7b37a7SOctavian Purdila {
18403f684b4bSEric Dumazet 	tcp_synq_overflow(sk);
184102a1d6e7SEric Dumazet 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
18423f684b4bSEric Dumazet 	return ops->cookie_init_seq(skb, mss);
1843fb7b37a7SOctavian Purdila }
1844fb7b37a7SOctavian Purdila #else
1845fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
18463f684b4bSEric Dumazet 					 const struct sock *sk, struct sk_buff *skb,
1847fb7b37a7SOctavian Purdila 					 __u16 *mss)
1848fb7b37a7SOctavian Purdila {
1849fb7b37a7SOctavian Purdila 	return 0;
1850fb7b37a7SOctavian Purdila }
1851fb7b37a7SOctavian Purdila #endif
1852fb7b37a7SOctavian Purdila 
18535c9f3023SJoe Perches int tcpv4_offload_init(void);
185428850dc7SDaniel Borkmann 
18555c9f3023SJoe Perches void tcp_v4_init(void);
18565c9f3023SJoe Perches void tcp_init(void);
185720380731SArnaldo Carvalho de Melo 
1858659a8ad5SYuchung Cheng /* tcp_recovery.c */
1859128eda86SEric Dumazet extern void tcp_rack_mark_lost(struct sock *sk);
18601d0833dfSYuchung Cheng extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
1861d2329f10SEric Dumazet 			     const struct skb_mstamp *xmit_time);
186257dde7f7SYuchung Cheng extern void tcp_rack_reo_timeout(struct sock *sk);
1863659a8ad5SYuchung Cheng 
1864e25f866fSCong Wang /*
1865e25f866fSCong Wang  * Save and compile IPv4 options, returning a pointer to them
1866e25f866fSCong Wang  */
1867e25f866fSCong Wang static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
1868e25f866fSCong Wang {
1869e25f866fSCong Wang 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1870e25f866fSCong Wang 	struct ip_options_rcu *dopt = NULL;
1871e25f866fSCong Wang 
1872461b74c3SCong Wang 	if (opt->optlen) {
1873e25f866fSCong Wang 		int opt_size = sizeof(*dopt) + opt->optlen;
1874e25f866fSCong Wang 
1875e25f866fSCong Wang 		dopt = kmalloc(opt_size, GFP_ATOMIC);
1876e25f866fSCong Wang 		if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
1877e25f866fSCong Wang 			kfree(dopt);
1878e25f866fSCong Wang 			dopt = NULL;
1879e25f866fSCong Wang 		}
1880e25f866fSCong Wang 	}
1881e25f866fSCong Wang 	return dopt;
1882e25f866fSCong Wang }
1883e25f866fSCong Wang 
188498781965SEric Dumazet /* locally generated TCP pure ACKs have skb->truesize == 2
188598781965SEric Dumazet  * (check tcp_send_ack() in net/ipv4/tcp_output.c )
188698781965SEric Dumazet  * This is much faster than dissecting the packet to find out.
188798781965SEric Dumazet  * (Think of GRE encapsulations, IPv4, IPv6, ...)
188898781965SEric Dumazet  */
188998781965SEric Dumazet static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
189098781965SEric Dumazet {
189198781965SEric Dumazet 	return skb->truesize == 2;
189298781965SEric Dumazet }
189398781965SEric Dumazet 
189498781965SEric Dumazet static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
189598781965SEric Dumazet {
189698781965SEric Dumazet 	skb->truesize = 2;
189798781965SEric Dumazet }
189898781965SEric Dumazet 
1899473bd239STom Herbert static inline int tcp_inq(struct sock *sk)
1900473bd239STom Herbert {
1901473bd239STom Herbert 	struct tcp_sock *tp = tcp_sk(sk);
1902473bd239STom Herbert 	int answ;
1903473bd239STom Herbert 
1904473bd239STom Herbert 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1905473bd239STom Herbert 		answ = 0;
1906473bd239STom Herbert 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
1907473bd239STom Herbert 		   !tp->urg_data ||
1908473bd239STom Herbert 		   before(tp->urg_seq, tp->copied_seq) ||
1909473bd239STom Herbert 		   !before(tp->urg_seq, tp->rcv_nxt)) {
1910473bd239STom Herbert 
1911473bd239STom Herbert 		answ = tp->rcv_nxt - tp->copied_seq;
1912473bd239STom Herbert 
1913473bd239STom Herbert 		/* Subtract 1, if FIN was received */
1914473bd239STom Herbert 		if (answ && sock_flag(sk, SOCK_DONE))
1915473bd239STom Herbert 			answ--;
1916473bd239STom Herbert 	} else {
1917473bd239STom Herbert 		answ = tp->urg_seq - tp->copied_seq;
1918473bd239STom Herbert 	}
1919473bd239STom Herbert 
1920473bd239STom Herbert 	return answ;
1921473bd239STom Herbert }
1922473bd239STom Herbert 
192332035585STom Herbert int tcp_peek_len(struct socket *sock);
192432035585STom Herbert 
1925a44d6eacSMartin KaFai Lau static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1926a44d6eacSMartin KaFai Lau {
1927a44d6eacSMartin KaFai Lau 	u16 segs_in;
1928a44d6eacSMartin KaFai Lau 
1929a44d6eacSMartin KaFai Lau 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1930a44d6eacSMartin KaFai Lau 	tp->segs_in += segs_in;
1931a44d6eacSMartin KaFai Lau 	if (skb->len > tcp_hdrlen(skb))
1932a44d6eacSMartin KaFai Lau 		tp->data_segs_in += segs_in;
1933a44d6eacSMartin KaFai Lau }
1934a44d6eacSMartin KaFai Lau 
19359caad864SEric Dumazet /*
19369caad864SEric Dumazet  * TCP listen path runs lockless.
19379caad864SEric Dumazet  * We forced "struct sock" to be const qualified to make sure
19389caad864SEric Dumazet  * we don't modify one of its fields by mistake.
19399caad864SEric Dumazet  * Here, we increment sk_drops which is an atomic_t, so we can safely
19409caad864SEric Dumazet  * make sock writable again.
19419caad864SEric Dumazet  */
19429caad864SEric Dumazet static inline void tcp_listendrop(const struct sock *sk)
19439caad864SEric Dumazet {
19449caad864SEric Dumazet 	atomic_inc(&((struct sock *)sk)->sk_drops);
194502a1d6e7SEric Dumazet 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
19469caad864SEric Dumazet }
19479caad864SEric Dumazet 
19481da177e4SLinus Torvalds #endif	/* _TCP_H */
1949