/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U
/* The least MTU to use for probing */
#define TCP_BASE_MSS		1024

/* Probing interval, defaults to 10 minutes as per RFC 4821 */
#define TCP_PROBE_INTERVAL	600

/* Stop MTU probing once the search range narrows below this many bytes */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when actively opening a connection.
				 * RFC 1122 says the minimum retry period
				 * MUST be at least 180 secs.  Nevertheless
				 * this value corresponds to 63 secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passively opening a connection.
				 * This corresponds to 31 secs of
				 * retransmission with the current
				 * initial RTO.
				 */
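
/*
 * Worked example (illustrative): with TCP_TIMEOUT_INIT = 1 sec and
 * exponential backoff, the retransmissions of an unanswered SYN span
 * 1 + 2 + 4 + 8 + 16 + 32 = 63 secs for TCP_SYN_RETRIES = 6, and those
 * of a SYN-ACK span 1 + 2 + 4 + 8 + 16 = 31 secs for
 * TCP_SYNACK_RETRIES = 5, which is where the totals quoted above
 * come from.
 */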

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
                                 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */
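
/*
 * Jiffies arithmetic sketch (assuming HZ = 1000): TCP_RTO_MIN = HZ/5 =
 * 200 msec, TCP_RTO_MAX = 120 secs, TCP_TIMEOUT_INIT = 1 sec (RFC 6298)
 * and TCP_DELACK_MIN = TCP_ATO_MIN = HZ/25 = 40 msec.  With HZ < 100
 * the HZ/25 expressions become too coarse (possibly 0 jiffies), hence
 * the fixed 4-jiffy fallback above.
 */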

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
					                 * for local resources.
					                 */
#define TCP_REO_TIMEOUT_MIN	(2000) /* Min RACK reordering timeout in usec */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number placed at the start of the experimental option payload,
 * so that the shared TCP experimental option space can be demultiplexed.
 * See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989

/*
 *     TCP option lengths
 */

#define TCPOLEN_MSS            4
#define TCPOLEN_WINDOW         3
#define TCPOLEN_SACK_PERM      2
#define TCPOLEN_TIMESTAMP      10
#define TCPOLEN_MD5SIG         18
#define TCPOLEN_FASTOPEN_BASE  2
#define TCPOLEN_EXP_FASTOPEN_BASE  4

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
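
/*
 * Wire-format sketch (illustrative): the timestamp option is kind 8,
 * length 10, and stacks pad it to a 4-byte boundary as
 *
 *	NOP, NOP, kind=8, len=10, TSval(4 bytes), TSecr(4 bytes)
 *
 * which accounts for the 12 bytes of TCPOLEN_TSTAMP_ALIGNED above.
 */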

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_max_reordering;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_recovery;
#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */

extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_min_rtt_wlen;
extern int sysctl_tcp_autocorking;
extern int sysctl_tcp_invalid_ratelimit;
extern int sysctl_tcp_pacing_ss_ratio;
extern int sysctl_tcp_pacing_ca_ratio;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return tcp_memory_pressure;
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
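
/*
 * Wraparound sketch (illustrative): before()/after() rely on signed
 * 32-bit subtraction, so the comparison stays correct across the 2^32
 * sequence wrap, e.g. before(0xfffffff0, 0x10) is true because
 * (__s32)(0xfffffff0 - 0x10) = (__s32)0xffffffe0 < 0.
 */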

static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

bool tcp_check_oom(struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

void tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct tcphdr *th, unsigned int len);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
unsigned int tcp_poll(struct file *file, struct socket *sock,
		      struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen);
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len);
void tcp_parse_options(const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember the time of the last synqueue overflow,
 * but do not dirty this field too often (once per second is enough).
 * It is racy as we do not hold a lock, but the race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	unsigned long now = jiffies;

	if (time_after(now, last_overflow + HZ))
		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;

	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}
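
/*
 * Validity arithmetic (illustrative): the cookie counter advances once
 * per TCP_SYNCOOKIE_PERIOD = 60 secs and up to MAX_SYNCOOKIE_AGE = 2
 * counter steps are accepted, so a cookie is honoured for at most
 * 2 * 60 = 120 secs, and for less if it was generated just before the
 * counter ticked over.
 */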

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
__u32 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
		     int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
bool tcp_may_send_now(struct sock *sk);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
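
/*
 * Worked example (illustrative, assuming TCP_MSS_DEFAULT = 536): a peer
 * advertising max_window = 10000 yields cutoff = 5000, so a 9000-byte
 * TSO chunk is clamped to 5000; a peer stuck at max_window = 512 keeps
 * cutoff = 512, and an 800-byte packet is clamped to
 * max(512, 68 - tp->tcp_header_len) = 512.
 */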

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}
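
/*
 * Bit-layout sketch (illustrative): pred_flags mirrors the 4th 32-bit
 * word of the TCP header for header prediction.  tcp_header_len is in
 * bytes, so the << 26 shift places header_len/4 in the 4-bit data
 * offset field (equivalent to << 28 after the division); e.g. a 32-byte
 * header (timestamps enabled) gives doff = 8 in the top nibble, OR'd
 * with the ACK flag and the expected window field.
 */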

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return skb->skb_mstamp.stamp_jiffies;
}


#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
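
/*
 * Usage sketch (illustrative): tcp_flag_byte() exposes byte 13 of the
 * TCP header, so a pure SYN tests as tcp_flag_byte(th) == TCPHDR_SYN,
 * and a SYN-ACK as
 *	(tcp_flag_byte(th) & (TCPHDR_SYN | TCPHDR_ACK)) ==
 *					(TCPHDR_SYN | TCPHDR_ACK)
 */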

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			unused:6;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
			/* There is space for up to 24 bytes */
			__u32 in_flight:30,/* Bytes in flight at transmit */
			      is_app_limited:1, /* cwnd not fully used? */
			      unused:1;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			struct skb_mstamp first_tx_mstamp;
			/* when we reached the "delivered" count */
			struct skb_mstamp delivered_mstamp;
		} tx;   /* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
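
/*
 * Usage sketch (illustrative): the control block overlays skb->cb[],
 * so per-packet TCP state is read as, e.g.:
 *
 *	u32 seq = TCP_SKB_CB(skb)->seq;
 *	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 *		...;
 */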

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}
#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return true;
#endif
	return false;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}
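
/*
 * GSO accounting sketch (illustrative): a 30000-byte skb queued with
 * mss = 1500 carries tcp_skb_pcount() = 20 and tcp_skb_mss() = 1500,
 * so end_seq - seq == tcp_skb_pcount() * tcp_skb_mss() for a pure data
 * segment (SYN and FIN each add one to end_seq).
 */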

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
	CA_EVENT_NON_DELAYED_ACK,
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	struct	skb_mstamp prior_mstamp; /* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
};
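
/*
 * Rate arithmetic (illustrative): a delivery-rate estimate derived from
 * a sample is delivered / interval_us; e.g. delivered = 100 segments of
 * 1448 bytes over interval_us = 10000 is 144800 bytes / 10 msec, i.e.
 * roughly 14.5 MB/sec.
 */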

struct tcp_congestion_ops {
	struct list_head	list;
	u32 key;
	u32 flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
	/* suggest number of segments for each skb to transmit (optional) */
	u32 (*tso_segs_goal)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);
	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(const char *name);
void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
963317a76f9SStephen Hemminger 
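/* Illustrative sketch (hypothetical, not part of this header): a minimal
 * congestion control module can satisfy the two required hooks by reusing
 * the Reno helpers declared above, then register itself. The "example"
 * name and the module plumbing (THIS_MODULE, __init) are assumptions here.
 */
#if 0	/* example only */
static struct tcp_congestion_ops tcp_example __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required */
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.owner		= THIS_MODULE,
	.name		= "example",
};

static int __init tcp_example_register(void)
{
	/* Fails if a required hook is missing or the name is taken. */
	return tcp_register_congestion_control(&tcp_example);
}
#endif
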
964c5c6a8abSDaniel Borkmann struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
965c3a8d947SDaniel Borkmann u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
966ea697639SDaniel Borkmann #ifdef CONFIG_INET
967c5c6a8abSDaniel Borkmann char *tcp_ca_get_name_by_key(u32 key, char *buffer);
968ea697639SDaniel Borkmann #else
969ea697639SDaniel Borkmann static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
970ea697639SDaniel Borkmann {
971ea697639SDaniel Borkmann 	return NULL;
972ea697639SDaniel Borkmann }
973ea697639SDaniel Borkmann #endif
974c5c6a8abSDaniel Borkmann 
97530e502a3SDaniel Borkmann static inline bool tcp_ca_needs_ecn(const struct sock *sk)
97630e502a3SDaniel Borkmann {
97730e502a3SDaniel Borkmann 	const struct inet_connection_sock *icsk = inet_csk(sk);
97830e502a3SDaniel Borkmann 
97930e502a3SDaniel Borkmann 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
98030e502a3SDaniel Borkmann }
98130e502a3SDaniel Borkmann 
9826687e988SArnaldo Carvalho de Melo static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
983317a76f9SStephen Hemminger {
9846687e988SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
9856687e988SArnaldo Carvalho de Melo 
9866687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_ops->set_state)
9876687e988SArnaldo Carvalho de Melo 		icsk->icsk_ca_ops->set_state(sk, ca_state);
9886687e988SArnaldo Carvalho de Melo 	icsk->icsk_ca_state = ca_state;
989317a76f9SStephen Hemminger }
990317a76f9SStephen Hemminger 
9916687e988SArnaldo Carvalho de Melo static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
992317a76f9SStephen Hemminger {
9936687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
9946687e988SArnaldo Carvalho de Melo 
9956687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_ops->cwnd_event)
9966687e988SArnaldo Carvalho de Melo 		icsk->icsk_ca_ops->cwnd_event(sk, event);
997317a76f9SStephen Hemminger }
998317a76f9SStephen Hemminger 
999b9f64820SYuchung Cheng /* From tcp_rate.c */
1000b9f64820SYuchung Cheng void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1001b9f64820SYuchung Cheng void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1002b9f64820SYuchung Cheng 			    struct rate_sample *rs);
1003b9f64820SYuchung Cheng void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1004b9f64820SYuchung Cheng 		  struct skb_mstamp *now, struct rate_sample *rs);
1005d7722e85SSoheil Hassas Yeganeh void tcp_rate_check_app_limited(struct sock *sk);
1006b9f64820SYuchung Cheng 
1007e60402d0SIlpo Järvinen /* These functions determine how the current flow behaves with respect to SACK
1008e60402d0SIlpo Järvinen  * handling. SACK is negotiated with the peer, and therefore it can vary
1009e60402d0SIlpo Järvinen  * between different flows.
1010e60402d0SIlpo Järvinen  *
1011e60402d0SIlpo Järvinen  * tcp_is_sack - SACK enabled
1012e60402d0SIlpo Järvinen  * tcp_is_reno - No SACK
1013e60402d0SIlpo Järvinen  * tcp_is_fack - FACK enabled, implies SACK enabled
1014e60402d0SIlpo Järvinen  */
1015e60402d0SIlpo Järvinen static inline int tcp_is_sack(const struct tcp_sock *tp)
1016e60402d0SIlpo Järvinen {
1017e60402d0SIlpo Järvinen 	return tp->rx_opt.sack_ok;
1018e60402d0SIlpo Järvinen }
1019e60402d0SIlpo Järvinen 
1020a2a385d6SEric Dumazet static inline bool tcp_is_reno(const struct tcp_sock *tp)
1021e60402d0SIlpo Järvinen {
1022e60402d0SIlpo Järvinen 	return !tcp_is_sack(tp);
1023e60402d0SIlpo Järvinen }
1024e60402d0SIlpo Järvinen 
1025a2a385d6SEric Dumazet static inline bool tcp_is_fack(const struct tcp_sock *tp)
1026e60402d0SIlpo Järvinen {
1027ab56222aSVijay Subramanian 	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
1028e60402d0SIlpo Järvinen }
1029e60402d0SIlpo Järvinen 
1030e60402d0SIlpo Järvinen static inline void tcp_enable_fack(struct tcp_sock *tp)
1031e60402d0SIlpo Järvinen {
1032ab56222aSVijay Subramanian 	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
1033e60402d0SIlpo Järvinen }
1034e60402d0SIlpo Järvinen 
103583ae4088SIlpo Järvinen static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
103683ae4088SIlpo Järvinen {
103783ae4088SIlpo Järvinen 	return tp->sacked_out + tp->lost_out;
103883ae4088SIlpo Järvinen }
103983ae4088SIlpo Järvinen 
10401da177e4SLinus Torvalds /* This determines how many packets are "in the network" to the best
10411da177e4SLinus Torvalds  * of our knowledge.  In many cases it is conservative, but where
10421da177e4SLinus Torvalds  * detailed information is available from the receiver (via SACK
10431da177e4SLinus Torvalds  * blocks etc.) we can make more aggressive calculations.
10441da177e4SLinus Torvalds  *
10451da177e4SLinus Torvalds  * Use this for decisions involving congestion control; use just
10461da177e4SLinus Torvalds  * tp->packets_out to determine if the send queue is empty or not.
10471da177e4SLinus Torvalds  *
10481da177e4SLinus Torvalds  * Read this equation as:
10491da177e4SLinus Torvalds  *
10501da177e4SLinus Torvalds  *	"Packets sent once on transmission queue" MINUS
10511da177e4SLinus Torvalds  *	"Packets left network, but not honestly ACKed yet" PLUS
10521da177e4SLinus Torvalds  *	"Packets fast retransmitted"
10531da177e4SLinus Torvalds  */
105440efc6faSStephen Hemminger static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
10551da177e4SLinus Torvalds {
105683ae4088SIlpo Järvinen 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
10571da177e4SLinus Torvalds }
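
/* Worked example (hypothetical numbers): packets_out = 10, sacked_out = 3,
 * lost_out = 2, retrans_out = 1. Then tcp_left_out() is 5 and
 * tcp_packets_in_flight() returns 10 - 5 + 1 = 6 segments presumed in flight.
 */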
10581da177e4SLinus Torvalds 
10590b6a05c1SIlpo Järvinen #define TCP_INFINITE_SSTHRESH	0x7fffffff
10600b6a05c1SIlpo Järvinen 
1061071d5080SYuchung Cheng static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1062071d5080SYuchung Cheng {
106376174004SYuchung Cheng 	return tp->snd_cwnd < tp->snd_ssthresh;
1064071d5080SYuchung Cheng }
1065071d5080SYuchung Cheng 
10660b6a05c1SIlpo Järvinen static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
10670b6a05c1SIlpo Järvinen {
10680b6a05c1SIlpo Järvinen 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
10690b6a05c1SIlpo Järvinen }
10700b6a05c1SIlpo Järvinen 
1071684bad11SYuchung Cheng static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1072684bad11SYuchung Cheng {
1073684bad11SYuchung Cheng 	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1074684bad11SYuchung Cheng 	       (1 << inet_csk(sk)->icsk_ca_state);
1075684bad11SYuchung Cheng }
1076684bad11SYuchung Cheng 
10771da177e4SLinus Torvalds /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1078684bad11SYuchung Cheng  * The exception is cwnd reduction phase, when cwnd is decreasing towards
10791da177e4SLinus Torvalds  * ssthresh.
10801da177e4SLinus Torvalds  */
10816687e988SArnaldo Carvalho de Melo static inline __u32 tcp_current_ssthresh(const struct sock *sk)
10821da177e4SLinus Torvalds {
10836687e988SArnaldo Carvalho de Melo 	const struct tcp_sock *tp = tcp_sk(sk);
1084cf533ea5SEric Dumazet 
1085684bad11SYuchung Cheng 	if (tcp_in_cwnd_reduction(sk))
10861da177e4SLinus Torvalds 		return tp->snd_ssthresh;
10871da177e4SLinus Torvalds 	else
10881da177e4SLinus Torvalds 		return max(tp->snd_ssthresh,
10891da177e4SLinus Torvalds 			   ((tp->snd_cwnd >> 1) +
10901da177e4SLinus Torvalds 			    (tp->snd_cwnd >> 2)));
10911da177e4SLinus Torvalds }
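
/* Note on the arithmetic above: (snd_cwnd >> 1) + (snd_cwnd >> 2) is 3/4 of
 * snd_cwnd, so outside CWR/Recovery this returns at least three quarters of
 * the current cwnd, e.g. snd_cwnd = 40 yields max(snd_ssthresh, 30).
 */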
10921da177e4SLinus Torvalds 
1093b9c4595bSIlpo Järvinen /* Intentionally a define so the WARN_ON location is reported at the caller */
1094b9c4595bSIlpo Järvinen #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
10951da177e4SLinus Torvalds 
10965ee2c941SChristoph Paasch void tcp_enter_cwr(struct sock *sk);
10975c9f3023SJoe Perches __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
10981da177e4SLinus Torvalds 
10996b5a5c0dSNeal Cardwell /* The maximum number of MSS of available cwnd for which TSO defers
11006b5a5c0dSNeal Cardwell  * sending if not using sysctl_tcp_tso_win_divisor.
11016b5a5c0dSNeal Cardwell  */
11026b5a5c0dSNeal Cardwell static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
11036b5a5c0dSNeal Cardwell {
11046b5a5c0dSNeal Cardwell 	return 3;
11056b5a5c0dSNeal Cardwell }
11066b5a5c0dSNeal Cardwell 
110790840defSIlpo Järvinen /* Returns end sequence number of the receiver's advertised window */
110890840defSIlpo Järvinen static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
110990840defSIlpo Järvinen {
111090840defSIlpo Järvinen 	return tp->snd_una + tp->snd_wnd;
111190840defSIlpo Järvinen }
1112e114a710SEric Dumazet 
1113e114a710SEric Dumazet /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1114e114a710SEric Dumazet  * flexible approach. The RFC suggests cwnd should not be raised unless
1115ca8a2263SNeal Cardwell  * it was fully used previously. And that's exactly what we do in
1116ca8a2263SNeal Cardwell  * congestion avoidance mode. But in slow start we allow cwnd to grow
1117ca8a2263SNeal Cardwell  * as long as the application has used half the cwnd.
1118e114a710SEric Dumazet  * Example:
1119e114a710SEric Dumazet  *    cwnd is 10 (IW10), but the application sends 9 frames.
1120e114a710SEric Dumazet  *    We allow cwnd to reach 18 when all frames are ACKed.
1121e114a710SEric Dumazet  * This check is safe because it's as aggressive as slow start, which already
1122e114a710SEric Dumazet  * risks 100% overshoot. The advantage is that we discourage applications
1123e114a710SEric Dumazet  * from sending filler packets or data merely to blow up cwnd usage, and
1124e114a710SEric Dumazet  * let an application-limited process probe bandwidth more aggressively.
1125e114a710SEric Dumazet  */
112624901551SEric Dumazet static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1127e114a710SEric Dumazet {
1128e114a710SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1129e114a710SEric Dumazet 
1130ca8a2263SNeal Cardwell 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1131071d5080SYuchung Cheng 	if (tcp_in_slow_start(tp))
1132ca8a2263SNeal Cardwell 		return tp->snd_cwnd < 2 * tp->max_packets_out;
1133ca8a2263SNeal Cardwell 
1134ca8a2263SNeal Cardwell 	return tp->is_cwnd_limited;
1135e114a710SEric Dumazet }
1136f4805edeSStephen Hemminger 
113721c8fe99SEric Dumazet /* Something is really bad: we could not queue an additional packet,
113821c8fe99SEric Dumazet  * because the qdisc is full or the receiver sent a zero window.
113921c8fe99SEric Dumazet  * We do not want to add fuel to the fire, or abort too early,
114021c8fe99SEric Dumazet  * so make sure the timer we arm now is at least 200ms in the future,
114121c8fe99SEric Dumazet  * regardless of the current icsk_rto value (which could be ~2ms).
114221c8fe99SEric Dumazet  */
114321c8fe99SEric Dumazet static inline unsigned long tcp_probe0_base(const struct sock *sk)
114421c8fe99SEric Dumazet {
114521c8fe99SEric Dumazet 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
114621c8fe99SEric Dumazet }
114721c8fe99SEric Dumazet 
114821c8fe99SEric Dumazet /* Variant of inet_csk_rto_backoff() used for zero window probes */
114921c8fe99SEric Dumazet static inline unsigned long tcp_probe0_when(const struct sock *sk,
115021c8fe99SEric Dumazet 					    unsigned long max_when)
115121c8fe99SEric Dumazet {
115221c8fe99SEric Dumazet 	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
115321c8fe99SEric Dumazet 
115421c8fe99SEric Dumazet 	return (unsigned long)min_t(u64, when, max_when);
115521c8fe99SEric Dumazet }
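
/* Worked example (assuming TCP_RTO_MIN is the 200ms floor mentioned above):
 * with icsk_rto below that floor and icsk_backoff = 3, tcp_probe0_when()
 * returns min(200ms << 3, max_when), i.e. 1.6s unless clamped by max_when.
 */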
115621c8fe99SEric Dumazet 
11579e412ba7SIlpo Järvinen static inline void tcp_check_probe_timer(struct sock *sk)
11581da177e4SLinus Torvalds {
115921c8fe99SEric Dumazet 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
11603f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
116121c8fe99SEric Dumazet 					  tcp_probe0_base(sk), TCP_RTO_MAX);
11621da177e4SLinus Torvalds }
11631da177e4SLinus Torvalds 
1164ee7537b6SHantzis Fotis static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
11651da177e4SLinus Torvalds {
11661da177e4SLinus Torvalds 	tp->snd_wl1 = seq;
11671da177e4SLinus Torvalds }
11681da177e4SLinus Torvalds 
1169ee7537b6SHantzis Fotis static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
11701da177e4SLinus Torvalds {
11711da177e4SLinus Torvalds 	tp->snd_wl1 = seq;
11721da177e4SLinus Torvalds }
11731da177e4SLinus Torvalds 
11741da177e4SLinus Torvalds /*
11751da177e4SLinus Torvalds  * Calculate (or check) the TCP checksum
11761da177e4SLinus Torvalds  */
1177ba7808eaSFrederik Deweerdt static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1178ba7808eaSFrederik Deweerdt 				   __be32 daddr, __wsum base)
11791da177e4SLinus Torvalds {
11801da177e4SLinus Torvalds 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
11811da177e4SLinus Torvalds }
11821da177e4SLinus Torvalds 
1183b51655b9SAl Viro static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
11841da177e4SLinus Torvalds {
1185fb286bb2SHerbert Xu 	return __skb_checksum_complete(skb);
11861da177e4SLinus Torvalds }
11871da177e4SLinus Torvalds 
1188a2a385d6SEric Dumazet static inline bool tcp_checksum_complete(struct sk_buff *skb)
11891da177e4SLinus Torvalds {
119060476372SHerbert Xu 	return !skb_csum_unnecessary(skb) &&
11911da177e4SLinus Torvalds 		__tcp_checksum_complete(skb);
11921da177e4SLinus Torvalds }
11931da177e4SLinus Torvalds 
11941da177e4SLinus Torvalds /* Prequeue for VJ style copy to user, combined with checksumming. */
11951da177e4SLinus Torvalds 
119640efc6faSStephen Hemminger static inline void tcp_prequeue_init(struct tcp_sock *tp)
11971da177e4SLinus Torvalds {
11981da177e4SLinus Torvalds 	tp->ucopy.task = NULL;
11991da177e4SLinus Torvalds 	tp->ucopy.len = 0;
12001da177e4SLinus Torvalds 	tp->ucopy.memory = 0;
12011da177e4SLinus Torvalds 	skb_queue_head_init(&tp->ucopy.prequeue);
12021da177e4SLinus Torvalds }
12031da177e4SLinus Torvalds 
12045c9f3023SJoe Perches bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1205c9c33212SEric Dumazet bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1206ac6e7800SEric Dumazet int tcp_filter(struct sock *sk, struct sk_buff *skb);
12071da177e4SLinus Torvalds 
12081da177e4SLinus Torvalds #undef STATE_TRACE
12091da177e4SLinus Torvalds 
12101da177e4SLinus Torvalds #ifdef STATE_TRACE
12111da177e4SLinus Torvalds static const char *statename[] = {
12121da177e4SLinus Torvalds 	"Unused", "Established", "Syn Sent", "Syn Recv",
12131da177e4SLinus Torvalds 	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
12141da177e4SLinus Torvalds 	"Close Wait", "Last ACK", "Listen", "Closing"
12151da177e4SLinus Torvalds };
12161da177e4SLinus Torvalds #endif
12175c9f3023SJoe Perches void tcp_set_state(struct sock *sk, int state);
12181da177e4SLinus Torvalds 
12195c9f3023SJoe Perches void tcp_done(struct sock *sk);
12201da177e4SLinus Torvalds 
1221c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err);
1222c1e64e29SLorenzo Colitti 
122340efc6faSStephen Hemminger static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
12241da177e4SLinus Torvalds {
12251da177e4SLinus Torvalds 	rx_opt->dsack = 0;
12261da177e4SLinus Torvalds 	rx_opt->num_sacks = 0;
12271da177e4SLinus Torvalds }
12281da177e4SLinus Torvalds 
12295c9f3023SJoe Perches u32 tcp_default_init_rwnd(u32 mss);
12306f021c62SEric Dumazet void tcp_cwnd_restart(struct sock *sk, s32 delta);
12316f021c62SEric Dumazet 
12326f021c62SEric Dumazet static inline void tcp_slow_start_after_idle_check(struct sock *sk)
12336f021c62SEric Dumazet {
12346f021c62SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
12356f021c62SEric Dumazet 	s32 delta;
12366f021c62SEric Dumazet 
12376f021c62SEric Dumazet 	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
12386f021c62SEric Dumazet 		return;
12396f021c62SEric Dumazet 	delta = tcp_time_stamp - tp->lsndtime;
12406f021c62SEric Dumazet 	if (delta > inet_csk(sk)->icsk_rto)
12416f021c62SEric Dumazet 		tcp_cwnd_restart(sk, delta);
12426f021c62SEric Dumazet }
124385f16525SYuchung Cheng 
12441da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer. */
12455c9f3023SJoe Perches void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
12465c9f3023SJoe Perches 			       __u32 *window_clamp, int wscale_ok,
12475c9f3023SJoe Perches 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
12481da177e4SLinus Torvalds 
12491da177e4SLinus Torvalds static inline int tcp_win_from_space(int space)
12501da177e4SLinus Torvalds {
12511da177e4SLinus Torvalds 	return sysctl_tcp_adv_win_scale <= 0 ?
12521da177e4SLinus Torvalds 		(space >> (-sysctl_tcp_adv_win_scale)) :
12531da177e4SLinus Torvalds 		space - (space >> sysctl_tcp_adv_win_scale);
12541da177e4SLinus Torvalds }
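
/* Example: with sysctl_tcp_adv_win_scale = 1, half of the buffer is
 * advertised as window (space - space/2); with -2, only a quarter
 * (space >> 2) is.
 */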
12551da177e4SLinus Torvalds 
12561da177e4SLinus Torvalds /* Note: caller must be prepared to deal with negative returns */
12571da177e4SLinus Torvalds static inline int tcp_space(const struct sock *sk)
12581da177e4SLinus Torvalds {
12591da177e4SLinus Torvalds 	return tcp_win_from_space(sk->sk_rcvbuf -
12601da177e4SLinus Torvalds 				  atomic_read(&sk->sk_rmem_alloc));
12611da177e4SLinus Torvalds }
12621da177e4SLinus Torvalds 
12631da177e4SLinus Torvalds static inline int tcp_full_space(const struct sock *sk)
12641da177e4SLinus Torvalds {
12651da177e4SLinus Torvalds 	return tcp_win_from_space(sk->sk_rcvbuf);
12661da177e4SLinus Torvalds }
12671da177e4SLinus Torvalds 
1268843f4a55SYuchung Cheng extern void tcp_openreq_init_rwin(struct request_sock *req,
1269b1964b5fSEric Dumazet 				  const struct sock *sk_listener,
1270b1964b5fSEric Dumazet 				  const struct dst_entry *dst);
1271843f4a55SYuchung Cheng 
12725c9f3023SJoe Perches void tcp_enter_memory_pressure(struct sock *sk);
12731da177e4SLinus Torvalds 
12741da177e4SLinus Torvalds static inline int keepalive_intvl_when(const struct tcp_sock *tp)
12751da177e4SLinus Torvalds {
1276b840d15dSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
1277b840d15dSNikolay Borisov 
1278b840d15dSNikolay Borisov 	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
12791da177e4SLinus Torvalds }
12801da177e4SLinus Torvalds 
12811da177e4SLinus Torvalds static inline int keepalive_time_when(const struct tcp_sock *tp)
12821da177e4SLinus Torvalds {
128313b287e8SNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
128413b287e8SNikolay Borisov 
128513b287e8SNikolay Borisov 	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
12861da177e4SLinus Torvalds }
12871da177e4SLinus Torvalds 
1288df19a626SEric Dumazet static inline int keepalive_probes(const struct tcp_sock *tp)
1289df19a626SEric Dumazet {
12909bd6861bSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
12919bd6861bSNikolay Borisov 
12929bd6861bSNikolay Borisov 	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
1293df19a626SEric Dumazet }
1294df19a626SEric Dumazet 
12956c37e5deSFlavio Leitner static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
12966c37e5deSFlavio Leitner {
12976c37e5deSFlavio Leitner 	const struct inet_connection_sock *icsk = &tp->inet_conn;
12986c37e5deSFlavio Leitner 
12996c37e5deSFlavio Leitner 	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
13006c37e5deSFlavio Leitner 			  tcp_time_stamp - tp->rcv_tstamp);
13016c37e5deSFlavio Leitner }
13026c37e5deSFlavio Leitner 
1303463c84b9SArnaldo Carvalho de Melo static inline int tcp_fin_time(const struct sock *sk)
13041da177e4SLinus Torvalds {
13051e579caaSNikolay Borisov 	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
1306463c84b9SArnaldo Carvalho de Melo 	const int rto = inet_csk(sk)->icsk_rto;
13071da177e4SLinus Torvalds 
1308463c84b9SArnaldo Carvalho de Melo 	if (fin_timeout < (rto << 2) - (rto >> 1))
1309463c84b9SArnaldo Carvalho de Melo 		fin_timeout = (rto << 2) - (rto >> 1);
13101da177e4SLinus Torvalds 
13111da177e4SLinus Torvalds 	return fin_timeout;
13121da177e4SLinus Torvalds }
13131da177e4SLinus Torvalds 
1314a2a385d6SEric Dumazet static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1315c887e6d2SIlpo Järvinen 				  int paws_win)
13161da177e4SLinus Torvalds {
1317c887e6d2SIlpo Järvinen 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1318a2a385d6SEric Dumazet 		return true;
1319c887e6d2SIlpo Järvinen 	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1320a2a385d6SEric Dumazet 		return true;
1321bc2ce894SEric Dumazet 	/*
1322bc2ce894SEric Dumazet 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1323bc2ce894SEric Dumazet 	 * while subsequent TCP messages carry valid values. Ignore a 0 value,
1324bc2ce894SEric Dumazet 	 * or else a 'negative' tsval might forbid us from accepting their packets.
1325bc2ce894SEric Dumazet 	 */
1326bc2ce894SEric Dumazet 	if (!rx_opt->ts_recent)
1327a2a385d6SEric Dumazet 		return true;
1328a2a385d6SEric Dumazet 	return false;
1329c887e6d2SIlpo Järvinen }
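
/* Example (paws_win = 0): with ts_recent = 100, an arriving rcv_tsval >= 100
 * makes the (s32) difference above non-positive, so the check passes;
 * rcv_tsval = 99 gives +1 and only passes via the 24-day or ts_recent == 0
 * escapes.
 */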
1330c887e6d2SIlpo Järvinen 
1331a2a385d6SEric Dumazet static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1332c887e6d2SIlpo Järvinen 				   int rst)
1333c887e6d2SIlpo Järvinen {
1334c887e6d2SIlpo Järvinen 	if (tcp_paws_check(rx_opt, 0))
1335a2a385d6SEric Dumazet 		return false;
13361da177e4SLinus Torvalds 
13371da177e4SLinus Torvalds 	/* RST segments are not recommended to carry a timestamp,
13381da177e4SLinus Torvalds 	   and, if they do, it is recommended to ignore PAWS because
13391da177e4SLinus Torvalds 	   "their cleanup function should take precedence over timestamps."
13401da177e4SLinus Torvalds 	   Certainly, that is a mistake. One must understand the reasons
13411da177e4SLinus Torvalds 	   for this constraint before relaxing it: if the peer reboots, its
13421da177e4SLinus Torvalds 	   clock may go out of sync and half-open connections will never be reset.
13431da177e4SLinus Torvalds 	   Actually, the problem would not exist if all
13441da177e4SLinus Torvalds 	   implementations followed the draft about maintaining the clock
13451da177e4SLinus Torvalds 	   across reboots. Linux-2.2 DOES NOT!
13461da177e4SLinus Torvalds 
13471da177e4SLinus Torvalds 	   However, we can relax the time bounds for RST segments to MSL.
13481da177e4SLinus Torvalds 	 */
13499d729f72SJames Morris 	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1350a2a385d6SEric Dumazet 		return false;
1351a2a385d6SEric Dumazet 	return true;
13521da177e4SLinus Torvalds }
13531da177e4SLinus Torvalds 
13547970ddc8SEric Dumazet bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
13557970ddc8SEric Dumazet 			  int mib_idx, u32 *last_oow_ack_time);
1356032ee423SNeal Cardwell 
1357a9c19329SPavel Emelyanov static inline void tcp_mib_init(struct net *net)
13581da177e4SLinus Torvalds {
13591da177e4SLinus Torvalds 	/* See RFC 2012 */
13606aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
13616aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
13626aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
13636aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
13641da177e4SLinus Torvalds }
13651da177e4SLinus Torvalds 
13666a438bbeSStephen Hemminger /* from STCP */
1367ef9da47cSIlpo Järvinen static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
13680800f170SDavid S. Miller {
13696a438bbeSStephen Hemminger 	tp->lost_skb_hint = NULL;
1370ef9da47cSIlpo Järvinen }
1371ef9da47cSIlpo Järvinen 
1372ef9da47cSIlpo Järvinen static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1373ef9da47cSIlpo Järvinen {
1374ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
13756a438bbeSStephen Hemminger 	tp->retransmit_skb_hint = NULL;
1376b7689205SIlpo Järvinen }
1377b7689205SIlpo Järvinen 
1378a915da9bSEric Dumazet union tcp_md5_addr {
1379a915da9bSEric Dumazet 	struct in_addr  a4;
1380a915da9bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1381a915da9bSEric Dumazet 	struct in6_addr	a6;
1382a915da9bSEric Dumazet #endif
1383a915da9bSEric Dumazet };
1384a915da9bSEric Dumazet 
1385cfb6eeb4SYOSHIFUJI Hideaki /* - key database */
1386cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_key {
1387a915da9bSEric Dumazet 	struct hlist_node	node;
1388cfb6eeb4SYOSHIFUJI Hideaki 	u8			keylen;
1389a915da9bSEric Dumazet 	u8			family; /* AF_INET or AF_INET6 */
1390a915da9bSEric Dumazet 	union tcp_md5_addr	addr;
1391a915da9bSEric Dumazet 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1392a915da9bSEric Dumazet 	struct rcu_head		rcu;
1393cfb6eeb4SYOSHIFUJI Hideaki };
1394cfb6eeb4SYOSHIFUJI Hideaki 
1395cfb6eeb4SYOSHIFUJI Hideaki /* - sock block */
1396cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_info {
1397a915da9bSEric Dumazet 	struct hlist_head	head;
1398a8afca03SEric Dumazet 	struct rcu_head		rcu;
1399cfb6eeb4SYOSHIFUJI Hideaki };
1400cfb6eeb4SYOSHIFUJI Hideaki 
1401cfb6eeb4SYOSHIFUJI Hideaki /* - pseudo header */
1402cfb6eeb4SYOSHIFUJI Hideaki struct tcp4_pseudohdr {
1403cfb6eeb4SYOSHIFUJI Hideaki 	__be32		saddr;
1404cfb6eeb4SYOSHIFUJI Hideaki 	__be32		daddr;
1405cfb6eeb4SYOSHIFUJI Hideaki 	__u8		pad;
1406cfb6eeb4SYOSHIFUJI Hideaki 	__u8		protocol;
1407cfb6eeb4SYOSHIFUJI Hideaki 	__be16		len;
1408cfb6eeb4SYOSHIFUJI Hideaki };
1409cfb6eeb4SYOSHIFUJI Hideaki 
1410cfb6eeb4SYOSHIFUJI Hideaki struct tcp6_pseudohdr {
1411cfb6eeb4SYOSHIFUJI Hideaki 	struct in6_addr	saddr;
1412cfb6eeb4SYOSHIFUJI Hideaki 	struct in6_addr daddr;
1413cfb6eeb4SYOSHIFUJI Hideaki 	__be32		len;
1414cfb6eeb4SYOSHIFUJI Hideaki 	__be32		protocol;	/* including padding */
1415cfb6eeb4SYOSHIFUJI Hideaki };
1416cfb6eeb4SYOSHIFUJI Hideaki 
1417cfb6eeb4SYOSHIFUJI Hideaki union tcp_md5sum_block {
1418cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp4_pseudohdr ip4;
1419dfd56b8bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1420cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp6_pseudohdr ip6;
1421cfb6eeb4SYOSHIFUJI Hideaki #endif
1422cfb6eeb4SYOSHIFUJI Hideaki };
1423cfb6eeb4SYOSHIFUJI Hideaki 
1424cfb6eeb4SYOSHIFUJI Hideaki /* - pool: digest algorithm, hash description and scratch buffer */
1425cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_pool {
1426cf80e0e4SHerbert Xu 	struct ahash_request	*md5_req;
142719689e38SEric Dumazet 	void			*scratch;
1428cfb6eeb4SYOSHIFUJI Hideaki };
1429cfb6eeb4SYOSHIFUJI Hideaki 
1430cfb6eeb4SYOSHIFUJI Hideaki /* - functions */
143139f8e58eSEric Dumazet int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
143239f8e58eSEric Dumazet 			const struct sock *sk, const struct sk_buff *skb);
14335c9f3023SJoe Perches int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
14345c9f3023SJoe Perches 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
14355c9f3023SJoe Perches int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1436a915da9bSEric Dumazet 		   int family);
1437b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1438fd3a154aSEric Dumazet 					 const struct sock *addr_sk);
1439cfb6eeb4SYOSHIFUJI Hideaki 
14409501f972SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1441b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
14425c9f3023SJoe Perches 					 const union tcp_md5_addr *addr,
14435c9f3023SJoe Perches 					 int family);
1444a915da9bSEric Dumazet #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
14459501f972SYOSHIFUJI Hideaki #else
1446b83e3debSEric Dumazet static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1447a915da9bSEric Dumazet 					 const union tcp_md5_addr *addr,
1448a915da9bSEric Dumazet 					 int family)
1449a915da9bSEric Dumazet {
1450a915da9bSEric Dumazet 	return NULL;
1451a915da9bSEric Dumazet }
14529501f972SYOSHIFUJI Hideaki #define tcp_twsk_md5_key(twsk)	NULL
14539501f972SYOSHIFUJI Hideaki #endif
14549501f972SYOSHIFUJI Hideaki 
14555c9f3023SJoe Perches bool tcp_alloc_md5sig_pool(void);
1456cfb6eeb4SYOSHIFUJI Hideaki 
14575c9f3023SJoe Perches struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
145871cea17eSEric Dumazet static inline void tcp_put_md5sig_pool(void)
145971cea17eSEric Dumazet {
146071cea17eSEric Dumazet 	local_bh_enable();
146171cea17eSEric Dumazet }
146235790c04SEric Dumazet 
14635c9f3023SJoe Perches int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
146495c96174SEric Dumazet 			  unsigned int header_len);
14655c9f3023SJoe Perches int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1466cf533ea5SEric Dumazet 		     const struct tcp_md5sig_key *key);
1467cfb6eeb4SYOSHIFUJI Hideaki 
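/* Hypothetical usage sketch of the md5 pool API above, modeled on what a
 * caller like tcp_v4_md5_hash_skb() is expected to do: grab the per-cpu
 * pool (BH stays disabled), drive the ahash request, release the pool.
 * Assumes <crypto/hash.h>; the function name is illustrative only and
 * error handling is abbreviated.
 */
#if 0	/* example only */
static int tcp_md5_hash_example(char *md5_hash,
				const struct tcp_md5sig_key *key,
				const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();

	if (!hp)
		return 1;
	if (crypto_ahash_init(hp->md5_req))
		goto clear_hash;
	/* a real caller also hashes the pseudo-header and TCP header first;
	 * here we only hash the payload past the TCP header
	 */
	if (tcp_md5_hash_skb_data(hp, skb, tcp_hdrlen(skb)))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(hp->md5_req, NULL, md5_hash, 0);
	if (crypto_ahash_final(hp->md5_req))
		goto clear_hash;
	tcp_put_md5sig_pool();
	return 0;
clear_hash:
	tcp_put_md5sig_pool();
	return 1;
}
#endif
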
146810467163SJerry Chu /* From tcp_fastopen.c */
14695c9f3023SJoe Perches void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
14705c9f3023SJoe Perches 			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
14715c9f3023SJoe Perches 			    unsigned long *last_syn_loss);
14725c9f3023SJoe Perches void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
14732646c831SDaniel Lee 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
14742646c831SDaniel Lee 			    u16 try_exp);
1475783237e8SYuchung Cheng struct tcp_fastopen_request {
1476783237e8SYuchung Cheng 	/* Fast Open cookie. Size 0 means a cookie request */
1477783237e8SYuchung Cheng 	struct tcp_fastopen_cookie	cookie;
1478783237e8SYuchung Cheng 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1479f5ddcbbbSEric Dumazet 	size_t				size;
1480f5ddcbbbSEric Dumazet 	int				copied;	/* queued in tcp_connect() */
1481783237e8SYuchung Cheng };
1482783237e8SYuchung Cheng void tcp_free_fastopen_req(struct tcp_sock *tp);
1483783237e8SYuchung Cheng 
148410467163SJerry Chu extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
148510467163SJerry Chu int tcp_fastopen_reset_cipher(void *key, unsigned int len);
148661d2bcaeSEric Dumazet void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
14877c85af88SEric Dumazet struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
14885b7ed089SYuchung Cheng 			      struct request_sock *req,
1489843f4a55SYuchung Cheng 			      struct tcp_fastopen_cookie *foc,
1490843f4a55SYuchung Cheng 			      struct dst_entry *dst);
1491222e83d2SHannes Frederic Sowa void tcp_fastopen_init_key_once(bool publish);
1492065263f4SWei Wang bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1493065263f4SWei Wang 			     struct tcp_fastopen_cookie *cookie);
149419f6d3f3SWei Wang bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
149510467163SJerry Chu #define TCP_FASTOPEN_KEY_LENGTH 16
149610467163SJerry Chu 
149710467163SJerry Chu /* Fastopen key context */
149810467163SJerry Chu struct tcp_fastopen_context {
14997ae8639cSEric Dumazet 	struct crypto_cipher	*tfm;
150010467163SJerry Chu 	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
150110467163SJerry Chu 	struct rcu_head		rcu;
150210467163SJerry Chu };
150310467163SJerry Chu 
150405b055e8SFrancis Yan /* Latencies incurred by various limits for a sender. They are
150505b055e8SFrancis Yan  * chronograph-like stats that are mutually exclusive.
150605b055e8SFrancis Yan  */
150705b055e8SFrancis Yan enum tcp_chrono {
150805b055e8SFrancis Yan 	TCP_CHRONO_UNSPEC,
150905b055e8SFrancis Yan 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
151005b055e8SFrancis Yan 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
151105b055e8SFrancis Yan 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
151205b055e8SFrancis Yan 	__TCP_CHRONO_MAX,
151305b055e8SFrancis Yan };
151405b055e8SFrancis Yan 
151505b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
151605b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
151705b055e8SFrancis Yan 
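/* Hypothetical instrumentation sketch: a sender path could bracket a
 * send-buffer stall with the hooks above. The helper name is illustrative,
 * not a kernel API.
 */
#if 0	/* example only */
static inline void tcp_note_sndbuf_stall(struct sock *sk, bool stalled)
{
	if (stalled)
		tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
	else
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
}
#endif
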
1518fe067e8aSDavid S. Miller /* write queue abstraction */
1519fe067e8aSDavid S. Miller static inline void tcp_write_queue_purge(struct sock *sk)
1520fe067e8aSDavid S. Miller {
1521fe067e8aSDavid S. Miller 	struct sk_buff *skb;
1522fe067e8aSDavid S. Miller 
15230f87230dSFrancis Yan 	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1524fe067e8aSDavid S. Miller 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
15253ab224beSHideo Aoki 		sk_wmem_free_skb(sk, skb);
15263ab224beSHideo Aoki 	sk_mem_reclaim(sk);
15278818a9d8SIlpo Järvinen 	tcp_clear_all_retrans_hints(tcp_sk(sk));
1528fe067e8aSDavid S. Miller }
1529fe067e8aSDavid S. Miller 
1530cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1531fe067e8aSDavid S. Miller {
1532cd07a8eaSDavid S. Miller 	return skb_peek(&sk->sk_write_queue);
1533fe067e8aSDavid S. Miller }
1534fe067e8aSDavid S. Miller 
1535cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1536fe067e8aSDavid S. Miller {
1537cd07a8eaSDavid S. Miller 	return skb_peek_tail(&sk->sk_write_queue);
1538fe067e8aSDavid S. Miller }
1539fe067e8aSDavid S. Miller 
1540cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1541cf533ea5SEric Dumazet 						   const struct sk_buff *skb)
1542fe067e8aSDavid S. Miller {
1543cd07a8eaSDavid S. Miller 	return skb_queue_next(&sk->sk_write_queue, skb);
1544fe067e8aSDavid S. Miller }
1545fe067e8aSDavid S. Miller 
1546cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1547cf533ea5SEric Dumazet 						   const struct sk_buff *skb)
1548832d11c5SIlpo Järvinen {
1549832d11c5SIlpo Järvinen 	return skb_queue_prev(&sk->sk_write_queue, skb);
1550832d11c5SIlpo Järvinen }
1551832d11c5SIlpo Järvinen 
1552fe067e8aSDavid S. Miller #define tcp_for_write_queue(skb, sk)					\
1553cd07a8eaSDavid S. Miller 	skb_queue_walk(&(sk)->sk_write_queue, skb)
1554fe067e8aSDavid S. Miller 
1555fe067e8aSDavid S. Miller #define tcp_for_write_queue_from(skb, sk)				\
1556cd07a8eaSDavid S. Miller 	skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1557fe067e8aSDavid S. Miller 
1558234b6860SIlpo Järvinen #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1559cd07a8eaSDavid S. Miller 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1560234b6860SIlpo Järvinen 
1561cf533ea5SEric Dumazet static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1562fe067e8aSDavid S. Miller {
1563fe067e8aSDavid S. Miller 	return sk->sk_send_head;
1564fe067e8aSDavid S. Miller }
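
/* Hypothetical helper built on the accessors above: count the skbs from the
 * current send head (first unsent skb) to the queue tail. Illustrative only,
 * not a kernel API.
 */
#if 0	/* example only */
static inline unsigned int tcp_unsent_skbs(struct sock *sk)
{
	struct sk_buff *skb = tcp_send_head(sk);
	unsigned int n = 0;

	if (!skb)
		return 0;
	tcp_for_write_queue_from(skb, sk)
		n++;
	return n;
}
#endif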
1565fe067e8aSDavid S. Miller 
1566cd07a8eaSDavid S. Miller static inline bool tcp_skb_is_last(const struct sock *sk,
1567cd07a8eaSDavid S. Miller 				   const struct sk_buff *skb)
1568cd07a8eaSDavid S. Miller {
1569cd07a8eaSDavid S. Miller 	return skb_queue_is_last(&sk->sk_write_queue, skb);
1570cd07a8eaSDavid S. Miller }
1571cd07a8eaSDavid S. Miller 
1572cf533ea5SEric Dumazet static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1573fe067e8aSDavid S. Miller {
1574cd07a8eaSDavid S. Miller 	if (tcp_skb_is_last(sk, skb))
1575fe067e8aSDavid S. Miller 		sk->sk_send_head = NULL;
1576cd07a8eaSDavid S. Miller 	else
1577cd07a8eaSDavid S. Miller 		sk->sk_send_head = tcp_write_queue_next(sk, skb);
1578fe067e8aSDavid S. Miller }
1579fe067e8aSDavid S. Miller 
1580fe067e8aSDavid S. Miller static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1581fe067e8aSDavid S. Miller {
15820f87230dSFrancis Yan 	if (sk->sk_send_head == skb_unlinked) {
1583fe067e8aSDavid S. Miller 		sk->sk_send_head = NULL;
15840f87230dSFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
15850f87230dSFrancis Yan 	}
1586bb1fcecaSEric Dumazet 	if (tcp_sk(sk)->highest_sack == skb_unlinked)
1587bb1fcecaSEric Dumazet 		tcp_sk(sk)->highest_sack = NULL;
1588fe067e8aSDavid S. Miller }
1589fe067e8aSDavid S. Miller 
1590fe067e8aSDavid S. Miller static inline void tcp_init_send_head(struct sock *sk)
1591fe067e8aSDavid S. Miller {
1592fe067e8aSDavid S. Miller 	sk->sk_send_head = NULL;
1593fe067e8aSDavid S. Miller }
1594fe067e8aSDavid S. Miller 
1595fe067e8aSDavid S. Miller static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1596fe067e8aSDavid S. Miller {
1597fe067e8aSDavid S. Miller 	__skb_queue_tail(&sk->sk_write_queue, skb);
1598fe067e8aSDavid S. Miller }
1599fe067e8aSDavid S. Miller 
1600fe067e8aSDavid S. Miller static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1601fe067e8aSDavid S. Miller {
1602fe067e8aSDavid S. Miller 	__tcp_add_write_queue_tail(sk, skb);
1603fe067e8aSDavid S. Miller 
1604fe067e8aSDavid S. Miller 	/* Queue it, remembering where we must start sending. */
16056859d494SIlpo Järvinen 	if (sk->sk_send_head == NULL) {
1606fe067e8aSDavid S. Miller 		sk->sk_send_head = skb;
16070f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
16086859d494SIlpo Järvinen 
16096859d494SIlpo Järvinen 		if (tcp_sk(sk)->highest_sack == NULL)
16106859d494SIlpo Järvinen 			tcp_sk(sk)->highest_sack = skb;
16116859d494SIlpo Järvinen 	}
1612fe067e8aSDavid S. Miller }
1613fe067e8aSDavid S. Miller 
1614fe067e8aSDavid S. Miller static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1615fe067e8aSDavid S. Miller {
1616fe067e8aSDavid S. Miller 	__skb_queue_head(&sk->sk_write_queue, skb);
1617fe067e8aSDavid S. Miller }
1618fe067e8aSDavid S. Miller 
1619fe067e8aSDavid S. Miller /* Insert buff after skb on the write queue of sk. */
1620fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1621fe067e8aSDavid S. Miller 						struct sk_buff *buff,
1622fe067e8aSDavid S. Miller 						struct sock *sk)
1623fe067e8aSDavid S. Miller {
16247de6c033SGerrit Renker 	__skb_queue_after(&sk->sk_write_queue, skb, buff);
1625fe067e8aSDavid S. Miller }
1626fe067e8aSDavid S. Miller 
162743f59c89SDavid S. Miller /* Insert new before skb on the write queue of sk. */
1628fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1629fe067e8aSDavid S. Miller 						  struct sk_buff *skb,
1630fe067e8aSDavid S. Miller 						  struct sock *sk)
1631fe067e8aSDavid S. Miller {
163243f59c89SDavid S. Miller 	__skb_queue_before(&sk->sk_write_queue, skb, new);
16336e421410SIlpo Järvinen 
16346e421410SIlpo Järvinen 	if (sk->sk_send_head == skb)
16356e421410SIlpo Järvinen 		sk->sk_send_head = new;
1636fe067e8aSDavid S. Miller }
1637fe067e8aSDavid S. Miller 
1638fe067e8aSDavid S. Miller static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1639fe067e8aSDavid S. Miller {
1640fe067e8aSDavid S. Miller 	__skb_unlink(skb, &sk->sk_write_queue);
1641fe067e8aSDavid S. Miller }
1642fe067e8aSDavid S. Miller 
1643a2a385d6SEric Dumazet static inline bool tcp_write_queue_empty(struct sock *sk)
1644fe067e8aSDavid S. Miller {
1645fe067e8aSDavid S. Miller 	return skb_queue_empty(&sk->sk_write_queue);
1646fe067e8aSDavid S. Miller }
1647fe067e8aSDavid S. Miller 
164812d50c46SKrishna Kumar static inline void tcp_push_pending_frames(struct sock *sk)
164912d50c46SKrishna Kumar {
165012d50c46SKrishna Kumar 	if (tcp_send_head(sk)) {
165112d50c46SKrishna Kumar 		struct tcp_sock *tp = tcp_sk(sk);
165212d50c46SKrishna Kumar 
165312d50c46SKrishna Kumar 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
165412d50c46SKrishna Kumar 	}
165512d50c46SKrishna Kumar }
165612d50c46SKrishna Kumar 
1657ecb97192SNeal Cardwell /* Start sequence of the skb just after the highest skb with SACKed
1658ecb97192SNeal Cardwell  * bit, valid only if sacked_out > 0 or when the caller has ensured
1659ecb97192SNeal Cardwell  * validity itself.
1660a47e5a98SIlpo Järvinen  */
1661a47e5a98SIlpo Järvinen static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1662a47e5a98SIlpo Järvinen {
1663a47e5a98SIlpo Järvinen 	if (!tp->sacked_out)
1664a47e5a98SIlpo Järvinen 		return tp->snd_una;
16656859d494SIlpo Järvinen 
16666859d494SIlpo Järvinen 	if (tp->highest_sack == NULL)
16676859d494SIlpo Järvinen 		return tp->snd_nxt;
16686859d494SIlpo Järvinen 
1669a47e5a98SIlpo Järvinen 	return TCP_SKB_CB(tp->highest_sack)->seq;
1670a47e5a98SIlpo Järvinen }
1671a47e5a98SIlpo Järvinen 
16726859d494SIlpo Järvinen static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
16736859d494SIlpo Järvinen {
16746859d494SIlpo Järvinen 	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
16756859d494SIlpo Järvinen 						tcp_write_queue_next(sk, skb);
16766859d494SIlpo Järvinen }
16776859d494SIlpo Järvinen 
16786859d494SIlpo Järvinen static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
16796859d494SIlpo Järvinen {
16806859d494SIlpo Järvinen 	return tcp_sk(sk)->highest_sack;
16816859d494SIlpo Järvinen }
16826859d494SIlpo Järvinen 
16836859d494SIlpo Järvinen static inline void tcp_highest_sack_reset(struct sock *sk)
16846859d494SIlpo Järvinen {
16856859d494SIlpo Järvinen 	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
16866859d494SIlpo Järvinen }
16876859d494SIlpo Järvinen 
16886859d494SIlpo Järvinen /* Called when old skb is about to be deleted (to be combined with new skb) */
16896859d494SIlpo Järvinen static inline void tcp_highest_sack_combine(struct sock *sk,
16906859d494SIlpo Järvinen 					    struct sk_buff *old,
16916859d494SIlpo Järvinen 					    struct sk_buff *new)
16926859d494SIlpo Järvinen {
16936859d494SIlpo Järvinen 	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
16946859d494SIlpo Järvinen 		tcp_sk(sk)->highest_sack = new;
16956859d494SIlpo Järvinen }
16966859d494SIlpo Järvinen 
1697b1f0a0e9SFlorian Westphal /* This helper checks if socket has IP_TRANSPARENT set */
1698b1f0a0e9SFlorian Westphal static inline bool inet_sk_transparent(const struct sock *sk)
1699b1f0a0e9SFlorian Westphal {
1700b1f0a0e9SFlorian Westphal 	switch (sk->sk_state) {
1701b1f0a0e9SFlorian Westphal 	case TCP_TIME_WAIT:
1702b1f0a0e9SFlorian Westphal 		return inet_twsk(sk)->tw_transparent;
1703b1f0a0e9SFlorian Westphal 	case TCP_NEW_SYN_RECV:
1704b1f0a0e9SFlorian Westphal 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1705b1f0a0e9SFlorian Westphal 	}
1706b1f0a0e9SFlorian Westphal 	return inet_sk(sk)->transparent;
1707b1f0a0e9SFlorian Westphal }
1708b1f0a0e9SFlorian Westphal 
17095aa4b32fSAndreas Petlund /* Determines whether this is a thin stream (which may suffer from
17105aa4b32fSAndreas Petlund  * increased latency). Used to trigger latency-reducing mechanisms.
17115aa4b32fSAndreas Petlund  */
1712a2a385d6SEric Dumazet static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
17135aa4b32fSAndreas Petlund {
17145aa4b32fSAndreas Petlund 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
17155aa4b32fSAndreas Petlund }
17165aa4b32fSAndreas Petlund 
17171da177e4SLinus Torvalds /* /proc */
17181da177e4SLinus Torvalds enum tcp_seq_states {
17191da177e4SLinus Torvalds 	TCP_SEQ_STATE_LISTENING,
17201da177e4SLinus Torvalds 	TCP_SEQ_STATE_ESTABLISHED,
17211da177e4SLinus Torvalds };
17221da177e4SLinus Torvalds 
172373cb88ecSArjan van de Ven int tcp_seq_open(struct inode *inode, struct file *file);
172473cb88ecSArjan van de Ven 
17251da177e4SLinus Torvalds struct tcp_seq_afinfo {
17261da177e4SLinus Torvalds 	char				*name;
17271da177e4SLinus Torvalds 	sa_family_t			family;
172873cb88ecSArjan van de Ven 	const struct file_operations	*seq_fops;
17299427c4b3SDenis V. Lunev 	struct seq_operations		seq_ops;
17301da177e4SLinus Torvalds };
17311da177e4SLinus Torvalds 
17321da177e4SLinus Torvalds struct tcp_iter_state {
1733a4146b1bSDenis V. Lunev 	struct seq_net_private	p;
17341da177e4SLinus Torvalds 	sa_family_t		family;
17351da177e4SLinus Torvalds 	enum tcp_seq_states	state;
17361da177e4SLinus Torvalds 	struct sock		*syn_wait_sk;
1737a7cb5a49SEric W. Biederman 	int			bucket, offset, sbucket, num;
1738a8b690f9STom Herbert 	loff_t			last_pos;
17391da177e4SLinus Torvalds };
17401da177e4SLinus Torvalds 
17415c9f3023SJoe Perches int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
17425c9f3023SJoe Perches void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
17431da177e4SLinus Torvalds 
174420380731SArnaldo Carvalho de Melo extern struct request_sock_ops tcp_request_sock_ops;
1745c6aefafbSGlenn Griffin extern struct request_sock_ops tcp6_request_sock_ops;
174620380731SArnaldo Carvalho de Melo 
17475c9f3023SJoe Perches void tcp_v4_destroy_sock(struct sock *sk);
174820380731SArnaldo Carvalho de Melo 
174928be6e07SEric Dumazet struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1750c8f44affSMichał Mirosław 				netdev_features_t features);
17515c9f3023SJoe Perches struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
17525c9f3023SJoe Perches int tcp_gro_complete(struct sk_buff *skb);
175328850dc7SDaniel Borkmann 
17545c9f3023SJoe Perches void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1755f4c50d99SHerbert Xu 
1756c9bee3b7SEric Dumazet static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1757c9bee3b7SEric Dumazet {
17584979f2d9SNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
17594979f2d9SNikolay Borisov 	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1760c9bee3b7SEric Dumazet }
1761c9bee3b7SEric Dumazet 
1762c9bee3b7SEric Dumazet static inline bool tcp_stream_memory_free(const struct sock *sk)
1763c9bee3b7SEric Dumazet {
1764c9bee3b7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1765c9bee3b7SEric Dumazet 	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1766c9bee3b7SEric Dumazet 
1767c9bee3b7SEric Dumazet 	return notsent_bytes < tcp_notsent_lowat(tp);
1768c9bee3b7SEric Dumazet }
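
/* Example (hypothetical numbers): with notsent_lowat = 16384, write_seq =
 * 64000 and snd_nxt = 56000, there are 8000 queued-but-unsent bytes, so the
 * stream is still considered writable; once 16384 or more bytes sit unsent,
 * it is not.
 */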
1769c9bee3b7SEric Dumazet 
177020380731SArnaldo Carvalho de Melo #ifdef CONFIG_PROC_FS
17715c9f3023SJoe Perches int tcp4_proc_init(void);
17725c9f3023SJoe Perches void tcp4_proc_exit(void);
177320380731SArnaldo Carvalho de Melo #endif
177420380731SArnaldo Carvalho de Melo 
1775ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
17761fb6f159SOctavian Purdila int tcp_conn_request(struct request_sock_ops *rsk_ops,
17771fb6f159SOctavian Purdila 		     const struct tcp_request_sock_ops *af_ops,
17781fb6f159SOctavian Purdila 		     struct sock *sk, struct sk_buff *skb);
17795db92c99SOctavian Purdila 
1780cfb6eeb4SYOSHIFUJI Hideaki /* TCP af-specific functions */
1781cfb6eeb4SYOSHIFUJI Hideaki struct tcp_sock_af_ops {
1782cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1783b83e3debSEric Dumazet 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
1784fd3a154aSEric Dumazet 						const struct sock *addr_sk);
1785cfb6eeb4SYOSHIFUJI Hideaki 	int		(*calc_md5_hash)(char *location,
178639f8e58eSEric Dumazet 					 const struct tcp_md5sig_key *md5,
1787318cf7aaSEric Dumazet 					 const struct sock *sk,
1788318cf7aaSEric Dumazet 					 const struct sk_buff *skb);
1789cfb6eeb4SYOSHIFUJI Hideaki 	int		(*md5_parse)(struct sock *sk,
1790cfb6eeb4SYOSHIFUJI Hideaki 				     char __user *optval,
1791cfb6eeb4SYOSHIFUJI Hideaki 				     int optlen);
1792cfb6eeb4SYOSHIFUJI Hideaki #endif
1793cfb6eeb4SYOSHIFUJI Hideaki };
1794cfb6eeb4SYOSHIFUJI Hideaki 
1795cfb6eeb4SYOSHIFUJI Hideaki struct tcp_request_sock_ops {
17962aec4a29SOctavian Purdila 	u16 mss_clamp;
1797cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1798b83e3debSEric Dumazet 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1799fd3a154aSEric Dumazet 						 const struct sock *addr_sk);
1800e3afe7b7SJohn Dykstra 	int		(*calc_md5_hash) (char *location,
180139f8e58eSEric Dumazet 					  const struct tcp_md5sig_key *md5,
1802318cf7aaSEric Dumazet 					  const struct sock *sk,
1803318cf7aaSEric Dumazet 					  const struct sk_buff *skb);
1804cfb6eeb4SYOSHIFUJI Hideaki #endif
1805b40cf18eSEric Dumazet 	void (*init_req)(struct request_sock *req,
1806b40cf18eSEric Dumazet 			 const struct sock *sk_listener,
180716bea70aSOctavian Purdila 			 struct sk_buff *skb);
1808fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES
18093f684b4bSEric Dumazet 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
1810fb7b37a7SOctavian Purdila 				 __u16 *mss);
1811fb7b37a7SOctavian Purdila #endif
1812f964629eSEric Dumazet 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1813d94e0417SOctavian Purdila 				       const struct request_sock *req,
1814d94e0417SOctavian Purdila 				       bool *strict);
1815a30aad50SAlexey Kodanev 	__u32 (*init_seq_tsoff)(const struct sk_buff *skb, u32 *tsoff);
18160f935dbeSEric Dumazet 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1817d6274bd8SOctavian Purdila 			   struct flowi *fl, struct request_sock *req,
1818dc6ef6beSEric Dumazet 			   struct tcp_fastopen_cookie *foc,
1819b3d05147SEric Dumazet 			   enum tcp_synack_type synack_type);
1820cfb6eeb4SYOSHIFUJI Hideaki };
1821cfb6eeb4SYOSHIFUJI Hideaki 
1822fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES
1823fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
18243f684b4bSEric Dumazet 					 const struct sock *sk, struct sk_buff *skb,
1825fb7b37a7SOctavian Purdila 					 __u16 *mss)
1826fb7b37a7SOctavian Purdila {
18273f684b4bSEric Dumazet 	tcp_synq_overflow(sk);
182802a1d6e7SEric Dumazet 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
18293f684b4bSEric Dumazet 	return ops->cookie_init_seq(skb, mss);
1830fb7b37a7SOctavian Purdila }
1831fb7b37a7SOctavian Purdila #else
1832fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
18333f684b4bSEric Dumazet 					 const struct sock *sk, struct sk_buff *skb,
1834fb7b37a7SOctavian Purdila 					 __u16 *mss)
1835fb7b37a7SOctavian Purdila {
1836fb7b37a7SOctavian Purdila 	return 0;
1837fb7b37a7SOctavian Purdila }
1838fb7b37a7SOctavian Purdila #endif
1839fb7b37a7SOctavian Purdila 
18405c9f3023SJoe Perches int tcpv4_offload_init(void);
184128850dc7SDaniel Borkmann 
18425c9f3023SJoe Perches void tcp_v4_init(void);
18435c9f3023SJoe Perches void tcp_init(void);
184420380731SArnaldo Carvalho de Melo 
1845659a8ad5SYuchung Cheng /* tcp_recovery.c */
1846deed7be7SYuchung Cheng extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now);
18471d0833dfSYuchung Cheng extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
1848deed7be7SYuchung Cheng 			     const struct skb_mstamp *xmit_time,
1849deed7be7SYuchung Cheng 			     const struct skb_mstamp *ack_time);
185057dde7f7SYuchung Cheng extern void tcp_rack_reo_timeout(struct sock *sk);
1851659a8ad5SYuchung Cheng 
1852e25f866fSCong Wang /*
1853e25f866fSCong Wang  * Save and compile the received IPv4 options and return a pointer to them
1854e25f866fSCong Wang  */
1855e25f866fSCong Wang static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
1856e25f866fSCong Wang {
1857e25f866fSCong Wang 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1858e25f866fSCong Wang 	struct ip_options_rcu *dopt = NULL;
1859e25f866fSCong Wang 
1860461b74c3SCong Wang 	if (opt->optlen) {
1861e25f866fSCong Wang 		int opt_size = sizeof(*dopt) + opt->optlen;
1862e25f866fSCong Wang 
1863e25f866fSCong Wang 		dopt = kmalloc(opt_size, GFP_ATOMIC);
1864e25f866fSCong Wang 		if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
1865e25f866fSCong Wang 			kfree(dopt);
1866e25f866fSCong Wang 			dopt = NULL;
1867e25f866fSCong Wang 		}
1868e25f866fSCong Wang 	}
1869e25f866fSCong Wang 	return dopt;
1870e25f866fSCong Wang }
1871e25f866fSCong Wang 
187298781965SEric Dumazet /* locally generated TCP pure ACKs have skb->truesize == 2
187398781965SEric Dumazet  * (check tcp_send_ack() in net/ipv4/tcp_output.c)
187498781965SEric Dumazet  * This is much faster than dissecting the packet to find out.
187598781965SEric Dumazet  * (Think of GRE encapsulations, IPv4, IPv6, ...)
187698781965SEric Dumazet  */
187798781965SEric Dumazet static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
187898781965SEric Dumazet {
187998781965SEric Dumazet 	return skb->truesize == 2;
188098781965SEric Dumazet }
188198781965SEric Dumazet 
188298781965SEric Dumazet static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
188398781965SEric Dumazet {
188498781965SEric Dumazet 	skb->truesize = 2;
188598781965SEric Dumazet }
188698781965SEric Dumazet 
1887473bd239STom Herbert static inline int tcp_inq(struct sock *sk)
1888473bd239STom Herbert {
1889473bd239STom Herbert 	struct tcp_sock *tp = tcp_sk(sk);
1890473bd239STom Herbert 	int answ;
1891473bd239STom Herbert 
1892473bd239STom Herbert 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1893473bd239STom Herbert 		answ = 0;
1894473bd239STom Herbert 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
1895473bd239STom Herbert 		   !tp->urg_data ||
1896473bd239STom Herbert 		   before(tp->urg_seq, tp->copied_seq) ||
1897473bd239STom Herbert 		   !before(tp->urg_seq, tp->rcv_nxt)) {
1898473bd239STom Herbert 
1899473bd239STom Herbert 		answ = tp->rcv_nxt - tp->copied_seq;
1900473bd239STom Herbert 
1901473bd239STom Herbert 		/* Subtract 1, if FIN was received */
1902473bd239STom Herbert 		if (answ && sock_flag(sk, SOCK_DONE))
1903473bd239STom Herbert 			answ--;
1904473bd239STom Herbert 	} else {
1905473bd239STom Herbert 		answ = tp->urg_seq - tp->copied_seq;
1906473bd239STom Herbert 	}
1907473bd239STom Herbert 
1908473bd239STom Herbert 	return answ;
1909473bd239STom Herbert }
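
/* Example: rcv_nxt = 15000, copied_seq = 12000, no urgent data and SOCK_DONE
 * clear: tcp_inq() reports 3000 bytes ready to read, which is what a SIOCINQ
 * ioctl on the socket would be expected to return.
 */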
1910473bd239STom Herbert 
191132035585STom Herbert int tcp_peek_len(struct socket *sock);
191232035585STom Herbert 
1913a44d6eacSMartin KaFai Lau static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1914a44d6eacSMartin KaFai Lau {
1915a44d6eacSMartin KaFai Lau 	u16 segs_in;
1916a44d6eacSMartin KaFai Lau 
1917a44d6eacSMartin KaFai Lau 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1918a44d6eacSMartin KaFai Lau 	tp->segs_in += segs_in;
1919a44d6eacSMartin KaFai Lau 	if (skb->len > tcp_hdrlen(skb))
1920a44d6eacSMartin KaFai Lau 		tp->data_segs_in += segs_in;
1921a44d6eacSMartin KaFai Lau }
1922a44d6eacSMartin KaFai Lau 
19239caad864SEric Dumazet /*
19249caad864SEric Dumazet  * TCP listen path runs lockless.
19259caad864SEric Dumazet  * We forced "struct sock" to be const qualified to make sure
19269caad864SEric Dumazet  * we don't modify one of its fields by mistake.
19279caad864SEric Dumazet  * Here, we increment sk_drops which is an atomic_t, so we can safely
19289caad864SEric Dumazet  * make sock writable again.
19299caad864SEric Dumazet  */
19309caad864SEric Dumazet static inline void tcp_listendrop(const struct sock *sk)
19319caad864SEric Dumazet {
19329caad864SEric Dumazet 	atomic_inc(&((struct sock *)sk)->sk_drops);
193302a1d6e7SEric Dumazet 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
19349caad864SEric Dumazet }
19359caad864SEric Dumazet 
19361da177e4SLinus Torvalds #endif	/* _TCP_H */
1937