xref: /linux/include/net/tcp.h (revision 0604475119de5f80dc051a5db055c6a2a75bd542)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Definitions for the TCP module.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	@(#)tcp.h	1.0.5	05/23/93
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  *		This program is free software; you can redistribute it and/or
141da177e4SLinus Torvalds  *		modify it under the terms of the GNU General Public License
151da177e4SLinus Torvalds  *		as published by the Free Software Foundation; either version
161da177e4SLinus Torvalds  *		2 of the License, or (at your option) any later version.
171da177e4SLinus Torvalds  */
181da177e4SLinus Torvalds #ifndef _TCP_H
191da177e4SLinus Torvalds #define _TCP_H
201da177e4SLinus Torvalds 
211da177e4SLinus Torvalds #define FASTRETRANS_DEBUG 1
221da177e4SLinus Torvalds 
231da177e4SLinus Torvalds #include <linux/list.h>
241da177e4SLinus Torvalds #include <linux/tcp.h>
25187f1882SPaul Gortmaker #include <linux/bug.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
271da177e4SLinus Torvalds #include <linux/cache.h>
281da177e4SLinus Torvalds #include <linux/percpu.h>
29fb286bb2SHerbert Xu #include <linux/skbuff.h>
30c6aefafbSGlenn Griffin #include <linux/cryptohash.h>
31435cf559SWilliam Allen Simpson #include <linux/kref.h>
32740b0f18SEric Dumazet #include <linux/ktime.h>
333f421baaSArnaldo Carvalho de Melo 
343f421baaSArnaldo Carvalho de Melo #include <net/inet_connection_sock.h>
35295ff7edSArnaldo Carvalho de Melo #include <net/inet_timewait_sock.h>
3677d8bf9cSArnaldo Carvalho de Melo #include <net/inet_hashtables.h>
371da177e4SLinus Torvalds #include <net/checksum.h>
382e6599cbSArnaldo Carvalho de Melo #include <net/request_sock.h>
391da177e4SLinus Torvalds #include <net/sock.h>
401da177e4SLinus Torvalds #include <net/snmp.h>
411da177e4SLinus Torvalds #include <net/ip.h>
42c752f073SArnaldo Carvalho de Melo #include <net/tcp_states.h>
43bdf1ee5dSIlpo Järvinen #include <net/inet_ecn.h>
440c266898SSatoru SATOH #include <net/dst.h>
45c752f073SArnaldo Carvalho de Melo 
461da177e4SLinus Torvalds #include <linux/seq_file.h>
47180d8cd9SGlauber Costa #include <linux/memcontrol.h>
481da177e4SLinus Torvalds 
490f7ff927SArnaldo Carvalho de Melo extern struct inet_hashinfo tcp_hashinfo;
501da177e4SLinus Torvalds 
51dd24c001SEric Dumazet extern struct percpu_counter tcp_orphan_count;
525c9f3023SJoe Perches void tcp_time_wait(struct sock *sk, int state, int timeo);
531da177e4SLinus Torvalds 
541da177e4SLinus Torvalds #define MAX_TCP_HEADER	(128 + MAX_HEADER)
5533ad798cSAdam Langley #define MAX_TCP_OPTION_SPACE 40
561da177e4SLinus Torvalds 
571da177e4SLinus Torvalds /*
581da177e4SLinus Torvalds  * Never offer a window over 32767 without using window scaling. Some
591da177e4SLinus Torvalds  * poor stacks do signed 16bit maths!
601da177e4SLinus Torvalds  */
611da177e4SLinus Torvalds #define MAX_TCP_WINDOW		32767U
621da177e4SLinus Torvalds 
631da177e4SLinus Torvalds /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
641da177e4SLinus Torvalds #define TCP_MIN_MSS		88U
651da177e4SLinus Torvalds 
665d424d5aSJohn Heffner /* The least MTU to use for probing */
67dcd8fb85SFan Du #define TCP_BASE_MSS		1024
685d424d5aSJohn Heffner 
6905cbc0dbSFan Du /* Probing interval, defaults to 10 minutes as per RFC4821 */
7005cbc0dbSFan Du #define TCP_PROBE_INTERVAL	600
7105cbc0dbSFan Du 
726b58e0a5SFan Du /* MTU search interval (in bytes) below which TCP MTU probing stops */
736b58e0a5SFan Du #define TCP_PROBE_THRESHOLD	8
746b58e0a5SFan Du 
751da177e4SLinus Torvalds /* After receiving this amount of duplicate ACKs fast retransmit starts. */
761da177e4SLinus Torvalds #define TCP_FASTRETRANS_THRESH 3
771da177e4SLinus Torvalds 
781da177e4SLinus Torvalds /* Maximal number of ACKs sent quickly to accelerate slow-start. */
791da177e4SLinus Torvalds #define TCP_MAX_QUICKACKS	16U
801da177e4SLinus Torvalds 
81589c49cbSGao Feng /* Maximal window scale value according to RFC1323 */
82589c49cbSGao Feng #define TCP_MAX_WSCALE		14U
83589c49cbSGao Feng 
841da177e4SLinus Torvalds /* urg_data states */
851da177e4SLinus Torvalds #define TCP_URG_VALID	0x0100
861da177e4SLinus Torvalds #define TCP_URG_NOTYET	0x0200
871da177e4SLinus Torvalds #define TCP_URG_READ	0x0400
881da177e4SLinus Torvalds 
891da177e4SLinus Torvalds #define TCP_RETR1	3	/*
901da177e4SLinus Torvalds 				 * This is how many retries it does before it
911da177e4SLinus Torvalds 				 * tries to figure out if the gateway is
921da177e4SLinus Torvalds 				 * down. Minimal RFC value is 3; it corresponds
931da177e4SLinus Torvalds 				 * to ~3sec-8min depending on RTO.
941da177e4SLinus Torvalds 				 */
951da177e4SLinus Torvalds 
961da177e4SLinus Torvalds #define TCP_RETR2	15	/*
971da177e4SLinus Torvalds 				 * This should take at least
981da177e4SLinus Torvalds 				 * 90 minutes to time out.
991da177e4SLinus Torvalds 				 * RFC1122 says that the limit is 100 sec.
1001da177e4SLinus Torvalds 				 * 15 is ~13-30min depending on RTO.
1011da177e4SLinus Torvalds 				 */
1021da177e4SLinus Torvalds 
1036c9ff979SAlex Bergmann #define TCP_SYN_RETRIES	 6	/* This is how many retries are done
1046c9ff979SAlex Bergmann 				 * when active opening a connection.
1056c9ff979SAlex Bergmann 				 * RFC1122 says the minimum retry MUST
1066c9ff979SAlex Bergmann 				 * be at least 180secs.  Nevertheless
1076c9ff979SAlex Bergmann 				 * this value is corresponding to
1086c9ff979SAlex Bergmann 				 * this value corresponds to
1096c9ff979SAlex Bergmann 				 * current initial RTO.
1106c9ff979SAlex Bergmann 				 */
1111da177e4SLinus Torvalds 
1126c9ff979SAlex Bergmann #define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
1136c9ff979SAlex Bergmann 				 * when passive opening a connection.
1146c9ff979SAlex Bergmann 				 * This corresponds to 31secs of
1156c9ff979SAlex Bergmann 				 * retransmission with the current
1166c9ff979SAlex Bergmann 				 * initial RTO.
1176c9ff979SAlex Bergmann 				 */
1181da177e4SLinus Torvalds 
1191da177e4SLinus Torvalds #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
1201da177e4SLinus Torvalds 				  * state, about 60 seconds	*/
1211da177e4SLinus Torvalds #define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
1221da177e4SLinus Torvalds                                  /* BSD style FIN_WAIT2 deadlock breaker.
1231da177e4SLinus Torvalds 				  * It used to be 3min; the new value is 60sec,
1241da177e4SLinus Torvalds 				  * to combine FIN-WAIT-2 timeout with
1251da177e4SLinus Torvalds 				  * TIME-WAIT timer.
1261da177e4SLinus Torvalds 				  */
1271da177e4SLinus Torvalds 
1281da177e4SLinus Torvalds #define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
1291da177e4SLinus Torvalds #if HZ >= 100
1301da177e4SLinus Torvalds #define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
1311da177e4SLinus Torvalds #define TCP_ATO_MIN	((unsigned)(HZ/25))
1321da177e4SLinus Torvalds #else
1331da177e4SLinus Torvalds #define TCP_DELACK_MIN	4U
1341da177e4SLinus Torvalds #define TCP_ATO_MIN	4U
1351da177e4SLinus Torvalds #endif
1361da177e4SLinus Torvalds #define TCP_RTO_MAX	((unsigned)(120*HZ))
1371da177e4SLinus Torvalds #define TCP_RTO_MIN	((unsigned)(HZ/5))
138fd4f2ceaSEric Dumazet #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
1399ad7c049SJerry Chu #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
1409ad7c049SJerry Chu 						 * used as a fallback RTO for the
1419ad7c049SJerry Chu 						 * initial data transmission if no
1429ad7c049SJerry Chu 						 * valid RTT sample has been acquired,
1439ad7c049SJerry Chu 						 * most likely due to retrans in 3WHS.
1449ad7c049SJerry Chu 						 */
1451da177e4SLinus Torvalds 
1461da177e4SLinus Torvalds #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
1471da177e4SLinus Torvalds 					                 * for local resources.
1481da177e4SLinus Torvalds 					                 */
14957dde7f7SYuchung Cheng #define TCP_REO_TIMEOUT_MIN	(2000) /* Min RACK reordering timeout in usec */
1501da177e4SLinus Torvalds 
1511da177e4SLinus Torvalds #define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
1521da177e4SLinus Torvalds #define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
1531da177e4SLinus Torvalds #define TCP_KEEPALIVE_INTVL	(75*HZ)
1541da177e4SLinus Torvalds 
1551da177e4SLinus Torvalds #define MAX_TCP_KEEPIDLE	32767
1561da177e4SLinus Torvalds #define MAX_TCP_KEEPINTVL	32767
1571da177e4SLinus Torvalds #define MAX_TCP_KEEPCNT		127
1581da177e4SLinus Torvalds #define MAX_TCP_SYNCNT		127
1591da177e4SLinus Torvalds 
1601da177e4SLinus Torvalds #define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
1611da177e4SLinus Torvalds 
1621da177e4SLinus Torvalds #define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
1631da177e4SLinus Torvalds #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
1641da177e4SLinus Torvalds 					 * after this time. It should be equal to
1651da177e4SLinus Torvalds 					 * (or greater than) TCP_TIMEWAIT_LEN
1661da177e4SLinus Torvalds 					 * to provide reliability equal to that
1671da177e4SLinus Torvalds 					 * provided by the timewait state.
1681da177e4SLinus Torvalds 					 */
1691da177e4SLinus Torvalds #define TCP_PAWS_WINDOW	1		/* Replay window for per-host
1701da177e4SLinus Torvalds 					 * timestamps. It must be less than
1711da177e4SLinus Torvalds 					 * minimal timewait lifetime.
1721da177e4SLinus Torvalds 					 */
1731da177e4SLinus Torvalds /*
1741da177e4SLinus Torvalds  *	TCP option
1751da177e4SLinus Torvalds  */
1761da177e4SLinus Torvalds 
1771da177e4SLinus Torvalds #define TCPOPT_NOP		1	/* Padding */
1781da177e4SLinus Torvalds #define TCPOPT_EOL		0	/* End of options */
1791da177e4SLinus Torvalds #define TCPOPT_MSS		2	/* Segment size negotiating */
1801da177e4SLinus Torvalds #define TCPOPT_WINDOW		3	/* Window scaling */
1811da177e4SLinus Torvalds #define TCPOPT_SACK_PERM        4       /* SACK Permitted */
1821da177e4SLinus Torvalds #define TCPOPT_SACK             5       /* SACK Block */
1831da177e4SLinus Torvalds #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
184cfb6eeb4SYOSHIFUJI Hideaki #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
1857f9b838bSDaniel Lee #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
1862100c8d2SYuchung Cheng #define TCPOPT_EXP		254	/* Experimental */
1872100c8d2SYuchung Cheng /* Magic number placed right after the option kind and length when sharing
1882100c8d2SYuchung Cheng  * the TCP experimental option space. See draft-ietf-tcpm-experimental-options-00.txt
1892100c8d2SYuchung Cheng  */
1902100c8d2SYuchung Cheng #define TCPOPT_FASTOPEN_MAGIC	0xF989
1911da177e4SLinus Torvalds 
1921da177e4SLinus Torvalds /*
1931da177e4SLinus Torvalds  *     TCP option lengths
1941da177e4SLinus Torvalds  */
1951da177e4SLinus Torvalds 
1961da177e4SLinus Torvalds #define TCPOLEN_MSS            4
1971da177e4SLinus Torvalds #define TCPOLEN_WINDOW         3
1981da177e4SLinus Torvalds #define TCPOLEN_SACK_PERM      2
1991da177e4SLinus Torvalds #define TCPOLEN_TIMESTAMP      10
200cfb6eeb4SYOSHIFUJI Hideaki #define TCPOLEN_MD5SIG         18
2017f9b838bSDaniel Lee #define TCPOLEN_FASTOPEN_BASE  2
2022100c8d2SYuchung Cheng #define TCPOLEN_EXP_FASTOPEN_BASE  4
2031da177e4SLinus Torvalds 
2041da177e4SLinus Torvalds /* But this is what stacks really send out. */
2051da177e4SLinus Torvalds #define TCPOLEN_TSTAMP_ALIGNED		12
2061da177e4SLinus Torvalds #define TCPOLEN_WSCALE_ALIGNED		4
2071da177e4SLinus Torvalds #define TCPOLEN_SACKPERM_ALIGNED	4
2081da177e4SLinus Torvalds #define TCPOLEN_SACK_BASE		2
2091da177e4SLinus Torvalds #define TCPOLEN_SACK_BASE_ALIGNED	4
2101da177e4SLinus Torvalds #define TCPOLEN_SACK_PERBLOCK		8
211cfb6eeb4SYOSHIFUJI Hideaki #define TCPOLEN_MD5SIG_ALIGNED		20
21233ad798cSAdam Langley #define TCPOLEN_MSS_ALIGNED		4
2131da177e4SLinus Torvalds 
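/* Illustrative note (not part of the original header): the _ALIGNED sizes
 * above include the NOP padding actually emitted on the wire.  A timestamp
 * option, for instance, is typically sent as two NOPs followed by kind 8,
 * length 10 and the two 32-bit stamps, 12 bytes total (TCPOLEN_TSTAMP_ALIGNED):
 *
 *	01 01 08 0a <TSval: 4 bytes> <TSecr: 4 bytes>
 */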
2141da177e4SLinus Torvalds /* Flags in tp->nonagle */
2151da177e4SLinus Torvalds #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
2161da177e4SLinus Torvalds #define TCP_NAGLE_CORK		2	/* Socket is corked	    */
217caa20d9aSStephen Hemminger #define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
2181da177e4SLinus Torvalds 
21936e31b0aSAndreas Petlund /* TCP thin-stream limits */
22036e31b0aSAndreas Petlund #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
22136e31b0aSAndreas Petlund 
22221603fc4SJörg Thalheim /* TCP initial congestion window as per rfc6928 */
223442b9635SDavid S. Miller #define TCP_INIT_CWND		10
224442b9635SDavid S. Miller 
225cf60af03SYuchung Cheng /* Bit Flags for sysctl_tcp_fastopen */
226cf60af03SYuchung Cheng #define	TFO_CLIENT_ENABLE	1
22710467163SJerry Chu #define	TFO_SERVER_ENABLE	2
22867da22d2SYuchung Cheng #define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
229cf60af03SYuchung Cheng 
23010467163SJerry Chu /* Accept SYN data w/o any cookie option */
23110467163SJerry Chu #define	TFO_SERVER_COOKIE_NOT_REQD	0x200
23210467163SJerry Chu 
23310467163SJerry Chu /* Force enable TFO on all listeners, i.e., not requiring the
234cebc5cbaSYuchung Cheng  * TCP_FASTOPEN socket option.
23510467163SJerry Chu  */
23610467163SJerry Chu #define	TFO_SERVER_WO_SOCKOPT1	0x400
23710467163SJerry Chu 
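/* Illustrative note (not part of the original header): sysctl_tcp_fastopen
 * is a bitmap of the TFO_* flags above.  Enabling Fast Open for both active
 * and passive connections, for example, means setting it to
 * (TFO_CLIENT_ENABLE | TFO_SERVER_ENABLE), i.e. writing the familiar value 3
 * to /proc/sys/net/ipv4/tcp_fastopen.
 */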
238295ff7edSArnaldo Carvalho de Melo 
2391da177e4SLinus Torvalds /* sysctl variables for tcp */
2402100c8d2SYuchung Cheng extern int sysctl_tcp_fastopen;
2411da177e4SLinus Torvalds extern int sysctl_tcp_retrans_collapse;
2421da177e4SLinus Torvalds extern int sysctl_tcp_stdurg;
2431da177e4SLinus Torvalds extern int sysctl_tcp_rfc1337;
2441da177e4SLinus Torvalds extern int sysctl_tcp_abort_on_overflow;
2451da177e4SLinus Torvalds extern int sysctl_tcp_max_orphans;
2461da177e4SLinus Torvalds extern int sysctl_tcp_fack;
2471da177e4SLinus Torvalds extern int sysctl_tcp_reordering;
248dca145ffSEric Dumazet extern int sysctl_tcp_max_reordering;
2491da177e4SLinus Torvalds extern int sysctl_tcp_dsack;
250a4fe34bfSEric W. Biederman extern long sysctl_tcp_mem[3];
2511da177e4SLinus Torvalds extern int sysctl_tcp_wmem[3];
2521da177e4SLinus Torvalds extern int sysctl_tcp_rmem[3];
2531da177e4SLinus Torvalds extern int sysctl_tcp_app_win;
2541da177e4SLinus Torvalds extern int sysctl_tcp_adv_win_scale;
2551da177e4SLinus Torvalds extern int sysctl_tcp_frto;
2561da177e4SLinus Torvalds extern int sysctl_tcp_low_latency;
2571da177e4SLinus Torvalds extern int sysctl_tcp_nometrics_save;
2581da177e4SLinus Torvalds extern int sysctl_tcp_moderate_rcvbuf;
2591da177e4SLinus Torvalds extern int sysctl_tcp_tso_win_divisor;
26015d99e02SRick Jones extern int sysctl_tcp_workaround_signed_windows;
26135089bb2SDavid S. Miller extern int sysctl_tcp_slow_start_after_idle;
26236e31b0aSAndreas Petlund extern int sysctl_tcp_thin_linear_timeouts;
2637e380175SAndreas Petlund extern int sysctl_tcp_thin_dupack;
264eed530b6SYuchung Cheng extern int sysctl_tcp_early_retrans;
265a0370b3fSYuchung Cheng extern int sysctl_tcp_recovery;
266a0370b3fSYuchung Cheng #define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
267a0370b3fSYuchung Cheng 
26846d3ceabSEric Dumazet extern int sysctl_tcp_limit_output_bytes;
269282f23c6SEric Dumazet extern int sysctl_tcp_challenge_ack_limit;
27095bd09ebSEric Dumazet extern int sysctl_tcp_min_tso_segs;
271f6722583SYuchung Cheng extern int sysctl_tcp_min_rtt_wlen;
272f54b3111SEric Dumazet extern int sysctl_tcp_autocorking;
273032ee423SNeal Cardwell extern int sysctl_tcp_invalid_ratelimit;
27443e122b0SEric Dumazet extern int sysctl_tcp_pacing_ss_ratio;
27543e122b0SEric Dumazet extern int sysctl_tcp_pacing_ca_ratio;
2761da177e4SLinus Torvalds 
2778d987e5cSEric Dumazet extern atomic_long_t tcp_memory_allocated;
2781748376bSEric Dumazet extern struct percpu_counter tcp_sockets_allocated;
279*06044751SEric Dumazet extern unsigned long tcp_memory_pressure;
2801da177e4SLinus Torvalds 
281b8da51ebSEric Dumazet /* optimized version of sk_under_memory_pressure() for TCP sockets */
282b8da51ebSEric Dumazet static inline bool tcp_under_memory_pressure(const struct sock *sk)
283b8da51ebSEric Dumazet {
284baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
285baac50bbSJohannes Weiner 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
286e805605cSJohannes Weiner 		return true;
287b8da51ebSEric Dumazet 
288b8da51ebSEric Dumazet 	return tcp_memory_pressure;
289b8da51ebSEric Dumazet }
2901da177e4SLinus Torvalds /*
2911da177e4SLinus Torvalds  * The next routines deal with comparing 32 bit unsigned ints
2921da177e4SLinus Torvalds  * and worry about wraparound (automatic with unsigned arithmetic).
2931da177e4SLinus Torvalds  */
2941da177e4SLinus Torvalds 
295a2a385d6SEric Dumazet static inline bool before(__u32 seq1, __u32 seq2)
2961da177e4SLinus Torvalds {
2970d630cc0SGerrit Renker         return (__s32)(seq1-seq2) < 0;
2981da177e4SLinus Torvalds }
2999a036b9cSGerrit Renker #define after(seq2, seq1) 	before(seq1, seq2)
3001da177e4SLinus Torvalds 
3011da177e4SLinus Torvalds /* is s2<=s1<=s3 ? */
302a2a385d6SEric Dumazet static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
3031da177e4SLinus Torvalds {
3041da177e4SLinus Torvalds 	return seq3 - seq2 >= seq1 - seq2;
3051da177e4SLinus Torvalds }
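/* Illustrative sketch (not part of the original header): because the delta
 * is evaluated as a signed 32-bit value, these helpers stay correct across
 * sequence-space wraparound.  The helper below only demonstrates that and
 * is hypothetical:
 */
static inline void tcp_seq_wraparound_example(void)
{
	__u32 old_seq = 0xfffffff0U;	/* just before the 2^32 wrap */
	__u32 new_seq = 0x00000010U;	/* 0x20 bytes later, after the wrap */

	WARN_ON(!before(old_seq, new_seq));	/* delta is -32 as __s32 */
	WARN_ON(!after(new_seq, old_seq));	/* after() is before() swapped */
	WARN_ON(!between(0x5U, old_seq, new_seq)); /* 0x5 lies within the wrapped range */
}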
3061da177e4SLinus Torvalds 
307efcdbf24SArun Sharma static inline bool tcp_out_of_memory(struct sock *sk)
308efcdbf24SArun Sharma {
309efcdbf24SArun Sharma 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
310efcdbf24SArun Sharma 	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
311efcdbf24SArun Sharma 		return true;
312efcdbf24SArun Sharma 	return false;
313efcdbf24SArun Sharma }
314efcdbf24SArun Sharma 
315a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size);
316a6c5ea4cSEric Dumazet 
317ad1af0feSDavid S. Miller static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
318e4fd5da3SPavel Emelianov {
319ad1af0feSDavid S. Miller 	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
320ad1af0feSDavid S. Miller 	int orphans = percpu_counter_read_positive(ocp);
321ad1af0feSDavid S. Miller 
322ad1af0feSDavid S. Miller 	if (orphans << shift > sysctl_tcp_max_orphans) {
323ad1af0feSDavid S. Miller 		orphans = percpu_counter_sum_positive(ocp);
324ad1af0feSDavid S. Miller 		if (orphans << shift > sysctl_tcp_max_orphans)
325ad1af0feSDavid S. Miller 			return true;
326ad1af0feSDavid S. Miller 	}
327ad1af0feSDavid S. Miller 	return false;
328e4fd5da3SPavel Emelianov }
3291da177e4SLinus Torvalds 
3305c9f3023SJoe Perches bool tcp_check_oom(struct sock *sk, int shift);
331efcdbf24SArun Sharma 
332a0f82f64SFlorian Westphal 
3331da177e4SLinus Torvalds extern struct proto tcp_prot;
3341da177e4SLinus Torvalds 
33557ef42d5SPavel Emelyanov #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
33613415e46SEric Dumazet #define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
33757ef42d5SPavel Emelyanov #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
338aa2ea058STom Herbert #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
3391da177e4SLinus Torvalds 
3405c9f3023SJoe Perches void tcp_tasklet_init(void);
34146d3ceabSEric Dumazet 
3425c9f3023SJoe Perches void tcp_v4_err(struct sk_buff *skb, u32);
3431da177e4SLinus Torvalds 
3445c9f3023SJoe Perches void tcp_shutdown(struct sock *sk, int how);
3451da177e4SLinus Torvalds 
3465c9f3023SJoe Perches void tcp_v4_early_demux(struct sk_buff *skb);
3475c9f3023SJoe Perches int tcp_v4_rcv(struct sk_buff *skb);
3481da177e4SLinus Torvalds 
3495c9f3023SJoe Perches int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
3501b784140SYing Xue int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
3515c9f3023SJoe Perches int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
3525c9f3023SJoe Perches 		 int flags);
3535c9f3023SJoe Perches void tcp_release_cb(struct sock *sk);
3545c9f3023SJoe Perches void tcp_wfree(struct sk_buff *skb);
3555c9f3023SJoe Perches void tcp_write_timer_handler(struct sock *sk);
3565c9f3023SJoe Perches void tcp_delack_timer_handler(struct sock *sk);
3575c9f3023SJoe Perches int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
35872ab4a86SEric Dumazet int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
3595c9f3023SJoe Perches void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
360cf533ea5SEric Dumazet 			 const struct tcphdr *th, unsigned int len);
3615c9f3023SJoe Perches void tcp_rcv_space_adjust(struct sock *sk);
3625c9f3023SJoe Perches int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
3635c9f3023SJoe Perches void tcp_twsk_destructor(struct sock *sk);
3645c9f3023SJoe Perches ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
36553d3176bSChangli Gao 			struct pipe_inode_info *pipe, size_t len,
36653d3176bSChangli Gao 			unsigned int flags);
3679c55e01cSJens Axboe 
368463c84b9SArnaldo Carvalho de Melo static inline void tcp_dec_quickack_mode(struct sock *sk,
369463c84b9SArnaldo Carvalho de Melo 					 const unsigned int pkts)
3701da177e4SLinus Torvalds {
371463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
372fc6415bcSDavid S. Miller 
373463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.quick) {
374463c84b9SArnaldo Carvalho de Melo 		if (pkts >= icsk->icsk_ack.quick) {
375463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.quick = 0;
3761da177e4SLinus Torvalds 			/* Leaving quickack mode we deflate ATO. */
377463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.ato   = TCP_ATO_MIN;
378fc6415bcSDavid S. Miller 		} else
379463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.quick -= pkts;
3801da177e4SLinus Torvalds 	}
3811da177e4SLinus Torvalds }
3821da177e4SLinus Torvalds 
383bdf1ee5dSIlpo Järvinen #define	TCP_ECN_OK		1
384bdf1ee5dSIlpo Järvinen #define	TCP_ECN_QUEUE_CWR	2
385bdf1ee5dSIlpo Järvinen #define	TCP_ECN_DEMAND_CWR	4
3867a269ffaSEric Dumazet #define	TCP_ECN_SEEN		8
387bdf1ee5dSIlpo Järvinen 
388fd2c3ef7SEric Dumazet enum tcp_tw_status {
3891da177e4SLinus Torvalds 	TCP_TW_SUCCESS = 0,
3901da177e4SLinus Torvalds 	TCP_TW_RST = 1,
3911da177e4SLinus Torvalds 	TCP_TW_ACK = 2,
3921da177e4SLinus Torvalds 	TCP_TW_SYN = 3
3931da177e4SLinus Torvalds };
3941da177e4SLinus Torvalds 
3951da177e4SLinus Torvalds 
3965c9f3023SJoe Perches enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
3971da177e4SLinus Torvalds 					      struct sk_buff *skb,
3988feaf0c0SArnaldo Carvalho de Melo 					      const struct tcphdr *th);
3995c9f3023SJoe Perches struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
40052452c54SEric Dumazet 			   struct request_sock *req, bool fastopen);
4015c9f3023SJoe Perches int tcp_child_process(struct sock *parent, struct sock *child,
4021da177e4SLinus Torvalds 		      struct sk_buff *skb);
4035ae344c9SNeal Cardwell void tcp_enter_loss(struct sock *sk);
40457dde7f7SYuchung Cheng void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
4055c9f3023SJoe Perches void tcp_clear_retrans(struct tcp_sock *tp);
4065c9f3023SJoe Perches void tcp_update_metrics(struct sock *sk);
4075c9f3023SJoe Perches void tcp_init_metrics(struct sock *sk);
4085c9f3023SJoe Perches void tcp_metrics_init(void);
409d82bae12SSoheil Hassas Yeganeh bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
4105c9f3023SJoe Perches void tcp_disable_fack(struct tcp_sock *tp);
4115c9f3023SJoe Perches void tcp_close(struct sock *sk, long timeout);
4125c9f3023SJoe Perches void tcp_init_sock(struct sock *sk);
4135c9f3023SJoe Perches unsigned int tcp_poll(struct file *file, struct socket *sock,
41453d3176bSChangli Gao 		      struct poll_table_struct *wait);
4155c9f3023SJoe Perches int tcp_getsockopt(struct sock *sk, int level, int optname,
4163fdadf7dSDmitry Mishin 		   char __user *optval, int __user *optlen);
4175c9f3023SJoe Perches int tcp_setsockopt(struct sock *sk, int level, int optname,
41853d3176bSChangli Gao 		   char __user *optval, unsigned int optlen);
4195c9f3023SJoe Perches int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
42053d3176bSChangli Gao 			  char __user *optval, int __user *optlen);
4215c9f3023SJoe Perches int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
422b7058842SDavid S. Miller 			  char __user *optval, unsigned int optlen);
4235c9f3023SJoe Perches void tcp_set_keepalive(struct sock *sk, int val);
42442cb80a2SEric Dumazet void tcp_syn_ack_timeout(const struct request_sock *req);
4251b784140SYing Xue int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
4261b784140SYing Xue 		int flags, int *addr_len);
427eed29f17SEric Dumazet void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
4281a2c6181SChristoph Paasch 		       struct tcp_options_received *opt_rx,
4292100c8d2SYuchung Cheng 		       int estab, struct tcp_fastopen_cookie *foc);
4305c9f3023SJoe Perches const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
4317d5d5525SYOSHIFUJI Hideaki 
4321da177e4SLinus Torvalds /*
4331da177e4SLinus Torvalds  *	TCP v4 functions exported for the inet6 API
4341da177e4SLinus Torvalds  */
4351da177e4SLinus Torvalds 
4365c9f3023SJoe Perches void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
4374fab9071SNeal Cardwell void tcp_v4_mtu_reduced(struct sock *sk);
4389cf74903SEric Dumazet void tcp_req_err(struct sock *sk, u32 seq, bool abort);
4395c9f3023SJoe Perches int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
440c28c6f04SEric Dumazet struct sock *tcp_create_openreq_child(const struct sock *sk,
44160236fddSArnaldo Carvalho de Melo 				      struct request_sock *req,
4421da177e4SLinus Torvalds 				      struct sk_buff *skb);
44381164413SDaniel Borkmann void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
4440c27171eSEric Dumazet struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
44560236fddSArnaldo Carvalho de Melo 				  struct request_sock *req,
4465e0724d0SEric Dumazet 				  struct dst_entry *dst,
4475e0724d0SEric Dumazet 				  struct request_sock *req_unhash,
4485e0724d0SEric Dumazet 				  bool *own_req);
4495c9f3023SJoe Perches int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
4505c9f3023SJoe Perches int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
4515c9f3023SJoe Perches int tcp_connect(struct sock *sk);
452b3d05147SEric Dumazet enum tcp_synack_type {
453b3d05147SEric Dumazet 	TCP_SYNACK_NORMAL,
454b3d05147SEric Dumazet 	TCP_SYNACK_FASTOPEN,
455b3d05147SEric Dumazet 	TCP_SYNACK_COOKIE,
456b3d05147SEric Dumazet };
4575d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
458e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
459ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
460b3d05147SEric Dumazet 				enum tcp_synack_type synack_type);
4615c9f3023SJoe Perches int tcp_disconnect(struct sock *sk, int flags);
4621da177e4SLinus Torvalds 
463370816aeSPavel Emelyanov void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
464292e8d8cSPavel Emelyanov int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
46563d02d15SEric Dumazet void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
4661da177e4SLinus Torvalds 
4671da177e4SLinus Torvalds /* From syncookies.c */
468b80c0e78SEric Dumazet struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
469b80c0e78SEric Dumazet 				 struct request_sock *req,
47084b114b9SEric Dumazet 				 struct dst_entry *dst, u32 tsoff);
4715c9f3023SJoe Perches int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
4720198230bSPatrick McHardy 		      u32 cookie);
473461b74c3SCong Wang struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
474e05c82d3SEric Dumazet #ifdef CONFIG_SYN_COOKIES
4758c27bd75SFlorian Westphal 
47663262315SEric Dumazet /* Syncookies use a monotonic timer which increments every 60 seconds.
4778c27bd75SFlorian Westphal  * This counter is used both as a hash input and partially encoded into
4788c27bd75SFlorian Westphal  * the cookie value.  A cookie is only validated further if the delta
4798c27bd75SFlorian Westphal  * between the current counter value and the encoded one is less than this,
48063262315SEric Dumazet  * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
4818c27bd75SFlorian Westphal  * the counter advances immediately after a cookie is generated).
4828c27bd75SFlorian Westphal  */
4838c27bd75SFlorian Westphal #define MAX_SYNCOOKIE_AGE	2
484264ea103SEric Dumazet #define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
485264ea103SEric Dumazet #define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
486264ea103SEric Dumazet 
487264ea103SEric Dumazet /* syncookies: remember time of last synqueue overflow
488264ea103SEric Dumazet  * But do not dirty this field too often (once per second is enough)
4893f684b4bSEric Dumazet  * It is racy as we do not hold a lock, but the race is very minor.
490264ea103SEric Dumazet  */
4913f684b4bSEric Dumazet static inline void tcp_synq_overflow(const struct sock *sk)
492264ea103SEric Dumazet {
493264ea103SEric Dumazet 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
494264ea103SEric Dumazet 	unsigned long now = jiffies;
495264ea103SEric Dumazet 
496264ea103SEric Dumazet 	if (time_after(now, last_overflow + HZ))
497264ea103SEric Dumazet 		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
498264ea103SEric Dumazet }
499264ea103SEric Dumazet 
500264ea103SEric Dumazet /* syncookies: no recent synqueue overflow on this listening socket? */
501264ea103SEric Dumazet static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
502264ea103SEric Dumazet {
503264ea103SEric Dumazet 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
504264ea103SEric Dumazet 
505264ea103SEric Dumazet 	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
506264ea103SEric Dumazet }
5078c27bd75SFlorian Westphal 
5088c27bd75SFlorian Westphal static inline u32 tcp_cookie_time(void)
5098c27bd75SFlorian Westphal {
51063262315SEric Dumazet 	u64 val = get_jiffies_64();
51163262315SEric Dumazet 
512264ea103SEric Dumazet 	do_div(val, TCP_SYNCOOKIE_PERIOD);
51363262315SEric Dumazet 	return val;
5148c27bd75SFlorian Westphal }
5158c27bd75SFlorian Westphal 
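/* Illustrative note (not part of the original header): with HZ == 1000,
 * tcp_cookie_time() advances once every 60000 jiffies, i.e. once per
 * TCP_SYNCOOKIE_PERIOD.  A cookie that encodes counter value N is therefore
 * rejected once the counter reaches N + MAX_SYNCOOKIE_AGE, giving the
 * "at most 2 * 60 seconds" validity described above.
 */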
5165c9f3023SJoe Perches u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
5175c9f3023SJoe Perches 			      u16 *mssp);
5183f684b4bSEric Dumazet __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
5199a568de4SEric Dumazet u64 cookie_init_timestamp(struct request_sock *req);
520f9301034SEric Dumazet bool cookie_timestamp_decode(const struct net *net,
521f9301034SEric Dumazet 			     struct tcp_options_received *opt);
522f1673381SFlorian Westphal bool cookie_ecn_ok(const struct tcp_options_received *opt,
523f7b3bec6SFlorian Westphal 		   const struct net *net, const struct dst_entry *dst);
5244dfc2817SFlorian Westphal 
525c6aefafbSGlenn Griffin /* From net/ipv6/syncookies.c */
5265c9f3023SJoe Perches int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
52781eb6a14SPatrick McHardy 		      u32 cookie);
5285c9f3023SJoe Perches struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
529f1673381SFlorian Westphal 
5305c9f3023SJoe Perches u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
53181eb6a14SPatrick McHardy 			      const struct tcphdr *th, u16 *mssp);
5323f684b4bSEric Dumazet __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
533e05c82d3SEric Dumazet #endif
5341da177e4SLinus Torvalds /* tcp_output.c */
5351da177e4SLinus Torvalds 
5361b3878caSNeal Cardwell u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
5371b3878caSNeal Cardwell 		     int min_tso_segs);
5385c9f3023SJoe Perches void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
5399e412ba7SIlpo Järvinen 			       int nonagle);
5405c9f3023SJoe Perches bool tcp_may_send_now(struct sock *sk);
54110d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
54210d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
5435c9f3023SJoe Perches void tcp_retransmit_timer(struct sock *sk);
5445c9f3023SJoe Perches void tcp_xmit_retransmit_queue(struct sock *);
5455c9f3023SJoe Perches void tcp_simple_retransmit(struct sock *);
54657dde7f7SYuchung Cheng void tcp_enter_recovery(struct sock *sk, bool ece_ack);
5475c9f3023SJoe Perches int tcp_trim_head(struct sock *, struct sk_buff *, u32);
5486cc55e09SOctavian Purdila int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
5491da177e4SLinus Torvalds 
5505c9f3023SJoe Perches void tcp_send_probe0(struct sock *);
5515c9f3023SJoe Perches void tcp_send_partial(struct sock *);
552e520af48SEric Dumazet int tcp_write_wakeup(struct sock *, int mib);
5535c9f3023SJoe Perches void tcp_send_fin(struct sock *sk);
5545c9f3023SJoe Perches void tcp_send_active_reset(struct sock *sk, gfp_t priority);
5555c9f3023SJoe Perches int tcp_send_synack(struct sock *);
5565c9f3023SJoe Perches void tcp_push_one(struct sock *, unsigned int mss_now);
5575c9f3023SJoe Perches void tcp_send_ack(struct sock *sk);
5585c9f3023SJoe Perches void tcp_send_delayed_ack(struct sock *sk);
5595c9f3023SJoe Perches void tcp_send_loss_probe(struct sock *sk);
5605c9f3023SJoe Perches bool tcp_schedule_loss_probe(struct sock *sk);
561cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
562cfea5a68SMartin KaFai Lau 			     const struct sk_buff *next_skb);
5631da177e4SLinus Torvalds 
564a762a980SDavid S. Miller /* tcp_input.c */
5655c9f3023SJoe Perches void tcp_rearm_rto(struct sock *sk);
5660f1c28aeSYuchung Cheng void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
5675c9f3023SJoe Perches void tcp_reset(struct sock *sk);
5684f41b1c5SYuchung Cheng void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
569e3e17b77SEric Dumazet void tcp_fin(struct sock *sk);
570a762a980SDavid S. Miller 
5711da177e4SLinus Torvalds /* tcp_timer.c */
5725c9f3023SJoe Perches void tcp_init_xmit_timers(struct sock *);
573463c84b9SArnaldo Carvalho de Melo static inline void tcp_clear_xmit_timers(struct sock *sk)
574463c84b9SArnaldo Carvalho de Melo {
575218af599SEric Dumazet 	hrtimer_cancel(&tcp_sk(sk)->pacing_timer);
576463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timers(sk);
577463c84b9SArnaldo Carvalho de Melo }
5781da177e4SLinus Torvalds 
5795c9f3023SJoe Perches unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
5805c9f3023SJoe Perches unsigned int tcp_current_mss(struct sock *sk);
5810c54b85fSIlpo Järvinen 
5820c54b85fSIlpo Järvinen /* Bound MSS / TSO packet size with the half of the window */
5830c54b85fSIlpo Järvinen static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
5840c54b85fSIlpo Järvinen {
58501f83d69SAlexey Kuznetsov 	int cutoff;
58601f83d69SAlexey Kuznetsov 
58701f83d69SAlexey Kuznetsov 	/* When peer uses tiny windows, there is no use in packetizing
58801f83d69SAlexey Kuznetsov 	 * to sub-MSS pieces for the sake of SWS or making sure there
58901f83d69SAlexey Kuznetsov 	 * are enough packets in the pipe for fast recovery.
59001f83d69SAlexey Kuznetsov 	 *
59101f83d69SAlexey Kuznetsov 	 * On the other hand, for extremely large MSS devices, handling
59201f83d69SAlexey Kuznetsov 	 * smaller than MSS windows in this way does make sense.
59301f83d69SAlexey Kuznetsov 	 */
5942631b79fSSeymour, Shane M 	if (tp->max_window > TCP_MSS_DEFAULT)
59501f83d69SAlexey Kuznetsov 		cutoff = (tp->max_window >> 1);
59601f83d69SAlexey Kuznetsov 	else
59701f83d69SAlexey Kuznetsov 		cutoff = tp->max_window;
59801f83d69SAlexey Kuznetsov 
59901f83d69SAlexey Kuznetsov 	if (cutoff && pktsize > cutoff)
60001f83d69SAlexey Kuznetsov 		return max_t(int, cutoff, 68U - tp->tcp_header_len);
6010c54b85fSIlpo Järvinen 	else
6020c54b85fSIlpo Järvinen 		return pktsize;
6030c54b85fSIlpo Järvinen }
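/* Illustrative note (not part of the original header): with a peer
 * advertising max_window == 65535 (greater than TCP_MSS_DEFAULT), the cutoff
 * becomes 32767, so a 48 KB candidate packet is bounded down to 32767 bytes;
 * smaller packets pass through unchanged.
 */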
6041da177e4SLinus Torvalds 
60517b085eaSArnaldo Carvalho de Melo /* tcp.c */
6060df48c26SEric Dumazet void tcp_get_info(struct sock *, struct tcp_info *);
6071da177e4SLinus Torvalds 
6081da177e4SLinus Torvalds /* Read 'sendfile()'-style from a TCP socket */
6095c9f3023SJoe Perches int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
6101da177e4SLinus Torvalds 		  sk_read_actor_t recv_actor);
6111da177e4SLinus Torvalds 
6125c9f3023SJoe Perches void tcp_initialize_rcv_mss(struct sock *sk);
6131da177e4SLinus Torvalds 
6145c9f3023SJoe Perches int tcp_mtu_to_mss(struct sock *sk, int pmtu);
6155c9f3023SJoe Perches int tcp_mss_to_mtu(struct sock *sk, int mss);
6165c9f3023SJoe Perches void tcp_mtup_init(struct sock *sk);
6175c9f3023SJoe Perches void tcp_init_buffer_space(struct sock *sk);
6185d424d5aSJohn Heffner 
619f1ecd5d9SDamian Lukowski static inline void tcp_bound_rto(const struct sock *sk)
620f1ecd5d9SDamian Lukowski {
621f1ecd5d9SDamian Lukowski 	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
622f1ecd5d9SDamian Lukowski 		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
623f1ecd5d9SDamian Lukowski }
624f1ecd5d9SDamian Lukowski 
625f1ecd5d9SDamian Lukowski static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
626f1ecd5d9SDamian Lukowski {
627740b0f18SEric Dumazet 	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
628f1ecd5d9SDamian Lukowski }
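/* Illustrative note (not part of the original header): srtt_us holds
 * 8 * SRTT, so with a 100 ms smoothed RTT (srtt_us == 800000) and
 * rttvar_us == 200000 the expression above yields 100000 + 200000 =
 * 300000 us, i.e. a 300 ms RTO before tcp_bound_rto() caps it at
 * TCP_RTO_MAX.  This roughly mirrors RFC 6298's SRTT + 4*RTTVAR, since
 * rttvar_us already carries the scaled deviation.
 */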
629f1ecd5d9SDamian Lukowski 
63040efc6faSStephen Hemminger static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
6311da177e4SLinus Torvalds {
6321da177e4SLinus Torvalds 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
6331da177e4SLinus Torvalds 			       ntohl(TCP_FLAG_ACK) |
6341da177e4SLinus Torvalds 			       snd_wnd);
6351da177e4SLinus Torvalds }
6361da177e4SLinus Torvalds 
63740efc6faSStephen Hemminger static inline void tcp_fast_path_on(struct tcp_sock *tp)
6381da177e4SLinus Torvalds {
6391da177e4SLinus Torvalds 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
6401da177e4SLinus Torvalds }
6411da177e4SLinus Torvalds 
6429e412ba7SIlpo Järvinen static inline void tcp_fast_path_check(struct sock *sk)
6431da177e4SLinus Torvalds {
6449e412ba7SIlpo Järvinen 	struct tcp_sock *tp = tcp_sk(sk);
6459e412ba7SIlpo Järvinen 
6469f5afeaeSYaogong Wang 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
6471da177e4SLinus Torvalds 	    tp->rcv_wnd &&
6481da177e4SLinus Torvalds 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
6491da177e4SLinus Torvalds 	    !tp->urg_data)
6501da177e4SLinus Torvalds 		tcp_fast_path_on(tp);
6511da177e4SLinus Torvalds }
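/* Illustrative note (not part of the original header): pred_flags mirrors
 * the fourth 32-bit word of the TCP header (data offset, flags, window)
 * that the input fast path compares against incoming segments.  With
 * timestamps enabled, tcp_header_len is 32 bytes, so (32 << 26) places
 * 8 (i.e. 32 / 4) in the 4-bit data-offset field, the ACK bit contributes
 * 0x00100000, and the scaled send window fills the low 16 bits.
 */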
6521da177e4SLinus Torvalds 
6530c266898SSatoru SATOH /* Compute the actual rto_min value */
6540c266898SSatoru SATOH static inline u32 tcp_rto_min(struct sock *sk)
6550c266898SSatoru SATOH {
656cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
6570c266898SSatoru SATOH 	u32 rto_min = TCP_RTO_MIN;
6580c266898SSatoru SATOH 
6590c266898SSatoru SATOH 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
6600c266898SSatoru SATOH 		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
6610c266898SSatoru SATOH 	return rto_min;
6620c266898SSatoru SATOH }
6630c266898SSatoru SATOH 
664740b0f18SEric Dumazet static inline u32 tcp_rto_min_us(struct sock *sk)
665740b0f18SEric Dumazet {
666740b0f18SEric Dumazet 	return jiffies_to_usecs(tcp_rto_min(sk));
667740b0f18SEric Dumazet }
668740b0f18SEric Dumazet 
66981164413SDaniel Borkmann static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
67081164413SDaniel Borkmann {
67181164413SDaniel Borkmann 	return dst_metric_locked(dst, RTAX_CC_ALGO);
67281164413SDaniel Borkmann }
67381164413SDaniel Borkmann 
674f6722583SYuchung Cheng /* Minimum RTT in usec. ~0 means not available. */
675f6722583SYuchung Cheng static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
676f6722583SYuchung Cheng {
67764033892SNeal Cardwell 	return minmax_get(&tp->rtt_min);
678f6722583SYuchung Cheng }
679f6722583SYuchung Cheng 
6801da177e4SLinus Torvalds /* Compute the actual receive window we are currently advertising.
6811da177e4SLinus Torvalds  * Rcv_nxt can be after the window if our peer push more data
6821da177e4SLinus Torvalds  * than the offered window.
6831da177e4SLinus Torvalds  */
68440efc6faSStephen Hemminger static inline u32 tcp_receive_window(const struct tcp_sock *tp)
6851da177e4SLinus Torvalds {
6861da177e4SLinus Torvalds 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
6871da177e4SLinus Torvalds 
6881da177e4SLinus Torvalds 	if (win < 0)
6891da177e4SLinus Torvalds 		win = 0;
6901da177e4SLinus Torvalds 	return (u32) win;
6911da177e4SLinus Torvalds }
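/* Illustrative note (not part of the original header): with rcv_wup == 1000,
 * rcv_wnd == 65535 and rcv_nxt == 1500, the advertised window still has
 * 1000 + 65535 - 1500 = 65035 bytes left.  Had the peer pushed past the
 * announced window, the signed intermediate would go negative and the
 * result would be clamped to 0.
 */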
6921da177e4SLinus Torvalds 
6931da177e4SLinus Torvalds /* Choose a new window, without checks for shrinking, and without
6941da177e4SLinus Torvalds  * scaling applied to the result.  The caller does these things
6951da177e4SLinus Torvalds  * if necessary.  This is a "raw" window selection.
6961da177e4SLinus Torvalds  */
6975c9f3023SJoe Perches u32 __tcp_select_window(struct sock *sk);
6981da177e4SLinus Torvalds 
699ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk);
700ee995283SPavel Emelyanov 
701ec66eda8SEric Dumazet /* TCP uses 32bit jiffies to save some space.
702ec66eda8SEric Dumazet  * Note that this is different from tcp_time_stamp, which
703ec66eda8SEric Dumazet  * historically was the same until linux-4.13.
704ec66eda8SEric Dumazet  */
705ec66eda8SEric Dumazet #define tcp_jiffies32 ((u32)jiffies)
706ec66eda8SEric Dumazet 
7079a568de4SEric Dumazet /*
7089a568de4SEric Dumazet  * Deliver a 32bit value for TCP timestamp option (RFC 7323)
7099a568de4SEric Dumazet  * It is no longer tied to jiffies, but to a 1 ms clock.
7109a568de4SEric Dumazet  * Note: double check if you want to use tcp_jiffies32 instead of this.
7111da177e4SLinus Torvalds  */
7129a568de4SEric Dumazet #define TCP_TS_HZ	1000
7139a568de4SEric Dumazet 
7149a568de4SEric Dumazet static inline u64 tcp_clock_ns(void)
7159a568de4SEric Dumazet {
7169a568de4SEric Dumazet 	return local_clock();
7179a568de4SEric Dumazet }
7189a568de4SEric Dumazet 
7199a568de4SEric Dumazet static inline u64 tcp_clock_us(void)
7209a568de4SEric Dumazet {
7219a568de4SEric Dumazet 	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
7229a568de4SEric Dumazet }
7239a568de4SEric Dumazet 
7249a568de4SEric Dumazet /* This should only be used in contexts where tp->tcp_mstamp is up to date */
7259a568de4SEric Dumazet static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
7269a568de4SEric Dumazet {
7279a568de4SEric Dumazet 	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
7289a568de4SEric Dumazet }
7299a568de4SEric Dumazet 
7309a568de4SEric Dumazet /* Could use tcp_clock_us() / 1000, but this version uses a single divide */
7319a568de4SEric Dumazet static inline u32 tcp_time_stamp_raw(void)
7329a568de4SEric Dumazet {
7339a568de4SEric Dumazet 	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
7349a568de4SEric Dumazet }
7359a568de4SEric Dumazet 
7369a568de4SEric Dumazet 
7379a568de4SEric Dumazet /* Refresh the 1us clock of a TCP socket,
7389a568de4SEric Dumazet  * ensuring monotonically increasing values.
7399a568de4SEric Dumazet  */
7409a568de4SEric Dumazet static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
7419a568de4SEric Dumazet {
7429a568de4SEric Dumazet 	u64 val = tcp_clock_us();
7439a568de4SEric Dumazet 
7449a568de4SEric Dumazet 	if (val > tp->tcp_mstamp)
7459a568de4SEric Dumazet 		tp->tcp_mstamp = val;
7469a568de4SEric Dumazet }
7479a568de4SEric Dumazet 
7489a568de4SEric Dumazet static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
7499a568de4SEric Dumazet {
7509a568de4SEric Dumazet 	return max_t(s64, t1 - t0, 0);
7519a568de4SEric Dumazet }
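/* Illustrative sketch (not part of the original header): a typical pattern
 * is to refresh the socket clock once per event and derive microsecond ages
 * from stamps recorded earlier.  The helper name and parameter below are
 * hypothetical:
 */
static inline u32 tcp_example_stamp_age(struct tcp_sock *tp, u64 xmit_stamp_us)
{
	tcp_mstamp_refresh(tp);		/* keep the 1us clock monotonic */
	return tcp_stamp_us_delta(tp->tcp_mstamp, xmit_stamp_us);
}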
7521da177e4SLinus Torvalds 
7537faee5c0SEric Dumazet static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
7547faee5c0SEric Dumazet {
7559a568de4SEric Dumazet 	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
7567faee5c0SEric Dumazet }
7577faee5c0SEric Dumazet 
7587faee5c0SEric Dumazet 
759a3433f35SChangli Gao #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
760a3433f35SChangli Gao 
761a3433f35SChangli Gao #define TCPHDR_FIN 0x01
762a3433f35SChangli Gao #define TCPHDR_SYN 0x02
763a3433f35SChangli Gao #define TCPHDR_RST 0x04
764a3433f35SChangli Gao #define TCPHDR_PSH 0x08
765a3433f35SChangli Gao #define TCPHDR_ACK 0x10
766a3433f35SChangli Gao #define TCPHDR_URG 0x20
767a3433f35SChangli Gao #define TCPHDR_ECE 0x40
768a3433f35SChangli Gao #define TCPHDR_CWR 0x80
769a3433f35SChangli Gao 
77049213555SDaniel Borkmann #define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
77149213555SDaniel Borkmann 
772caa20d9aSStephen Hemminger /* This is what the send packet queuing engine uses to pass
773f86586faSEric Dumazet  * TCP per-packet control information to the transmission code.
774f86586faSEric Dumazet  * We also store the host-order sequence numbers in here.
775f86586faSEric Dumazet  * This is 44 bytes if IPV6 is enabled.
776f86586faSEric Dumazet  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
7771da177e4SLinus Torvalds  */
7781da177e4SLinus Torvalds struct tcp_skb_cb {
7791da177e4SLinus Torvalds 	__u32		seq;		/* Starting sequence number	*/
7801da177e4SLinus Torvalds 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
781cd7d8498SEric Dumazet 	union {
782cd7d8498SEric Dumazet 		/* Note : tcp_tw_isn is used in input path only
783cd7d8498SEric Dumazet 		 *	  (isn chosen by tcp_timewait_state_process())
784cd7d8498SEric Dumazet 		 *
785f69ad292SEric Dumazet 		 * 	  tcp_gso_segs/size are used in write queue only,
786f69ad292SEric Dumazet 		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
787cd7d8498SEric Dumazet 		 */
788cd7d8498SEric Dumazet 		__u32		tcp_tw_isn;
789f69ad292SEric Dumazet 		struct {
790f69ad292SEric Dumazet 			u16	tcp_gso_segs;
791f69ad292SEric Dumazet 			u16	tcp_gso_size;
792f69ad292SEric Dumazet 		};
793cd7d8498SEric Dumazet 	};
7944de075e0SEric Dumazet 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
795f4f9f6e7SNeal Cardwell 
7961da177e4SLinus Torvalds 	__u8		sacked;		/* State flags for SACK/FACK.	*/
7971da177e4SLinus Torvalds #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
7981da177e4SLinus Torvalds #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
7991da177e4SLinus Torvalds #define TCPCB_LOST		0x04	/* SKB is lost			*/
8001da177e4SLinus Torvalds #define TCPCB_TAGBITS		0x07	/* All tag bits			*/
8019d186cacSAndrey Vagin #define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
8021da177e4SLinus Torvalds #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
8039d186cacSAndrey Vagin #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
8049d186cacSAndrey Vagin 				TCPCB_REPAIRED)
8051da177e4SLinus Torvalds 
806f4f9f6e7SNeal Cardwell 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
8076b084928SSoheil Hassas Yeganeh 	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
808c134ecb8SMartin KaFai Lau 			eor:1,		/* Is skb MSG_EOR marked? */
809c134ecb8SMartin KaFai Lau 			unused:6;
8101da177e4SLinus Torvalds 	__u32		ack_seq;	/* Sequence number ACK'd	*/
811971f10ecSEric Dumazet 	union {
812b75803d5SLawrence Brakmo 		struct {
813b9f64820SYuchung Cheng 			/* There is space for up to 24 bytes */
814d7722e85SSoheil Hassas Yeganeh 			__u32 in_flight:30,/* Bytes in flight at transmit */
815d7722e85SSoheil Hassas Yeganeh 			      is_app_limited:1, /* cwnd not fully used? */
816d7722e85SSoheil Hassas Yeganeh 			      unused:1;
817b9f64820SYuchung Cheng 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
818b9f64820SYuchung Cheng 			__u32 delivered;
819b9f64820SYuchung Cheng 			/* start of send pipeline phase */
8209a568de4SEric Dumazet 			u64 first_tx_mstamp;
821b9f64820SYuchung Cheng 			/* when we reached the "delivered" count */
8229a568de4SEric Dumazet 			u64 delivered_mstamp;
823b75803d5SLawrence Brakmo 		} tx;   /* only used for outgoing skbs */
824b75803d5SLawrence Brakmo 		union {
825971f10ecSEric Dumazet 			struct inet_skb_parm	h4;
826971f10ecSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
827971f10ecSEric Dumazet 			struct inet6_skb_parm	h6;
828971f10ecSEric Dumazet #endif
829b75803d5SLawrence Brakmo 		} header;	/* For incoming skbs */
830b75803d5SLawrence Brakmo 	};
8311da177e4SLinus Torvalds };
8321da177e4SLinus Torvalds 
8331da177e4SLinus Torvalds #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
8341da177e4SLinus Torvalds 
835870c3151SEric Dumazet 
836815afe17SEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
837870c3151SEric Dumazet /* This is the variant of inet6_iif() that must be used by TCP,
838870c3151SEric Dumazet  * as TCP moves IP6CB into a different location in skb->cb[]
839870c3151SEric Dumazet  */
840870c3151SEric Dumazet static inline int tcp_v6_iif(const struct sk_buff *skb)
841870c3151SEric Dumazet {
842a04a480dSDavid Ahern 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
84374b20582SDavid Ahern 
84474b20582SDavid Ahern 	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
845870c3151SEric Dumazet }
846815afe17SEric Dumazet #endif
847870c3151SEric Dumazet 
848a04a480dSDavid Ahern /* TCP_SKB_CB reference means this can not be used from early demux */
849a04a480dSDavid Ahern static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
850a04a480dSDavid Ahern {
851a04a480dSDavid Ahern #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
852a04a480dSDavid Ahern 	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
853da96786eSDavid Ahern 	    skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
854a04a480dSDavid Ahern 		return true;
855a04a480dSDavid Ahern #endif
856a04a480dSDavid Ahern 	return false;
857a04a480dSDavid Ahern }
858a04a480dSDavid Ahern 
8591da177e4SLinus Torvalds /* Due to TSO, an SKB can be composed of multiple actual
8601da177e4SLinus Torvalds  * packets.  To keep these tracked properly, we use this.
8611da177e4SLinus Torvalds  */
8621da177e4SLinus Torvalds static inline int tcp_skb_pcount(const struct sk_buff *skb)
8631da177e4SLinus Torvalds {
864cd7d8498SEric Dumazet 	return TCP_SKB_CB(skb)->tcp_gso_segs;
865cd7d8498SEric Dumazet }
866cd7d8498SEric Dumazet 
867cd7d8498SEric Dumazet static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
868cd7d8498SEric Dumazet {
869cd7d8498SEric Dumazet 	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
870cd7d8498SEric Dumazet }
871cd7d8498SEric Dumazet 
872cd7d8498SEric Dumazet static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
873cd7d8498SEric Dumazet {
874cd7d8498SEric Dumazet 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
8751da177e4SLinus Torvalds }
8761da177e4SLinus Torvalds 
877f69ad292SEric Dumazet /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
8781da177e4SLinus Torvalds static inline int tcp_skb_mss(const struct sk_buff *skb)
8791da177e4SLinus Torvalds {
880f69ad292SEric Dumazet 	return TCP_SKB_CB(skb)->tcp_gso_size;
8811da177e4SLinus Torvalds }
8821da177e4SLinus Torvalds 
883c134ecb8SMartin KaFai Lau static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
884c134ecb8SMartin KaFai Lau {
885c134ecb8SMartin KaFai Lau 	return likely(!TCP_SKB_CB(skb)->eor);
886c134ecb8SMartin KaFai Lau }
887c134ecb8SMartin KaFai Lau 
888317a76f9SStephen Hemminger /* Events passed to congestion control interface */
889317a76f9SStephen Hemminger enum tcp_ca_event {
890317a76f9SStephen Hemminger 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
891317a76f9SStephen Hemminger 	CA_EVENT_CWND_RESTART,	/* congestion window restart */
892317a76f9SStephen Hemminger 	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
893317a76f9SStephen Hemminger 	CA_EVENT_LOSS,		/* loss timeout */
8949890092eSFlorian Westphal 	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
8959890092eSFlorian Westphal 	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
8969890092eSFlorian Westphal 	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
8979890092eSFlorian Westphal 	CA_EVENT_NON_DELAYED_ACK,
8987354c8c3SFlorian Westphal };
8997354c8c3SFlorian Westphal 
9009890092eSFlorian Westphal /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
9017354c8c3SFlorian Westphal enum tcp_ca_ack_event_flags {
9029890092eSFlorian Westphal 	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
9039890092eSFlorian Westphal 	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
9049890092eSFlorian Westphal 	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
905317a76f9SStephen Hemminger };
906317a76f9SStephen Hemminger 
907317a76f9SStephen Hemminger /*
908317a76f9SStephen Hemminger  * Interface for adding new TCP congestion control handlers
909317a76f9SStephen Hemminger  */
910317a76f9SStephen Hemminger #define TCP_CA_NAME_MAX	16
9113ff825b2SStephen Hemminger #define TCP_CA_MAX	128
9123ff825b2SStephen Hemminger #define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
9133ff825b2SStephen Hemminger 
914c5c6a8abSDaniel Borkmann #define TCP_CA_UNSPEC	0
915c5c6a8abSDaniel Borkmann 
91630e502a3SDaniel Borkmann /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
917164891aaSStephen Hemminger #define TCP_CONG_NON_RESTRICTED 0x1
91830e502a3SDaniel Borkmann /* Requires ECN/ECT set on all packets */
91930e502a3SDaniel Borkmann #define TCP_CONG_NEEDS_ECN	0x2
920164891aaSStephen Hemminger 
92164f40ff5SEric Dumazet union tcp_cc_info;
92264f40ff5SEric Dumazet 
923756ee172SLawrence Brakmo struct ack_sample {
924756ee172SLawrence Brakmo 	u32 pkts_acked;
925756ee172SLawrence Brakmo 	s32 rtt_us;
9266f094b9eSLawrence Brakmo 	u32 in_flight;
927756ee172SLawrence Brakmo };
928756ee172SLawrence Brakmo 
929b9f64820SYuchung Cheng /* A rate sample measures the number of (original/retransmitted) data
930b9f64820SYuchung Cheng  * packets delivered "delivered" over an interval of time "interval_us".
931b9f64820SYuchung Cheng  * The tcp_rate.c code fills in the rate sample, and congestion
932b9f64820SYuchung Cheng  * control modules that define a cong_control function to run at the end
933b9f64820SYuchung Cheng  * of ACK processing can optionally choose to consult this sample when
934b9f64820SYuchung Cheng  * setting cwnd and pacing rate.
935b9f64820SYuchung Cheng  * A sample is invalid if "delivered" or "interval_us" is negative.
936b9f64820SYuchung Cheng  */
937b9f64820SYuchung Cheng struct rate_sample {
9389a568de4SEric Dumazet 	u64  prior_mstamp; /* starting timestamp for interval */
939b9f64820SYuchung Cheng 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
940b9f64820SYuchung Cheng 	s32  delivered;		/* number of packets delivered over interval */
941b9f64820SYuchung Cheng 	long interval_us;	/* time for tp->delivered to incr "delivered" */
942b9f64820SYuchung Cheng 	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
943b9f64820SYuchung Cheng 	int  losses;		/* number of packets marked lost upon ACK */
944b9f64820SYuchung Cheng 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
945b9f64820SYuchung Cheng 	u32  prior_in_flight;	/* in flight before this ACK */
946d7722e85SSoheil Hassas Yeganeh 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
947b9f64820SYuchung Cheng 	bool is_retrans;	/* is sample from retransmission? */
948b9f64820SYuchung Cheng };
949b9f64820SYuchung Cheng 
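/* Illustrative sketch (not part of the original header): a cong_control
 * implementation could turn a valid sample into a delivery rate estimate.
 * The helper name is hypothetical:
 */
static inline u64 tcp_example_delivery_rate(const struct tcp_sock *tp,
					    const struct rate_sample *rs)
{
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return 0;	/* invalid sample, see the comment above */

	/* packets * MSS gives bytes; rescale from "per usec" to "per sec" */
	return div64_u64((u64)rs->delivered * tp->mss_cache * USEC_PER_SEC,
			 rs->interval_us);
}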
950317a76f9SStephen Hemminger struct tcp_congestion_ops {
951317a76f9SStephen Hemminger 	struct list_head	list;
952c5c6a8abSDaniel Borkmann 	u32 key;
953c5c6a8abSDaniel Borkmann 	u32 flags;
954317a76f9SStephen Hemminger 
955317a76f9SStephen Hemminger 	/* initialize private data (optional) */
9566687e988SArnaldo Carvalho de Melo 	void (*init)(struct sock *sk);
957317a76f9SStephen Hemminger 	/* cleanup private data  (optional) */
9586687e988SArnaldo Carvalho de Melo 	void (*release)(struct sock *sk);
959317a76f9SStephen Hemminger 
960317a76f9SStephen Hemminger 	/* return slow start threshold (required) */
9616687e988SArnaldo Carvalho de Melo 	u32 (*ssthresh)(struct sock *sk);
962317a76f9SStephen Hemminger 	/* do new cwnd calculation (required) */
96324901551SEric Dumazet 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
964317a76f9SStephen Hemminger 	/* call before changing ca_state (optional) */
9656687e988SArnaldo Carvalho de Melo 	void (*set_state)(struct sock *sk, u8 new_state);
966317a76f9SStephen Hemminger 	/* call when cwnd event occurs (optional) */
9676687e988SArnaldo Carvalho de Melo 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
9687354c8c3SFlorian Westphal 	/* call when ack arrives (optional) */
9697354c8c3SFlorian Westphal 	void (*in_ack_event)(struct sock *sk, u32 flags);
9701e0ce2a1SAnmol Sarma 	/* new value of cwnd after loss (required) */
9716687e988SArnaldo Carvalho de Melo 	u32  (*undo_cwnd)(struct sock *sk);
972317a76f9SStephen Hemminger 	/* hook for packet ack accounting (optional) */
973756ee172SLawrence Brakmo 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
974ed6e7268SNeal Cardwell 	/* suggest number of segments for each skb to transmit (optional) */
975ed6e7268SNeal Cardwell 	u32 (*tso_segs_goal)(struct sock *sk);
97677bfc174SYuchung Cheng 	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
97777bfc174SYuchung Cheng 	u32 (*sndbuf_expand)(struct sock *sk);
978c0402760SYuchung Cheng 	/* call when packets are delivered to update cwnd and pacing rate,
979c0402760SYuchung Cheng 	 * after all the ca_state processing. (optional)
980c0402760SYuchung Cheng 	 */
981c0402760SYuchung Cheng 	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
98273c1f4a0SArnaldo Carvalho de Melo 	/* get info for inet_diag (optional) */
98364f40ff5SEric Dumazet 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
98464f40ff5SEric Dumazet 			   union tcp_cc_info *info);
985317a76f9SStephen Hemminger 
986317a76f9SStephen Hemminger 	char 		name[TCP_CA_NAME_MAX];
987317a76f9SStephen Hemminger 	struct module 	*owner;
988317a76f9SStephen Hemminger };
989317a76f9SStephen Hemminger 
9905c9f3023SJoe Perches int tcp_register_congestion_control(struct tcp_congestion_ops *type);
9915c9f3023SJoe Perches void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
992317a76f9SStephen Hemminger 
99355d8694fSFlorian Westphal void tcp_assign_congestion_control(struct sock *sk);
9945c9f3023SJoe Perches void tcp_init_congestion_control(struct sock *sk);
9955c9f3023SJoe Perches void tcp_cleanup_congestion_control(struct sock *sk);
9965c9f3023SJoe Perches int tcp_set_default_congestion_control(const char *name);
9975c9f3023SJoe Perches void tcp_get_default_congestion_control(char *name);
9985c9f3023SJoe Perches void tcp_get_available_congestion_control(char *buf, size_t len);
9995c9f3023SJoe Perches void tcp_get_allowed_congestion_control(char *buf, size_t len);
10005c9f3023SJoe Perches int tcp_set_allowed_congestion_control(char *allowed);
10015c9f3023SJoe Perches int tcp_set_congestion_control(struct sock *sk, const char *name);
1002e73ebb08SNeal Cardwell u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1003e73ebb08SNeal Cardwell void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1004317a76f9SStephen Hemminger 
10055c9f3023SJoe Perches u32 tcp_reno_ssthresh(struct sock *sk);
1006e9799183SFlorian Westphal u32 tcp_reno_undo_cwnd(struct sock *sk);
100724901551SEric Dumazet void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1008a8acfbacSDavid S. Miller extern struct tcp_congestion_ops tcp_reno;
1009317a76f9SStephen Hemminger 
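/*
 * Illustrative sketch (example only, guarded out; the "example_*" names are
 * hypothetical): the minimal wiring a congestion control module needs in its
 * own .c file - fill in the required tcp_congestion_ops hooks and register
 * the ops at module init, unregistering them on exit.
 */
#if 0
static struct tcp_congestion_ops example_cong_ops __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* required */
	.owner		= THIS_MODULE,
	.name		= "example",
};

static int __init example_cong_init(void)
{
	return tcp_register_congestion_control(&example_cong_ops);
}

static void __exit example_cong_exit(void)
{
	tcp_unregister_congestion_control(&example_cong_ops);
}

module_init(example_cong_init);
module_exit(example_cong_exit);
#endif
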
1010c5c6a8abSDaniel Borkmann struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1011c3a8d947SDaniel Borkmann u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
1012ea697639SDaniel Borkmann #ifdef CONFIG_INET
1013c5c6a8abSDaniel Borkmann char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1014ea697639SDaniel Borkmann #else
1015ea697639SDaniel Borkmann static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1016ea697639SDaniel Borkmann {
1017ea697639SDaniel Borkmann 	return NULL;
1018ea697639SDaniel Borkmann }
1019ea697639SDaniel Borkmann #endif
1020c5c6a8abSDaniel Borkmann 
102130e502a3SDaniel Borkmann static inline bool tcp_ca_needs_ecn(const struct sock *sk)
102230e502a3SDaniel Borkmann {
102330e502a3SDaniel Borkmann 	const struct inet_connection_sock *icsk = inet_csk(sk);
102430e502a3SDaniel Borkmann 
102530e502a3SDaniel Borkmann 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
102630e502a3SDaniel Borkmann }
102730e502a3SDaniel Borkmann 
10286687e988SArnaldo Carvalho de Melo static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
1029317a76f9SStephen Hemminger {
10306687e988SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
10316687e988SArnaldo Carvalho de Melo 
10326687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_ops->set_state)
10336687e988SArnaldo Carvalho de Melo 		icsk->icsk_ca_ops->set_state(sk, ca_state);
10346687e988SArnaldo Carvalho de Melo 	icsk->icsk_ca_state = ca_state;
1035317a76f9SStephen Hemminger }
1036317a76f9SStephen Hemminger 
10376687e988SArnaldo Carvalho de Melo static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1038317a76f9SStephen Hemminger {
10396687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
10406687e988SArnaldo Carvalho de Melo 
10416687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_ops->cwnd_event)
10426687e988SArnaldo Carvalho de Melo 		icsk->icsk_ca_ops->cwnd_event(sk, event);
1043317a76f9SStephen Hemminger }
1044317a76f9SStephen Hemminger 
1045b9f64820SYuchung Cheng /* From tcp_rate.c */
1046b9f64820SYuchung Cheng void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1047b9f64820SYuchung Cheng void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1048b9f64820SYuchung Cheng 			    struct rate_sample *rs);
1049b9f64820SYuchung Cheng void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
105088d5c650SEric Dumazet 		  struct rate_sample *rs);
1051d7722e85SSoheil Hassas Yeganeh void tcp_rate_check_app_limited(struct sock *sk);
1052b9f64820SYuchung Cheng 
1053e60402d0SIlpo Järvinen /* These functions determine how the current flow behaves with respect to SACK
1054e60402d0SIlpo Järvinen  * handling. SACK is negotiated with the peer, and therefore it can vary
1055e60402d0SIlpo Järvinen  * between different flows.
1056e60402d0SIlpo Järvinen  *
1057e60402d0SIlpo Järvinen  * tcp_is_sack - SACK enabled
1058e60402d0SIlpo Järvinen  * tcp_is_reno - No SACK
1059e60402d0SIlpo Järvinen  * tcp_is_fack - FACK enabled, implies SACK enabled
1060e60402d0SIlpo Järvinen  */
1061e60402d0SIlpo Järvinen static inline int tcp_is_sack(const struct tcp_sock *tp)
1062e60402d0SIlpo Järvinen {
1063e60402d0SIlpo Järvinen 	return tp->rx_opt.sack_ok;
1064e60402d0SIlpo Järvinen }
1065e60402d0SIlpo Järvinen 
1066a2a385d6SEric Dumazet static inline bool tcp_is_reno(const struct tcp_sock *tp)
1067e60402d0SIlpo Järvinen {
1068e60402d0SIlpo Järvinen 	return !tcp_is_sack(tp);
1069e60402d0SIlpo Järvinen }
1070e60402d0SIlpo Järvinen 
1071a2a385d6SEric Dumazet static inline bool tcp_is_fack(const struct tcp_sock *tp)
1072e60402d0SIlpo Järvinen {
1073ab56222aSVijay Subramanian 	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
1074e60402d0SIlpo Järvinen }
1075e60402d0SIlpo Järvinen 
1076e60402d0SIlpo Järvinen static inline void tcp_enable_fack(struct tcp_sock *tp)
1077e60402d0SIlpo Järvinen {
1078ab56222aSVijay Subramanian 	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
1079e60402d0SIlpo Järvinen }
1080e60402d0SIlpo Järvinen 
108183ae4088SIlpo Järvinen static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
108283ae4088SIlpo Järvinen {
108383ae4088SIlpo Järvinen 	return tp->sacked_out + tp->lost_out;
108483ae4088SIlpo Järvinen }
108583ae4088SIlpo Järvinen 
10861da177e4SLinus Torvalds /* This determines how many packets are "in the network" to the best
10871da177e4SLinus Torvalds  * of our knowledge.  In many cases it is conservative, but where
10881da177e4SLinus Torvalds  * detailed information is available from the receiver (via SACK
10891da177e4SLinus Torvalds  * blocks etc.) we can make more aggressive calculations.
10901da177e4SLinus Torvalds  *
10911da177e4SLinus Torvalds  * Use this for decisions involving congestion control, use just
10921da177e4SLinus Torvalds  * tp->packets_out to determine if the send queue is empty or not.
10931da177e4SLinus Torvalds  *
10941da177e4SLinus Torvalds  * Read this equation as:
10951da177e4SLinus Torvalds  *
10961da177e4SLinus Torvalds  *	"Packets sent once on transmission queue" MINUS
10971da177e4SLinus Torvalds  *	"Packets left network, but not honestly ACKed yet" PLUS
10981da177e4SLinus Torvalds  *	"Packets fast retransmitted"
10991da177e4SLinus Torvalds  */
110040efc6faSStephen Hemminger static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
11011da177e4SLinus Torvalds {
110283ae4088SIlpo Järvinen 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
11031da177e4SLinus Torvalds }
11041da177e4SLinus Torvalds 
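/*
 * Worked example: with packets_out = 10, sacked_out = 3, lost_out = 2 and
 * retrans_out = 2, tcp_packets_in_flight() returns 10 - (3 + 2) + 2 = 7.
 */
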
11050b6a05c1SIlpo Järvinen #define TCP_INFINITE_SSTHRESH	0x7fffffff
11060b6a05c1SIlpo Järvinen 
1107071d5080SYuchung Cheng static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1108071d5080SYuchung Cheng {
110976174004SYuchung Cheng 	return tp->snd_cwnd < tp->snd_ssthresh;
1110071d5080SYuchung Cheng }
1111071d5080SYuchung Cheng 
11120b6a05c1SIlpo Järvinen static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
11130b6a05c1SIlpo Järvinen {
11140b6a05c1SIlpo Järvinen 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
11150b6a05c1SIlpo Järvinen }
11160b6a05c1SIlpo Järvinen 
1117684bad11SYuchung Cheng static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1118684bad11SYuchung Cheng {
1119684bad11SYuchung Cheng 	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1120684bad11SYuchung Cheng 	       (1 << inet_csk(sk)->icsk_ca_state);
1121684bad11SYuchung Cheng }
1122684bad11SYuchung Cheng 
11231da177e4SLinus Torvalds /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1124684bad11SYuchung Cheng  * The exception is cwnd reduction phase, when cwnd is decreasing towards
11251da177e4SLinus Torvalds  * ssthresh.
11261da177e4SLinus Torvalds  */
11276687e988SArnaldo Carvalho de Melo static inline __u32 tcp_current_ssthresh(const struct sock *sk)
11281da177e4SLinus Torvalds {
11296687e988SArnaldo Carvalho de Melo 	const struct tcp_sock *tp = tcp_sk(sk);
1130cf533ea5SEric Dumazet 
1131684bad11SYuchung Cheng 	if (tcp_in_cwnd_reduction(sk))
11321da177e4SLinus Torvalds 		return tp->snd_ssthresh;
11331da177e4SLinus Torvalds 	else
11341da177e4SLinus Torvalds 		return max(tp->snd_ssthresh,
11351da177e4SLinus Torvalds 			   ((tp->snd_cwnd >> 1) +
11361da177e4SLinus Torvalds 			    (tp->snd_cwnd >> 2)));
11371da177e4SLinus Torvalds }
11381da177e4SLinus Torvalds 
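/*
 * Note: (snd_cwnd >> 1) + (snd_cwnd >> 2) is three quarters of snd_cwnd, so
 * outside of cwnd reduction the returned ssthresh is at least 3/4 of the
 * current cwnd (e.g. snd_cwnd = 40 gives 20 + 10 = 30).
 */
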
1139b9c4595bSIlpo Järvinen /* Use a define here intentionally so the WARN_ON location is shown at the caller */
1140b9c4595bSIlpo Järvinen #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
11411da177e4SLinus Torvalds 
11425ee2c941SChristoph Paasch void tcp_enter_cwr(struct sock *sk);
11435c9f3023SJoe Perches __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
11441da177e4SLinus Torvalds 
11456b5a5c0dSNeal Cardwell /* The maximum number of MSS of available cwnd for which TSO defers
11466b5a5c0dSNeal Cardwell  * sending if not using sysctl_tcp_tso_win_divisor.
11476b5a5c0dSNeal Cardwell  */
11486b5a5c0dSNeal Cardwell static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
11496b5a5c0dSNeal Cardwell {
11506b5a5c0dSNeal Cardwell 	return 3;
11516b5a5c0dSNeal Cardwell }
11526b5a5c0dSNeal Cardwell 
115390840defSIlpo Järvinen /* Returns end sequence number of the receiver's advertised window */
115490840defSIlpo Järvinen static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
115590840defSIlpo Järvinen {
115690840defSIlpo Järvinen 	return tp->snd_una + tp->snd_wnd;
115790840defSIlpo Järvinen }
1158e114a710SEric Dumazet 
1159e114a710SEric Dumazet /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1160e114a710SEric Dumazet  * flexible approach. The RFC suggests cwnd should not be raised unless
1161ca8a2263SNeal Cardwell  * it was fully used previously. And that's exactly what we do in
1162ca8a2263SNeal Cardwell  * congestion avoidance mode. But in slow start we allow cwnd to grow
1163ca8a2263SNeal Cardwell  * as long as the application has used half the cwnd.
1164e114a710SEric Dumazet  * Example :
1165e114a710SEric Dumazet  *    cwnd is 10 (IW10), but application sends 9 frames.
1166e114a710SEric Dumazet  *    We allow cwnd to reach 18 when all frames are ACKed.
1167e114a710SEric Dumazet  * This check is safe because it's as aggressive as slow start which already
1168e114a710SEric Dumazet  * risks 100% overshoot. The advantage is that we discourage applications from
1169e114a710SEric Dumazet  * sending filler packets or data merely to inflate apparent cwnd usage, and we
1170e114a710SEric Dumazet  * allow an application-limited process to probe bandwidth more aggressively.
1171e114a710SEric Dumazet  */
117224901551SEric Dumazet static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1173e114a710SEric Dumazet {
1174e114a710SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1175e114a710SEric Dumazet 
1176ca8a2263SNeal Cardwell 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1177071d5080SYuchung Cheng 	if (tcp_in_slow_start(tp))
1178ca8a2263SNeal Cardwell 		return tp->snd_cwnd < 2 * tp->max_packets_out;
1179ca8a2263SNeal Cardwell 
1180ca8a2263SNeal Cardwell 	return tp->is_cwnd_limited;
1181e114a710SEric Dumazet }
1182f4805edeSStephen Hemminger 
118321c8fe99SEric Dumazet /* Something is really bad, we could not queue an additional packet,
118421c8fe99SEric Dumazet  * because qdisc is full or receiver sent a 0 window.
118521c8fe99SEric Dumazet  * We do not want to add fuel to the fire, or abort too early,
118621c8fe99SEric Dumazet  * so make sure the timer we arm now is at least 200ms in the future,
118721c8fe99SEric Dumazet  * regardless of current icsk_rto value (as it could be ~2ms)
118821c8fe99SEric Dumazet  */
118921c8fe99SEric Dumazet static inline unsigned long tcp_probe0_base(const struct sock *sk)
119021c8fe99SEric Dumazet {
119121c8fe99SEric Dumazet 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
119221c8fe99SEric Dumazet }
119321c8fe99SEric Dumazet 
119421c8fe99SEric Dumazet /* Variant of inet_csk_rto_backoff() used for zero window probes */
119521c8fe99SEric Dumazet static inline unsigned long tcp_probe0_when(const struct sock *sk,
119621c8fe99SEric Dumazet 					    unsigned long max_when)
119721c8fe99SEric Dumazet {
119821c8fe99SEric Dumazet 	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
119921c8fe99SEric Dumazet 
120021c8fe99SEric Dumazet 	return (unsigned long)min_t(u64, when, max_when);
120121c8fe99SEric Dumazet }
120221c8fe99SEric Dumazet 
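/*
 * Worked example: with icsk_rto = 50ms the probe base is clamped up to
 * TCP_RTO_MIN (200ms); after an icsk_backoff of 3 the next zero-window probe
 * is scheduled 200ms << 3 = 1.6s out, unless that exceeds max_when.
 */
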
12039e412ba7SIlpo Järvinen static inline void tcp_check_probe_timer(struct sock *sk)
12041da177e4SLinus Torvalds {
120521c8fe99SEric Dumazet 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
12063f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
120721c8fe99SEric Dumazet 					  tcp_probe0_base(sk), TCP_RTO_MAX);
12081da177e4SLinus Torvalds }
12091da177e4SLinus Torvalds 
1210ee7537b6SHantzis Fotis static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
12111da177e4SLinus Torvalds {
12121da177e4SLinus Torvalds 	tp->snd_wl1 = seq;
12131da177e4SLinus Torvalds }
12141da177e4SLinus Torvalds 
1215ee7537b6SHantzis Fotis static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
12161da177e4SLinus Torvalds {
12171da177e4SLinus Torvalds 	tp->snd_wl1 = seq;
12181da177e4SLinus Torvalds }
12191da177e4SLinus Torvalds 
12201da177e4SLinus Torvalds /*
12211da177e4SLinus Torvalds  * Calculate (or check) the TCP checksum
12221da177e4SLinus Torvalds  */
1223ba7808eaSFrederik Deweerdt static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1224ba7808eaSFrederik Deweerdt 				   __be32 daddr, __wsum base)
12251da177e4SLinus Torvalds {
12261da177e4SLinus Torvalds 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
12271da177e4SLinus Torvalds }
12281da177e4SLinus Torvalds 
1229b51655b9SAl Viro static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
12301da177e4SLinus Torvalds {
1231fb286bb2SHerbert Xu 	return __skb_checksum_complete(skb);
12321da177e4SLinus Torvalds }
12331da177e4SLinus Torvalds 
1234a2a385d6SEric Dumazet static inline bool tcp_checksum_complete(struct sk_buff *skb)
12351da177e4SLinus Torvalds {
123660476372SHerbert Xu 	return !skb_csum_unnecessary(skb) &&
12371da177e4SLinus Torvalds 		__tcp_checksum_complete(skb);
12381da177e4SLinus Torvalds }
12391da177e4SLinus Torvalds 
12401da177e4SLinus Torvalds /* Prequeue for VJ style copy to user, combined with checksumming. */
12411da177e4SLinus Torvalds 
124240efc6faSStephen Hemminger static inline void tcp_prequeue_init(struct tcp_sock *tp)
12431da177e4SLinus Torvalds {
12441da177e4SLinus Torvalds 	tp->ucopy.task = NULL;
12451da177e4SLinus Torvalds 	tp->ucopy.len = 0;
12461da177e4SLinus Torvalds 	tp->ucopy.memory = 0;
12471da177e4SLinus Torvalds 	skb_queue_head_init(&tp->ucopy.prequeue);
12481da177e4SLinus Torvalds }
12491da177e4SLinus Torvalds 
12505c9f3023SJoe Perches bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1251c9c33212SEric Dumazet bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1252ac6e7800SEric Dumazet int tcp_filter(struct sock *sk, struct sk_buff *skb);
12531da177e4SLinus Torvalds 
12541da177e4SLinus Torvalds #undef STATE_TRACE
12551da177e4SLinus Torvalds 
12561da177e4SLinus Torvalds #ifdef STATE_TRACE
12571da177e4SLinus Torvalds static const char *statename[]={
12581da177e4SLinus Torvalds 	"Unused","Established","Syn Sent","Syn Recv",
12591da177e4SLinus Torvalds 	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
12601da177e4SLinus Torvalds 	"Close Wait","Last ACK","Listen","Closing"
12611da177e4SLinus Torvalds };
12621da177e4SLinus Torvalds #endif
12635c9f3023SJoe Perches void tcp_set_state(struct sock *sk, int state);
12641da177e4SLinus Torvalds 
12655c9f3023SJoe Perches void tcp_done(struct sock *sk);
12661da177e4SLinus Torvalds 
1267c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err);
1268c1e64e29SLorenzo Colitti 
126940efc6faSStephen Hemminger static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
12701da177e4SLinus Torvalds {
12711da177e4SLinus Torvalds 	rx_opt->dsack = 0;
12721da177e4SLinus Torvalds 	rx_opt->num_sacks = 0;
12731da177e4SLinus Torvalds }
12741da177e4SLinus Torvalds 
12755c9f3023SJoe Perches u32 tcp_default_init_rwnd(u32 mss);
12766f021c62SEric Dumazet void tcp_cwnd_restart(struct sock *sk, s32 delta);
12776f021c62SEric Dumazet 
12786f021c62SEric Dumazet static inline void tcp_slow_start_after_idle_check(struct sock *sk)
12796f021c62SEric Dumazet {
12801b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
12816f021c62SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
12826f021c62SEric Dumazet 	s32 delta;
12836f021c62SEric Dumazet 
12841b1fc3fdSWei Wang 	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
12851b1fc3fdSWei Wang 	    ca_ops->cong_control)
12866f021c62SEric Dumazet 		return;
1287d635fbe2SEric Dumazet 	delta = tcp_jiffies32 - tp->lsndtime;
12886f021c62SEric Dumazet 	if (delta > inet_csk(sk)->icsk_rto)
12896f021c62SEric Dumazet 		tcp_cwnd_restart(sk, delta);
12906f021c62SEric Dumazet }
129185f16525SYuchung Cheng 
12921da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer. */
12935c9f3023SJoe Perches void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
12945c9f3023SJoe Perches 			       __u32 *window_clamp, int wscale_ok,
12955c9f3023SJoe Perches 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
12961da177e4SLinus Torvalds 
12971da177e4SLinus Torvalds static inline int tcp_win_from_space(int space)
12981da177e4SLinus Torvalds {
1299c4836742SGao Feng 	int tcp_adv_win_scale = sysctl_tcp_adv_win_scale;
1300c4836742SGao Feng 
1301c4836742SGao Feng 	return tcp_adv_win_scale <= 0 ?
1302c4836742SGao Feng 		(space>>(-tcp_adv_win_scale)) :
1303c4836742SGao Feng 		space - (space>>tcp_adv_win_scale);
13041da177e4SLinus Torvalds }
13051da177e4SLinus Torvalds 
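/*
 * Worked example: tcp_adv_win_scale = 1 advertises half the space
 * (space - space/2), 2 advertises three quarters (space - space/4), and a
 * negative value such as -2 advertises only space >> 2 = space/4.
 */
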
13061da177e4SLinus Torvalds /* Note: caller must be prepared to deal with negative returns */
13071da177e4SLinus Torvalds static inline int tcp_space(const struct sock *sk)
13081da177e4SLinus Torvalds {
13091da177e4SLinus Torvalds 	return tcp_win_from_space(sk->sk_rcvbuf -
13101da177e4SLinus Torvalds 				  atomic_read(&sk->sk_rmem_alloc));
13111da177e4SLinus Torvalds }
13121da177e4SLinus Torvalds 
13131da177e4SLinus Torvalds static inline int tcp_full_space(const struct sock *sk)
13141da177e4SLinus Torvalds {
13151da177e4SLinus Torvalds 	return tcp_win_from_space(sk->sk_rcvbuf);
13161da177e4SLinus Torvalds }
13171da177e4SLinus Torvalds 
1318843f4a55SYuchung Cheng extern void tcp_openreq_init_rwin(struct request_sock *req,
1319b1964b5fSEric Dumazet 				  const struct sock *sk_listener,
1320b1964b5fSEric Dumazet 				  const struct dst_entry *dst);
1321843f4a55SYuchung Cheng 
13225c9f3023SJoe Perches void tcp_enter_memory_pressure(struct sock *sk);
1323*06044751SEric Dumazet void tcp_leave_memory_pressure(struct sock *sk);
13241da177e4SLinus Torvalds 
13251da177e4SLinus Torvalds static inline int keepalive_intvl_when(const struct tcp_sock *tp)
13261da177e4SLinus Torvalds {
1327b840d15dSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
1328b840d15dSNikolay Borisov 
1329b840d15dSNikolay Borisov 	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
13301da177e4SLinus Torvalds }
13311da177e4SLinus Torvalds 
13321da177e4SLinus Torvalds static inline int keepalive_time_when(const struct tcp_sock *tp)
13331da177e4SLinus Torvalds {
133413b287e8SNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
133513b287e8SNikolay Borisov 
133613b287e8SNikolay Borisov 	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
13371da177e4SLinus Torvalds }
13381da177e4SLinus Torvalds 
1339df19a626SEric Dumazet static inline int keepalive_probes(const struct tcp_sock *tp)
1340df19a626SEric Dumazet {
13419bd6861bSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
13429bd6861bSNikolay Borisov 
13439bd6861bSNikolay Borisov 	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
1344df19a626SEric Dumazet }
1345df19a626SEric Dumazet 
13466c37e5deSFlavio Leitner static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
13476c37e5deSFlavio Leitner {
13486c37e5deSFlavio Leitner 	const struct inet_connection_sock *icsk = &tp->inet_conn;
13496c37e5deSFlavio Leitner 
135070eabf0eSEric Dumazet 	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
135170eabf0eSEric Dumazet 			  tcp_jiffies32 - tp->rcv_tstamp);
13526c37e5deSFlavio Leitner }
13536c37e5deSFlavio Leitner 
1354463c84b9SArnaldo Carvalho de Melo static inline int tcp_fin_time(const struct sock *sk)
13551da177e4SLinus Torvalds {
13561e579caaSNikolay Borisov 	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
1357463c84b9SArnaldo Carvalho de Melo 	const int rto = inet_csk(sk)->icsk_rto;
13581da177e4SLinus Torvalds 
1359463c84b9SArnaldo Carvalho de Melo 	if (fin_timeout < (rto << 2) - (rto >> 1))
1360463c84b9SArnaldo Carvalho de Melo 		fin_timeout = (rto << 2) - (rto >> 1);
13611da177e4SLinus Torvalds 
13621da177e4SLinus Torvalds 	return fin_timeout;
13631da177e4SLinus Torvalds }
13641da177e4SLinus Torvalds 
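/*
 * Note: (rto << 2) - (rto >> 1) is 3.5 * RTO, so the effective FIN timeout is
 * never allowed to drop below three and a half retransmission timeouts.
 */
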
1365a2a385d6SEric Dumazet static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1366c887e6d2SIlpo Järvinen 				  int paws_win)
13671da177e4SLinus Torvalds {
1368c887e6d2SIlpo Järvinen 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1369a2a385d6SEric Dumazet 		return true;
1370c887e6d2SIlpo Järvinen 	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1371a2a385d6SEric Dumazet 		return true;
1372bc2ce894SEric Dumazet 	/*
1373bc2ce894SEric Dumazet 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1374bc2ce894SEric Dumazet 	 * while subsequent TCP messages carry valid values. Ignore a 0 value,
1375bc2ce894SEric Dumazet 	 * or else a 'negative' tsval might prevent us from accepting their packets.
1376bc2ce894SEric Dumazet 	 */
1377bc2ce894SEric Dumazet 	if (!rx_opt->ts_recent)
1378a2a385d6SEric Dumazet 		return true;
1379a2a385d6SEric Dumazet 	return false;
1380c887e6d2SIlpo Järvinen }
1381c887e6d2SIlpo Järvinen 
1382a2a385d6SEric Dumazet static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1383c887e6d2SIlpo Järvinen 				   int rst)
1384c887e6d2SIlpo Järvinen {
1385c887e6d2SIlpo Järvinen 	if (tcp_paws_check(rx_opt, 0))
1386a2a385d6SEric Dumazet 		return false;
13871da177e4SLinus Torvalds 
13881da177e4SLinus Torvalds 	/* RST segments are not recommended to carry timestamp,
13891da177e4SLinus Torvalds 	   and, if they do, it is recommended to ignore PAWS because
13901da177e4SLinus Torvalds 	   "their cleanup function should take precedence over timestamps."
13911da177e4SLinus Torvalds 	   Certainly, this is a mistake. One must understand the reasons for
13921da177e4SLinus Torvalds 	   this constraint before relaxing it: if the peer reboots, its clock may
13931da177e4SLinus Torvalds 	   go out of sync and half-open connections will not be reset.
13941da177e4SLinus Torvalds 	   Actually, the problem would not exist if all
13951da177e4SLinus Torvalds 	   implementations followed the draft about maintaining the clock
13961da177e4SLinus Torvalds 	   across reboots. Linux-2.2 DOES NOT!
13971da177e4SLinus Torvalds 
13981da177e4SLinus Torvalds 	   However, we can relax time bounds for RST segments to MSL.
13991da177e4SLinus Torvalds 	 */
14009d729f72SJames Morris 	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1401a2a385d6SEric Dumazet 		return false;
1402a2a385d6SEric Dumazet 	return true;
14031da177e4SLinus Torvalds }
14041da177e4SLinus Torvalds 
14057970ddc8SEric Dumazet bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
14067970ddc8SEric Dumazet 			  int mib_idx, u32 *last_oow_ack_time);
1407032ee423SNeal Cardwell 
1408a9c19329SPavel Emelyanov static inline void tcp_mib_init(struct net *net)
14091da177e4SLinus Torvalds {
14101da177e4SLinus Torvalds 	/* See RFC 2012 */
14116aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
14126aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
14136aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
14146aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
14151da177e4SLinus Torvalds }
14161da177e4SLinus Torvalds 
14176a438bbeSStephen Hemminger /* from STCP */
1418ef9da47cSIlpo Järvinen static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
14190800f170SDavid S. Miller {
14206a438bbeSStephen Hemminger 	tp->lost_skb_hint = NULL;
1421ef9da47cSIlpo Järvinen }
1422ef9da47cSIlpo Järvinen 
1423ef9da47cSIlpo Järvinen static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1424ef9da47cSIlpo Järvinen {
1425ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
14266a438bbeSStephen Hemminger 	tp->retransmit_skb_hint = NULL;
1427b7689205SIlpo Järvinen }
1428b7689205SIlpo Järvinen 
1429a915da9bSEric Dumazet union tcp_md5_addr {
1430a915da9bSEric Dumazet 	struct in_addr  a4;
1431a915da9bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1432a915da9bSEric Dumazet 	struct in6_addr	a6;
1433a915da9bSEric Dumazet #endif
1434a915da9bSEric Dumazet };
1435a915da9bSEric Dumazet 
1436cfb6eeb4SYOSHIFUJI Hideaki /* - key database */
1437cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_key {
1438a915da9bSEric Dumazet 	struct hlist_node	node;
1439cfb6eeb4SYOSHIFUJI Hideaki 	u8			keylen;
1440a915da9bSEric Dumazet 	u8			family; /* AF_INET or AF_INET6 */
1441a915da9bSEric Dumazet 	union tcp_md5_addr	addr;
1442a915da9bSEric Dumazet 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1443a915da9bSEric Dumazet 	struct rcu_head		rcu;
1444cfb6eeb4SYOSHIFUJI Hideaki };
1445cfb6eeb4SYOSHIFUJI Hideaki 
1446cfb6eeb4SYOSHIFUJI Hideaki /* - sock block */
1447cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_info {
1448a915da9bSEric Dumazet 	struct hlist_head	head;
1449a8afca03SEric Dumazet 	struct rcu_head		rcu;
1450cfb6eeb4SYOSHIFUJI Hideaki };
1451cfb6eeb4SYOSHIFUJI Hideaki 
1452cfb6eeb4SYOSHIFUJI Hideaki /* - pseudo header */
1453cfb6eeb4SYOSHIFUJI Hideaki struct tcp4_pseudohdr {
1454cfb6eeb4SYOSHIFUJI Hideaki 	__be32		saddr;
1455cfb6eeb4SYOSHIFUJI Hideaki 	__be32		daddr;
1456cfb6eeb4SYOSHIFUJI Hideaki 	__u8		pad;
1457cfb6eeb4SYOSHIFUJI Hideaki 	__u8		protocol;
1458cfb6eeb4SYOSHIFUJI Hideaki 	__be16		len;
1459cfb6eeb4SYOSHIFUJI Hideaki };
1460cfb6eeb4SYOSHIFUJI Hideaki 
1461cfb6eeb4SYOSHIFUJI Hideaki struct tcp6_pseudohdr {
1462cfb6eeb4SYOSHIFUJI Hideaki 	struct in6_addr	saddr;
1463cfb6eeb4SYOSHIFUJI Hideaki 	struct in6_addr daddr;
1464cfb6eeb4SYOSHIFUJI Hideaki 	__be32		len;
1465cfb6eeb4SYOSHIFUJI Hideaki 	__be32		protocol;	/* including padding */
1466cfb6eeb4SYOSHIFUJI Hideaki };
1467cfb6eeb4SYOSHIFUJI Hideaki 
1468cfb6eeb4SYOSHIFUJI Hideaki union tcp_md5sum_block {
1469cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp4_pseudohdr ip4;
1470dfd56b8bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1471cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp6_pseudohdr ip6;
1472cfb6eeb4SYOSHIFUJI Hideaki #endif
1473cfb6eeb4SYOSHIFUJI Hideaki };
1474cfb6eeb4SYOSHIFUJI Hideaki 
1475cfb6eeb4SYOSHIFUJI Hideaki /* - pool: digest algorithm, hash description and scratch buffer */
1476cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_pool {
1477cf80e0e4SHerbert Xu 	struct ahash_request	*md5_req;
147819689e38SEric Dumazet 	void			*scratch;
1479cfb6eeb4SYOSHIFUJI Hideaki };
1480cfb6eeb4SYOSHIFUJI Hideaki 
1481cfb6eeb4SYOSHIFUJI Hideaki /* - functions */
148239f8e58eSEric Dumazet int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
148339f8e58eSEric Dumazet 			const struct sock *sk, const struct sk_buff *skb);
14845c9f3023SJoe Perches int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
14855c9f3023SJoe Perches 		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
14865c9f3023SJoe Perches int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1487a915da9bSEric Dumazet 		   int family);
1488b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1489fd3a154aSEric Dumazet 					 const struct sock *addr_sk);
1490cfb6eeb4SYOSHIFUJI Hideaki 
14919501f972SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1492b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
14935c9f3023SJoe Perches 					 const union tcp_md5_addr *addr,
14945c9f3023SJoe Perches 					 int family);
1495a915da9bSEric Dumazet #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
14969501f972SYOSHIFUJI Hideaki #else
1497b83e3debSEric Dumazet static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1498a915da9bSEric Dumazet 					 const union tcp_md5_addr *addr,
1499a915da9bSEric Dumazet 					 int family)
1500a915da9bSEric Dumazet {
1501a915da9bSEric Dumazet 	return NULL;
1502a915da9bSEric Dumazet }
15039501f972SYOSHIFUJI Hideaki #define tcp_twsk_md5_key(twsk)	NULL
15049501f972SYOSHIFUJI Hideaki #endif
15059501f972SYOSHIFUJI Hideaki 
15065c9f3023SJoe Perches bool tcp_alloc_md5sig_pool(void);
1507cfb6eeb4SYOSHIFUJI Hideaki 
15085c9f3023SJoe Perches struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
150971cea17eSEric Dumazet static inline void tcp_put_md5sig_pool(void)
151071cea17eSEric Dumazet {
151171cea17eSEric Dumazet 	local_bh_enable();
151271cea17eSEric Dumazet }
151335790c04SEric Dumazet 
15145c9f3023SJoe Perches int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
151595c96174SEric Dumazet 			  unsigned int header_len);
15165c9f3023SJoe Perches int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1517cf533ea5SEric Dumazet 		     const struct tcp_md5sig_key *key);
1518cfb6eeb4SYOSHIFUJI Hideaki 
151910467163SJerry Chu /* From tcp_fastopen.c */
15205c9f3023SJoe Perches void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
15215c9f3023SJoe Perches 			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
15225c9f3023SJoe Perches 			    unsigned long *last_syn_loss);
15235c9f3023SJoe Perches void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
15242646c831SDaniel Lee 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
15252646c831SDaniel Lee 			    u16 try_exp);
1526783237e8SYuchung Cheng struct tcp_fastopen_request {
1527783237e8SYuchung Cheng 	/* Fast Open cookie. Size 0 means a cookie request */
1528783237e8SYuchung Cheng 	struct tcp_fastopen_cookie	cookie;
1529783237e8SYuchung Cheng 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1530f5ddcbbbSEric Dumazet 	size_t				size;
1531f5ddcbbbSEric Dumazet 	int				copied;	/* queued in tcp_connect() */
1532783237e8SYuchung Cheng };
1533783237e8SYuchung Cheng void tcp_free_fastopen_req(struct tcp_sock *tp);
1534783237e8SYuchung Cheng 
153510467163SJerry Chu extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
153610467163SJerry Chu int tcp_fastopen_reset_cipher(void *key, unsigned int len);
153761d2bcaeSEric Dumazet void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
15387c85af88SEric Dumazet struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
15395b7ed089SYuchung Cheng 			      struct request_sock *req,
1540843f4a55SYuchung Cheng 			      struct tcp_fastopen_cookie *foc,
1541843f4a55SYuchung Cheng 			      struct dst_entry *dst);
1542222e83d2SHannes Frederic Sowa void tcp_fastopen_init_key_once(bool publish);
1543065263f4SWei Wang bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1544065263f4SWei Wang 			     struct tcp_fastopen_cookie *cookie);
154519f6d3f3SWei Wang bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
154610467163SJerry Chu #define TCP_FASTOPEN_KEY_LENGTH 16
154710467163SJerry Chu 
154810467163SJerry Chu /* Fastopen key context */
154910467163SJerry Chu struct tcp_fastopen_context {
15507ae8639cSEric Dumazet 	struct crypto_cipher	*tfm;
155110467163SJerry Chu 	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
155210467163SJerry Chu 	struct rcu_head		rcu;
155310467163SJerry Chu };
155410467163SJerry Chu 
1555cf1ef3f0SWei Wang extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
155646c2fa39SWei Wang void tcp_fastopen_active_disable(struct sock *sk);
1557cf1ef3f0SWei Wang bool tcp_fastopen_active_should_disable(struct sock *sk);
1558cf1ef3f0SWei Wang void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1559cf1ef3f0SWei Wang void tcp_fastopen_active_timeout_reset(void);
1560cf1ef3f0SWei Wang 
156105b055e8SFrancis Yan /* Latencies incurred by various limits for a sender. They are
156205b055e8SFrancis Yan  * chronograph-like stats that are mutually exclusive.
156305b055e8SFrancis Yan  */
156405b055e8SFrancis Yan enum tcp_chrono {
156505b055e8SFrancis Yan 	TCP_CHRONO_UNSPEC,
156605b055e8SFrancis Yan 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
156705b055e8SFrancis Yan 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
156805b055e8SFrancis Yan 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
156905b055e8SFrancis Yan 	__TCP_CHRONO_MAX,
157005b055e8SFrancis Yan };
157105b055e8SFrancis Yan 
157205b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
157305b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
157405b055e8SFrancis Yan 
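/*
 * Illustrative sketch (hypothetical helper, not part of this header): callers
 * bracket the periods above with tcp_chrono_start()/tcp_chrono_stop(), e.g.
 * marking a send-buffer-limited stall and clearing it once space frees up.
 */
static inline void example_mark_sndbuf_limited(struct sock *sk, bool limited)
{
	if (limited)
		tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
	else
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
}
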
1575fe067e8aSDavid S. Miller /* write queue abstraction */
1576fe067e8aSDavid S. Miller static inline void tcp_write_queue_purge(struct sock *sk)
1577fe067e8aSDavid S. Miller {
1578fe067e8aSDavid S. Miller 	struct sk_buff *skb;
1579fe067e8aSDavid S. Miller 
15800f87230dSFrancis Yan 	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1581fe067e8aSDavid S. Miller 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
15823ab224beSHideo Aoki 		sk_wmem_free_skb(sk, skb);
15833ab224beSHideo Aoki 	sk_mem_reclaim(sk);
15848818a9d8SIlpo Järvinen 	tcp_clear_all_retrans_hints(tcp_sk(sk));
1585fe067e8aSDavid S. Miller }
1586fe067e8aSDavid S. Miller 
1587cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1588fe067e8aSDavid S. Miller {
1589cd07a8eaSDavid S. Miller 	return skb_peek(&sk->sk_write_queue);
1590fe067e8aSDavid S. Miller }
1591fe067e8aSDavid S. Miller 
1592cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1593fe067e8aSDavid S. Miller {
1594cd07a8eaSDavid S. Miller 	return skb_peek_tail(&sk->sk_write_queue);
1595fe067e8aSDavid S. Miller }
1596fe067e8aSDavid S. Miller 
1597cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1598cf533ea5SEric Dumazet 						   const struct sk_buff *skb)
1599fe067e8aSDavid S. Miller {
1600cd07a8eaSDavid S. Miller 	return skb_queue_next(&sk->sk_write_queue, skb);
1601fe067e8aSDavid S. Miller }
1602fe067e8aSDavid S. Miller 
1603cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1604cf533ea5SEric Dumazet 						   const struct sk_buff *skb)
1605832d11c5SIlpo Järvinen {
1606832d11c5SIlpo Järvinen 	return skb_queue_prev(&sk->sk_write_queue, skb);
1607832d11c5SIlpo Järvinen }
1608832d11c5SIlpo Järvinen 
1609fe067e8aSDavid S. Miller #define tcp_for_write_queue(skb, sk)					\
1610cd07a8eaSDavid S. Miller 	skb_queue_walk(&(sk)->sk_write_queue, skb)
1611fe067e8aSDavid S. Miller 
1612fe067e8aSDavid S. Miller #define tcp_for_write_queue_from(skb, sk)				\
1613cd07a8eaSDavid S. Miller 	skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1614fe067e8aSDavid S. Miller 
1615234b6860SIlpo Järvinen #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1616cd07a8eaSDavid S. Miller 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1617234b6860SIlpo Järvinen 
1618cf533ea5SEric Dumazet static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1619fe067e8aSDavid S. Miller {
1620fe067e8aSDavid S. Miller 	return sk->sk_send_head;
1621fe067e8aSDavid S. Miller }
1622fe067e8aSDavid S. Miller 
1623cd07a8eaSDavid S. Miller static inline bool tcp_skb_is_last(const struct sock *sk,
1624cd07a8eaSDavid S. Miller 				   const struct sk_buff *skb)
1625cd07a8eaSDavid S. Miller {
1626cd07a8eaSDavid S. Miller 	return skb_queue_is_last(&sk->sk_write_queue, skb);
1627cd07a8eaSDavid S. Miller }
1628cd07a8eaSDavid S. Miller 
1629cf533ea5SEric Dumazet static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1630fe067e8aSDavid S. Miller {
1631cd07a8eaSDavid S. Miller 	if (tcp_skb_is_last(sk, skb))
1632fe067e8aSDavid S. Miller 		sk->sk_send_head = NULL;
1633cd07a8eaSDavid S. Miller 	else
1634cd07a8eaSDavid S. Miller 		sk->sk_send_head = tcp_write_queue_next(sk, skb);
1635fe067e8aSDavid S. Miller }
1636fe067e8aSDavid S. Miller 
1637fe067e8aSDavid S. Miller static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1638fe067e8aSDavid S. Miller {
16390f87230dSFrancis Yan 	if (sk->sk_send_head == skb_unlinked) {
1640fe067e8aSDavid S. Miller 		sk->sk_send_head = NULL;
16410f87230dSFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
16420f87230dSFrancis Yan 	}
1643bb1fcecaSEric Dumazet 	if (tcp_sk(sk)->highest_sack == skb_unlinked)
1644bb1fcecaSEric Dumazet 		tcp_sk(sk)->highest_sack = NULL;
1645fe067e8aSDavid S. Miller }
1646fe067e8aSDavid S. Miller 
1647fe067e8aSDavid S. Miller static inline void tcp_init_send_head(struct sock *sk)
1648fe067e8aSDavid S. Miller {
1649fe067e8aSDavid S. Miller 	sk->sk_send_head = NULL;
1650fe067e8aSDavid S. Miller }
1651fe067e8aSDavid S. Miller 
1652fe067e8aSDavid S. Miller static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1653fe067e8aSDavid S. Miller {
1654fe067e8aSDavid S. Miller 	__skb_queue_tail(&sk->sk_write_queue, skb);
1655fe067e8aSDavid S. Miller }
1656fe067e8aSDavid S. Miller 
1657fe067e8aSDavid S. Miller static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1658fe067e8aSDavid S. Miller {
1659fe067e8aSDavid S. Miller 	__tcp_add_write_queue_tail(sk, skb);
1660fe067e8aSDavid S. Miller 
1661fe067e8aSDavid S. Miller 	/* Queue it, remembering where we must start sending. */
16626859d494SIlpo Järvinen 	if (sk->sk_send_head == NULL) {
1663fe067e8aSDavid S. Miller 		sk->sk_send_head = skb;
16640f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
16656859d494SIlpo Järvinen 
16666859d494SIlpo Järvinen 		if (tcp_sk(sk)->highest_sack == NULL)
16676859d494SIlpo Järvinen 			tcp_sk(sk)->highest_sack = skb;
16686859d494SIlpo Järvinen 	}
1669fe067e8aSDavid S. Miller }
1670fe067e8aSDavid S. Miller 
1671fe067e8aSDavid S. Miller static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1672fe067e8aSDavid S. Miller {
1673fe067e8aSDavid S. Miller 	__skb_queue_head(&sk->sk_write_queue, skb);
1674fe067e8aSDavid S. Miller }
1675fe067e8aSDavid S. Miller 
1676fe067e8aSDavid S. Miller /* Insert buff after skb on the write queue of sk.  */
1677fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1678fe067e8aSDavid S. Miller 						struct sk_buff *buff,
1679fe067e8aSDavid S. Miller 						struct sock *sk)
1680fe067e8aSDavid S. Miller {
16817de6c033SGerrit Renker 	__skb_queue_after(&sk->sk_write_queue, skb, buff);
1682fe067e8aSDavid S. Miller }
1683fe067e8aSDavid S. Miller 
168443f59c89SDavid S. Miller /* Insert new before skb on the write queue of sk.  */
1685fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1686fe067e8aSDavid S. Miller 						  struct sk_buff *skb,
1687fe067e8aSDavid S. Miller 						  struct sock *sk)
1688fe067e8aSDavid S. Miller {
168943f59c89SDavid S. Miller 	__skb_queue_before(&sk->sk_write_queue, skb, new);
16906e421410SIlpo Järvinen 
16916e421410SIlpo Järvinen 	if (sk->sk_send_head == skb)
16926e421410SIlpo Järvinen 		sk->sk_send_head = new;
1693fe067e8aSDavid S. Miller }
1694fe067e8aSDavid S. Miller 
1695fe067e8aSDavid S. Miller static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1696fe067e8aSDavid S. Miller {
1697fe067e8aSDavid S. Miller 	__skb_unlink(skb, &sk->sk_write_queue);
1698fe067e8aSDavid S. Miller }
1699fe067e8aSDavid S. Miller 
1700a2a385d6SEric Dumazet static inline bool tcp_write_queue_empty(struct sock *sk)
1701fe067e8aSDavid S. Miller {
1702fe067e8aSDavid S. Miller 	return skb_queue_empty(&sk->sk_write_queue);
1703fe067e8aSDavid S. Miller }
1704fe067e8aSDavid S. Miller 
170512d50c46SKrishna Kumar static inline void tcp_push_pending_frames(struct sock *sk)
170612d50c46SKrishna Kumar {
170712d50c46SKrishna Kumar 	if (tcp_send_head(sk)) {
170812d50c46SKrishna Kumar 		struct tcp_sock *tp = tcp_sk(sk);
170912d50c46SKrishna Kumar 
171012d50c46SKrishna Kumar 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
171112d50c46SKrishna Kumar 	}
171212d50c46SKrishna Kumar }
171312d50c46SKrishna Kumar 
1714ecb97192SNeal Cardwell /* Start sequence of the skb just after the highest skb with SACKed
1715ecb97192SNeal Cardwell  * bit, valid only if sacked_out > 0 or when the caller has ensured
1716ecb97192SNeal Cardwell  * validity by itself.
1717a47e5a98SIlpo Järvinen  */
1718a47e5a98SIlpo Järvinen static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1719a47e5a98SIlpo Järvinen {
1720a47e5a98SIlpo Järvinen 	if (!tp->sacked_out)
1721a47e5a98SIlpo Järvinen 		return tp->snd_una;
17226859d494SIlpo Järvinen 
17236859d494SIlpo Järvinen 	if (tp->highest_sack == NULL)
17246859d494SIlpo Järvinen 		return tp->snd_nxt;
17256859d494SIlpo Järvinen 
1726a47e5a98SIlpo Järvinen 	return TCP_SKB_CB(tp->highest_sack)->seq;
1727a47e5a98SIlpo Järvinen }
1728a47e5a98SIlpo Järvinen 
17296859d494SIlpo Järvinen static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
17306859d494SIlpo Järvinen {
17316859d494SIlpo Järvinen 	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
17326859d494SIlpo Järvinen 						tcp_write_queue_next(sk, skb);
17336859d494SIlpo Järvinen }
17346859d494SIlpo Järvinen 
17356859d494SIlpo Järvinen static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
17366859d494SIlpo Järvinen {
17376859d494SIlpo Järvinen 	return tcp_sk(sk)->highest_sack;
17386859d494SIlpo Järvinen }
17396859d494SIlpo Järvinen 
17406859d494SIlpo Järvinen static inline void tcp_highest_sack_reset(struct sock *sk)
17416859d494SIlpo Järvinen {
17426859d494SIlpo Järvinen 	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
17436859d494SIlpo Järvinen }
17446859d494SIlpo Järvinen 
17456859d494SIlpo Järvinen /* Called when old skb is about to be deleted (to be combined with new skb) */
17466859d494SIlpo Järvinen static inline void tcp_highest_sack_combine(struct sock *sk,
17476859d494SIlpo Järvinen 					    struct sk_buff *old,
17486859d494SIlpo Järvinen 					    struct sk_buff *new)
17496859d494SIlpo Järvinen {
17506859d494SIlpo Järvinen 	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
17516859d494SIlpo Järvinen 		tcp_sk(sk)->highest_sack = new;
17526859d494SIlpo Järvinen }
17536859d494SIlpo Järvinen 
1754b1f0a0e9SFlorian Westphal /* This helper checks if socket has IP_TRANSPARENT set */
1755b1f0a0e9SFlorian Westphal static inline bool inet_sk_transparent(const struct sock *sk)
1756b1f0a0e9SFlorian Westphal {
1757b1f0a0e9SFlorian Westphal 	switch (sk->sk_state) {
1758b1f0a0e9SFlorian Westphal 	case TCP_TIME_WAIT:
1759b1f0a0e9SFlorian Westphal 		return inet_twsk(sk)->tw_transparent;
1760b1f0a0e9SFlorian Westphal 	case TCP_NEW_SYN_RECV:
1761b1f0a0e9SFlorian Westphal 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1762b1f0a0e9SFlorian Westphal 	}
1763b1f0a0e9SFlorian Westphal 	return inet_sk(sk)->transparent;
1764b1f0a0e9SFlorian Westphal }
1765b1f0a0e9SFlorian Westphal 
17665aa4b32fSAndreas Petlund /* Determines whether this is a thin stream (which may suffer from
17675aa4b32fSAndreas Petlund  * increased latency). Used to trigger latency-reducing mechanisms.
17685aa4b32fSAndreas Petlund  */
1769a2a385d6SEric Dumazet static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
17705aa4b32fSAndreas Petlund {
17715aa4b32fSAndreas Petlund 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
17725aa4b32fSAndreas Petlund }
17735aa4b32fSAndreas Petlund 
17741da177e4SLinus Torvalds /* /proc */
17751da177e4SLinus Torvalds enum tcp_seq_states {
17761da177e4SLinus Torvalds 	TCP_SEQ_STATE_LISTENING,
17771da177e4SLinus Torvalds 	TCP_SEQ_STATE_ESTABLISHED,
17781da177e4SLinus Torvalds };
17791da177e4SLinus Torvalds 
178073cb88ecSArjan van de Ven int tcp_seq_open(struct inode *inode, struct file *file);
178173cb88ecSArjan van de Ven 
17821da177e4SLinus Torvalds struct tcp_seq_afinfo {
17831da177e4SLinus Torvalds 	char				*name;
17841da177e4SLinus Torvalds 	sa_family_t			family;
178573cb88ecSArjan van de Ven 	const struct file_operations	*seq_fops;
17869427c4b3SDenis V. Lunev 	struct seq_operations		seq_ops;
17871da177e4SLinus Torvalds };
17881da177e4SLinus Torvalds 
17891da177e4SLinus Torvalds struct tcp_iter_state {
1790a4146b1bSDenis V. Lunev 	struct seq_net_private	p;
17911da177e4SLinus Torvalds 	sa_family_t		family;
17921da177e4SLinus Torvalds 	enum tcp_seq_states	state;
17931da177e4SLinus Torvalds 	struct sock		*syn_wait_sk;
1794a7cb5a49SEric W. Biederman 	int			bucket, offset, sbucket, num;
1795a8b690f9STom Herbert 	loff_t			last_pos;
17961da177e4SLinus Torvalds };
17971da177e4SLinus Torvalds 
17985c9f3023SJoe Perches int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
17995c9f3023SJoe Perches void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
18001da177e4SLinus Torvalds 
180120380731SArnaldo Carvalho de Melo extern struct request_sock_ops tcp_request_sock_ops;
1802c6aefafbSGlenn Griffin extern struct request_sock_ops tcp6_request_sock_ops;
180320380731SArnaldo Carvalho de Melo 
18045c9f3023SJoe Perches void tcp_v4_destroy_sock(struct sock *sk);
180520380731SArnaldo Carvalho de Melo 
180628be6e07SEric Dumazet struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1807c8f44affSMichał Mirosław 				netdev_features_t features);
18085c9f3023SJoe Perches struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
18095c9f3023SJoe Perches int tcp_gro_complete(struct sk_buff *skb);
181028850dc7SDaniel Borkmann 
18115c9f3023SJoe Perches void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1812f4c50d99SHerbert Xu 
1813c9bee3b7SEric Dumazet static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1814c9bee3b7SEric Dumazet {
18154979f2d9SNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
18164979f2d9SNikolay Borisov 	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1817c9bee3b7SEric Dumazet }
1818c9bee3b7SEric Dumazet 
1819c9bee3b7SEric Dumazet static inline bool tcp_stream_memory_free(const struct sock *sk)
1820c9bee3b7SEric Dumazet {
1821c9bee3b7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1822c9bee3b7SEric Dumazet 	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1823c9bee3b7SEric Dumazet 
1824c9bee3b7SEric Dumazet 	return notsent_bytes < tcp_notsent_lowat(tp);
1825c9bee3b7SEric Dumazet }
1826c9bee3b7SEric Dumazet 
182720380731SArnaldo Carvalho de Melo #ifdef CONFIG_PROC_FS
18285c9f3023SJoe Perches int tcp4_proc_init(void);
18295c9f3023SJoe Perches void tcp4_proc_exit(void);
183020380731SArnaldo Carvalho de Melo #endif
183120380731SArnaldo Carvalho de Melo 
1832ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
18331fb6f159SOctavian Purdila int tcp_conn_request(struct request_sock_ops *rsk_ops,
18341fb6f159SOctavian Purdila 		     const struct tcp_request_sock_ops *af_ops,
18351fb6f159SOctavian Purdila 		     struct sock *sk, struct sk_buff *skb);
18365db92c99SOctavian Purdila 
1837cfb6eeb4SYOSHIFUJI Hideaki /* TCP af-specific functions */
1838cfb6eeb4SYOSHIFUJI Hideaki struct tcp_sock_af_ops {
1839cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1840b83e3debSEric Dumazet 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
1841fd3a154aSEric Dumazet 						const struct sock *addr_sk);
1842cfb6eeb4SYOSHIFUJI Hideaki 	int		(*calc_md5_hash)(char *location,
184339f8e58eSEric Dumazet 					 const struct tcp_md5sig_key *md5,
1844318cf7aaSEric Dumazet 					 const struct sock *sk,
1845318cf7aaSEric Dumazet 					 const struct sk_buff *skb);
1846cfb6eeb4SYOSHIFUJI Hideaki 	int		(*md5_parse)(struct sock *sk,
1847cfb6eeb4SYOSHIFUJI Hideaki 				     char __user *optval,
1848cfb6eeb4SYOSHIFUJI Hideaki 				     int optlen);
1849cfb6eeb4SYOSHIFUJI Hideaki #endif
1850cfb6eeb4SYOSHIFUJI Hideaki };
1851cfb6eeb4SYOSHIFUJI Hideaki 
1852cfb6eeb4SYOSHIFUJI Hideaki struct tcp_request_sock_ops {
18532aec4a29SOctavian Purdila 	u16 mss_clamp;
1854cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1855b83e3debSEric Dumazet 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1856fd3a154aSEric Dumazet 						 const struct sock *addr_sk);
1857e3afe7b7SJohn Dykstra 	int		(*calc_md5_hash) (char *location,
185839f8e58eSEric Dumazet 					  const struct tcp_md5sig_key *md5,
1859318cf7aaSEric Dumazet 					  const struct sock *sk,
1860318cf7aaSEric Dumazet 					  const struct sk_buff *skb);
1861cfb6eeb4SYOSHIFUJI Hideaki #endif
1862b40cf18eSEric Dumazet 	void (*init_req)(struct request_sock *req,
1863b40cf18eSEric Dumazet 			 const struct sock *sk_listener,
186416bea70aSOctavian Purdila 			 struct sk_buff *skb);
1865fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES
18663f684b4bSEric Dumazet 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
1867fb7b37a7SOctavian Purdila 				 __u16 *mss);
1868fb7b37a7SOctavian Purdila #endif
1869f964629eSEric Dumazet 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
18704396e461SSoheil Hassas Yeganeh 				       const struct request_sock *req);
187184b114b9SEric Dumazet 	u32 (*init_seq)(const struct sk_buff *skb);
18725d2ed052SEric Dumazet 	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
18730f935dbeSEric Dumazet 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1874d6274bd8SOctavian Purdila 			   struct flowi *fl, struct request_sock *req,
1875dc6ef6beSEric Dumazet 			   struct tcp_fastopen_cookie *foc,
1876b3d05147SEric Dumazet 			   enum tcp_synack_type synack_type);
1877cfb6eeb4SYOSHIFUJI Hideaki };
1878cfb6eeb4SYOSHIFUJI Hideaki 
1879fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES
1880fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
18813f684b4bSEric Dumazet 					 const struct sock *sk, struct sk_buff *skb,
1882fb7b37a7SOctavian Purdila 					 __u16 *mss)
1883fb7b37a7SOctavian Purdila {
18843f684b4bSEric Dumazet 	tcp_synq_overflow(sk);
188502a1d6e7SEric Dumazet 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
18863f684b4bSEric Dumazet 	return ops->cookie_init_seq(skb, mss);
1887fb7b37a7SOctavian Purdila }
1888fb7b37a7SOctavian Purdila #else
1889fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
18903f684b4bSEric Dumazet 					 const struct sock *sk, struct sk_buff *skb,
1891fb7b37a7SOctavian Purdila 					 __u16 *mss)
1892fb7b37a7SOctavian Purdila {
1893fb7b37a7SOctavian Purdila 	return 0;
1894fb7b37a7SOctavian Purdila }
1895fb7b37a7SOctavian Purdila #endif
1896fb7b37a7SOctavian Purdila 
18975c9f3023SJoe Perches int tcpv4_offload_init(void);
189828850dc7SDaniel Borkmann 
18995c9f3023SJoe Perches void tcp_v4_init(void);
19005c9f3023SJoe Perches void tcp_init(void);
190120380731SArnaldo Carvalho de Melo 
1902659a8ad5SYuchung Cheng /* tcp_recovery.c */
1903128eda86SEric Dumazet extern void tcp_rack_mark_lost(struct sock *sk);
19041d0833dfSYuchung Cheng extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
19059a568de4SEric Dumazet 			     u64 xmit_time);
190657dde7f7SYuchung Cheng extern void tcp_rack_reo_timeout(struct sock *sk);
1907659a8ad5SYuchung Cheng 
1908e25f866fSCong Wang /*
1909e25f866fSCong Wang  * Save and compile IPv4 options, and return a pointer to them
1910e25f866fSCong Wang  */
1911e25f866fSCong Wang static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
1912e25f866fSCong Wang {
1913e25f866fSCong Wang 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1914e25f866fSCong Wang 	struct ip_options_rcu *dopt = NULL;
1915e25f866fSCong Wang 
1916461b74c3SCong Wang 	if (opt->optlen) {
1917e25f866fSCong Wang 		int opt_size = sizeof(*dopt) + opt->optlen;
1918e25f866fSCong Wang 
1919e25f866fSCong Wang 		dopt = kmalloc(opt_size, GFP_ATOMIC);
1920e25f866fSCong Wang 		if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
1921e25f866fSCong Wang 			kfree(dopt);
1922e25f866fSCong Wang 			dopt = NULL;
1923e25f866fSCong Wang 		}
1924e25f866fSCong Wang 	}
1925e25f866fSCong Wang 	return dopt;
1926e25f866fSCong Wang }
1927e25f866fSCong Wang 
192898781965SEric Dumazet /* locally generated TCP pure ACKs have skb->truesize == 2
192998781965SEric Dumazet  * (check tcp_send_ack() in net/ipv4/tcp_output.c )
193098781965SEric Dumazet  * This is much faster than dissecting the packet to find out.
193198781965SEric Dumazet  * (Think of GRE encapsulations, IPv4, IPv6, ...)
193298781965SEric Dumazet  */
193398781965SEric Dumazet static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
193498781965SEric Dumazet {
193598781965SEric Dumazet 	return skb->truesize == 2;
193698781965SEric Dumazet }
193798781965SEric Dumazet 
193898781965SEric Dumazet static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
193998781965SEric Dumazet {
194098781965SEric Dumazet 	skb->truesize = 2;
194198781965SEric Dumazet }
194298781965SEric Dumazet 
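/*
 * Usage note (assumed, per the comment above): the ACK built in
 * tcp_send_ack() is tagged with skb_set_tcp_pure_ack(), so later layers can
 * test skb_is_tcp_pure_ack() without dissecting the packet.
 */
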
1943473bd239STom Herbert static inline int tcp_inq(struct sock *sk)
1944473bd239STom Herbert {
1945473bd239STom Herbert 	struct tcp_sock *tp = tcp_sk(sk);
1946473bd239STom Herbert 	int answ;
1947473bd239STom Herbert 
1948473bd239STom Herbert 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1949473bd239STom Herbert 		answ = 0;
1950473bd239STom Herbert 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
1951473bd239STom Herbert 		   !tp->urg_data ||
1952473bd239STom Herbert 		   before(tp->urg_seq, tp->copied_seq) ||
1953473bd239STom Herbert 		   !before(tp->urg_seq, tp->rcv_nxt)) {
1954473bd239STom Herbert 
1955473bd239STom Herbert 		answ = tp->rcv_nxt - tp->copied_seq;
1956473bd239STom Herbert 
1957473bd239STom Herbert 		/* Subtract 1, if FIN was received */
1958473bd239STom Herbert 		if (answ && sock_flag(sk, SOCK_DONE))
1959473bd239STom Herbert 			answ--;
1960473bd239STom Herbert 	} else {
1961473bd239STom Herbert 		answ = tp->urg_seq - tp->copied_seq;
1962473bd239STom Herbert 	}
1963473bd239STom Herbert 
1964473bd239STom Herbert 	return answ;
1965473bd239STom Herbert }
1966473bd239STom Herbert 
196732035585STom Herbert int tcp_peek_len(struct socket *sock);
196832035585STom Herbert 
1969a44d6eacSMartin KaFai Lau static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1970a44d6eacSMartin KaFai Lau {
1971a44d6eacSMartin KaFai Lau 	u16 segs_in;
1972a44d6eacSMartin KaFai Lau 
1973a44d6eacSMartin KaFai Lau 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1974a44d6eacSMartin KaFai Lau 	tp->segs_in += segs_in;
1975a44d6eacSMartin KaFai Lau 	if (skb->len > tcp_hdrlen(skb))
1976a44d6eacSMartin KaFai Lau 		tp->data_segs_in += segs_in;
1977a44d6eacSMartin KaFai Lau }
1978a44d6eacSMartin KaFai Lau 
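/*
 * Worked example: a GRO aggregate carrying payload with gso_segs = 3 bumps
 * segs_in and data_segs_in by 3 each; a pure ACK (no payload, gso_segs of 0)
 * bumps only segs_in, by the clamped minimum of 1.
 */
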
19799caad864SEric Dumazet /*
19809caad864SEric Dumazet  * TCP listen path runs lockless.
19819caad864SEric Dumazet  * We forced "struct sock" to be const qualified to make sure
19829caad864SEric Dumazet  * we don't modify one of its fields by mistake.
19839caad864SEric Dumazet  * Here, we increment sk_drops which is an atomic_t, so we can safely
19849caad864SEric Dumazet  * make sock writable again.
19859caad864SEric Dumazet  */
19869caad864SEric Dumazet static inline void tcp_listendrop(const struct sock *sk)
19879caad864SEric Dumazet {
19889caad864SEric Dumazet 	atomic_inc(&((struct sock *)sk)->sk_drops);
198902a1d6e7SEric Dumazet 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
19909caad864SEric Dumazet }
19919caad864SEric Dumazet 
1992218af599SEric Dumazet enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
1993218af599SEric Dumazet 
19941da177e4SLinus Torvalds #endif	/* _TCP_H */
1995