xref: /linux/include/net/tcp.h (revision 20b654dfe1beaca60ab51894ff405a049248433d)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * INET		An implementation of the TCP/IP protocol suite for the LINUX
31da177e4SLinus Torvalds  *		operating system.  INET is implemented using the  BSD Socket
41da177e4SLinus Torvalds  *		interface as the means of communication with the user level.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  *		Definitions for the TCP module.
71da177e4SLinus Torvalds  *
81da177e4SLinus Torvalds  * Version:	@(#)tcp.h	1.0.5	05/23/93
91da177e4SLinus Torvalds  *
1002c30a84SJesper Juhl  * Authors:	Ross Biro
111da177e4SLinus Torvalds  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
121da177e4SLinus Torvalds  *
131da177e4SLinus Torvalds  *		This program is free software; you can redistribute it and/or
141da177e4SLinus Torvalds  *		modify it under the terms of the GNU General Public License
151da177e4SLinus Torvalds  *		as published by the Free Software Foundation; either version
161da177e4SLinus Torvalds  *		2 of the License, or (at your option) any later version.
171da177e4SLinus Torvalds  */
181da177e4SLinus Torvalds #ifndef _TCP_H
191da177e4SLinus Torvalds #define _TCP_H
201da177e4SLinus Torvalds 
211da177e4SLinus Torvalds #define FASTRETRANS_DEBUG 1
221da177e4SLinus Torvalds 
231da177e4SLinus Torvalds #include <linux/list.h>
241da177e4SLinus Torvalds #include <linux/tcp.h>
25187f1882SPaul Gortmaker #include <linux/bug.h>
261da177e4SLinus Torvalds #include <linux/slab.h>
271da177e4SLinus Torvalds #include <linux/cache.h>
281da177e4SLinus Torvalds #include <linux/percpu.h>
29fb286bb2SHerbert Xu #include <linux/skbuff.h>
30c6aefafbSGlenn Griffin #include <linux/cryptohash.h>
31435cf559SWilliam Allen Simpson #include <linux/kref.h>
32740b0f18SEric Dumazet #include <linux/ktime.h>
333f421baaSArnaldo Carvalho de Melo 
343f421baaSArnaldo Carvalho de Melo #include <net/inet_connection_sock.h>
35295ff7edSArnaldo Carvalho de Melo #include <net/inet_timewait_sock.h>
3677d8bf9cSArnaldo Carvalho de Melo #include <net/inet_hashtables.h>
371da177e4SLinus Torvalds #include <net/checksum.h>
382e6599cbSArnaldo Carvalho de Melo #include <net/request_sock.h>
391da177e4SLinus Torvalds #include <net/sock.h>
401da177e4SLinus Torvalds #include <net/snmp.h>
411da177e4SLinus Torvalds #include <net/ip.h>
42c752f073SArnaldo Carvalho de Melo #include <net/tcp_states.h>
43bdf1ee5dSIlpo Järvinen #include <net/inet_ecn.h>
440c266898SSatoru SATOH #include <net/dst.h>
45c752f073SArnaldo Carvalho de Melo 
461da177e4SLinus Torvalds #include <linux/seq_file.h>
47180d8cd9SGlauber Costa #include <linux/memcontrol.h>
4840304b2aSLawrence Brakmo #include <linux/bpf-cgroup.h>
4940304b2aSLawrence Brakmo 
500f7ff927SArnaldo Carvalho de Melo extern struct inet_hashinfo tcp_hashinfo;
511da177e4SLinus Torvalds 
52dd24c001SEric Dumazet extern struct percpu_counter tcp_orphan_count;
535c9f3023SJoe Perches void tcp_time_wait(struct sock *sk, int state, int timeo);
541da177e4SLinus Torvalds 
551da177e4SLinus Torvalds #define MAX_TCP_HEADER	(128 + MAX_HEADER)
5633ad798cSAdam Langley #define MAX_TCP_OPTION_SPACE 40
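/* Worked example (illustrative): MAX_TCP_OPTION_SPACE is the 60-byte maximum
 * TCP header minus the 20-byte fixed header.  With an aligned timestamp
 * option (12 bytes) and an aligned SACK header (4 bytes), 24 bytes remain,
 * i.e. room for at most three 8-byte SACK blocks in a single segment.
 */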
571da177e4SLinus Torvalds 
581da177e4SLinus Torvalds /*
591da177e4SLinus Torvalds  * Never offer a window over 32767 without using window scaling. Some
601da177e4SLinus Torvalds  * poor stacks do signed 16bit maths!
611da177e4SLinus Torvalds  */
621da177e4SLinus Torvalds #define MAX_TCP_WINDOW		32767U
631da177e4SLinus Torvalds 
641da177e4SLinus Torvalds /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
651da177e4SLinus Torvalds #define TCP_MIN_MSS		88U
661da177e4SLinus Torvalds 
675d424d5aSJohn Heffner /* The default initial MSS used as the lower bound for MTU probing */
68dcd8fb85SFan Du #define TCP_BASE_MSS		1024
695d424d5aSJohn Heffner 
7005cbc0dbSFan Du /* probing interval, default to 10 minutes as per RFC4821 */
7105cbc0dbSFan Du #define TCP_PROBE_INTERVAL	600
7205cbc0dbSFan Du 
736b58e0a5SFan Du /* Stop TCP MTU probing once the remaining search interval drops below this many bytes */
746b58e0a5SFan Du #define TCP_PROBE_THRESHOLD	8
756b58e0a5SFan Du 
761da177e4SLinus Torvalds /* After receiving this amount of duplicate ACKs fast retransmit starts. */
771da177e4SLinus Torvalds #define TCP_FASTRETRANS_THRESH 3
781da177e4SLinus Torvalds 
791da177e4SLinus Torvalds /* Maximal number of ACKs sent quickly to accelerate slow-start. */
801da177e4SLinus Torvalds #define TCP_MAX_QUICKACKS	16U
811da177e4SLinus Torvalds 
82589c49cbSGao Feng /* Maximal value of the window scale (shift count) according to RFC1323 */
83589c49cbSGao Feng #define TCP_MAX_WSCALE		14U
84589c49cbSGao Feng 
851da177e4SLinus Torvalds /* urg_data states */
861da177e4SLinus Torvalds #define TCP_URG_VALID	0x0100
871da177e4SLinus Torvalds #define TCP_URG_NOTYET	0x0200
881da177e4SLinus Torvalds #define TCP_URG_READ	0x0400
891da177e4SLinus Torvalds 
901da177e4SLinus Torvalds #define TCP_RETR1	3	/*
911da177e4SLinus Torvalds 				 * This is how many retries it does before it
921da177e4SLinus Torvalds 				 * tries to figure out if the gateway is
931da177e4SLinus Torvalds 				 * down. Minimal RFC value is 3; it corresponds
941da177e4SLinus Torvalds 				 * to ~3sec-8min depending on RTO.
951da177e4SLinus Torvalds 				 */
961da177e4SLinus Torvalds 
971da177e4SLinus Torvalds #define TCP_RETR2	15	/*
981da177e4SLinus Torvalds 				 * This should take at least
991da177e4SLinus Torvalds 				 * 90 minutes to time out.
1001da177e4SLinus Torvalds 				 * RFC1122 says that the limit is 100 sec.
1011da177e4SLinus Torvalds 				 * 15 is ~13-30min depending on RTO.
1021da177e4SLinus Torvalds 				 */
1031da177e4SLinus Torvalds 
1046c9ff979SAlex Bergmann #define TCP_SYN_RETRIES	 6	/* This is how many retries are done
1056c9ff979SAlex Bergmann 				 * when active opening a connection.
1066c9ff979SAlex Bergmann 				 * RFC1122 says the retry period MUST
1076c9ff979SAlex Bergmann 				 * be at least 180 secs.  Nevertheless
1086c9ff979SAlex Bergmann 				 * this value corresponds to 63 secs
1096c9ff979SAlex Bergmann 				 * of retransmission with the current
1106c9ff979SAlex Bergmann 				 * initial RTO.
1116c9ff979SAlex Bergmann 				 */
1121da177e4SLinus Torvalds 
1136c9ff979SAlex Bergmann #define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
1146c9ff979SAlex Bergmann 				 * when passive opening a connection.
1156c9ff979SAlex Bergmann 				 * This corresponds to 31 secs of
1166c9ff979SAlex Bergmann 				 * retransmission with the current
1176c9ff979SAlex Bergmann 				 * initial RTO.
1186c9ff979SAlex Bergmann 				 */
1191da177e4SLinus Torvalds 
1201da177e4SLinus Torvalds #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
1211da177e4SLinus Torvalds 				  * state, about 60 seconds	*/
1221da177e4SLinus Torvalds #define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
1231da177e4SLinus Torvalds                                  /* BSD style FIN_WAIT2 deadlock breaker.
1241da177e4SLinus Torvalds 				  * It used to be 3 min; the new value is 60 sec,
1251da177e4SLinus Torvalds 				  * to combine the FIN-WAIT-2 timeout with the
1261da177e4SLinus Torvalds 				  * TIME-WAIT timer.
1271da177e4SLinus Torvalds 				  */
1281da177e4SLinus Torvalds 
1291da177e4SLinus Torvalds #define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
1301da177e4SLinus Torvalds #if HZ >= 100
1311da177e4SLinus Torvalds #define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
1321da177e4SLinus Torvalds #define TCP_ATO_MIN	((unsigned)(HZ/25))
1331da177e4SLinus Torvalds #else
1341da177e4SLinus Torvalds #define TCP_DELACK_MIN	4U
1351da177e4SLinus Torvalds #define TCP_ATO_MIN	4U
1361da177e4SLinus Torvalds #endif
1371da177e4SLinus Torvalds #define TCP_RTO_MAX	((unsigned)(120*HZ))
1381da177e4SLinus Torvalds #define TCP_RTO_MIN	((unsigned)(HZ/5))
139bb4d991aSYuchung Cheng #define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
140fd4f2ceaSEric Dumazet #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
1419ad7c049SJerry Chu #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
1429ad7c049SJerry Chu 						 * used as a fallback RTO for the
1439ad7c049SJerry Chu 						 * initial data transmission if no
1449ad7c049SJerry Chu 						 * valid RTT sample has been acquired,
1459ad7c049SJerry Chu 						 * most likely due to retrans in 3WHS.
1469ad7c049SJerry Chu 						 */
1471da177e4SLinus Torvalds 
1481da177e4SLinus Torvalds #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
1491da177e4SLinus Torvalds 					                 * for local resources.
1501da177e4SLinus Torvalds 					                 */
1511da177e4SLinus Torvalds #define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
1521da177e4SLinus Torvalds #define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
1531da177e4SLinus Torvalds #define TCP_KEEPALIVE_INTVL	(75*HZ)
1541da177e4SLinus Torvalds 
1551da177e4SLinus Torvalds #define MAX_TCP_KEEPIDLE	32767
1561da177e4SLinus Torvalds #define MAX_TCP_KEEPINTVL	32767
1571da177e4SLinus Torvalds #define MAX_TCP_KEEPCNT		127
1581da177e4SLinus Torvalds #define MAX_TCP_SYNCNT		127
1591da177e4SLinus Torvalds 
1601da177e4SLinus Torvalds #define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
1611da177e4SLinus Torvalds 
1621da177e4SLinus Torvalds #define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
1631da177e4SLinus Torvalds #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
1641da177e4SLinus Torvalds 					 * after this time. It should be equal to
1651da177e4SLinus Torvalds 					 * (or greater than) TCP_TIMEWAIT_LEN
1661da177e4SLinus Torvalds 					 * to provide reliability equal to that
1671da177e4SLinus Torvalds 					 * provided by the timewait state.
1681da177e4SLinus Torvalds 					 */
1691da177e4SLinus Torvalds #define TCP_PAWS_WINDOW	1		/* Replay window for per-host
1701da177e4SLinus Torvalds 					 * timestamps. It must be less than
1711da177e4SLinus Torvalds 					 * minimal timewait lifetime.
1721da177e4SLinus Torvalds 					 */
1731da177e4SLinus Torvalds /*
1741da177e4SLinus Torvalds  *	TCP option
1751da177e4SLinus Torvalds  */
1761da177e4SLinus Torvalds 
1771da177e4SLinus Torvalds #define TCPOPT_NOP		1	/* Padding */
1781da177e4SLinus Torvalds #define TCPOPT_EOL		0	/* End of options */
1791da177e4SLinus Torvalds #define TCPOPT_MSS		2	/* Segment size negotiating */
1801da177e4SLinus Torvalds #define TCPOPT_WINDOW		3	/* Window scaling */
1811da177e4SLinus Torvalds #define TCPOPT_SACK_PERM        4       /* SACK Permitted */
1821da177e4SLinus Torvalds #define TCPOPT_SACK             5       /* SACK Block */
1831da177e4SLinus Torvalds #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
184cfb6eeb4SYOSHIFUJI Hideaki #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
1857f9b838bSDaniel Lee #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
1862100c8d2SYuchung Cheng #define TCPOPT_EXP		254	/* Experimental */
1872100c8d2SYuchung Cheng /* Magic number (experiment ID) placed at the start of the option data when
1882100c8d2SYuchung Cheng  * sharing TCP experimental options. See draft-ietf-tcpm-experimental-options-00.txt
1892100c8d2SYuchung Cheng  */
1902100c8d2SYuchung Cheng #define TCPOPT_FASTOPEN_MAGIC	0xF989
19160e2a778SUrsula Braun #define TCPOPT_SMC_MAGIC	0xE2D4C3D9
1921da177e4SLinus Torvalds 
1931da177e4SLinus Torvalds /*
1941da177e4SLinus Torvalds  *     TCP option lengths
1951da177e4SLinus Torvalds  */
1961da177e4SLinus Torvalds 
1971da177e4SLinus Torvalds #define TCPOLEN_MSS            4
1981da177e4SLinus Torvalds #define TCPOLEN_WINDOW         3
1991da177e4SLinus Torvalds #define TCPOLEN_SACK_PERM      2
2001da177e4SLinus Torvalds #define TCPOLEN_TIMESTAMP      10
201cfb6eeb4SYOSHIFUJI Hideaki #define TCPOLEN_MD5SIG         18
2027f9b838bSDaniel Lee #define TCPOLEN_FASTOPEN_BASE  2
2032100c8d2SYuchung Cheng #define TCPOLEN_EXP_FASTOPEN_BASE  4
20460e2a778SUrsula Braun #define TCPOLEN_EXP_SMC_BASE   6
2051da177e4SLinus Torvalds 
2061da177e4SLinus Torvalds /* But this is what stacks really send out. */
2071da177e4SLinus Torvalds #define TCPOLEN_TSTAMP_ALIGNED		12
2081da177e4SLinus Torvalds #define TCPOLEN_WSCALE_ALIGNED		4
2091da177e4SLinus Torvalds #define TCPOLEN_SACKPERM_ALIGNED	4
2101da177e4SLinus Torvalds #define TCPOLEN_SACK_BASE		2
2111da177e4SLinus Torvalds #define TCPOLEN_SACK_BASE_ALIGNED	4
2121da177e4SLinus Torvalds #define TCPOLEN_SACK_PERBLOCK		8
213cfb6eeb4SYOSHIFUJI Hideaki #define TCPOLEN_MD5SIG_ALIGNED		20
21433ad798cSAdam Langley #define TCPOLEN_MSS_ALIGNED		4
21560e2a778SUrsula Braun #define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
2161da177e4SLinus Torvalds 
2171da177e4SLinus Torvalds /* Flags in tp->nonagle */
2181da177e4SLinus Torvalds #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
2191da177e4SLinus Torvalds #define TCP_NAGLE_CORK		2	/* Socket is corked	    */
220caa20d9aSStephen Hemminger #define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
2211da177e4SLinus Torvalds 
22236e31b0aSAndreas Petlund /* TCP thin-stream limits */
22336e31b0aSAndreas Petlund #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
22436e31b0aSAndreas Petlund 
22521603fc4SJörg Thalheim /* TCP initial congestion window as per rfc6928 */
226442b9635SDavid S. Miller #define TCP_INIT_CWND		10
227442b9635SDavid S. Miller 
228cf60af03SYuchung Cheng /* Bit Flags for sysctl_tcp_fastopen */
229cf60af03SYuchung Cheng #define	TFO_CLIENT_ENABLE	1
23010467163SJerry Chu #define	TFO_SERVER_ENABLE	2
23167da22d2SYuchung Cheng #define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
232cf60af03SYuchung Cheng 
23310467163SJerry Chu /* Accept SYN data w/o any cookie option */
23410467163SJerry Chu #define	TFO_SERVER_COOKIE_NOT_REQD	0x200
23510467163SJerry Chu 
23610467163SJerry Chu /* Force enable TFO on all listeners, i.e., not requiring the
237cebc5cbaSYuchung Cheng  * TCP_FASTOPEN socket option.
23810467163SJerry Chu  */
23910467163SJerry Chu #define	TFO_SERVER_WO_SOCKOPT1	0x400
24010467163SJerry Chu 
241295ff7edSArnaldo Carvalho de Melo 
2421da177e4SLinus Torvalds /* sysctl variables for tcp */
2431da177e4SLinus Torvalds extern int sysctl_tcp_max_orphans;
244a4fe34bfSEric W. Biederman extern long sysctl_tcp_mem[3];
245e20223f1SEric Dumazet 
246a0370b3fSYuchung Cheng #define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
2471f255691SPriyaranjan Jha #define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
248*20b654dfSYuchung Cheng #define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */
249a0370b3fSYuchung Cheng 
2508d987e5cSEric Dumazet extern atomic_long_t tcp_memory_allocated;
2511748376bSEric Dumazet extern struct percpu_counter tcp_sockets_allocated;
25206044751SEric Dumazet extern unsigned long tcp_memory_pressure;
2531da177e4SLinus Torvalds 
254b8da51ebSEric Dumazet /* optimized version of sk_under_memory_pressure() for TCP sockets */
255b8da51ebSEric Dumazet static inline bool tcp_under_memory_pressure(const struct sock *sk)
256b8da51ebSEric Dumazet {
257baac50bbSJohannes Weiner 	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
258baac50bbSJohannes Weiner 	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
259e805605cSJohannes Weiner 		return true;
260b8da51ebSEric Dumazet 
261b8da51ebSEric Dumazet 	return tcp_memory_pressure;
262b8da51ebSEric Dumazet }
2631da177e4SLinus Torvalds /*
2641da177e4SLinus Torvalds  * The next routines deal with comparing 32 bit unsigned ints
2651da177e4SLinus Torvalds  * and worry about wraparound (automatic with unsigned arithmetic).
2661da177e4SLinus Torvalds  */
2671da177e4SLinus Torvalds 
268a2a385d6SEric Dumazet static inline bool before(__u32 seq1, __u32 seq2)
2691da177e4SLinus Torvalds {
2700d630cc0SGerrit Renker         return (__s32)(seq1-seq2) < 0;
2711da177e4SLinus Torvalds }
2729a036b9cSGerrit Renker #define after(seq2, seq1) 	before(seq1, seq2)
2731da177e4SLinus Torvalds 
2741da177e4SLinus Torvalds /* is s2<=s1<=s3 ? */
275a2a385d6SEric Dumazet static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
2761da177e4SLinus Torvalds {
2771da177e4SLinus Torvalds 	return seq3 - seq2 >= seq1 - seq2;
2781da177e4SLinus Torvalds }
2791da177e4SLinus Torvalds 
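/* A minimal usage sketch (illustrative, not part of the kernel API): because
 * the helpers above compare sequence numbers modulo 2^32, a value that has
 * just wrapped past zero still compares as "after" one close to 0xffffffff.
 */
static inline bool tcp_seq_wrap_example(void)
{
	__u32 old_seq = 0xfffffff0U;	/* shortly before the 2^32 wrap */
	__u32 new_seq = 0x00000010U;	/* 0x20 bytes later, after the wrap */

	return before(old_seq, new_seq) &&	/* old_seq precedes new_seq */
	       after(new_seq, old_seq) &&
	       between(0xfffffff8U, old_seq, new_seq); /* old <= x <= new */
}
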
280efcdbf24SArun Sharma static inline bool tcp_out_of_memory(struct sock *sk)
281efcdbf24SArun Sharma {
282efcdbf24SArun Sharma 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
283efcdbf24SArun Sharma 	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
284efcdbf24SArun Sharma 		return true;
285efcdbf24SArun Sharma 	return false;
286efcdbf24SArun Sharma }
287efcdbf24SArun Sharma 
288a6c5ea4cSEric Dumazet void sk_forced_mem_schedule(struct sock *sk, int size);
289a6c5ea4cSEric Dumazet 
290ad1af0feSDavid S. Miller static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
291e4fd5da3SPavel Emelianov {
292ad1af0feSDavid S. Miller 	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
293ad1af0feSDavid S. Miller 	int orphans = percpu_counter_read_positive(ocp);
294ad1af0feSDavid S. Miller 
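	/* The approximate per-cpu read above is cheap; fall back to the exact
	 * (and costlier) percpu_counter_sum_positive() only when the
	 * approximation already exceeds the limit.
	 */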
295ad1af0feSDavid S. Miller 	if (orphans << shift > sysctl_tcp_max_orphans) {
296ad1af0feSDavid S. Miller 		orphans = percpu_counter_sum_positive(ocp);
297ad1af0feSDavid S. Miller 		if (orphans << shift > sysctl_tcp_max_orphans)
298ad1af0feSDavid S. Miller 			return true;
299ad1af0feSDavid S. Miller 	}
300ad1af0feSDavid S. Miller 	return false;
301e4fd5da3SPavel Emelianov }
3021da177e4SLinus Torvalds 
3035c9f3023SJoe Perches bool tcp_check_oom(struct sock *sk, int shift);
304efcdbf24SArun Sharma 
305a0f82f64SFlorian Westphal 
3061da177e4SLinus Torvalds extern struct proto tcp_prot;
3071da177e4SLinus Torvalds 
30857ef42d5SPavel Emelyanov #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
30913415e46SEric Dumazet #define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
31057ef42d5SPavel Emelyanov #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
311aa2ea058STom Herbert #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
3121da177e4SLinus Torvalds 
3135c9f3023SJoe Perches void tcp_tasklet_init(void);
31446d3ceabSEric Dumazet 
3155c9f3023SJoe Perches void tcp_v4_err(struct sk_buff *skb, u32);
3161da177e4SLinus Torvalds 
3175c9f3023SJoe Perches void tcp_shutdown(struct sock *sk, int how);
3181da177e4SLinus Torvalds 
3197487449cSPaolo Abeni int tcp_v4_early_demux(struct sk_buff *skb);
3205c9f3023SJoe Perches int tcp_v4_rcv(struct sk_buff *skb);
3211da177e4SLinus Torvalds 
3225c9f3023SJoe Perches int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
3231b784140SYing Xue int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
324306b13ebSTom Herbert int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
3255c9f3023SJoe Perches int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
3265c9f3023SJoe Perches 		 int flags);
327306b13ebSTom Herbert int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
328306b13ebSTom Herbert 			size_t size, int flags);
329e3b5616aSDave Watson ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
330e3b5616aSDave Watson 		 size_t size, int flags);
3315c9f3023SJoe Perches void tcp_release_cb(struct sock *sk);
3325c9f3023SJoe Perches void tcp_wfree(struct sk_buff *skb);
3335c9f3023SJoe Perches void tcp_write_timer_handler(struct sock *sk);
3345c9f3023SJoe Perches void tcp_delack_timer_handler(struct sock *sk);
3355c9f3023SJoe Perches int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
33672ab4a86SEric Dumazet int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
3375c9f3023SJoe Perches void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
338e42e24c3SMatvejchikov Ilya 			 const struct tcphdr *th);
3395c9f3023SJoe Perches void tcp_rcv_space_adjust(struct sock *sk);
3405c9f3023SJoe Perches int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
3415c9f3023SJoe Perches void tcp_twsk_destructor(struct sock *sk);
3425c9f3023SJoe Perches ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
34353d3176bSChangli Gao 			struct pipe_inode_info *pipe, size_t len,
34453d3176bSChangli Gao 			unsigned int flags);
3459c55e01cSJens Axboe 
346463c84b9SArnaldo Carvalho de Melo static inline void tcp_dec_quickack_mode(struct sock *sk,
347463c84b9SArnaldo Carvalho de Melo 					 const unsigned int pkts)
3481da177e4SLinus Torvalds {
349463c84b9SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
350fc6415bcSDavid S. Miller 
351463c84b9SArnaldo Carvalho de Melo 	if (icsk->icsk_ack.quick) {
352463c84b9SArnaldo Carvalho de Melo 		if (pkts >= icsk->icsk_ack.quick) {
353463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.quick = 0;
3541da177e4SLinus Torvalds 			/* Leaving quickack mode we deflate ATO. */
355463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.ato   = TCP_ATO_MIN;
356fc6415bcSDavid S. Miller 		} else
357463c84b9SArnaldo Carvalho de Melo 			icsk->icsk_ack.quick -= pkts;
3581da177e4SLinus Torvalds 	}
3591da177e4SLinus Torvalds }
3601da177e4SLinus Torvalds 
361bdf1ee5dSIlpo Järvinen #define	TCP_ECN_OK		1
362bdf1ee5dSIlpo Järvinen #define	TCP_ECN_QUEUE_CWR	2
363bdf1ee5dSIlpo Järvinen #define	TCP_ECN_DEMAND_CWR	4
3647a269ffaSEric Dumazet #define	TCP_ECN_SEEN		8
365bdf1ee5dSIlpo Järvinen 
366fd2c3ef7SEric Dumazet enum tcp_tw_status {
3671da177e4SLinus Torvalds 	TCP_TW_SUCCESS = 0,
3681da177e4SLinus Torvalds 	TCP_TW_RST = 1,
3691da177e4SLinus Torvalds 	TCP_TW_ACK = 2,
3701da177e4SLinus Torvalds 	TCP_TW_SYN = 3
3711da177e4SLinus Torvalds };
3721da177e4SLinus Torvalds 
3731da177e4SLinus Torvalds 
3745c9f3023SJoe Perches enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
3751da177e4SLinus Torvalds 					      struct sk_buff *skb,
3768feaf0c0SArnaldo Carvalho de Melo 					      const struct tcphdr *th);
3775c9f3023SJoe Perches struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
378e0f9759fSEric Dumazet 			   struct request_sock *req, bool fastopen,
379e0f9759fSEric Dumazet 			   bool *lost_race);
3805c9f3023SJoe Perches int tcp_child_process(struct sock *parent, struct sock *child,
3811da177e4SLinus Torvalds 		      struct sk_buff *skb);
3825ae344c9SNeal Cardwell void tcp_enter_loss(struct sock *sk);
38357dde7f7SYuchung Cheng void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
3845c9f3023SJoe Perches void tcp_clear_retrans(struct tcp_sock *tp);
3855c9f3023SJoe Perches void tcp_update_metrics(struct sock *sk);
3865c9f3023SJoe Perches void tcp_init_metrics(struct sock *sk);
3875c9f3023SJoe Perches void tcp_metrics_init(void);
388d82bae12SSoheil Hassas Yeganeh bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
3895c9f3023SJoe Perches void tcp_close(struct sock *sk, long timeout);
3905c9f3023SJoe Perches void tcp_init_sock(struct sock *sk);
39127204aaaSWei Wang void tcp_init_transfer(struct sock *sk, int bpf_op);
392ade994f4SAl Viro __poll_t tcp_poll(struct file *file, struct socket *sock,
39353d3176bSChangli Gao 		      struct poll_table_struct *wait);
3945c9f3023SJoe Perches int tcp_getsockopt(struct sock *sk, int level, int optname,
3953fdadf7dSDmitry Mishin 		   char __user *optval, int __user *optlen);
3965c9f3023SJoe Perches int tcp_setsockopt(struct sock *sk, int level, int optname,
39753d3176bSChangli Gao 		   char __user *optval, unsigned int optlen);
3985c9f3023SJoe Perches int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
39953d3176bSChangli Gao 			  char __user *optval, int __user *optlen);
4005c9f3023SJoe Perches int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
401b7058842SDavid S. Miller 			  char __user *optval, unsigned int optlen);
4025c9f3023SJoe Perches void tcp_set_keepalive(struct sock *sk, int val);
40342cb80a2SEric Dumazet void tcp_syn_ack_timeout(const struct request_sock *req);
4041b784140SYing Xue int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
4051b784140SYing Xue 		int flags, int *addr_len);
406d1361840SEric Dumazet int tcp_set_rcvlowat(struct sock *sk, int val);
40703f45c88SEric Dumazet void tcp_data_ready(struct sock *sk);
40893ab6cc6SEric Dumazet int tcp_mmap(struct file *file, struct socket *sock,
40993ab6cc6SEric Dumazet 	     struct vm_area_struct *vma);
410eed29f17SEric Dumazet void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
4111a2c6181SChristoph Paasch 		       struct tcp_options_received *opt_rx,
4122100c8d2SYuchung Cheng 		       int estab, struct tcp_fastopen_cookie *foc);
4135c9f3023SJoe Perches const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
4147d5d5525SYOSHIFUJI Hideaki 
4151da177e4SLinus Torvalds /*
4161da177e4SLinus Torvalds  *	TCP v4 functions exported for the inet6 API
4171da177e4SLinus Torvalds  */
4181da177e4SLinus Torvalds 
4195c9f3023SJoe Perches void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
4204fab9071SNeal Cardwell void tcp_v4_mtu_reduced(struct sock *sk);
4219cf74903SEric Dumazet void tcp_req_err(struct sock *sk, u32 seq, bool abort);
4225c9f3023SJoe Perches int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
423c28c6f04SEric Dumazet struct sock *tcp_create_openreq_child(const struct sock *sk,
42460236fddSArnaldo Carvalho de Melo 				      struct request_sock *req,
4251da177e4SLinus Torvalds 				      struct sk_buff *skb);
42681164413SDaniel Borkmann void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
4270c27171eSEric Dumazet struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
42860236fddSArnaldo Carvalho de Melo 				  struct request_sock *req,
4295e0724d0SEric Dumazet 				  struct dst_entry *dst,
4305e0724d0SEric Dumazet 				  struct request_sock *req_unhash,
4315e0724d0SEric Dumazet 				  bool *own_req);
4325c9f3023SJoe Perches int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
4335c9f3023SJoe Perches int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
4345c9f3023SJoe Perches int tcp_connect(struct sock *sk);
435b3d05147SEric Dumazet enum tcp_synack_type {
436b3d05147SEric Dumazet 	TCP_SYNACK_NORMAL,
437b3d05147SEric Dumazet 	TCP_SYNACK_FASTOPEN,
438b3d05147SEric Dumazet 	TCP_SYNACK_COOKIE,
439b3d05147SEric Dumazet };
4405d062de7SEric Dumazet struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
441e6b4d113SWilliam Allen Simpson 				struct request_sock *req,
442ca6fb065SEric Dumazet 				struct tcp_fastopen_cookie *foc,
443b3d05147SEric Dumazet 				enum tcp_synack_type synack_type);
4445c9f3023SJoe Perches int tcp_disconnect(struct sock *sk, int flags);
4451da177e4SLinus Torvalds 
446370816aeSPavel Emelyanov void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
447292e8d8cSPavel Emelyanov int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
44863d02d15SEric Dumazet void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
4491da177e4SLinus Torvalds 
4501da177e4SLinus Torvalds /* From syncookies.c */
451b80c0e78SEric Dumazet struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
452b80c0e78SEric Dumazet 				 struct request_sock *req,
45384b114b9SEric Dumazet 				 struct dst_entry *dst, u32 tsoff);
4545c9f3023SJoe Perches int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
4550198230bSPatrick McHardy 		      u32 cookie);
456461b74c3SCong Wang struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
457e05c82d3SEric Dumazet #ifdef CONFIG_SYN_COOKIES
4588c27bd75SFlorian Westphal 
45963262315SEric Dumazet /* Syncookies use a monotonic timer which increments every 60 seconds.
4608c27bd75SFlorian Westphal  * This counter is used both as a hash input and partially encoded into
4618c27bd75SFlorian Westphal  * the cookie value.  A cookie is only validated further if the delta
4628c27bd75SFlorian Westphal  * between the current counter value and the encoded one is less than this,
46363262315SEric Dumazet  * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
4648c27bd75SFlorian Westphal  * the counter advances immediately after a cookie is generated).
4658c27bd75SFlorian Westphal  */
4668c27bd75SFlorian Westphal #define MAX_SYNCOOKIE_AGE	2
467264ea103SEric Dumazet #define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
468264ea103SEric Dumazet #define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
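/* Worked example (assuming HZ == 1000): TCP_SYNCOOKIE_PERIOD is 60000 jiffies,
 * so the counter used by tcp_cookie_time() ticks once per minute and a sent
 * cookie stays acceptable for at most TCP_SYNCOOKIE_VALID == 120000 jiffies,
 * i.e. two minutes.
 */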
469264ea103SEric Dumazet 
470264ea103SEric Dumazet /* syncookies: remember time of last synqueue overflow
471264ea103SEric Dumazet  * But do not dirty this field too often (once per second is enough)
4723f684b4bSEric Dumazet  * It is racy as we do not hold a lock, but the race is very minor.
473264ea103SEric Dumazet  */
4743f684b4bSEric Dumazet static inline void tcp_synq_overflow(const struct sock *sk)
475264ea103SEric Dumazet {
476264ea103SEric Dumazet 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
477264ea103SEric Dumazet 	unsigned long now = jiffies;
478264ea103SEric Dumazet 
479264ea103SEric Dumazet 	if (time_after(now, last_overflow + HZ))
480264ea103SEric Dumazet 		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
481264ea103SEric Dumazet }
482264ea103SEric Dumazet 
483264ea103SEric Dumazet /* syncookies: no recent synqueue overflow on this listening socket? */
484264ea103SEric Dumazet static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
485264ea103SEric Dumazet {
486264ea103SEric Dumazet 	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
487264ea103SEric Dumazet 
488264ea103SEric Dumazet 	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
489264ea103SEric Dumazet }
4908c27bd75SFlorian Westphal 
4918c27bd75SFlorian Westphal static inline u32 tcp_cookie_time(void)
4928c27bd75SFlorian Westphal {
49363262315SEric Dumazet 	u64 val = get_jiffies_64();
49463262315SEric Dumazet 
495264ea103SEric Dumazet 	do_div(val, TCP_SYNCOOKIE_PERIOD);
49663262315SEric Dumazet 	return val;
4978c27bd75SFlorian Westphal }
4988c27bd75SFlorian Westphal 
4995c9f3023SJoe Perches u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
5005c9f3023SJoe Perches 			      u16 *mssp);
5013f684b4bSEric Dumazet __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
5029a568de4SEric Dumazet u64 cookie_init_timestamp(struct request_sock *req);
503f9301034SEric Dumazet bool cookie_timestamp_decode(const struct net *net,
504f9301034SEric Dumazet 			     struct tcp_options_received *opt);
505f1673381SFlorian Westphal bool cookie_ecn_ok(const struct tcp_options_received *opt,
506f7b3bec6SFlorian Westphal 		   const struct net *net, const struct dst_entry *dst);
5074dfc2817SFlorian Westphal 
508c6aefafbSGlenn Griffin /* From net/ipv6/syncookies.c */
5095c9f3023SJoe Perches int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
51081eb6a14SPatrick McHardy 		      u32 cookie);
5115c9f3023SJoe Perches struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
512f1673381SFlorian Westphal 
5135c9f3023SJoe Perches u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
51481eb6a14SPatrick McHardy 			      const struct tcphdr *th, u16 *mssp);
5153f684b4bSEric Dumazet __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
516e05c82d3SEric Dumazet #endif
5171da177e4SLinus Torvalds /* tcp_output.c */
5181da177e4SLinus Torvalds 
5195c9f3023SJoe Perches void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
5209e412ba7SIlpo Järvinen 			       int nonagle);
52110d3be56SEric Dumazet int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
52210d3be56SEric Dumazet int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
5235c9f3023SJoe Perches void tcp_retransmit_timer(struct sock *sk);
5245c9f3023SJoe Perches void tcp_xmit_retransmit_queue(struct sock *);
5255c9f3023SJoe Perches void tcp_simple_retransmit(struct sock *);
52657dde7f7SYuchung Cheng void tcp_enter_recovery(struct sock *sk, bool ece_ack);
5275c9f3023SJoe Perches int tcp_trim_head(struct sock *, struct sk_buff *, u32);
52875c119afSEric Dumazet enum tcp_queue {
52975c119afSEric Dumazet 	TCP_FRAG_IN_WRITE_QUEUE,
53075c119afSEric Dumazet 	TCP_FRAG_IN_RTX_QUEUE,
53175c119afSEric Dumazet };
53275c119afSEric Dumazet int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
53375c119afSEric Dumazet 		 struct sk_buff *skb, u32 len,
53475c119afSEric Dumazet 		 unsigned int mss_now, gfp_t gfp);
5351da177e4SLinus Torvalds 
5365c9f3023SJoe Perches void tcp_send_probe0(struct sock *);
5375c9f3023SJoe Perches void tcp_send_partial(struct sock *);
538e520af48SEric Dumazet int tcp_write_wakeup(struct sock *, int mib);
5395c9f3023SJoe Perches void tcp_send_fin(struct sock *sk);
5405c9f3023SJoe Perches void tcp_send_active_reset(struct sock *sk, gfp_t priority);
5415c9f3023SJoe Perches int tcp_send_synack(struct sock *);
5425c9f3023SJoe Perches void tcp_push_one(struct sock *, unsigned int mss_now);
5435c9f3023SJoe Perches void tcp_send_ack(struct sock *sk);
5445c9f3023SJoe Perches void tcp_send_delayed_ack(struct sock *sk);
5455c9f3023SJoe Perches void tcp_send_loss_probe(struct sock *sk);
546ed66dfafSNeal Cardwell bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
547cfea5a68SMartin KaFai Lau void tcp_skb_collapse_tstamp(struct sk_buff *skb,
548cfea5a68SMartin KaFai Lau 			     const struct sk_buff *next_skb);
5491da177e4SLinus Torvalds 
550a762a980SDavid S. Miller /* tcp_input.c */
5515c9f3023SJoe Perches void tcp_rearm_rto(struct sock *sk);
5520f1c28aeSYuchung Cheng void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
5535c9f3023SJoe Perches void tcp_reset(struct sock *sk);
5544f41b1c5SYuchung Cheng void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
555e3e17b77SEric Dumazet void tcp_fin(struct sock *sk);
556a762a980SDavid S. Miller 
5571da177e4SLinus Torvalds /* tcp_timer.c */
5585c9f3023SJoe Perches void tcp_init_xmit_timers(struct sock *);
559463c84b9SArnaldo Carvalho de Melo static inline void tcp_clear_xmit_timers(struct sock *sk)
560463c84b9SArnaldo Carvalho de Melo {
56173a6bab5SEric Dumazet 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
56273a6bab5SEric Dumazet 		sock_put(sk);
56373a6bab5SEric Dumazet 
564463c84b9SArnaldo Carvalho de Melo 	inet_csk_clear_xmit_timers(sk);
565463c84b9SArnaldo Carvalho de Melo }
5661da177e4SLinus Torvalds 
5675c9f3023SJoe Perches unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
5685c9f3023SJoe Perches unsigned int tcp_current_mss(struct sock *sk);
5690c54b85fSIlpo Järvinen 
5700c54b85fSIlpo Järvinen /* Bound MSS / TSO packet size to half of the window */
5710c54b85fSIlpo Järvinen static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
5720c54b85fSIlpo Järvinen {
57301f83d69SAlexey Kuznetsov 	int cutoff;
57401f83d69SAlexey Kuznetsov 
57501f83d69SAlexey Kuznetsov 	/* When the peer uses tiny windows, there is no point in packetizing
57601f83d69SAlexey Kuznetsov 	 * to sub-MSS pieces for the sake of SWS or making sure there
57701f83d69SAlexey Kuznetsov 	 * are enough packets in the pipe for fast recovery.
57801f83d69SAlexey Kuznetsov 	 *
57901f83d69SAlexey Kuznetsov 	 * On the other hand, for extremely large MSS devices, handling
58001f83d69SAlexey Kuznetsov 	 * smaller than MSS windows in this way does make sense.
58101f83d69SAlexey Kuznetsov 	 */
5822631b79fSSeymour, Shane M 	if (tp->max_window > TCP_MSS_DEFAULT)
58301f83d69SAlexey Kuznetsov 		cutoff = (tp->max_window >> 1);
58401f83d69SAlexey Kuznetsov 	else
58501f83d69SAlexey Kuznetsov 		cutoff = tp->max_window;
58601f83d69SAlexey Kuznetsov 
58701f83d69SAlexey Kuznetsov 	if (cutoff && pktsize > cutoff)
58801f83d69SAlexey Kuznetsov 		return max_t(int, cutoff, 68U - tp->tcp_header_len);
5890c54b85fSIlpo Järvinen 	else
5900c54b85fSIlpo Järvinen 		return pktsize;
5910c54b85fSIlpo Järvinen }
5921da177e4SLinus Torvalds 
59317b085eaSArnaldo Carvalho de Melo /* tcp.c */
5940df48c26SEric Dumazet void tcp_get_info(struct sock *, struct tcp_info *);
5951da177e4SLinus Torvalds 
5961da177e4SLinus Torvalds /* Read 'sendfile()'-style from a TCP socket */
5975c9f3023SJoe Perches int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
5981da177e4SLinus Torvalds 		  sk_read_actor_t recv_actor);
5991da177e4SLinus Torvalds 
6005c9f3023SJoe Perches void tcp_initialize_rcv_mss(struct sock *sk);
6011da177e4SLinus Torvalds 
6025c9f3023SJoe Perches int tcp_mtu_to_mss(struct sock *sk, int pmtu);
6035c9f3023SJoe Perches int tcp_mss_to_mtu(struct sock *sk, int mss);
6045c9f3023SJoe Perches void tcp_mtup_init(struct sock *sk);
6055c9f3023SJoe Perches void tcp_init_buffer_space(struct sock *sk);
6065d424d5aSJohn Heffner 
607f1ecd5d9SDamian Lukowski static inline void tcp_bound_rto(const struct sock *sk)
608f1ecd5d9SDamian Lukowski {
609f1ecd5d9SDamian Lukowski 	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
610f1ecd5d9SDamian Lukowski 		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
611f1ecd5d9SDamian Lukowski }
612f1ecd5d9SDamian Lukowski 
613f1ecd5d9SDamian Lukowski static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
614f1ecd5d9SDamian Lukowski {
615740b0f18SEric Dumazet 	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
616f1ecd5d9SDamian Lukowski }
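
/* Worked example (illustrative values): with a smoothed RTT of 100 ms the
 * stored srtt_us is 8 * 100000, so (srtt_us >> 3) recovers 100000 us; adding
 * rttvar_us == 50000 yields a raw RTO of ~150 ms, which callers then cap at
 * TCP_RTO_MAX via tcp_bound_rto() above.
 */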
617f1ecd5d9SDamian Lukowski 
61831770e34SFlorian Westphal static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
61931770e34SFlorian Westphal {
62031770e34SFlorian Westphal 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
62131770e34SFlorian Westphal 			       ntohl(TCP_FLAG_ACK) |
62231770e34SFlorian Westphal 			       snd_wnd);
62331770e34SFlorian Westphal }
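
/* Worked example (illustrative): with timestamps enabled tcp_header_len is 32,
 * so (32 << 26) == 0x80000000 encodes a data offset of 8 words; OR-ing in the
 * ACK flag (0x00100000) and the expected window yields, after htonl(), the
 * exact fourth 32-bit word of the TCP header that header prediction matches
 * against on the receive fast path.
 */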
62431770e34SFlorian Westphal 
62531770e34SFlorian Westphal static inline void tcp_fast_path_on(struct tcp_sock *tp)
62631770e34SFlorian Westphal {
62731770e34SFlorian Westphal 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
62831770e34SFlorian Westphal }
62931770e34SFlorian Westphal 
63031770e34SFlorian Westphal static inline void tcp_fast_path_check(struct sock *sk)
63131770e34SFlorian Westphal {
63231770e34SFlorian Westphal 	struct tcp_sock *tp = tcp_sk(sk);
63331770e34SFlorian Westphal 
63431770e34SFlorian Westphal 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
63531770e34SFlorian Westphal 	    tp->rcv_wnd &&
63631770e34SFlorian Westphal 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
63731770e34SFlorian Westphal 	    !tp->urg_data)
63831770e34SFlorian Westphal 		tcp_fast_path_on(tp);
63931770e34SFlorian Westphal }
64031770e34SFlorian Westphal 
6410c266898SSatoru SATOH /* Compute the actual rto_min value */
6420c266898SSatoru SATOH static inline u32 tcp_rto_min(struct sock *sk)
6430c266898SSatoru SATOH {
644cf533ea5SEric Dumazet 	const struct dst_entry *dst = __sk_dst_get(sk);
6450c266898SSatoru SATOH 	u32 rto_min = TCP_RTO_MIN;
6460c266898SSatoru SATOH 
6470c266898SSatoru SATOH 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
6480c266898SSatoru SATOH 		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
6490c266898SSatoru SATOH 	return rto_min;
6500c266898SSatoru SATOH }
6510c266898SSatoru SATOH 
652740b0f18SEric Dumazet static inline u32 tcp_rto_min_us(struct sock *sk)
653740b0f18SEric Dumazet {
654740b0f18SEric Dumazet 	return jiffies_to_usecs(tcp_rto_min(sk));
655740b0f18SEric Dumazet }
656740b0f18SEric Dumazet 
65781164413SDaniel Borkmann static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
65881164413SDaniel Borkmann {
65981164413SDaniel Borkmann 	return dst_metric_locked(dst, RTAX_CC_ALGO);
66081164413SDaniel Borkmann }
66181164413SDaniel Borkmann 
662f6722583SYuchung Cheng /* Minimum RTT in usec. ~0 means not available. */
663f6722583SYuchung Cheng static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
664f6722583SYuchung Cheng {
66564033892SNeal Cardwell 	return minmax_get(&tp->rtt_min);
666f6722583SYuchung Cheng }
667f6722583SYuchung Cheng 
6681da177e4SLinus Torvalds /* Compute the actual receive window we are currently advertising.
6691da177e4SLinus Torvalds  * Rcv_nxt can be after the window if our peer pushes more data
6701da177e4SLinus Torvalds  * than the offered window.
6711da177e4SLinus Torvalds  */
67240efc6faSStephen Hemminger static inline u32 tcp_receive_window(const struct tcp_sock *tp)
6731da177e4SLinus Torvalds {
6741da177e4SLinus Torvalds 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
6751da177e4SLinus Torvalds 
6761da177e4SLinus Torvalds 	if (win < 0)
6771da177e4SLinus Torvalds 		win = 0;
6781da177e4SLinus Torvalds 	return (u32) win;
6791da177e4SLinus Torvalds }
6801da177e4SLinus Torvalds 
6811da177e4SLinus Torvalds /* Choose a new window, without checks for shrinking, and without
6821da177e4SLinus Torvalds  * scaling applied to the result.  The caller does these things
6831da177e4SLinus Torvalds  * if necessary.  This is a "raw" window selection.
6841da177e4SLinus Torvalds  */
6855c9f3023SJoe Perches u32 __tcp_select_window(struct sock *sk);
6861da177e4SLinus Torvalds 
687ee995283SPavel Emelyanov void tcp_send_window_probe(struct sock *sk);
688ee995283SPavel Emelyanov 
689ec66eda8SEric Dumazet /* TCP uses 32bit jiffies to save some space.
690ec66eda8SEric Dumazet  * Note that this is different from tcp_time_stamp, which
691ec66eda8SEric Dumazet  * historically was the same until linux-4.13.
692ec66eda8SEric Dumazet  */
693ec66eda8SEric Dumazet #define tcp_jiffies32 ((u32)jiffies)
694ec66eda8SEric Dumazet 
6959a568de4SEric Dumazet /*
6969a568de4SEric Dumazet  * Deliver a 32bit value for TCP timestamp option (RFC 7323)
6979a568de4SEric Dumazet  * It is no longer tied to jiffies, but to a 1 ms clock.
6989a568de4SEric Dumazet  * Note: double check if you want to use tcp_jiffies32 instead of this.
6991da177e4SLinus Torvalds  */
7009a568de4SEric Dumazet #define TCP_TS_HZ	1000
7019a568de4SEric Dumazet 
7029a568de4SEric Dumazet static inline u64 tcp_clock_ns(void)
7039a568de4SEric Dumazet {
7049a568de4SEric Dumazet 	return local_clock();
7059a568de4SEric Dumazet }
7069a568de4SEric Dumazet 
7079a568de4SEric Dumazet static inline u64 tcp_clock_us(void)
7089a568de4SEric Dumazet {
7099a568de4SEric Dumazet 	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
7109a568de4SEric Dumazet }
7119a568de4SEric Dumazet 
7129a568de4SEric Dumazet /* This should only be used in contexts where tp->tcp_mstamp is up to date */
7139a568de4SEric Dumazet static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
7149a568de4SEric Dumazet {
7159a568de4SEric Dumazet 	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
7169a568de4SEric Dumazet }
7179a568de4SEric Dumazet 
7189a568de4SEric Dumazet /* Could use tcp_clock_us() / 1000, but this version uses a single divide */
7199a568de4SEric Dumazet static inline u32 tcp_time_stamp_raw(void)
7209a568de4SEric Dumazet {
7219a568de4SEric Dumazet 	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
7229a568de4SEric Dumazet }
7239a568de4SEric Dumazet 
7249a568de4SEric Dumazet 
7259a568de4SEric Dumazet /* Refresh 1us clock of a TCP socket,
7269a568de4SEric Dumazet  * ensuring monotonically increasing values.
7279a568de4SEric Dumazet  */
7289a568de4SEric Dumazet static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
7299a568de4SEric Dumazet {
7309a568de4SEric Dumazet 	u64 val = tcp_clock_us();
7319a568de4SEric Dumazet 
7329a568de4SEric Dumazet 	if (val > tp->tcp_mstamp)
7339a568de4SEric Dumazet 		tp->tcp_mstamp = val;
7349a568de4SEric Dumazet }
7359a568de4SEric Dumazet 
7369a568de4SEric Dumazet static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
7379a568de4SEric Dumazet {
7389a568de4SEric Dumazet 	return max_t(s64, t1 - t0, 0);
7399a568de4SEric Dumazet }
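
/* A minimal usage sketch (illustrative, not a kernel API): measuring how many
 * microseconds have passed since an earlier tp->tcp_mstamp snapshot, using the
 * helpers above.  tcp_mstamp_refresh() keeps the socket clock monotonic and
 * tcp_stamp_us_delta() clamps a negative difference to zero.
 */
static inline u32 tcp_elapsed_us_example(struct tcp_sock *tp, u64 earlier_mstamp)
{
	tcp_mstamp_refresh(tp);		/* bring tp->tcp_mstamp up to date */
	return tcp_stamp_us_delta(tp->tcp_mstamp, earlier_mstamp);
}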
7401da177e4SLinus Torvalds 
7417faee5c0SEric Dumazet static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
7427faee5c0SEric Dumazet {
7439a568de4SEric Dumazet 	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
7447faee5c0SEric Dumazet }
7457faee5c0SEric Dumazet 
7467faee5c0SEric Dumazet 
747a3433f35SChangli Gao #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
748a3433f35SChangli Gao 
749a3433f35SChangli Gao #define TCPHDR_FIN 0x01
750a3433f35SChangli Gao #define TCPHDR_SYN 0x02
751a3433f35SChangli Gao #define TCPHDR_RST 0x04
752a3433f35SChangli Gao #define TCPHDR_PSH 0x08
753a3433f35SChangli Gao #define TCPHDR_ACK 0x10
754a3433f35SChangli Gao #define TCPHDR_URG 0x20
755a3433f35SChangli Gao #define TCPHDR_ECE 0x40
756a3433f35SChangli Gao #define TCPHDR_CWR 0x80
757a3433f35SChangli Gao 
75849213555SDaniel Borkmann #define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
75949213555SDaniel Borkmann 
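/* Example (illustrative, hypothetical helper): the raw flag byte can be tested
 * directly against the TCPHDR_* masks above, e.g. a SYN-ACK carries both
 * TCPHDR_SYN and TCPHDR_ACK.
 */
static inline bool tcp_hdr_is_synack_example(const struct tcphdr *th)
{
	return (tcp_flag_byte(th) & (TCPHDR_SYN | TCPHDR_ACK)) ==
	       (TCPHDR_SYN | TCPHDR_ACK);
}
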
760caa20d9aSStephen Hemminger /* This is what the send packet queuing engine uses to pass
761f86586faSEric Dumazet  * TCP per-packet control information to the transmission code.
762f86586faSEric Dumazet  * We also store the host-order sequence numbers in here.
763f86586faSEric Dumazet  * This is 44 bytes if IPV6 is enabled.
764f86586faSEric Dumazet  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
7651da177e4SLinus Torvalds  */
7661da177e4SLinus Torvalds struct tcp_skb_cb {
7671da177e4SLinus Torvalds 	__u32		seq;		/* Starting sequence number	*/
7681da177e4SLinus Torvalds 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
769cd7d8498SEric Dumazet 	union {
770cd7d8498SEric Dumazet 		/* Note : tcp_tw_isn is used in input path only
771cd7d8498SEric Dumazet 		 *	  (isn chosen by tcp_timewait_state_process())
772cd7d8498SEric Dumazet 		 *
773f69ad292SEric Dumazet 		 * 	  tcp_gso_segs/size are used in write queue only,
774f69ad292SEric Dumazet 		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
775cd7d8498SEric Dumazet 		 */
776cd7d8498SEric Dumazet 		__u32		tcp_tw_isn;
777f69ad292SEric Dumazet 		struct {
778f69ad292SEric Dumazet 			u16	tcp_gso_segs;
779f69ad292SEric Dumazet 			u16	tcp_gso_size;
780f69ad292SEric Dumazet 		};
781cd7d8498SEric Dumazet 	};
7824de075e0SEric Dumazet 	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/
783f4f9f6e7SNeal Cardwell 
784713bafeaSYuchung Cheng 	__u8		sacked;		/* State flags for SACK.	*/
7851da177e4SLinus Torvalds #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
7861da177e4SLinus Torvalds #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
7871da177e4SLinus Torvalds #define TCPCB_LOST		0x04	/* SKB is lost			*/
7881da177e4SLinus Torvalds #define TCPCB_TAGBITS		0x07	/* All tag bits			*/
7899d186cacSAndrey Vagin #define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
7901da177e4SLinus Torvalds #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
7919d186cacSAndrey Vagin #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
7929d186cacSAndrey Vagin 				TCPCB_REPAIRED)
7931da177e4SLinus Torvalds 
794f4f9f6e7SNeal Cardwell 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
7956b084928SSoheil Hassas Yeganeh 	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
796c134ecb8SMartin KaFai Lau 			eor:1,		/* Is skb MSG_EOR marked? */
79798aaa913SMike Maloney 			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
79898aaa913SMike Maloney 			unused:5;
7991da177e4SLinus Torvalds 	__u32		ack_seq;	/* Sequence number ACK'd	*/
800971f10ecSEric Dumazet 	union {
801b75803d5SLawrence Brakmo 		struct {
802b9f64820SYuchung Cheng 			/* There is space for up to 24 bytes */
803d7722e85SSoheil Hassas Yeganeh 			__u32 in_flight:30,/* Bytes in flight at transmit */
804d7722e85SSoheil Hassas Yeganeh 			      is_app_limited:1, /* cwnd not fully used? */
805d7722e85SSoheil Hassas Yeganeh 			      unused:1;
806b9f64820SYuchung Cheng 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
807b9f64820SYuchung Cheng 			__u32 delivered;
808b9f64820SYuchung Cheng 			/* start of send pipeline phase */
8099a568de4SEric Dumazet 			u64 first_tx_mstamp;
810b9f64820SYuchung Cheng 			/* when we reached the "delivered" count */
8119a568de4SEric Dumazet 			u64 delivered_mstamp;
812b75803d5SLawrence Brakmo 		} tx;   /* only used for outgoing skbs */
813b75803d5SLawrence Brakmo 		union {
814971f10ecSEric Dumazet 			struct inet_skb_parm	h4;
815971f10ecSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
816971f10ecSEric Dumazet 			struct inet6_skb_parm	h6;
817971f10ecSEric Dumazet #endif
818b75803d5SLawrence Brakmo 		} header;	/* For incoming skbs */
81934f79502SJohn Fastabend 		struct {
82034f79502SJohn Fastabend 			__u32 flags;
821e5cd3abcSJohn Fastabend 			struct sock *sk_redir;
8228108a775SJohn Fastabend 			void *data_end;
82334f79502SJohn Fastabend 		} bpf;
824b75803d5SLawrence Brakmo 	};
8251da177e4SLinus Torvalds };
8261da177e4SLinus Torvalds 
8271da177e4SLinus Torvalds #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
8281da177e4SLinus Torvalds 
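/* Illustrative helper (hypothetical, not part of the kernel API): the
 * sequence-number span covered by one skb, per the end_seq definition above
 * (SEQ + FIN + SYN + datalen).
 */
static inline u32 tcp_skb_seq_span_example(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
}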
829870c3151SEric Dumazet 
830815afe17SEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
831870c3151SEric Dumazet /* This is the variant of inet6_iif() that must be used by TCP,
832870c3151SEric Dumazet  * as TCP moves IP6CB into a different location in skb->cb[]
833870c3151SEric Dumazet  */
834870c3151SEric Dumazet static inline int tcp_v6_iif(const struct sk_buff *skb)
835870c3151SEric Dumazet {
836a04a480dSDavid Ahern 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
83774b20582SDavid Ahern 
83874b20582SDavid Ahern 	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
839870c3151SEric Dumazet }
8404297a0efSDavid Ahern 
8414297a0efSDavid Ahern /* TCP_SKB_CB reference means this can not be used from early demux */
8424297a0efSDavid Ahern static inline int tcp_v6_sdif(const struct sk_buff *skb)
8434297a0efSDavid Ahern {
8444297a0efSDavid Ahern #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
8454297a0efSDavid Ahern 	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
8464297a0efSDavid Ahern 		return TCP_SKB_CB(skb)->header.h6.iif;
8474297a0efSDavid Ahern #endif
8484297a0efSDavid Ahern 	return 0;
8494297a0efSDavid Ahern }
850815afe17SEric Dumazet #endif
851870c3151SEric Dumazet 
852a04a480dSDavid Ahern static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
853a04a480dSDavid Ahern {
854a04a480dSDavid Ahern #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
855a04a480dSDavid Ahern 	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
856b4d1605aSDavid Ahern 	    skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
857a04a480dSDavid Ahern 		return true;
858a04a480dSDavid Ahern #endif
859a04a480dSDavid Ahern 	return false;
860a04a480dSDavid Ahern }
861a04a480dSDavid Ahern 
8623fa6f616SDavid Ahern /* TCP_SKB_CB reference means this can not be used from early demux */
8633fa6f616SDavid Ahern static inline int tcp_v4_sdif(struct sk_buff *skb)
8643fa6f616SDavid Ahern {
8653fa6f616SDavid Ahern #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
8663fa6f616SDavid Ahern 	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
8673fa6f616SDavid Ahern 		return TCP_SKB_CB(skb)->header.h4.iif;
8683fa6f616SDavid Ahern #endif
8693fa6f616SDavid Ahern 	return 0;
8703fa6f616SDavid Ahern }
8713fa6f616SDavid Ahern 
8721da177e4SLinus Torvalds /* Due to TSO, an SKB can be composed of multiple actual
8731da177e4SLinus Torvalds  * packets.  To keep these tracked properly, we use this.
8741da177e4SLinus Torvalds  */
8751da177e4SLinus Torvalds static inline int tcp_skb_pcount(const struct sk_buff *skb)
8761da177e4SLinus Torvalds {
877cd7d8498SEric Dumazet 	return TCP_SKB_CB(skb)->tcp_gso_segs;
878cd7d8498SEric Dumazet }
879cd7d8498SEric Dumazet 
880cd7d8498SEric Dumazet static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
881cd7d8498SEric Dumazet {
882cd7d8498SEric Dumazet 	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
883cd7d8498SEric Dumazet }
884cd7d8498SEric Dumazet 
885cd7d8498SEric Dumazet static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
886cd7d8498SEric Dumazet {
887cd7d8498SEric Dumazet 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
8881da177e4SLinus Torvalds }
8891da177e4SLinus Torvalds 
890f69ad292SEric Dumazet /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
8911da177e4SLinus Torvalds static inline int tcp_skb_mss(const struct sk_buff *skb)
8921da177e4SLinus Torvalds {
893f69ad292SEric Dumazet 	return TCP_SKB_CB(skb)->tcp_gso_size;
8941da177e4SLinus Torvalds }
8951da177e4SLinus Torvalds 
896c134ecb8SMartin KaFai Lau static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
897c134ecb8SMartin KaFai Lau {
898c134ecb8SMartin KaFai Lau 	return likely(!TCP_SKB_CB(skb)->eor);
899c134ecb8SMartin KaFai Lau }
900c134ecb8SMartin KaFai Lau 
901317a76f9SStephen Hemminger /* Events passed to congestion control interface */
902317a76f9SStephen Hemminger enum tcp_ca_event {
903317a76f9SStephen Hemminger 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
904317a76f9SStephen Hemminger 	CA_EVENT_CWND_RESTART,	/* congestion window restart */
905317a76f9SStephen Hemminger 	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
906317a76f9SStephen Hemminger 	CA_EVENT_LOSS,		/* loss timeout */
9079890092eSFlorian Westphal 	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
9089890092eSFlorian Westphal 	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
9099890092eSFlorian Westphal 	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
9109890092eSFlorian Westphal 	CA_EVENT_NON_DELAYED_ACK,
9117354c8c3SFlorian Westphal };
9127354c8c3SFlorian Westphal 
9139890092eSFlorian Westphal /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
9147354c8c3SFlorian Westphal enum tcp_ca_ack_event_flags {
915c1d2b4c3SFlorian Westphal 	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
916c1d2b4c3SFlorian Westphal 	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
917c1d2b4c3SFlorian Westphal 	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
918317a76f9SStephen Hemminger };
919317a76f9SStephen Hemminger 
920317a76f9SStephen Hemminger /*
921317a76f9SStephen Hemminger  * Interface for adding new TCP congestion control handlers
922317a76f9SStephen Hemminger  */
923317a76f9SStephen Hemminger #define TCP_CA_NAME_MAX	16
9243ff825b2SStephen Hemminger #define TCP_CA_MAX	128
9253ff825b2SStephen Hemminger #define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
9263ff825b2SStephen Hemminger 
927c5c6a8abSDaniel Borkmann #define TCP_CA_UNSPEC	0
928c5c6a8abSDaniel Borkmann 
92930e502a3SDaniel Borkmann /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
930164891aaSStephen Hemminger #define TCP_CONG_NON_RESTRICTED 0x1
93130e502a3SDaniel Borkmann /* Requires ECN/ECT set on all packets */
93230e502a3SDaniel Borkmann #define TCP_CONG_NEEDS_ECN	0x2
933164891aaSStephen Hemminger 
93464f40ff5SEric Dumazet union tcp_cc_info;
93564f40ff5SEric Dumazet 
936756ee172SLawrence Brakmo struct ack_sample {
937756ee172SLawrence Brakmo 	u32 pkts_acked;
938756ee172SLawrence Brakmo 	s32 rtt_us;
9396f094b9eSLawrence Brakmo 	u32 in_flight;
940756ee172SLawrence Brakmo };
941756ee172SLawrence Brakmo 
942b9f64820SYuchung Cheng /* A rate sample measures the number of (original/retransmitted) data
943b9f64820SYuchung Cheng  * packets delivered "delivered" over an interval of time "interval_us".
944b9f64820SYuchung Cheng  * The tcp_rate.c code fills in the rate sample, and congestion
945b9f64820SYuchung Cheng  * control modules that define a cong_control function to run at the end
946b9f64820SYuchung Cheng  * of ACK processing can optionally choose to consult this sample when
947b9f64820SYuchung Cheng  * setting cwnd and pacing rate.
948b9f64820SYuchung Cheng  * A sample is invalid if "delivered" or "interval_us" is negative.
949b9f64820SYuchung Cheng  */
950b9f64820SYuchung Cheng struct rate_sample {
9519a568de4SEric Dumazet 	u64  prior_mstamp; /* starting timestamp for interval */
952b9f64820SYuchung Cheng 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
953b9f64820SYuchung Cheng 	s32  delivered;		/* number of packets delivered over interval */
954b9f64820SYuchung Cheng 	long interval_us;	/* time for tp->delivered to incr "delivered" */
955b9f64820SYuchung Cheng 	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
956b9f64820SYuchung Cheng 	int  losses;		/* number of packets marked lost upon ACK */
957b9f64820SYuchung Cheng 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
958b9f64820SYuchung Cheng 	u32  prior_in_flight;	/* in flight before this ACK */
959d7722e85SSoheil Hassas Yeganeh 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
960b9f64820SYuchung Cheng 	bool is_retrans;	/* is sample from retransmission? */
961e4286603SYuchung Cheng 	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
962b9f64820SYuchung Cheng };
963b9f64820SYuchung Cheng 
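/* A minimal sketch (illustrative, not a kernel API) of how a congestion
 * control module might turn a valid rate_sample into a raw delivery-rate
 * estimate in bytes per second; "mss" is whatever segment size the caller
 * wants to account with.
 */
static inline u64 tcp_delivery_rate_example(const struct rate_sample *rs, u32 mss)
{
	if (rs->delivered <= 0 || rs->interval_us <= 0)
		return 0;	/* invalid sample, see the comment above */
	return div_u64((u64)rs->delivered * mss * USEC_PER_SEC,
		       (u32)rs->interval_us);
}
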
964317a76f9SStephen Hemminger struct tcp_congestion_ops {
965317a76f9SStephen Hemminger 	struct list_head	list;
966c5c6a8abSDaniel Borkmann 	u32 key;
967c5c6a8abSDaniel Borkmann 	u32 flags;
968317a76f9SStephen Hemminger 
969317a76f9SStephen Hemminger 	/* initialize private data (optional) */
9706687e988SArnaldo Carvalho de Melo 	void (*init)(struct sock *sk);
971317a76f9SStephen Hemminger 	/* cleanup private data  (optional) */
9726687e988SArnaldo Carvalho de Melo 	void (*release)(struct sock *sk);
973317a76f9SStephen Hemminger 
974317a76f9SStephen Hemminger 	/* return slow start threshold (required) */
9756687e988SArnaldo Carvalho de Melo 	u32 (*ssthresh)(struct sock *sk);
976317a76f9SStephen Hemminger 	/* do new cwnd calculation (required) */
97724901551SEric Dumazet 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
978317a76f9SStephen Hemminger 	/* call before changing ca_state (optional) */
9796687e988SArnaldo Carvalho de Melo 	void (*set_state)(struct sock *sk, u8 new_state);
980317a76f9SStephen Hemminger 	/* call when cwnd event occurs (optional) */
9816687e988SArnaldo Carvalho de Melo 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
9827354c8c3SFlorian Westphal 	/* call when ack arrives (optional) */
9837354c8c3SFlorian Westphal 	void (*in_ack_event)(struct sock *sk, u32 flags);
9841e0ce2a1SAnmol Sarma 	/* new value of cwnd after loss (required) */
9856687e988SArnaldo Carvalho de Melo 	u32  (*undo_cwnd)(struct sock *sk);
986317a76f9SStephen Hemminger 	/* hook for packet ack accounting (optional) */
987756ee172SLawrence Brakmo 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
988dcb8c9b4SEric Dumazet 	/* override sysctl_tcp_min_tso_segs */
989dcb8c9b4SEric Dumazet 	u32 (*min_tso_segs)(struct sock *sk);
99077bfc174SYuchung Cheng 	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
99177bfc174SYuchung Cheng 	u32 (*sndbuf_expand)(struct sock *sk);
992c0402760SYuchung Cheng 	/* call when packets are delivered to update cwnd and pacing rate,
993c0402760SYuchung Cheng 	 * after all the ca_state processing. (optional)
994c0402760SYuchung Cheng 	 */
995c0402760SYuchung Cheng 	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
99673c1f4a0SArnaldo Carvalho de Melo 	/* get info for inet_diag (optional) */
99764f40ff5SEric Dumazet 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
99864f40ff5SEric Dumazet 			   union tcp_cc_info *info);
999317a76f9SStephen Hemminger 
1000317a76f9SStephen Hemminger 	char 		name[TCP_CA_NAME_MAX];
1001317a76f9SStephen Hemminger 	struct module 	*owner;
1002317a76f9SStephen Hemminger };
1003317a76f9SStephen Hemminger 
10045c9f3023SJoe Perches int tcp_register_congestion_control(struct tcp_congestion_ops *type);
10055c9f3023SJoe Perches void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1006317a76f9SStephen Hemminger 
100755d8694fSFlorian Westphal void tcp_assign_congestion_control(struct sock *sk);
10085c9f3023SJoe Perches void tcp_init_congestion_control(struct sock *sk);
10095c9f3023SJoe Perches void tcp_cleanup_congestion_control(struct sock *sk);
10106670e152SStephen Hemminger int tcp_set_default_congestion_control(struct net *net, const char *name);
10116670e152SStephen Hemminger void tcp_get_default_congestion_control(struct net *net, char *name);
10125c9f3023SJoe Perches void tcp_get_available_congestion_control(char *buf, size_t len);
10135c9f3023SJoe Perches void tcp_get_allowed_congestion_control(char *buf, size_t len);
10145c9f3023SJoe Perches int tcp_set_allowed_congestion_control(char *allowed);
1015ebfa00c5SSabrina Dubroca int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
1016e73ebb08SNeal Cardwell u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1017e73ebb08SNeal Cardwell void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1018317a76f9SStephen Hemminger 
10195c9f3023SJoe Perches u32 tcp_reno_ssthresh(struct sock *sk);
1020e9799183SFlorian Westphal u32 tcp_reno_undo_cwnd(struct sock *sk);
102124901551SEric Dumazet void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1022a8acfbacSDavid S. Miller extern struct tcp_congestion_ops tcp_reno;
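
/* Hedged example (illustrative, not part of this header): a minimal
 * congestion control module fills in the required hooks and registers
 * itself.  The Reno helpers declared above stand in for real algorithm
 * code; the name "example" is made up, and <linux/module.h> is assumed
 * for THIS_MODULE/module_init().
 */
static struct tcp_congestion_ops tcp_example __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* required */
	.name		= "example",
	.owner		= THIS_MODULE,
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}
module_init(tcp_example_register);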
1023317a76f9SStephen Hemminger 
1024c5c6a8abSDaniel Borkmann struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
10256670e152SStephen Hemminger u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
1026ea697639SDaniel Borkmann #ifdef CONFIG_INET
1027c5c6a8abSDaniel Borkmann char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1028ea697639SDaniel Borkmann #else
1029ea697639SDaniel Borkmann static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1030ea697639SDaniel Borkmann {
1031ea697639SDaniel Borkmann 	return NULL;
1032ea697639SDaniel Borkmann }
1033ea697639SDaniel Borkmann #endif
1034c5c6a8abSDaniel Borkmann 
103530e502a3SDaniel Borkmann static inline bool tcp_ca_needs_ecn(const struct sock *sk)
103630e502a3SDaniel Borkmann {
103730e502a3SDaniel Borkmann 	const struct inet_connection_sock *icsk = inet_csk(sk);
103830e502a3SDaniel Borkmann 
103930e502a3SDaniel Borkmann 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
104030e502a3SDaniel Borkmann }
104130e502a3SDaniel Borkmann 
10426687e988SArnaldo Carvalho de Melo static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
1043317a76f9SStephen Hemminger {
10446687e988SArnaldo Carvalho de Melo 	struct inet_connection_sock *icsk = inet_csk(sk);
10456687e988SArnaldo Carvalho de Melo 
10466687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_ops->set_state)
10476687e988SArnaldo Carvalho de Melo 		icsk->icsk_ca_ops->set_state(sk, ca_state);
10486687e988SArnaldo Carvalho de Melo 	icsk->icsk_ca_state = ca_state;
1049317a76f9SStephen Hemminger }
1050317a76f9SStephen Hemminger 
10516687e988SArnaldo Carvalho de Melo static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1052317a76f9SStephen Hemminger {
10536687e988SArnaldo Carvalho de Melo 	const struct inet_connection_sock *icsk = inet_csk(sk);
10546687e988SArnaldo Carvalho de Melo 
10556687e988SArnaldo Carvalho de Melo 	if (icsk->icsk_ca_ops->cwnd_event)
10566687e988SArnaldo Carvalho de Melo 		icsk->icsk_ca_ops->cwnd_event(sk, event);
1057317a76f9SStephen Hemminger }
1058317a76f9SStephen Hemminger 
1059b9f64820SYuchung Cheng /* From tcp_rate.c */
1060b9f64820SYuchung Cheng void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1061b9f64820SYuchung Cheng void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1062b9f64820SYuchung Cheng 			    struct rate_sample *rs);
1063b9f64820SYuchung Cheng void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1064d4761754SYousuk Seung 		  bool is_sack_reneg, struct rate_sample *rs);
1065d7722e85SSoheil Hassas Yeganeh void tcp_rate_check_app_limited(struct sock *sk);
1066b9f64820SYuchung Cheng 
1067e60402d0SIlpo Järvinen /* These functions determine how the current flow behaves in respect of SACK
1068e60402d0SIlpo Järvinen  * handling. SACK is negotiated with the peer, and therefore it can vary
1069e60402d0SIlpo Järvinen  * between different flows.
1070e60402d0SIlpo Järvinen  *
1071e60402d0SIlpo Järvinen  * tcp_is_sack - SACK enabled
1072e60402d0SIlpo Järvinen  * tcp_is_reno - No SACK
1073e60402d0SIlpo Järvinen  */
1074e60402d0SIlpo Järvinen static inline int tcp_is_sack(const struct tcp_sock *tp)
1075e60402d0SIlpo Järvinen {
1076e60402d0SIlpo Järvinen 	return tp->rx_opt.sack_ok;
1077e60402d0SIlpo Järvinen }
1078e60402d0SIlpo Järvinen 
1079a2a385d6SEric Dumazet static inline bool tcp_is_reno(const struct tcp_sock *tp)
1080e60402d0SIlpo Järvinen {
1081e60402d0SIlpo Järvinen 	return !tcp_is_sack(tp);
1082e60402d0SIlpo Järvinen }
1083e60402d0SIlpo Järvinen 
108483ae4088SIlpo Järvinen static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
108583ae4088SIlpo Järvinen {
108683ae4088SIlpo Järvinen 	return tp->sacked_out + tp->lost_out;
108783ae4088SIlpo Järvinen }
108883ae4088SIlpo Järvinen 
10891da177e4SLinus Torvalds /* This determines how many packets are "in the network" to the best
10901da177e4SLinus Torvalds  * of our knowledge.  In many cases it is conservative, but where
10911da177e4SLinus Torvalds  * detailed information is available from the receiver (via SACK
10921da177e4SLinus Torvalds  * blocks etc.) we can make more aggressive calculations.
10931da177e4SLinus Torvalds  *
10941da177e4SLinus Torvalds  * Use this for decisions involving congestion control; use just
10951da177e4SLinus Torvalds  * tp->packets_out to determine if the send queue is empty or not.
10961da177e4SLinus Torvalds  *
10971da177e4SLinus Torvalds  * Read this equation as:
10981da177e4SLinus Torvalds  *
10991da177e4SLinus Torvalds  *	"Packets sent once on transmission queue" MINUS
11001da177e4SLinus Torvalds  *	"Packets left network, but not honestly ACKed yet" PLUS
11011da177e4SLinus Torvalds  *	"Packets fast retransmitted"
11021da177e4SLinus Torvalds  */
110340efc6faSStephen Hemminger static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
11041da177e4SLinus Torvalds {
110583ae4088SIlpo Järvinen 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
11061da177e4SLinus Torvalds }
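
/* Worked example of the equation above (numbers chosen for illustration):
 * packets_out = 10, sacked_out = 2, lost_out = 1, retrans_out = 1
 * => left_out = 2 + 1 = 3, in_flight = 10 - 3 + 1 = 8 packets.
 */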
11071da177e4SLinus Torvalds 
11080b6a05c1SIlpo Järvinen #define TCP_INFINITE_SSTHRESH	0x7fffffff
11090b6a05c1SIlpo Järvinen 
1110071d5080SYuchung Cheng static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1111071d5080SYuchung Cheng {
111276174004SYuchung Cheng 	return tp->snd_cwnd < tp->snd_ssthresh;
1113071d5080SYuchung Cheng }
1114071d5080SYuchung Cheng 
11150b6a05c1SIlpo Järvinen static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
11160b6a05c1SIlpo Järvinen {
11170b6a05c1SIlpo Järvinen 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
11180b6a05c1SIlpo Järvinen }
11190b6a05c1SIlpo Järvinen 
1120684bad11SYuchung Cheng static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1121684bad11SYuchung Cheng {
1122684bad11SYuchung Cheng 	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1123684bad11SYuchung Cheng 	       (1 << inet_csk(sk)->icsk_ca_state);
1124684bad11SYuchung Cheng }
1125684bad11SYuchung Cheng 
11261da177e4SLinus Torvalds /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1127684bad11SYuchung Cheng  * The exception is cwnd reduction phase, when cwnd is decreasing towards
11281da177e4SLinus Torvalds  * ssthresh.
11291da177e4SLinus Torvalds  */
11306687e988SArnaldo Carvalho de Melo static inline __u32 tcp_current_ssthresh(const struct sock *sk)
11311da177e4SLinus Torvalds {
11326687e988SArnaldo Carvalho de Melo 	const struct tcp_sock *tp = tcp_sk(sk);
1133cf533ea5SEric Dumazet 
1134684bad11SYuchung Cheng 	if (tcp_in_cwnd_reduction(sk))
11351da177e4SLinus Torvalds 		return tp->snd_ssthresh;
11361da177e4SLinus Torvalds 	else
11371da177e4SLinus Torvalds 		return max(tp->snd_ssthresh,
11381da177e4SLinus Torvalds 			   ((tp->snd_cwnd >> 1) +
11391da177e4SLinus Torvalds 			    (tp->snd_cwnd >> 2)));
11401da177e4SLinus Torvalds }
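
/* Note: (snd_cwnd >> 1) + (snd_cwnd >> 2) is 3/4 * cwnd, so outside of
 * CWR/Recovery this returns max(ssthresh, 3/4 * cwnd); e.g. cwnd = 40 and
 * ssthresh = 20 gives 30.
 */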
11411da177e4SLinus Torvalds 
1142b9c4595bSIlpo Järvinen /* Use define here intentionally to get WARN_ON location shown at the caller */
1143b9c4595bSIlpo Järvinen #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
11441da177e4SLinus Torvalds 
11455ee2c941SChristoph Paasch void tcp_enter_cwr(struct sock *sk);
11465c9f3023SJoe Perches __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
11471da177e4SLinus Torvalds 
11486b5a5c0dSNeal Cardwell /* The maximum number of MSS worth of available cwnd for which TSO defers
11496b5a5c0dSNeal Cardwell  * sending, if sysctl_tcp_tso_win_divisor is not in use.
11506b5a5c0dSNeal Cardwell  */
11516b5a5c0dSNeal Cardwell static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
11526b5a5c0dSNeal Cardwell {
11536b5a5c0dSNeal Cardwell 	return 3;
11546b5a5c0dSNeal Cardwell }
11556b5a5c0dSNeal Cardwell 
115690840defSIlpo Järvinen /* Returns end sequence number of the receiver's advertised window */
115790840defSIlpo Järvinen static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
115890840defSIlpo Järvinen {
115990840defSIlpo Järvinen 	return tp->snd_una + tp->snd_wnd;
116090840defSIlpo Järvinen }
1161e114a710SEric Dumazet 
1162e114a710SEric Dumazet /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1163e114a710SEric Dumazet  * flexible approach. The RFC suggests cwnd should not be raised unless
1164ca8a2263SNeal Cardwell  * it was fully used previously. And that's exactly what we do in
1165ca8a2263SNeal Cardwell  * congestion avoidance mode. But in slow start we allow cwnd to grow
1166ca8a2263SNeal Cardwell  * as long as the application has used half the cwnd.
1167e114a710SEric Dumazet  * Example :
1168e114a710SEric Dumazet  *    cwnd is 10 (IW10), but application sends 9 frames.
1169e114a710SEric Dumazet  *    We allow cwnd to reach 18 when all frames are ACKed.
1170e114a710SEric Dumazet  * This check is safe because it's as aggressive as slow start which already
1171e114a710SEric Dumazet  * risks 100% overshoot. The advantage is that we discourage the application
1172e114a710SEric Dumazet  * from sending more filler packets or data to artificially blow up the cwnd
1173e114a710SEric Dumazet  * usage, and allow an application-limited process to probe bw more aggressively.
1174e114a710SEric Dumazet  */
117524901551SEric Dumazet static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1176e114a710SEric Dumazet {
1177e114a710SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1178e114a710SEric Dumazet 
1179ca8a2263SNeal Cardwell 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1180071d5080SYuchung Cheng 	if (tcp_in_slow_start(tp))
1181ca8a2263SNeal Cardwell 		return tp->snd_cwnd < 2 * tp->max_packets_out;
1182ca8a2263SNeal Cardwell 
1183ca8a2263SNeal Cardwell 	return tp->is_cwnd_limited;
1184e114a710SEric Dumazet }
1185f4805edeSStephen Hemminger 
118621c8fe99SEric Dumazet /* Something is really bad, we could not queue an additional packet,
118721c8fe99SEric Dumazet  * because the qdisc is full or the receiver sent a zero window.
118821c8fe99SEric Dumazet  * We do not want to add fuel to the fire, or abort too early,
118921c8fe99SEric Dumazet  * so make sure the timer we arm now is at least 200ms in the future,
119021c8fe99SEric Dumazet  * regardless of current icsk_rto value (as it could be ~2ms)
119121c8fe99SEric Dumazet  */
119221c8fe99SEric Dumazet static inline unsigned long tcp_probe0_base(const struct sock *sk)
119321c8fe99SEric Dumazet {
119421c8fe99SEric Dumazet 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
119521c8fe99SEric Dumazet }
119621c8fe99SEric Dumazet 
119721c8fe99SEric Dumazet /* Variant of inet_csk_rto_backoff() used for zero window probes */
119821c8fe99SEric Dumazet static inline unsigned long tcp_probe0_when(const struct sock *sk,
119921c8fe99SEric Dumazet 					    unsigned long max_when)
120021c8fe99SEric Dumazet {
120121c8fe99SEric Dumazet 	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;
120221c8fe99SEric Dumazet 
120321c8fe99SEric Dumazet 	return (unsigned long)min_t(u64, when, max_when);
120421c8fe99SEric Dumazet }
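
/* Example: with tcp_probe0_base() = 200ms and icsk_backoff = 3, the probe
 * timer would be armed 1.6s from now, clamped to the max_when supplied by
 * the caller.
 */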
120521c8fe99SEric Dumazet 
12069e412ba7SIlpo Järvinen static inline void tcp_check_probe_timer(struct sock *sk)
12071da177e4SLinus Torvalds {
120821c8fe99SEric Dumazet 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
12093f421baaSArnaldo Carvalho de Melo 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
121021c8fe99SEric Dumazet 					  tcp_probe0_base(sk), TCP_RTO_MAX);
12111da177e4SLinus Torvalds }
12121da177e4SLinus Torvalds 
1213ee7537b6SHantzis Fotis static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
12141da177e4SLinus Torvalds {
12151da177e4SLinus Torvalds 	tp->snd_wl1 = seq;
12161da177e4SLinus Torvalds }
12171da177e4SLinus Torvalds 
1218ee7537b6SHantzis Fotis static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
12191da177e4SLinus Torvalds {
12201da177e4SLinus Torvalds 	tp->snd_wl1 = seq;
12211da177e4SLinus Torvalds }
12221da177e4SLinus Torvalds 
12231da177e4SLinus Torvalds /*
12241da177e4SLinus Torvalds  * Calculate(/check) TCP checksum
12251da177e4SLinus Torvalds  */
1226ba7808eaSFrederik Deweerdt static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1227ba7808eaSFrederik Deweerdt 				   __be32 daddr, __wsum base)
12281da177e4SLinus Torvalds {
12291da177e4SLinus Torvalds 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
12301da177e4SLinus Torvalds }
12311da177e4SLinus Torvalds 
1232b51655b9SAl Viro static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
12331da177e4SLinus Torvalds {
1234fb286bb2SHerbert Xu 	return __skb_checksum_complete(skb);
12351da177e4SLinus Torvalds }
12361da177e4SLinus Torvalds 
1237a2a385d6SEric Dumazet static inline bool tcp_checksum_complete(struct sk_buff *skb)
12381da177e4SLinus Torvalds {
123960476372SHerbert Xu 	return !skb_csum_unnecessary(skb) &&
12401da177e4SLinus Torvalds 		__tcp_checksum_complete(skb);
12411da177e4SLinus Torvalds }
12421da177e4SLinus Torvalds 
1243c9c33212SEric Dumazet bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1244ac6e7800SEric Dumazet int tcp_filter(struct sock *sk, struct sk_buff *skb);
12451da177e4SLinus Torvalds 
12461da177e4SLinus Torvalds #undef STATE_TRACE
12471da177e4SLinus Torvalds 
12481da177e4SLinus Torvalds #ifdef STATE_TRACE
12491da177e4SLinus Torvalds static const char *statename[]={
12501da177e4SLinus Torvalds 	"Unused","Established","Syn Sent","Syn Recv",
12511da177e4SLinus Torvalds 	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
12521da177e4SLinus Torvalds 	"Close Wait","Last ACK","Listen","Closing"
12531da177e4SLinus Torvalds };
12541da177e4SLinus Torvalds #endif
12555c9f3023SJoe Perches void tcp_set_state(struct sock *sk, int state);
12561da177e4SLinus Torvalds 
12575c9f3023SJoe Perches void tcp_done(struct sock *sk);
12581da177e4SLinus Torvalds 
1259c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err);
1260c1e64e29SLorenzo Colitti 
126140efc6faSStephen Hemminger static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
12621da177e4SLinus Torvalds {
12631da177e4SLinus Torvalds 	rx_opt->dsack = 0;
12641da177e4SLinus Torvalds 	rx_opt->num_sacks = 0;
12651da177e4SLinus Torvalds }
12661da177e4SLinus Torvalds 
12675c9f3023SJoe Perches u32 tcp_default_init_rwnd(u32 mss);
12686f021c62SEric Dumazet void tcp_cwnd_restart(struct sock *sk, s32 delta);
12696f021c62SEric Dumazet 
12706f021c62SEric Dumazet static inline void tcp_slow_start_after_idle_check(struct sock *sk)
12716f021c62SEric Dumazet {
12721b1fc3fdSWei Wang 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
12736f021c62SEric Dumazet 	struct tcp_sock *tp = tcp_sk(sk);
12746f021c62SEric Dumazet 	s32 delta;
12756f021c62SEric Dumazet 
1276b510f0d2SEric Dumazet 	if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
12771b1fc3fdSWei Wang 	    ca_ops->cong_control)
12786f021c62SEric Dumazet 		return;
1279d635fbe2SEric Dumazet 	delta = tcp_jiffies32 - tp->lsndtime;
12806f021c62SEric Dumazet 	if (delta > inet_csk(sk)->icsk_rto)
12816f021c62SEric Dumazet 		tcp_cwnd_restart(sk, delta);
12826f021c62SEric Dumazet }
128385f16525SYuchung Cheng 
12841da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer. */
1285ceef9ab6SEric Dumazet void tcp_select_initial_window(const struct sock *sk, int __space,
1286ceef9ab6SEric Dumazet 			       __u32 mss, __u32 *rcv_wnd,
12875c9f3023SJoe Perches 			       __u32 *window_clamp, int wscale_ok,
12885c9f3023SJoe Perches 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
12891da177e4SLinus Torvalds 
129094f0893eSEric Dumazet static inline int tcp_win_from_space(const struct sock *sk, int space)
12911da177e4SLinus Torvalds {
129294f0893eSEric Dumazet 	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
1293c4836742SGao Feng 
1294c4836742SGao Feng 	return tcp_adv_win_scale <= 0 ?
1295c4836742SGao Feng 		(space >> (-tcp_adv_win_scale)) :
1296c4836742SGao Feng 		space - (space >> tcp_adv_win_scale);
12971da177e4SLinus Torvalds }
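
/* Example: with sysctl_tcp_adv_win_scale = 1 this returns space/2, with 2 it
 * returns 3/4 of space, and with -2 it returns space/4 (the rest being
 * reserved as overhead).
 */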
12981da177e4SLinus Torvalds 
12991da177e4SLinus Torvalds /* Note: caller must be prepared to deal with negative returns */
13001da177e4SLinus Torvalds static inline int tcp_space(const struct sock *sk)
13011da177e4SLinus Torvalds {
130294f0893eSEric Dumazet 	return tcp_win_from_space(sk, sk->sk_rcvbuf -
13031da177e4SLinus Torvalds 				  atomic_read(&sk->sk_rmem_alloc));
13041da177e4SLinus Torvalds }
13051da177e4SLinus Torvalds 
13061da177e4SLinus Torvalds static inline int tcp_full_space(const struct sock *sk)
13071da177e4SLinus Torvalds {
130894f0893eSEric Dumazet 	return tcp_win_from_space(sk, sk->sk_rcvbuf);
13091da177e4SLinus Torvalds }
13101da177e4SLinus Torvalds 
1311843f4a55SYuchung Cheng extern void tcp_openreq_init_rwin(struct request_sock *req,
1312b1964b5fSEric Dumazet 				  const struct sock *sk_listener,
1313b1964b5fSEric Dumazet 				  const struct dst_entry *dst);
1314843f4a55SYuchung Cheng 
13155c9f3023SJoe Perches void tcp_enter_memory_pressure(struct sock *sk);
131606044751SEric Dumazet void tcp_leave_memory_pressure(struct sock *sk);
13171da177e4SLinus Torvalds 
13181da177e4SLinus Torvalds static inline int keepalive_intvl_when(const struct tcp_sock *tp)
13191da177e4SLinus Torvalds {
1320b840d15dSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
1321b840d15dSNikolay Borisov 
1322b840d15dSNikolay Borisov 	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
13231da177e4SLinus Torvalds }
13241da177e4SLinus Torvalds 
13251da177e4SLinus Torvalds static inline int keepalive_time_when(const struct tcp_sock *tp)
13261da177e4SLinus Torvalds {
132713b287e8SNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
132813b287e8SNikolay Borisov 
132913b287e8SNikolay Borisov 	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
13301da177e4SLinus Torvalds }
13311da177e4SLinus Torvalds 
1332df19a626SEric Dumazet static inline int keepalive_probes(const struct tcp_sock *tp)
1333df19a626SEric Dumazet {
13349bd6861bSNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
13359bd6861bSNikolay Borisov 
13369bd6861bSNikolay Borisov 	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
1337df19a626SEric Dumazet }
1338df19a626SEric Dumazet 
13396c37e5deSFlavio Leitner static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
13406c37e5deSFlavio Leitner {
13416c37e5deSFlavio Leitner 	const struct inet_connection_sock *icsk = &tp->inet_conn;
13426c37e5deSFlavio Leitner 
134370eabf0eSEric Dumazet 	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
134470eabf0eSEric Dumazet 			  tcp_jiffies32 - tp->rcv_tstamp);
13456c37e5deSFlavio Leitner }
13466c37e5deSFlavio Leitner 
1347463c84b9SArnaldo Carvalho de Melo static inline int tcp_fin_time(const struct sock *sk)
13481da177e4SLinus Torvalds {
13491e579caaSNikolay Borisov 	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
1350463c84b9SArnaldo Carvalho de Melo 	const int rto = inet_csk(sk)->icsk_rto;
13511da177e4SLinus Torvalds 
1352463c84b9SArnaldo Carvalho de Melo 	if (fin_timeout < (rto << 2) - (rto >> 1))
1353463c84b9SArnaldo Carvalho de Melo 		fin_timeout = (rto << 2) - (rto >> 1);
13541da177e4SLinus Torvalds 
13551da177e4SLinus Torvalds 	return fin_timeout;
13561da177e4SLinus Torvalds }
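
/* Note: (rto << 2) - (rto >> 1) is 3.5 * rto, so the FIN_WAIT_2 timeout
 * used here is never shorter than 3.5 retransmission timeouts.
 */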
13571da177e4SLinus Torvalds 
1358a2a385d6SEric Dumazet static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1359c887e6d2SIlpo Järvinen 				  int paws_win)
13601da177e4SLinus Torvalds {
1361c887e6d2SIlpo Järvinen 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1362a2a385d6SEric Dumazet 		return true;
1363c887e6d2SIlpo Järvinen 	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
1364a2a385d6SEric Dumazet 		return true;
1365bc2ce894SEric Dumazet 	/*
1366bc2ce894SEric Dumazet 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1367bc2ce894SEric Dumazet 	 * while the following TCP messages have valid values. Ignore a 0 value,
1368bc2ce894SEric Dumazet 	 * or else 'negative' tsval might forbid us to accept their packets.
1369bc2ce894SEric Dumazet 	 */
1370bc2ce894SEric Dumazet 	if (!rx_opt->ts_recent)
1371a2a385d6SEric Dumazet 		return true;
1372a2a385d6SEric Dumazet 	return false;
1373c887e6d2SIlpo Järvinen }
1374c887e6d2SIlpo Järvinen 
1375a2a385d6SEric Dumazet static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1376c887e6d2SIlpo Järvinen 				   int rst)
1377c887e6d2SIlpo Järvinen {
1378c887e6d2SIlpo Järvinen 	if (tcp_paws_check(rx_opt, 0))
1379a2a385d6SEric Dumazet 		return false;
13801da177e4SLinus Torvalds 
13811da177e4SLinus Torvalds 	/* RST segments are not recommended to carry timestamp,
13821da177e4SLinus Torvalds 	   and, if they do, it is recommended to ignore PAWS because
13831da177e4SLinus Torvalds 	   "their cleanup function should take precedence over timestamps."
13841da177e4SLinus Torvalds 	   Certainly, this is a mistake. It is necessary to understand the
13851da177e4SLinus Torvalds 	   reasons for this constraint before relaxing it: if the peer reboots,
13861da177e4SLinus Torvalds 	   its clock may go out-of-sync and half-open connections will not be reset.
13871da177e4SLinus Torvalds 	   Actually, the problem would not exist if all
13881da177e4SLinus Torvalds 	   the implementations followed the draft about maintaining clocks
13891da177e4SLinus Torvalds 	   across reboots. Linux-2.2 DOES NOT!
13901da177e4SLinus Torvalds 
13911da177e4SLinus Torvalds 	   However, we can relax time bounds for RST segments to MSL.
13921da177e4SLinus Torvalds 	 */
13939d729f72SJames Morris 	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1394a2a385d6SEric Dumazet 		return false;
1395a2a385d6SEric Dumazet 	return true;
13961da177e4SLinus Torvalds }
13971da177e4SLinus Torvalds 
13987970ddc8SEric Dumazet bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
13997970ddc8SEric Dumazet 			  int mib_idx, u32 *last_oow_ack_time);
1400032ee423SNeal Cardwell 
1401a9c19329SPavel Emelyanov static inline void tcp_mib_init(struct net *net)
14021da177e4SLinus Torvalds {
14031da177e4SLinus Torvalds 	/* See RFC 2012 */
14046aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
14056aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
14066aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
14076aef70a8SEric Dumazet 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
14081da177e4SLinus Torvalds }
14091da177e4SLinus Torvalds 
14106a438bbeSStephen Hemminger /* from STCP */
1411ef9da47cSIlpo Järvinen static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
14120800f170SDavid S. Miller {
14136a438bbeSStephen Hemminger 	tp->lost_skb_hint = NULL;
1414ef9da47cSIlpo Järvinen }
1415ef9da47cSIlpo Järvinen 
1416ef9da47cSIlpo Järvinen static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1417ef9da47cSIlpo Järvinen {
1418ef9da47cSIlpo Järvinen 	tcp_clear_retrans_hints_partial(tp);
14196a438bbeSStephen Hemminger 	tp->retransmit_skb_hint = NULL;
1420b7689205SIlpo Järvinen }
1421b7689205SIlpo Järvinen 
1422a915da9bSEric Dumazet union tcp_md5_addr {
1423a915da9bSEric Dumazet 	struct in_addr  a4;
1424a915da9bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1425a915da9bSEric Dumazet 	struct in6_addr	a6;
1426a915da9bSEric Dumazet #endif
1427a915da9bSEric Dumazet };
1428a915da9bSEric Dumazet 
1429cfb6eeb4SYOSHIFUJI Hideaki /* - key database */
1430cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_key {
1431a915da9bSEric Dumazet 	struct hlist_node	node;
1432cfb6eeb4SYOSHIFUJI Hideaki 	u8			keylen;
1433a915da9bSEric Dumazet 	u8			family; /* AF_INET or AF_INET6 */
1434a915da9bSEric Dumazet 	union tcp_md5_addr	addr;
14356797318eSIvan Delalande 	u8			prefixlen;
1436a915da9bSEric Dumazet 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1437a915da9bSEric Dumazet 	struct rcu_head		rcu;
1438cfb6eeb4SYOSHIFUJI Hideaki };
1439cfb6eeb4SYOSHIFUJI Hideaki 
1440cfb6eeb4SYOSHIFUJI Hideaki /* - sock block */
1441cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_info {
1442a915da9bSEric Dumazet 	struct hlist_head	head;
1443a8afca03SEric Dumazet 	struct rcu_head		rcu;
1444cfb6eeb4SYOSHIFUJI Hideaki };
1445cfb6eeb4SYOSHIFUJI Hideaki 
1446cfb6eeb4SYOSHIFUJI Hideaki /* - pseudo header */
1447cfb6eeb4SYOSHIFUJI Hideaki struct tcp4_pseudohdr {
1448cfb6eeb4SYOSHIFUJI Hideaki 	__be32		saddr;
1449cfb6eeb4SYOSHIFUJI Hideaki 	__be32		daddr;
1450cfb6eeb4SYOSHIFUJI Hideaki 	__u8		pad;
1451cfb6eeb4SYOSHIFUJI Hideaki 	__u8		protocol;
1452cfb6eeb4SYOSHIFUJI Hideaki 	__be16		len;
1453cfb6eeb4SYOSHIFUJI Hideaki };
1454cfb6eeb4SYOSHIFUJI Hideaki 
1455cfb6eeb4SYOSHIFUJI Hideaki struct tcp6_pseudohdr {
1456cfb6eeb4SYOSHIFUJI Hideaki 	struct in6_addr	saddr;
1457cfb6eeb4SYOSHIFUJI Hideaki 	struct in6_addr daddr;
1458cfb6eeb4SYOSHIFUJI Hideaki 	__be32		len;
1459cfb6eeb4SYOSHIFUJI Hideaki 	__be32		protocol;	/* including padding */
1460cfb6eeb4SYOSHIFUJI Hideaki };
1461cfb6eeb4SYOSHIFUJI Hideaki 
1462cfb6eeb4SYOSHIFUJI Hideaki union tcp_md5sum_block {
1463cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp4_pseudohdr ip4;
1464dfd56b8bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6)
1465cfb6eeb4SYOSHIFUJI Hideaki 	struct tcp6_pseudohdr ip6;
1466cfb6eeb4SYOSHIFUJI Hideaki #endif
1467cfb6eeb4SYOSHIFUJI Hideaki };
1468cfb6eeb4SYOSHIFUJI Hideaki 
1469cfb6eeb4SYOSHIFUJI Hideaki /* - pool: digest algorithm, hash description and scratch buffer */
1470cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_pool {
1471cf80e0e4SHerbert Xu 	struct ahash_request	*md5_req;
147219689e38SEric Dumazet 	void			*scratch;
1473cfb6eeb4SYOSHIFUJI Hideaki };
1474cfb6eeb4SYOSHIFUJI Hideaki 
1475cfb6eeb4SYOSHIFUJI Hideaki /* - functions */
147639f8e58eSEric Dumazet int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
147739f8e58eSEric Dumazet 			const struct sock *sk, const struct sk_buff *skb);
14785c9f3023SJoe Perches int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
14796797318eSIvan Delalande 		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
14806797318eSIvan Delalande 		   gfp_t gfp);
14815c9f3023SJoe Perches int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
14826797318eSIvan Delalande 		   int family, u8 prefixlen);
1483b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1484fd3a154aSEric Dumazet 					 const struct sock *addr_sk);
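
/* Hedged usage sketch (illustrative only): installing an MD5 key for an
 * IPv4 peer on a socket could look roughly like this.  The helper name and
 * the choice of a /32 prefix are made up for illustration.
 */
static inline int example_install_md5_key(struct sock *sk, __be32 peer,
					  const u8 *key, u8 keylen)
{
	union tcp_md5_addr addr = { .a4 = { .s_addr = peer } };

	return tcp_md5_do_add(sk, &addr, AF_INET, 32, key, keylen,
			      GFP_KERNEL);
}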
1485cfb6eeb4SYOSHIFUJI Hideaki 
14869501f972SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1487b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
14885c9f3023SJoe Perches 					 const union tcp_md5_addr *addr,
14895c9f3023SJoe Perches 					 int family);
1490a915da9bSEric Dumazet #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
14919501f972SYOSHIFUJI Hideaki #else
1492b83e3debSEric Dumazet static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1493a915da9bSEric Dumazet 					 const union tcp_md5_addr *addr,
1494a915da9bSEric Dumazet 					 int family)
1495a915da9bSEric Dumazet {
1496a915da9bSEric Dumazet 	return NULL;
1497a915da9bSEric Dumazet }
14989501f972SYOSHIFUJI Hideaki #define tcp_twsk_md5_key(twsk)	NULL
14999501f972SYOSHIFUJI Hideaki #endif
15009501f972SYOSHIFUJI Hideaki 
15015c9f3023SJoe Perches bool tcp_alloc_md5sig_pool(void);
1502cfb6eeb4SYOSHIFUJI Hideaki 
15035c9f3023SJoe Perches struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
150471cea17eSEric Dumazet static inline void tcp_put_md5sig_pool(void)
150571cea17eSEric Dumazet {
150671cea17eSEric Dumazet 	local_bh_enable();
150771cea17eSEric Dumazet }
150835790c04SEric Dumazet 
15095c9f3023SJoe Perches int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
151095c96174SEric Dumazet 			  unsigned int header_len);
15115c9f3023SJoe Perches int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1512cf533ea5SEric Dumazet 		     const struct tcp_md5sig_key *key);
1513cfb6eeb4SYOSHIFUJI Hideaki 
151410467163SJerry Chu /* From tcp_fastopen.c */
15155c9f3023SJoe Perches void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
15167268586bSYuchung Cheng 			    struct tcp_fastopen_cookie *cookie);
15175c9f3023SJoe Perches void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
15182646c831SDaniel Lee 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
15192646c831SDaniel Lee 			    u16 try_exp);
1520783237e8SYuchung Cheng struct tcp_fastopen_request {
1521783237e8SYuchung Cheng 	/* Fast Open cookie. Size 0 means a cookie request */
1522783237e8SYuchung Cheng 	struct tcp_fastopen_cookie	cookie;
1523783237e8SYuchung Cheng 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1524f5ddcbbbSEric Dumazet 	size_t				size;
1525f5ddcbbbSEric Dumazet 	int				copied;	/* queued in tcp_connect() */
1526783237e8SYuchung Cheng };
1527783237e8SYuchung Cheng void tcp_free_fastopen_req(struct tcp_sock *tp);
15281fba70e5SYuchung Cheng void tcp_fastopen_destroy_cipher(struct sock *sk);
152943713848SHaishuang Yan void tcp_fastopen_ctx_destroy(struct net *net);
15301fba70e5SYuchung Cheng int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
15311fba70e5SYuchung Cheng 			      void *key, unsigned int len);
153261d2bcaeSEric Dumazet void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
15337c85af88SEric Dumazet struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
15345b7ed089SYuchung Cheng 			      struct request_sock *req,
153571c02379SChristoph Paasch 			      struct tcp_fastopen_cookie *foc,
153671c02379SChristoph Paasch 			      const struct dst_entry *dst);
153743713848SHaishuang Yan void tcp_fastopen_init_key_once(struct net *net);
1538065263f4SWei Wang bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1539065263f4SWei Wang 			     struct tcp_fastopen_cookie *cookie);
154019f6d3f3SWei Wang bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
154110467163SJerry Chu #define TCP_FASTOPEN_KEY_LENGTH 16
154210467163SJerry Chu 
154310467163SJerry Chu /* Fastopen key context */
154410467163SJerry Chu struct tcp_fastopen_context {
15457ae8639cSEric Dumazet 	struct crypto_cipher	*tfm;
154610467163SJerry Chu 	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
154710467163SJerry Chu 	struct rcu_head		rcu;
154810467163SJerry Chu };
154910467163SJerry Chu 
1550cf1ef3f0SWei Wang extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
155146c2fa39SWei Wang void tcp_fastopen_active_disable(struct sock *sk);
1552cf1ef3f0SWei Wang bool tcp_fastopen_active_should_disable(struct sock *sk);
1553cf1ef3f0SWei Wang void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
15547268586bSYuchung Cheng void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
1555cf1ef3f0SWei Wang 
155605b055e8SFrancis Yan /* Latencies incurred by various limits for a sender. They are
155705b055e8SFrancis Yan  * chronograph-like stats that are mutually exclusive.
155805b055e8SFrancis Yan  */
155905b055e8SFrancis Yan enum tcp_chrono {
156005b055e8SFrancis Yan 	TCP_CHRONO_UNSPEC,
156105b055e8SFrancis Yan 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
156205b055e8SFrancis Yan 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
156305b055e8SFrancis Yan 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
156405b055e8SFrancis Yan 	__TCP_CHRONO_MAX,
156505b055e8SFrancis Yan };
156605b055e8SFrancis Yan 
156705b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
156805b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
156905b055e8SFrancis Yan 
1570e2080072SEric Dumazet /* This helper is needed, because skb->tcp_tsorted_anchor uses
1571e2080072SEric Dumazet  * the same memory storage as skb->destructor/_skb_refdst
1572e2080072SEric Dumazet  */
1573e2080072SEric Dumazet static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1574e2080072SEric Dumazet {
1575e2080072SEric Dumazet 	skb->destructor = NULL;
1576e2080072SEric Dumazet 	skb->_skb_refdst = 0UL;
1577e2080072SEric Dumazet }
1578e2080072SEric Dumazet 
1579e2080072SEric Dumazet #define tcp_skb_tsorted_save(skb) {		\
1580e2080072SEric Dumazet 	unsigned long _save = skb->_skb_refdst;	\
1581e2080072SEric Dumazet 	skb->_skb_refdst = 0UL;
1582e2080072SEric Dumazet 
1583e2080072SEric Dumazet #define tcp_skb_tsorted_restore(skb)		\
1584e2080072SEric Dumazet 	skb->_skb_refdst = _save;		\
1585e2080072SEric Dumazet }
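
/* Hedged usage sketch: the save/restore pair brackets code that must not
 * observe the tsorted anchor aliased into skb->_skb_refdst, e.g.:
 *
 *	tcp_skb_tsorted_save(skb) {
 *		err = some_xmit_function(sk, skb);
 *	} tcp_skb_tsorted_restore(skb);
 *
 * The xmit call is a placeholder; the brace placement follows the macro
 * definitions above.
 */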
1586e2080072SEric Dumazet 
1587ac3f09baSEric Dumazet void tcp_write_queue_purge(struct sock *sk);
1588fe067e8aSDavid S. Miller 
158975c119afSEric Dumazet static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
159075c119afSEric Dumazet {
159175c119afSEric Dumazet 	return skb_rb_first(&sk->tcp_rtx_queue);
159275c119afSEric Dumazet }
159375c119afSEric Dumazet 
1594cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1595fe067e8aSDavid S. Miller {
1596cd07a8eaSDavid S. Miller 	return skb_peek(&sk->sk_write_queue);
1597fe067e8aSDavid S. Miller }
1598fe067e8aSDavid S. Miller 
1599cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1600fe067e8aSDavid S. Miller {
1601cd07a8eaSDavid S. Miller 	return skb_peek_tail(&sk->sk_write_queue);
1602fe067e8aSDavid S. Miller }
1603fe067e8aSDavid S. Miller 
1604234b6860SIlpo Järvinen #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1605cd07a8eaSDavid S. Miller 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1606234b6860SIlpo Järvinen 
1607cf533ea5SEric Dumazet static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1608fe067e8aSDavid S. Miller {
160975c119afSEric Dumazet 	return skb_peek(&sk->sk_write_queue);
1610fe067e8aSDavid S. Miller }
1611fe067e8aSDavid S. Miller 
1612cd07a8eaSDavid S. Miller static inline bool tcp_skb_is_last(const struct sock *sk,
1613cd07a8eaSDavid S. Miller 				   const struct sk_buff *skb)
1614cd07a8eaSDavid S. Miller {
1615cd07a8eaSDavid S. Miller 	return skb_queue_is_last(&sk->sk_write_queue, skb);
1616cd07a8eaSDavid S. Miller }
1617cd07a8eaSDavid S. Miller 
161875c119afSEric Dumazet static inline bool tcp_write_queue_empty(const struct sock *sk)
1619fe067e8aSDavid S. Miller {
162075c119afSEric Dumazet 	return skb_queue_empty(&sk->sk_write_queue);
162175c119afSEric Dumazet }
162275c119afSEric Dumazet 
162375c119afSEric Dumazet static inline bool tcp_rtx_queue_empty(const struct sock *sk)
162475c119afSEric Dumazet {
162575c119afSEric Dumazet 	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
162675c119afSEric Dumazet }
162775c119afSEric Dumazet 
162875c119afSEric Dumazet static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
162975c119afSEric Dumazet {
163075c119afSEric Dumazet 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1631fe067e8aSDavid S. Miller }
1632fe067e8aSDavid S. Miller 
1633fe067e8aSDavid S. Miller static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1634fe067e8aSDavid S. Miller {
163575c119afSEric Dumazet 	if (tcp_write_queue_empty(sk))
16360f87230dSFrancis Yan 		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1637fe067e8aSDavid S. Miller }
1638fe067e8aSDavid S. Miller 
1639fe067e8aSDavid S. Miller static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1640fe067e8aSDavid S. Miller {
1641fe067e8aSDavid S. Miller 	__skb_queue_tail(&sk->sk_write_queue, skb);
1642fe067e8aSDavid S. Miller }
1643fe067e8aSDavid S. Miller 
1644fe067e8aSDavid S. Miller static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1645fe067e8aSDavid S. Miller {
1646fe067e8aSDavid S. Miller 	__tcp_add_write_queue_tail(sk, skb);
1647fe067e8aSDavid S. Miller 
1648fe067e8aSDavid S. Miller 	/* Queue it, remembering where we must start sending. */
164950895b9dSEric Dumazet 	if (sk->sk_write_queue.next == skb)
16500f87230dSFrancis Yan 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1651fe067e8aSDavid S. Miller }
1652fe067e8aSDavid S. Miller 
165343f59c89SDavid S. Miller /* Insert new before skb on the write queue of sk.  */
1654fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1655fe067e8aSDavid S. Miller 						  struct sk_buff *skb,
1656fe067e8aSDavid S. Miller 						  struct sock *sk)
1657fe067e8aSDavid S. Miller {
165843f59c89SDavid S. Miller 	__skb_queue_before(&sk->sk_write_queue, skb, new);
1659fe067e8aSDavid S. Miller }
1660fe067e8aSDavid S. Miller 
1661fe067e8aSDavid S. Miller static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1662fe067e8aSDavid S. Miller {
16634a269818SEric Dumazet 	tcp_skb_tsorted_anchor_cleanup(skb);
1664fe067e8aSDavid S. Miller 	__skb_unlink(skb, &sk->sk_write_queue);
1665fe067e8aSDavid S. Miller }
1666fe067e8aSDavid S. Miller 
166775c119afSEric Dumazet void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
166875c119afSEric Dumazet 
166975c119afSEric Dumazet static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1670fe067e8aSDavid S. Miller {
167175c119afSEric Dumazet 	tcp_skb_tsorted_anchor_cleanup(skb);
167275c119afSEric Dumazet 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
167375c119afSEric Dumazet }
167475c119afSEric Dumazet 
167575c119afSEric Dumazet static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
167675c119afSEric Dumazet {
167775c119afSEric Dumazet 	list_del(&skb->tcp_tsorted_anchor);
167875c119afSEric Dumazet 	tcp_rtx_queue_unlink(skb, sk);
167975c119afSEric Dumazet 	sk_wmem_free_skb(sk, skb);
1680fe067e8aSDavid S. Miller }
1681fe067e8aSDavid S. Miller 
168212d50c46SKrishna Kumar static inline void tcp_push_pending_frames(struct sock *sk)
168312d50c46SKrishna Kumar {
168412d50c46SKrishna Kumar 	if (tcp_send_head(sk)) {
168512d50c46SKrishna Kumar 		struct tcp_sock *tp = tcp_sk(sk);
168612d50c46SKrishna Kumar 
168712d50c46SKrishna Kumar 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
168812d50c46SKrishna Kumar 	}
168912d50c46SKrishna Kumar }
169012d50c46SKrishna Kumar 
1691ecb97192SNeal Cardwell /* Start sequence of the skb just after the highest skb with SACKed
1692ecb97192SNeal Cardwell  * bit, valid only if sacked_out > 0 or when the caller has ensured
1693ecb97192SNeal Cardwell  * validity by itself.
1694a47e5a98SIlpo Järvinen  */
1695a47e5a98SIlpo Järvinen static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1696a47e5a98SIlpo Järvinen {
1697a47e5a98SIlpo Järvinen 	if (!tp->sacked_out)
1698a47e5a98SIlpo Järvinen 		return tp->snd_una;
16996859d494SIlpo Järvinen 
17006859d494SIlpo Järvinen 	if (tp->highest_sack == NULL)
17016859d494SIlpo Järvinen 		return tp->snd_nxt;
17026859d494SIlpo Järvinen 
1703a47e5a98SIlpo Järvinen 	return TCP_SKB_CB(tp->highest_sack)->seq;
1704a47e5a98SIlpo Järvinen }
1705a47e5a98SIlpo Järvinen 
17066859d494SIlpo Järvinen static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
17076859d494SIlpo Järvinen {
170850895b9dSEric Dumazet 	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
17096859d494SIlpo Järvinen }
17106859d494SIlpo Järvinen 
17116859d494SIlpo Järvinen static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
17126859d494SIlpo Järvinen {
17136859d494SIlpo Järvinen 	return tcp_sk(sk)->highest_sack;
17146859d494SIlpo Järvinen }
17156859d494SIlpo Järvinen 
17166859d494SIlpo Järvinen static inline void tcp_highest_sack_reset(struct sock *sk)
17176859d494SIlpo Järvinen {
171850895b9dSEric Dumazet 	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
17196859d494SIlpo Järvinen }
17206859d494SIlpo Järvinen 
17212b7cda9cSEric Dumazet /* Called when old skb is about to be deleted and replaced by new skb */
17222b7cda9cSEric Dumazet static inline void tcp_highest_sack_replace(struct sock *sk,
17236859d494SIlpo Järvinen 					    struct sk_buff *old,
17246859d494SIlpo Järvinen 					    struct sk_buff *new)
17256859d494SIlpo Järvinen {
17262b7cda9cSEric Dumazet 	if (old == tcp_highest_sack(sk))
17276859d494SIlpo Järvinen 		tcp_sk(sk)->highest_sack = new;
17286859d494SIlpo Järvinen }
17296859d494SIlpo Järvinen 
1730b1f0a0e9SFlorian Westphal /* This helper checks if socket has IP_TRANSPARENT set */
1731b1f0a0e9SFlorian Westphal static inline bool inet_sk_transparent(const struct sock *sk)
1732b1f0a0e9SFlorian Westphal {
1733b1f0a0e9SFlorian Westphal 	switch (sk->sk_state) {
1734b1f0a0e9SFlorian Westphal 	case TCP_TIME_WAIT:
1735b1f0a0e9SFlorian Westphal 		return inet_twsk(sk)->tw_transparent;
1736b1f0a0e9SFlorian Westphal 	case TCP_NEW_SYN_RECV:
1737b1f0a0e9SFlorian Westphal 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1738b1f0a0e9SFlorian Westphal 	}
1739b1f0a0e9SFlorian Westphal 	return inet_sk(sk)->transparent;
1740b1f0a0e9SFlorian Westphal }
1741b1f0a0e9SFlorian Westphal 
17425aa4b32fSAndreas Petlund /* Determines whether this is a thin stream (which may suffer from
17435aa4b32fSAndreas Petlund  * increased latency). Used to trigger latency-reducing mechanisms.
17445aa4b32fSAndreas Petlund  */
1745a2a385d6SEric Dumazet static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
17465aa4b32fSAndreas Petlund {
17475aa4b32fSAndreas Petlund 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
17485aa4b32fSAndreas Petlund }
17495aa4b32fSAndreas Petlund 
17501da177e4SLinus Torvalds /* /proc */
17511da177e4SLinus Torvalds enum tcp_seq_states {
17521da177e4SLinus Torvalds 	TCP_SEQ_STATE_LISTENING,
17531da177e4SLinus Torvalds 	TCP_SEQ_STATE_ESTABLISHED,
17541da177e4SLinus Torvalds };
17551da177e4SLinus Torvalds 
175673cb88ecSArjan van de Ven int tcp_seq_open(struct inode *inode, struct file *file);
175773cb88ecSArjan van de Ven 
17581da177e4SLinus Torvalds struct tcp_seq_afinfo {
17591da177e4SLinus Torvalds 	char				*name;
17601da177e4SLinus Torvalds 	sa_family_t			family;
176173cb88ecSArjan van de Ven 	const struct file_operations	*seq_fops;
17629427c4b3SDenis V. Lunev 	struct seq_operations		seq_ops;
17631da177e4SLinus Torvalds };
17641da177e4SLinus Torvalds 
17651da177e4SLinus Torvalds struct tcp_iter_state {
1766a4146b1bSDenis V. Lunev 	struct seq_net_private	p;
17671da177e4SLinus Torvalds 	sa_family_t		family;
17681da177e4SLinus Torvalds 	enum tcp_seq_states	state;
17691da177e4SLinus Torvalds 	struct sock		*syn_wait_sk;
1770a7cb5a49SEric W. Biederman 	int			bucket, offset, sbucket, num;
1771a8b690f9STom Herbert 	loff_t			last_pos;
17721da177e4SLinus Torvalds };
17731da177e4SLinus Torvalds 
17745c9f3023SJoe Perches int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
17755c9f3023SJoe Perches void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
17761da177e4SLinus Torvalds 
177720380731SArnaldo Carvalho de Melo extern struct request_sock_ops tcp_request_sock_ops;
1778c6aefafbSGlenn Griffin extern struct request_sock_ops tcp6_request_sock_ops;
177920380731SArnaldo Carvalho de Melo 
17805c9f3023SJoe Perches void tcp_v4_destroy_sock(struct sock *sk);
178120380731SArnaldo Carvalho de Melo 
178228be6e07SEric Dumazet struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1783c8f44affSMichał Mirosław 				netdev_features_t features);
17845c9f3023SJoe Perches struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
17855c9f3023SJoe Perches int tcp_gro_complete(struct sk_buff *skb);
178628850dc7SDaniel Borkmann 
17875c9f3023SJoe Perches void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1788f4c50d99SHerbert Xu 
1789c9bee3b7SEric Dumazet static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1790c9bee3b7SEric Dumazet {
17914979f2d9SNikolay Borisov 	struct net *net = sock_net((struct sock *)tp);
17924979f2d9SNikolay Borisov 	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1793c9bee3b7SEric Dumazet }
1794c9bee3b7SEric Dumazet 
1795c9bee3b7SEric Dumazet static inline bool tcp_stream_memory_free(const struct sock *sk)
1796c9bee3b7SEric Dumazet {
1797c9bee3b7SEric Dumazet 	const struct tcp_sock *tp = tcp_sk(sk);
1798c9bee3b7SEric Dumazet 	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1799c9bee3b7SEric Dumazet 
1800c9bee3b7SEric Dumazet 	return notsent_bytes < tcp_notsent_lowat(tp);
1801c9bee3b7SEric Dumazet }
1802c9bee3b7SEric Dumazet 
180320380731SArnaldo Carvalho de Melo #ifdef CONFIG_PROC_FS
18045c9f3023SJoe Perches int tcp4_proc_init(void);
18055c9f3023SJoe Perches void tcp4_proc_exit(void);
180620380731SArnaldo Carvalho de Melo #endif
180720380731SArnaldo Carvalho de Melo 
1808ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
18091fb6f159SOctavian Purdila int tcp_conn_request(struct request_sock_ops *rsk_ops,
18101fb6f159SOctavian Purdila 		     const struct tcp_request_sock_ops *af_ops,
18111fb6f159SOctavian Purdila 		     struct sock *sk, struct sk_buff *skb);
18125db92c99SOctavian Purdila 
1813cfb6eeb4SYOSHIFUJI Hideaki /* TCP af-specific functions */
1814cfb6eeb4SYOSHIFUJI Hideaki struct tcp_sock_af_ops {
1815cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1816b83e3debSEric Dumazet 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
1817fd3a154aSEric Dumazet 						const struct sock *addr_sk);
1818cfb6eeb4SYOSHIFUJI Hideaki 	int		(*calc_md5_hash)(char *location,
181939f8e58eSEric Dumazet 					 const struct tcp_md5sig_key *md5,
1820318cf7aaSEric Dumazet 					 const struct sock *sk,
1821318cf7aaSEric Dumazet 					 const struct sk_buff *skb);
1822cfb6eeb4SYOSHIFUJI Hideaki 	int		(*md5_parse)(struct sock *sk,
18238917a777SIvan Delalande 				     int optname,
1824cfb6eeb4SYOSHIFUJI Hideaki 				     char __user *optval,
1825cfb6eeb4SYOSHIFUJI Hideaki 				     int optlen);
1826cfb6eeb4SYOSHIFUJI Hideaki #endif
1827cfb6eeb4SYOSHIFUJI Hideaki };
1828cfb6eeb4SYOSHIFUJI Hideaki 
1829cfb6eeb4SYOSHIFUJI Hideaki struct tcp_request_sock_ops {
18302aec4a29SOctavian Purdila 	u16 mss_clamp;
1831cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG
1832b83e3debSEric Dumazet 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1833fd3a154aSEric Dumazet 						 const struct sock *addr_sk);
1834e3afe7b7SJohn Dykstra 	int		(*calc_md5_hash) (char *location,
183539f8e58eSEric Dumazet 					  const struct tcp_md5sig_key *md5,
1836318cf7aaSEric Dumazet 					  const struct sock *sk,
1837318cf7aaSEric Dumazet 					  const struct sk_buff *skb);
1838cfb6eeb4SYOSHIFUJI Hideaki #endif
1839b40cf18eSEric Dumazet 	void (*init_req)(struct request_sock *req,
1840b40cf18eSEric Dumazet 			 const struct sock *sk_listener,
184116bea70aSOctavian Purdila 			 struct sk_buff *skb);
1842fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES
18433f684b4bSEric Dumazet 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
1844fb7b37a7SOctavian Purdila 				 __u16 *mss);
1845fb7b37a7SOctavian Purdila #endif
1846f964629eSEric Dumazet 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
18474396e461SSoheil Hassas Yeganeh 				       const struct request_sock *req);
184884b114b9SEric Dumazet 	u32 (*init_seq)(const struct sk_buff *skb);
18495d2ed052SEric Dumazet 	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
18500f935dbeSEric Dumazet 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1851d6274bd8SOctavian Purdila 			   struct flowi *fl, struct request_sock *req,
1852dc6ef6beSEric Dumazet 			   struct tcp_fastopen_cookie *foc,
1853b3d05147SEric Dumazet 			   enum tcp_synack_type synack_type);
1854cfb6eeb4SYOSHIFUJI Hideaki };
1855cfb6eeb4SYOSHIFUJI Hideaki 
1856fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES
1857fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
18583f684b4bSEric Dumazet 					 const struct sock *sk, struct sk_buff *skb,
1859fb7b37a7SOctavian Purdila 					 __u16 *mss)
1860fb7b37a7SOctavian Purdila {
18613f684b4bSEric Dumazet 	tcp_synq_overflow(sk);
186202a1d6e7SEric Dumazet 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
18633f684b4bSEric Dumazet 	return ops->cookie_init_seq(skb, mss);
1864fb7b37a7SOctavian Purdila }
1865fb7b37a7SOctavian Purdila #else
1866fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
18673f684b4bSEric Dumazet 					 const struct sock *sk, struct sk_buff *skb,
1868fb7b37a7SOctavian Purdila 					 __u16 *mss)
1869fb7b37a7SOctavian Purdila {
1870fb7b37a7SOctavian Purdila 	return 0;
1871fb7b37a7SOctavian Purdila }
1872fb7b37a7SOctavian Purdila #endif
1873fb7b37a7SOctavian Purdila 
18745c9f3023SJoe Perches int tcpv4_offload_init(void);
187528850dc7SDaniel Borkmann 
18765c9f3023SJoe Perches void tcp_v4_init(void);
18775c9f3023SJoe Perches void tcp_init(void);
187820380731SArnaldo Carvalho de Melo 
1879659a8ad5SYuchung Cheng /* tcp_recovery.c */
1880128eda86SEric Dumazet extern void tcp_rack_mark_lost(struct sock *sk);
18811d0833dfSYuchung Cheng extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
18829a568de4SEric Dumazet 			     u64 xmit_time);
188357dde7f7SYuchung Cheng extern void tcp_rack_reo_timeout(struct sock *sk);
18841f255691SPriyaranjan Jha extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs);
1885659a8ad5SYuchung Cheng 
1886e1a10ef7SNeal Cardwell /* At how many usecs into the future should the RTO fire? */
1887e1a10ef7SNeal Cardwell static inline s64 tcp_rto_delta_us(const struct sock *sk)
1888e1a10ef7SNeal Cardwell {
188975c119afSEric Dumazet 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
1890e1a10ef7SNeal Cardwell 	u32 rto = inet_csk(sk)->icsk_rto;
1891e1a10ef7SNeal Cardwell 	u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
1892e1a10ef7SNeal Cardwell 
1893e1a10ef7SNeal Cardwell 	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
1894e1a10ef7SNeal Cardwell }
1895e1a10ef7SNeal Cardwell 
1896e25f866fSCong Wang /*
1897e25f866fSCong Wang  * Save and compile IPv4 options, return a pointer to the copy
1898e25f866fSCong Wang  */
189991ed1e66SPaolo Abeni static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
190091ed1e66SPaolo Abeni 							 struct sk_buff *skb)
1901e25f866fSCong Wang {
1902e25f866fSCong Wang 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1903e25f866fSCong Wang 	struct ip_options_rcu *dopt = NULL;
1904e25f866fSCong Wang 
1905461b74c3SCong Wang 	if (opt->optlen) {
1906e25f866fSCong Wang 		int opt_size = sizeof(*dopt) + opt->optlen;
1907e25f866fSCong Wang 
1908e25f866fSCong Wang 		dopt = kmalloc(opt_size, GFP_ATOMIC);
190991ed1e66SPaolo Abeni 		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
1910e25f866fSCong Wang 			kfree(dopt);
1911e25f866fSCong Wang 			dopt = NULL;
1912e25f866fSCong Wang 		}
1913e25f866fSCong Wang 	}
1914e25f866fSCong Wang 	return dopt;
1915e25f866fSCong Wang }
1916e25f866fSCong Wang 
191798781965SEric Dumazet /* locally generated TCP pure ACKs have skb->truesize == 2
191898781965SEric Dumazet  * (check tcp_send_ack() in net/ipv4/tcp_output.c )
191998781965SEric Dumazet  * This is much faster than dissecting the packet to find out.
192098781965SEric Dumazet  * (Think of GRE encapsulations, IPv4, IPv6, ...)
192198781965SEric Dumazet  */
192298781965SEric Dumazet static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
192398781965SEric Dumazet {
192498781965SEric Dumazet 	return skb->truesize == 2;
192598781965SEric Dumazet }
192698781965SEric Dumazet 
192798781965SEric Dumazet static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
192898781965SEric Dumazet {
192998781965SEric Dumazet 	skb->truesize = 2;
193098781965SEric Dumazet }
193198781965SEric Dumazet 
1932473bd239STom Herbert static inline int tcp_inq(struct sock *sk)
1933473bd239STom Herbert {
1934473bd239STom Herbert 	struct tcp_sock *tp = tcp_sk(sk);
1935473bd239STom Herbert 	int answ;
1936473bd239STom Herbert 
1937473bd239STom Herbert 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1938473bd239STom Herbert 		answ = 0;
1939473bd239STom Herbert 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
1940473bd239STom Herbert 		   !tp->urg_data ||
1941473bd239STom Herbert 		   before(tp->urg_seq, tp->copied_seq) ||
1942473bd239STom Herbert 		   !before(tp->urg_seq, tp->rcv_nxt)) {
1943473bd239STom Herbert 
1944473bd239STom Herbert 		answ = tp->rcv_nxt - tp->copied_seq;
1945473bd239STom Herbert 
1946473bd239STom Herbert 		/* Subtract 1, if FIN was received */
1947473bd239STom Herbert 		if (answ && sock_flag(sk, SOCK_DONE))
1948473bd239STom Herbert 			answ--;
1949473bd239STom Herbert 	} else {
1950473bd239STom Herbert 		answ = tp->urg_seq - tp->copied_seq;
1951473bd239STom Herbert 	}
1952473bd239STom Herbert 
1953473bd239STom Herbert 	return answ;
1954473bd239STom Herbert }
1955473bd239STom Herbert 
195632035585STom Herbert int tcp_peek_len(struct socket *sock);
195732035585STom Herbert 
1958a44d6eacSMartin KaFai Lau static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1959a44d6eacSMartin KaFai Lau {
1960a44d6eacSMartin KaFai Lau 	u16 segs_in;
1961a44d6eacSMartin KaFai Lau 
1962a44d6eacSMartin KaFai Lau 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1963a44d6eacSMartin KaFai Lau 	tp->segs_in += segs_in;
1964a44d6eacSMartin KaFai Lau 	if (skb->len > tcp_hdrlen(skb))
1965a44d6eacSMartin KaFai Lau 		tp->data_segs_in += segs_in;
1966a44d6eacSMartin KaFai Lau }
1967a44d6eacSMartin KaFai Lau 
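/* The segs_in/data_segs_in counters updated above are exported to userspace
 * through TCP_INFO.  Illustrative userspace sketch (assumes <netinet/tcp.h>
 * and a TCP socket 'fd'):
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("segs_in=%u data_segs_in=%u\n",
 *		       ti.tcpi_segs_in, ti.tcpi_data_segs_in);
 */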
19689caad864SEric Dumazet /*
19699caad864SEric Dumazet  * TCP listen path runs lockless.
19709caad864SEric Dumazet  * We forced "struct sock" to be const qualified to make sure
19719caad864SEric Dumazet  * we don't modify one of its fields by mistake.
19729caad864SEric Dumazet  * Here, we increment sk_drops which is an atomic_t, so it is safe to
19739caad864SEric Dumazet  * cast the const away and make the sock writable again.
19749caad864SEric Dumazet  */
19759caad864SEric Dumazet static inline void tcp_listendrop(const struct sock *sk)
19769caad864SEric Dumazet {
19779caad864SEric Dumazet 	atomic_inc(&((struct sock *)sk)->sk_drops);
197802a1d6e7SEric Dumazet 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
19799caad864SEric Dumazet }
19809caad864SEric Dumazet 
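/* Typical call site (sketched, not a quote of any particular function):
 * connection-request handling drops a SYN when the listener's accept queue
 * is full, roughly:
 *
 *	if (sk_acceptq_is_full(sk)) {
 *		tcp_listendrop(sk);
 *		goto drop;
 *	}
 */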
1981218af599SEric Dumazet enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
1982218af599SEric Dumazet 
1983734942ccSDave Watson /*
1984734942ccSDave Watson  * Interface for adding Upper Level Protocols over TCP
1985734942ccSDave Watson  */
1986734942ccSDave Watson 
1987734942ccSDave Watson #define TCP_ULP_NAME_MAX	16
1988734942ccSDave Watson #define TCP_ULP_MAX		128
1989734942ccSDave Watson #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
1990734942ccSDave Watson 
1991b11a632cSJohn Fastabend enum {
1992b11a632cSJohn Fastabend 	TCP_ULP_TLS,
19931aa12bdfSJohn Fastabend 	TCP_ULP_BPF,
1994b11a632cSJohn Fastabend };
1995b11a632cSJohn Fastabend 
1996734942ccSDave Watson struct tcp_ulp_ops {
1997734942ccSDave Watson 	struct list_head	list;
1998734942ccSDave Watson 
1999734942ccSDave Watson 	/* initialize ulp */
2000734942ccSDave Watson 	int (*init)(struct sock *sk);
2001734942ccSDave Watson 	/* cleanup ulp */
2002734942ccSDave Watson 	void (*release)(struct sock *sk);
2003734942ccSDave Watson 
2004b11a632cSJohn Fastabend 	int		uid;
2005734942ccSDave Watson 	char		name[TCP_ULP_NAME_MAX];
2006b11a632cSJohn Fastabend 	bool		user_visible;
2007734942ccSDave Watson 	struct module	*owner;
2008734942ccSDave Watson };
2009734942ccSDave Watson int tcp_register_ulp(struct tcp_ulp_ops *type);
2010734942ccSDave Watson void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2011734942ccSDave Watson int tcp_set_ulp(struct sock *sk, const char *name);
20121aa12bdfSJohn Fastabend int tcp_set_ulp_id(struct sock *sk, const int ulp);
2013734942ccSDave Watson void tcp_get_available_ulp(char *buf, size_t len);
2014734942ccSDave Watson void tcp_cleanup_ulp(struct sock *sk);
2015734942ccSDave Watson 
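/* A ULP module typically registers its ops at module init; tcp_set_ulp()
 * (reached via the TCP_ULP setsockopt) then looks the name up and calls
 * ->init() on the socket.  Minimal sketch, with hypothetical
 * example_ulp_init()/example_ulp_release() callbacks:
 *
 *	static struct tcp_ulp_ops example_ulp_ops __read_mostly = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.init		= example_ulp_init,
 *		.release	= example_ulp_release,
 *	};
 *
 *	static int __init example_ulp_register(void)
 *	{
 *		return tcp_register_ulp(&example_ulp_ops);
 *	}
 *
 * Userspace would then select it with
 * setsockopt(fd, SOL_TCP, TCP_ULP, "example", sizeof("example")).
 */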
201640304b2aSLawrence Brakmo /* Call BPF_SOCK_OPS program that returns an int. If the return value
201740304b2aSLawrence Brakmo  * is < 0, then the BPF op failed (for example if the loaded BPF
201840304b2aSLawrence Brakmo  * program does not support the chosen operation or there is no BPF
201940304b2aSLawrence Brakmo  * program loaded).
202040304b2aSLawrence Brakmo  */
202140304b2aSLawrence Brakmo #ifdef CONFIG_BPF
2022de525be2SLawrence Brakmo static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
202340304b2aSLawrence Brakmo {
202440304b2aSLawrence Brakmo 	struct bpf_sock_ops_kern sock_ops;
202540304b2aSLawrence Brakmo 	int ret;
202640304b2aSLawrence Brakmo 
2027b73042b8SLawrence Brakmo 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2028f19397a5SLawrence Brakmo 	if (sk_fullsock(sk)) {
2029f19397a5SLawrence Brakmo 		sock_ops.is_fullsock = 1;
203040304b2aSLawrence Brakmo 		sock_owned_by_me(sk);
2031f19397a5SLawrence Brakmo 	}
203240304b2aSLawrence Brakmo 
203340304b2aSLawrence Brakmo 	sock_ops.sk = sk;
203440304b2aSLawrence Brakmo 	sock_ops.op = op;
2035de525be2SLawrence Brakmo 	if (nargs > 0)
2036de525be2SLawrence Brakmo 		memcpy(sock_ops.args, args, nargs * sizeof(*args));
203740304b2aSLawrence Brakmo 
203840304b2aSLawrence Brakmo 	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
203940304b2aSLawrence Brakmo 	if (ret == 0)
204040304b2aSLawrence Brakmo 		ret = sock_ops.reply;
204140304b2aSLawrence Brakmo 	else
204240304b2aSLawrence Brakmo 		ret = -1;
204340304b2aSLawrence Brakmo 	return ret;
204440304b2aSLawrence Brakmo }
2045de525be2SLawrence Brakmo 
2046de525be2SLawrence Brakmo static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2047de525be2SLawrence Brakmo {
2048de525be2SLawrence Brakmo 	u32 args[2] = {arg1, arg2};
2049de525be2SLawrence Brakmo 
2050de525be2SLawrence Brakmo 	return tcp_call_bpf(sk, op, 2, args);
2051de525be2SLawrence Brakmo }
2052de525be2SLawrence Brakmo 
2053de525be2SLawrence Brakmo static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2054de525be2SLawrence Brakmo 				    u32 arg3)
2055de525be2SLawrence Brakmo {
2056de525be2SLawrence Brakmo 	u32 args[3] = {arg1, arg2, arg3};
2057de525be2SLawrence Brakmo 
2058de525be2SLawrence Brakmo 	return tcp_call_bpf(sk, op, 3, args);
2059de525be2SLawrence Brakmo }
2060de525be2SLawrence Brakmo 
206140304b2aSLawrence Brakmo #else
2062de525be2SLawrence Brakmo static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
206340304b2aSLawrence Brakmo {
206440304b2aSLawrence Brakmo 	return -EPERM;
206540304b2aSLawrence Brakmo }
2066de525be2SLawrence Brakmo 
2067de525be2SLawrence Brakmo static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2068de525be2SLawrence Brakmo {
2069de525be2SLawrence Brakmo 	return -EPERM;
2070de525be2SLawrence Brakmo }
2071de525be2SLawrence Brakmo 
2072de525be2SLawrence Brakmo static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2073de525be2SLawrence Brakmo 				    u32 arg3)
2074de525be2SLawrence Brakmo {
2075de525be2SLawrence Brakmo 	return -EPERM;
2076de525be2SLawrence Brakmo }
2077de525be2SLawrence Brakmo 
207840304b2aSLawrence Brakmo #endif
207940304b2aSLawrence Brakmo 
20808550f328SLawrence Brakmo static inline u32 tcp_timeout_init(struct sock *sk)
20818550f328SLawrence Brakmo {
20828550f328SLawrence Brakmo 	int timeout;
20838550f328SLawrence Brakmo 
2084de525be2SLawrence Brakmo 	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
20858550f328SLawrence Brakmo 
20868550f328SLawrence Brakmo 	if (timeout <= 0)
20878550f328SLawrence Brakmo 		timeout = TCP_TIMEOUT_INIT;
20888550f328SLawrence Brakmo 	return timeout;
20898550f328SLawrence Brakmo }
20908550f328SLawrence Brakmo 
209113d3b1ebSLawrence Brakmo static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
209213d3b1ebSLawrence Brakmo {
209313d3b1ebSLawrence Brakmo 	int rwnd;
209413d3b1ebSLawrence Brakmo 
2095de525be2SLawrence Brakmo 	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
209613d3b1ebSLawrence Brakmo 
209713d3b1ebSLawrence Brakmo 	if (rwnd < 0)
209813d3b1ebSLawrence Brakmo 		rwnd = 0;
209913d3b1ebSLawrence Brakmo 	return rwnd;
210013d3b1ebSLawrence Brakmo }
210191b5b21cSLawrence Brakmo 
210291b5b21cSLawrence Brakmo static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
210391b5b21cSLawrence Brakmo {
2104de525be2SLawrence Brakmo 	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
210591b5b21cSLawrence Brakmo }
210660e2a778SUrsula Braun 
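/* The helpers above only ask an attached sock_ops BPF program for a value and
 * fall back to the kernel default when the program declines.  A rough sketch
 * of the BPF program side (modelled loosely on the samples/bpf sock_ops
 * examples; 'example_sockops' is a made-up name):
 *
 *	SEC("sockops")
 *	int example_sockops(struct bpf_sock_ops *skops)
 *	{
 *		int rv = -1;		// decline: let the kernel use defaults
 *
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_NEEDS_ECN:
 *			rv = 1;		// request ECN on this connection
 *			break;
 *		}
 *		skops->reply = rv;
 *		return 1;
 *	}
 */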
210760e2a778SUrsula Braun #if IS_ENABLED(CONFIG_SMC)
210860e2a778SUrsula Braun extern struct static_key_false tcp_have_smc;
210960e2a778SUrsula Braun #endif
21106dac1523SIlya Lesokhin 
21116dac1523SIlya Lesokhin #if IS_ENABLED(CONFIG_TLS_DEVICE)
21126dac1523SIlya Lesokhin void clean_acked_data_enable(struct inet_connection_sock *icsk,
21136dac1523SIlya Lesokhin 			     void (*cad)(struct sock *sk, u32 ack_seq));
21146dac1523SIlya Lesokhin void clean_acked_data_disable(struct inet_connection_sock *icsk);
21156dac1523SIlya Lesokhin 
21166dac1523SIlya Lesokhin #endif
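/* clean_acked_data_enable()/disable() let a TX offload user (TLS device
 * offload is the in-tree consumer) register a callback that runs as ACKs
 * advance, so data already known to be acknowledged can be released early.
 * Sketch with a hypothetical callback name:
 *
 *	static void example_clean_acked(struct sock *sk, u32 acked_seq)
 *	{
 *		// free per-record state for sequence numbers before acked_seq
 *	}
 *
 *	clean_acked_data_enable(inet_csk(sk), example_clean_acked);
 *	...
 *	clean_acked_data_disable(inet_csk(sk));
 */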
21176dac1523SIlya Lesokhin 
21181da177e4SLinus Torvalds #endif	/* _TCP_H */
2119