xref: /linux/include/net/tcp.h (revision 22ac5ad4a7d4e201d19b7f04ce8d79346c80a34b)
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		1024

/* Probing interval, defaults to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Stop MTU probing once the search range narrows to this many bytes */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this number of duplicate ACKs, fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal window scale value according to RFC1323 */
#define TCP_MAX_WSCALE		14U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when actively opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs.  Nevertheless
				 * this value corresponds to 63secs
				 * of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passively opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
                                 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min; the new value is 60sec,
				  * to combine the FIN-WAIT-2 timeout with
				  * the TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
					                 * for local resources.
					                 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * to (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to that
					 * provided by the timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than the
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number that follows the option value when sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 *     TCP option lengths
 */

#define TCPOLEN_MSS            4
#define TCPOLEN_WINDOW         3
#define TCPOLEN_SACK_PERM      2
#define TCPOLEN_TIMESTAMP      10
#define TCPOLEN_MD5SIG         18
#define TCPOLEN_FASTOPEN_BASE  2
#define TCPOLEN_EXP_FASTOPEN_BASE  4
#define TCPOLEN_EXP_SMC_BASE   6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return tcp_memory_pressure;
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)
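
/* Illustrative sketch, not part of the original header: the signed cast
 * in before() keeps the comparison correct across sequence number
 * wraparound.  With seq1 = 0xfffffff0 (just before wrap) and
 * seq2 = 0x10 (just after wrap), seq1 - seq2 is 0xffffffe0, which is
 * negative when reinterpreted as __s32, so before(seq1, seq2) is true
 * even though seq1 > seq2 as plain unsigned values.  The helper name
 * below is hypothetical.
 */
static inline bool tcp_seq_wraparound_demo(void)
{
	return before(0xfffffff0, 0x10) && after(0x10, 0xfffffff0);
}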

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

bool tcp_check_oom(struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

void tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct tcphdr *th);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
unsigned int tcp_poll(struct file *file, struct socket *sock,
		      struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen);
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len);
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow.
 * But do not dirty this field too often (once per second is enough).
 * It is racy as we do not hold a lock, but the race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	unsigned long now = jiffies;

	if (time_after(now, last_overflow + HZ))
		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;

	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}
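
/* Worked example (added for illustration): tcp_cookie_time() returns the
 * number of whole TCP_SYNCOOKIE_PERIODs since boot.  If the machine has
 * been up for 125 seconds (get_jiffies_64() == 125 * HZ), then
 * 125 * HZ / (60 * HZ) == 2, so the counter is 2; a cookie minted while
 * the counter was 1 is still within MAX_SYNCOOKIE_AGE and stays valid.
 */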

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
		     int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	hrtimer_cancel(&tcp_sk(sk)->pacing_timer);
	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
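
/* Worked example (added for illustration, based on how tcp_input.c
 * maintains these fields): __tcp_set_rto() computes the RFC 6298 value
 * RTO = SRTT + 4 * RTTVAR.  srtt_us is stored scaled by 8, so
 * srtt_us >> 3 is the smoothed RTT in usec, and rttvar_us is kept
 * pre-scaled so it already approximates 4 * RTTVAR.  With
 * srtt_us = 800 (a 100 usec SRTT) and rttvar_us = 200, the result is
 * usecs_to_jiffies(100 + 200).
 */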

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
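
/* Worked example (added for illustration): if rcv_wup = 1000 and
 * rcv_wnd = 500, the advertised right edge is 1500.  A peer that has
 * pushed data up to rcv_nxt = 1600 has overrun the offer: the raw
 * result is -100, and the helper clamps the advertised window to 0.
 */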

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically was the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323).
 * It is no longer tied to jiffies, but to a 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return local_clock();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

/* This should only be used in contexts where tp->tcp_mstamp is up to date */
static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}

/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
}
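
/* Worked example (added for illustration): these clocks differ only in
 * units.  At the instant t ns after boot, tcp_clock_ns() == t,
 * tcp_clock_us() == t / NSEC_PER_USEC, and tcp_time_stamp_raw()
 * == t / (NSEC_PER_SEC / TCP_TS_HZ), i.e. t in milliseconds since
 * TCP_TS_HZ is 1000.  tcp_time_stamp() applies the same usec-to-ms
 * conversion to the cached tp->tcp_mstamp instead of reading the clock.
 */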


/* Refresh the 1us clock of a TCP socket,
 * ensuring monotonically increasing values.
 */
static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_us();

	if (val > tp->tcp_mstamp)
		tp->tcp_mstamp = val;
}

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}


#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 * 	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:5;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
			/* There is space for up to 24 bytes */
			__u32 in_flight:30,/* Bytes in flight at transmit */
			      is_app_limited:1, /* cwnd not fully used? */
			      unused:1;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;   /* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
		struct {
			__u32 key;
			__u32 flags;
			struct bpf_map *map;
			void *data_end;
		} bpf;
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
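
/* Illustrative sketch, not part of the original header: how a transmit
 * path would stamp per-packet control data into skb->cb[] through the
 * TCP_SKB_CB() accessor.  The helper name is hypothetical.
 */
static inline void tcp_skb_cb_demo(struct sk_buff *skb, u32 seq, u32 len)
{
	TCP_SKB_CB(skb)->seq = seq;		/* first sequence number */
	TCP_SKB_CB(skb)->end_seq = seq + len;	/* seq + datalen, no FIN/SYN */
	TCP_SKB_CB(skb)->tcp_flags = TCPHDR_ACK;
}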


#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}
#endif

/* TCP_SKB_CB reference means this cannot be used from early demux */
static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return true;
#endif
	return false;
}

/* TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
	CA_EVENT_NON_DELAYED_ACK,
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp; /* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
};

struct tcp_congestion_ops {
	struct list_head	list;
	u32 key;
	u32 flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);
	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
	/* suggest number of segments for each skb to transmit (optional) */
	u32 (*tso_segs_goal)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);
	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char 		name[TCP_CA_NAME_MAX];
	struct module 	*owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(const char *name);
void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
			    struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
		  struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

/* These functions determine how the current flow behaves with respect to
 * SACK handling. SACK is negotiated with the peer, and therefore it can
 * vary between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control; use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
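
/* Worked example (added for illustration): with packets_out = 10,
 * sacked_out = 3, lost_out = 2 and retrans_out = 1, the estimate is
 * 10 - (3 + 2) + 1 = 6 packets still in the network.
 */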

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is the cwnd reduction phase, when cwnd is decreasing
 * towards ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}
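
/* Worked example (added for illustration): (cwnd >> 1) + (cwnd >> 2)
 * is 3/4 of cwnd, so outside of cwnd reduction a snd_cwnd of 20 yields
 * max(snd_ssthresh, 15).
 */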

/* Use a define here intentionally so the WARN_ON location is shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example:
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from sending more filler packets or data to artificially blow up the
 * cwnd usage, and allow application-limited processes to probe bw more
 * aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tp->snd_cwnd < 2 * tp->max_packets_out;

	return tp->is_cwnd_limited;
}

/* Something is really bad: we could not queue an additional packet,
 * because the qdisc is full or the receiver sent a zero window.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of the current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

	return (unsigned long)min_t(u64, when, max_when);
}
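
/* Worked example (added for illustration): with HZ = 1000, TCP_RTO_MIN
 * is 200 jiffies, so an icsk_rto of 2 jiffies is raised to a base of
 * 200.  After three backoffs the probe timer would want 200 << 3 =
 * 1600 jiffies, capped at max_when by the min_t() above.
 */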

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
	return __skb_checksum_complete(skb);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__tcp_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
void tcp_set_state(struct sock *sk, int state);

void tcp_done(struct sock *sk);

int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

u32 tcp_default_init_rwnd(u32 mss);
void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out ||
	    ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;

	return tcp_adv_win_scale <= 0 ?
		(space >> (-tcp_adv_win_scale)) :
		space - (space >> tcp_adv_win_scale);
}
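
/* Worked example (added for illustration): with tcp_adv_win_scale = 1,
 * half the space is reserved as overhead: win = space - (space >> 1).
 * With scale 2 the window is 3/4 of space, and a negative scale such
 * as -2 advertises only space >> 2.
 */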

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, sk->sk_rcvbuf);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);

	return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
			  tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * while subsequent tcp messages have valid values. Ignore the 0
	 * value, or else a 'negative' tsval might forbid us from accepting
	 * their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}
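
/* Worked example (added for illustration): if ts_recent = 5000 and an
 * arriving segment carries rcv_tsval = 4990, then
 * (s32)(5000 - 4990) = 10 > 0, so with paws_win = 0 the first test
 * fails; unless ts_recent is stale (older than 24 days) or zero, the
 * segment is treated as a PAWS violation by tcp_paws_reject() below.
 */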
1382 
1383 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1384 				   int rst)
1385 {
1386 	if (tcp_paws_check(rx_opt, 0))
1387 		return false;
1388 
1389 	/* RST segments are not recommended to carry timestamp,
1390 	   and, if they do, it is recommended to ignore PAWS because
1391 	   "their cleanup function should take precedence over timestamps."
1392 	   Certainly, it is mistake. It is necessary to understand the reasons
1393 	   of this constraint to relax it: if peer reboots, clock may go
1394 	   out-of-sync and half-open connections will not be reset.
1395 	   Actually, the problem would be not existing if all
1396 	   the implementations followed draft about maintaining clock
1397 	   via reboots. Linux-2.2 DOES NOT!
1398 
1399 	   However, we can relax time bounds for RST segments to MSL.
1400 	 */
1401 	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
1402 		return false;
1403 	return true;
1404 }
1405 
1406 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1407 			  int mib_idx, u32 *last_oow_ack_time);
1408 
1409 static inline void tcp_mib_init(struct net *net)
1410 {
1411 	/* See RFC 2012 */
1412 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1413 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1414 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1415 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1416 }
1417 
1418 /* from STCP */
1419 static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1420 {
1421 	tp->lost_skb_hint = NULL;
1422 }
1423 
1424 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1425 {
1426 	tcp_clear_retrans_hints_partial(tp);
1427 	tp->retransmit_skb_hint = NULL;
1428 }
1429 
1430 union tcp_md5_addr {
1431 	struct in_addr  a4;
1432 #if IS_ENABLED(CONFIG_IPV6)
1433 	struct in6_addr	a6;
1434 #endif
1435 };
1436 
1437 /* - key database */
1438 struct tcp_md5sig_key {
1439 	struct hlist_node	node;
1440 	u8			keylen;
1441 	u8			family; /* AF_INET or AF_INET6 */
1442 	union tcp_md5_addr	addr;
1443 	u8			prefixlen;
1444 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1445 	struct rcu_head		rcu;
1446 };
1447 
1448 /* - sock block */
1449 struct tcp_md5sig_info {
1450 	struct hlist_head	head;
1451 	struct rcu_head		rcu;
1452 };
1453 
1454 /* - pseudo header */
1455 struct tcp4_pseudohdr {
1456 	__be32		saddr;
1457 	__be32		daddr;
1458 	__u8		pad;
1459 	__u8		protocol;
1460 	__be16		len;
1461 };
1462 
1463 struct tcp6_pseudohdr {
1464 	struct in6_addr	saddr;
1465 	struct in6_addr daddr;
1466 	__be32		len;
1467 	__be32		protocol;	/* including padding */
1468 };
1469 
1470 union tcp_md5sum_block {
1471 	struct tcp4_pseudohdr ip4;
1472 #if IS_ENABLED(CONFIG_IPV6)
1473 	struct tcp6_pseudohdr ip6;
1474 #endif
1475 };
1476 
1477 /* - pool: digest algorithm, hash description and scratch buffer */
1478 struct tcp_md5sig_pool {
1479 	struct ahash_request	*md5_req;
1480 	void			*scratch;
1481 };
1482 
1483 /* - functions */
1484 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1485 			const struct sock *sk, const struct sk_buff *skb);
1486 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1487 		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
1488 		   gfp_t gfp);
1489 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1490 		   int family, u8 prefixlen);
1491 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1492 					 const struct sock *addr_sk);
1493 
1494 #ifdef CONFIG_TCP_MD5SIG
1495 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1496 					 const union tcp_md5_addr *addr,
1497 					 int family);
1498 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1499 #else
1500 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1501 					 const union tcp_md5_addr *addr,
1502 					 int family)
1503 {
1504 	return NULL;
1505 }
1506 #define tcp_twsk_md5_key(twsk)	NULL
1507 #endif
1508 
1509 bool tcp_alloc_md5sig_pool(void);
1510 
1511 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1512 static inline void tcp_put_md5sig_pool(void)
1513 {
1514 	local_bh_enable();
1515 }
1516 
1517 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1518 			  unsigned int header_len);
1519 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1520 		     const struct tcp_md5sig_key *key);
1521 
1522 /* From tcp_fastopen.c */
1523 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1524 			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
1525 			    unsigned long *last_syn_loss);
1526 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1527 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
1528 			    u16 try_exp);
1529 struct tcp_fastopen_request {
1530 	/* Fast Open cookie. Size 0 means a cookie request */
1531 	struct tcp_fastopen_cookie	cookie;
1532 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
1533 	size_t				size;
1534 	int				copied;	/* queued in tcp_connect() */
1535 };
1536 void tcp_free_fastopen_req(struct tcp_sock *tp);
1537 void tcp_fastopen_destroy_cipher(struct sock *sk);
1538 void tcp_fastopen_ctx_destroy(struct net *net);
1539 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
1540 			      void *key, unsigned int len);
1541 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1542 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1543 			      struct request_sock *req,
1544 			      struct tcp_fastopen_cookie *foc,
1545 			      const struct dst_entry *dst);
1546 void tcp_fastopen_init_key_once(struct net *net);
1547 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
1548 			     struct tcp_fastopen_cookie *cookie);
1549 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
1550 #define TCP_FASTOPEN_KEY_LENGTH 16
1551 
1552 /* Fastopen key context */
1553 struct tcp_fastopen_context {
1554 	struct crypto_cipher	*tfm;
1555 	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
1556 	struct rcu_head		rcu;
1557 };
1558 
1559 extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
1560 void tcp_fastopen_active_disable(struct sock *sk);
1561 bool tcp_fastopen_active_should_disable(struct sock *sk);
1562 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
1563 void tcp_fastopen_active_timeout_reset(void);
1564 
1565 /* Latencies incurred by various limits for a sender. They are
1566  * chronograph-like stats that are mutually exclusive.
1567  */
1568 enum tcp_chrono {
1569 	TCP_CHRONO_UNSPEC,
1570 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
1571 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
1572 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
1573 	__TCP_CHRONO_MAX,
1574 };
1575 
1576 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
1577 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
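
/* Usage sketch (illustrative only, hypothetical helper name): a sender
 * brackets a stalled period with start/stop so the elapsed time is
 * charged to exactly one chronograph.
 */
static inline void tcp_account_sndbuf_stall_sketch(struct sock *sk)
{
	if (!sk_stream_memory_free(sk))
		tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
	else
		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
}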
1578 
1579 /* This helper is needed because skb->tcp_tsorted_anchor shares
1580  * its memory storage with skb->destructor/_skb_refdst.
1581  */
1582 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
1583 {
1584 	skb->destructor = NULL;
1585 	skb->_skb_refdst = 0UL;
1586 }
1587 
1588 #define tcp_skb_tsorted_save(skb) {		\
1589 	unsigned long _save = skb->_skb_refdst;	\
1590 	skb->_skb_refdst = 0UL;
1591 
1592 #define tcp_skb_tsorted_restore(skb)		\
1593 	skb->_skb_refdst = _save;		\
1594 }
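
/* Usage sketch (illustrative only, mirroring how tcp_output.c uses the
 * pair): the braces come from the macros themselves, and _skb_refdst is
 * zeroed across the section so skb helpers do not see a stale dst:
 *
 *	tcp_skb_tsorted_save(skb) {
 *		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
 *	} tcp_skb_tsorted_restore(skb);
 */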
1595 
1596 void tcp_write_queue_purge(struct sock *sk);
1597 
1598 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
1599 {
1600 	return skb_rb_first(&sk->tcp_rtx_queue);
1601 }
1602 
1603 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1604 {
1605 	return skb_peek(&sk->sk_write_queue);
1606 }
1607 
1608 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1609 {
1610 	return skb_peek_tail(&sk->sk_write_queue);
1611 }
1612 
1613 #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
1614 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1615 
1616 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1617 {
1618 	return skb_peek(&sk->sk_write_queue);
1619 }
1620 
1621 static inline bool tcp_skb_is_last(const struct sock *sk,
1622 				   const struct sk_buff *skb)
1623 {
1624 	return skb_queue_is_last(&sk->sk_write_queue, skb);
1625 }
1626 
1627 static inline bool tcp_write_queue_empty(const struct sock *sk)
1628 {
1629 	return skb_queue_empty(&sk->sk_write_queue);
1630 }
1631 
1632 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
1633 {
1634 	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
1635 }
1636 
1637 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
1638 {
1639 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
1640 }
1641 
1642 static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1643 {
1644 	if (tcp_write_queue_empty(sk))
1645 		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
1646 
1647 	if (tcp_sk(sk)->highest_sack == skb_unlinked)
1648 		tcp_sk(sk)->highest_sack = NULL;
1649 }
1650 
1651 static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1652 {
1653 	__skb_queue_tail(&sk->sk_write_queue, skb);
1654 }
1655 
1656 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1657 {
1658 	__tcp_add_write_queue_tail(sk, skb);
1659 
1660 	/* Queue it, remembering where we must start sending. */
1661 	if (sk->sk_write_queue.next == skb) {
1662 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
1663 
1664 		if (tcp_sk(sk)->highest_sack == NULL)
1665 			tcp_sk(sk)->highest_sack = skb;
1666 	}
1667 }
1668 
1669 /* Insert new before skb on the write queue of sk.  */
1670 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1671 						  struct sk_buff *skb,
1672 						  struct sock *sk)
1673 {
1674 	__skb_queue_before(&sk->sk_write_queue, skb, new);
1675 }
1676 
1677 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1678 {
1679 	tcp_skb_tsorted_anchor_cleanup(skb);
1680 	__skb_unlink(skb, &sk->sk_write_queue);
1681 }
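
/* Usage sketch (illustrative only, assuming the caller holds the socket
 * lock): the _safe walk macro above permits unlinking the current skb;
 * the helper name and the predicate are hypothetical.
 */
static inline void tcp_write_queue_drop_empty_sketch(struct sock *sk,
						     struct sk_buff *skb)
{
	struct sk_buff *tmp;

	tcp_for_write_queue_from_safe(skb, tmp, sk) {
		if (!skb->len) {
			tcp_unlink_write_queue(skb, sk);
			sk_wmem_free_skb(sk, skb);
		}
	}
}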
1682 
1683 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
1684 
1685 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
1686 {
1687 	tcp_skb_tsorted_anchor_cleanup(skb);
1688 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
1689 }
1690 
1691 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
1692 {
1693 	list_del(&skb->tcp_tsorted_anchor);
1694 	tcp_rtx_queue_unlink(skb, sk);
1695 	sk_wmem_free_skb(sk, skb);
1696 }
1697 
1698 static inline void tcp_push_pending_frames(struct sock *sk)
1699 {
1700 	if (tcp_send_head(sk)) {
1701 		struct tcp_sock *tp = tcp_sk(sk);
1702 
1703 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1704 	}
1705 }
1706 
1707 /* Start sequence of the skb just after the highest skb with SACKed
1708  * bit; valid only if sacked_out > 0 or if the caller has otherwise
1709  * ensured validity.
1710  */
1711 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1712 {
1713 	if (!tp->sacked_out)
1714 		return tp->snd_una;
1715 
1716 	if (tp->highest_sack == NULL)
1717 		return tp->snd_nxt;
1718 
1719 	return TCP_SKB_CB(tp->highest_sack)->seq;
1720 }
1721 
1722 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1723 {
1724 	struct sk_buff *next = skb_rb_next(skb);
1725 
1726 	tcp_sk(sk)->highest_sack = next ?: tcp_send_head(sk);
1727 }
1728 
1729 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1730 {
1731 	return tcp_sk(sk)->highest_sack;
1732 }
1733 
1734 static inline void tcp_highest_sack_reset(struct sock *sk)
1735 {
1736 	struct sk_buff *skb = tcp_rtx_queue_head(sk);
1737 
1738 	tcp_sk(sk)->highest_sack = skb ?: tcp_send_head(sk);
1739 }
1740 
1741 /* Called when the old skb is about to be deleted (to be combined with the new skb) */
1742 static inline void tcp_highest_sack_combine(struct sock *sk,
1743 					    struct sk_buff *old,
1744 					    struct sk_buff *new)
1745 {
1746 	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1747 		tcp_sk(sk)->highest_sack = new;
1748 }
1749 
1750 /* This helper checks if the socket has IP_TRANSPARENT set */
1751 static inline bool inet_sk_transparent(const struct sock *sk)
1752 {
1753 	switch (sk->sk_state) {
1754 	case TCP_TIME_WAIT:
1755 		return inet_twsk(sk)->tw_transparent;
1756 	case TCP_NEW_SYN_RECV:
1757 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
1758 	}
1759 	return inet_sk(sk)->transparent;
1760 }
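
/* Userspace counterpart (illustrative only): a TPROXY-style proxy sets
 * IP_TRANSPARENT (CAP_NET_ADMIN required), which is what the states
 * above end up reporting.
 */
#if 0	/* userspace example, not part of this header */
#include <sys/socket.h>
#include <netinet/in.h>

static int make_transparent(int fd)
{
	int one = 1;

	return setsockopt(fd, SOL_IP, IP_TRANSPARENT, &one, sizeof(one));
}
#endif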
1761 
1762 /* Determines whether this is a thin stream (which may suffer from
1763  * increased latency). Used to trigger latency-reducing mechanisms.
1764  */
1765 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1766 {
1767 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1768 }
1769 
1770 /* /proc */
1771 enum tcp_seq_states {
1772 	TCP_SEQ_STATE_LISTENING,
1773 	TCP_SEQ_STATE_ESTABLISHED,
1774 };
1775 
1776 int tcp_seq_open(struct inode *inode, struct file *file);
1777 
1778 struct tcp_seq_afinfo {
1779 	char				*name;
1780 	sa_family_t			family;
1781 	const struct file_operations	*seq_fops;
1782 	struct seq_operations		seq_ops;
1783 };
1784 
1785 struct tcp_iter_state {
1786 	struct seq_net_private	p;
1787 	sa_family_t		family;
1788 	enum tcp_seq_states	state;
1789 	struct sock		*syn_wait_sk;
1790 	int			bucket, offset, sbucket, num;
1791 	loff_t			last_pos;
1792 };
1793 
1794 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1795 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
1796 
1797 extern struct request_sock_ops tcp_request_sock_ops;
1798 extern struct request_sock_ops tcp6_request_sock_ops;
1799 
1800 void tcp_v4_destroy_sock(struct sock *sk);
1801 
1802 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1803 				netdev_features_t features);
1804 struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1805 int tcp_gro_complete(struct sk_buff *skb);
1806 
1807 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1808 
1809 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1810 {
1811 	struct net *net = sock_net((struct sock *)tp);
1812 	return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1813 }
1814 
1815 static inline bool tcp_stream_memory_free(const struct sock *sk)
1816 {
1817 	const struct tcp_sock *tp = tcp_sk(sk);
1818 	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1819 
1820 	return notsent_bytes < tcp_notsent_lowat(tp);
1821 }
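
/* Userspace counterpart (illustrative only): TCP_NOTSENT_LOWAT caps the
 * unsent backlog per socket; tcp_stream_memory_free() then keeps the
 * socket from polling writable until the backlog drops below the limit.
 */
#if 0	/* userspace example, not part of this header */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int cap_unsent_bytes(int fd, unsigned int bytes)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
			  &bytes, sizeof(bytes));
}
#endif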
1822 
1823 #ifdef CONFIG_PROC_FS
1824 int tcp4_proc_init(void);
1825 void tcp4_proc_exit(void);
1826 #endif
1827 
1828 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1829 int tcp_conn_request(struct request_sock_ops *rsk_ops,
1830 		     const struct tcp_request_sock_ops *af_ops,
1831 		     struct sock *sk, struct sk_buff *skb);
1832 
1833 /* TCP af-specific functions */
1834 struct tcp_sock_af_ops {
1835 #ifdef CONFIG_TCP_MD5SIG
1836 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
1837 						const struct sock *addr_sk);
1838 	int		(*calc_md5_hash)(char *location,
1839 					 const struct tcp_md5sig_key *md5,
1840 					 const struct sock *sk,
1841 					 const struct sk_buff *skb);
1842 	int		(*md5_parse)(struct sock *sk,
1843 				     int optname,
1844 				     char __user *optval,
1845 				     int optlen);
1846 #endif
1847 };
1848 
1849 struct tcp_request_sock_ops {
1850 	u16 mss_clamp;
1851 #ifdef CONFIG_TCP_MD5SIG
1852 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1853 						 const struct sock *addr_sk);
1854 	int		(*calc_md5_hash) (char *location,
1855 					  const struct tcp_md5sig_key *md5,
1856 					  const struct sock *sk,
1857 					  const struct sk_buff *skb);
1858 #endif
1859 	void (*init_req)(struct request_sock *req,
1860 			 const struct sock *sk_listener,
1861 			 struct sk_buff *skb);
1862 #ifdef CONFIG_SYN_COOKIES
1863 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
1864 				 __u16 *mss);
1865 #endif
1866 	struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1867 				       const struct request_sock *req);
1868 	u32 (*init_seq)(const struct sk_buff *skb);
1869 	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
1870 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1871 			   struct flowi *fl, struct request_sock *req,
1872 			   struct tcp_fastopen_cookie *foc,
1873 			   enum tcp_synack_type synack_type);
1874 };
1875 
1876 #ifdef CONFIG_SYN_COOKIES
1877 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1878 					 const struct sock *sk, struct sk_buff *skb,
1879 					 __u16 *mss)
1880 {
1881 	tcp_synq_overflow(sk);
1882 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
1883 	return ops->cookie_init_seq(skb, mss);
1884 }
1885 #else
1886 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1887 					 const struct sock *sk, struct sk_buff *skb,
1888 					 __u16 *mss)
1889 {
1890 	return 0;
1891 }
1892 #endif
1893 
1894 int tcpv4_offload_init(void);
1895 
1896 void tcp_v4_init(void);
1897 void tcp_init(void);
1898 
1899 /* tcp_recovery.c */
1900 extern void tcp_rack_mark_lost(struct sock *sk);
1901 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
1902 			     u64 xmit_time);
1903 extern void tcp_rack_reo_timeout(struct sock *sk);
1904 
1905 /* At how many usecs into the future should the RTO fire? */
1906 static inline s64 tcp_rto_delta_us(const struct sock *sk)
1907 {
1908 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
1909 	u32 rto = inet_csk(sk)->icsk_rto;
1910 	u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto);
1911 
1912 	return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
1913 }
1914 
1915 /*
1916  * Save and compile IPv4 options, return a pointer to the copy
1917  */
1918 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
1919 							 struct sk_buff *skb)
1920 {
1921 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1922 	struct ip_options_rcu *dopt = NULL;
1923 
1924 	if (opt->optlen) {
1925 		int opt_size = sizeof(*dopt) + opt->optlen;
1926 
1927 		dopt = kmalloc(opt_size, GFP_ATOMIC);
1928 		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
1929 			kfree(dopt);
1930 			dopt = NULL;
1931 		}
1932 	}
1933 	return dopt;
1934 }
1935 
1936 /* Locally generated TCP pure ACKs have skb->truesize == 2
1937  * (see tcp_send_ack() in net/ipv4/tcp_output.c).
1938  * Checking truesize is much faster than dissecting the packet to
1939  * find out (think of GRE encapsulation, IPv4, IPv6, ...).
1940  */
1941 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
1942 {
1943 	return skb->truesize == 2;
1944 }
1945 
1946 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
1947 {
1948 	skb->truesize = 2;
1949 }
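
/* Allocation sketch (illustrative only, hypothetical helper name):
 * this mirrors how tcp_send_ack() builds and tags an empty ACK so the
 * cheap truesize test above can identify it later.
 */
static inline struct sk_buff *tcp_alloc_pure_ack_sketch(void)
{
	struct sk_buff *buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);

	if (!buff)
		return NULL;
	/* Reserve room for the headers, then mark truesize = 2. */
	skb_reserve(buff, MAX_TCP_HEADER);
	skb_set_tcp_pure_ack(buff);
	return buff;
}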
1950 
1951 static inline int tcp_inq(struct sock *sk)
1952 {
1953 	struct tcp_sock *tp = tcp_sk(sk);
1954 	int answ;
1955 
1956 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1957 		answ = 0;
1958 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
1959 		   !tp->urg_data ||
1960 		   before(tp->urg_seq, tp->copied_seq) ||
1961 		   !before(tp->urg_seq, tp->rcv_nxt)) {
1962 
1963 		answ = tp->rcv_nxt - tp->copied_seq;
1964 
1965 		/* Subtract 1, if FIN was received */
1966 		if (answ && sock_flag(sk, SOCK_DONE))
1967 			answ--;
1968 	} else {
1969 		answ = tp->urg_seq - tp->copied_seq;
1970 	}
1971 
1972 	return answ;
1973 }
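
/* Userspace counterpart (illustrative only): SIOCINQ (aka FIONREAD)
 * reports the value computed by tcp_inq() above.
 */
#if 0	/* userspace example, not part of this header */
#include <sys/ioctl.h>
#include <linux/sockios.h>

static int bytes_readable(int fd)
{
	int n = 0;

	return ioctl(fd, SIOCINQ, &n) ? -1 : n;
}
#endif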
1974 
1975 int tcp_peek_len(struct socket *sock);
1976 
1977 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1978 {
1979 	u16 segs_in;
1980 
1981 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1982 	tp->segs_in += segs_in;
1983 	if (skb->len > tcp_hdrlen(skb))
1984 		tp->data_segs_in += segs_in;
1985 }
1986 
1987 /*
1988  * The TCP listen path runs lockless.
1989  * We force "struct sock" to be const qualified to make sure
1990  * we don't modify one of its fields by mistake.
1991  * Here we increment sk_drops, which is an atomic_t, so it is
1992  * safe to cast the sock back to writable.
1993  */
1994 static inline void tcp_listendrop(const struct sock *sk)
1995 {
1996 	atomic_inc(&((struct sock *)sk)->sk_drops);
1997 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
1998 }
1999 
2000 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2001 
2002 /*
2003  * Interface for adding Upper Level Protocols over TCP
2004  */
2005 
2006 #define TCP_ULP_NAME_MAX	16
2007 #define TCP_ULP_MAX		128
2008 #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2009 
2010 struct tcp_ulp_ops {
2011 	struct list_head	list;
2012 
2013 	/* initialize ulp */
2014 	int (*init)(struct sock *sk);
2015 	/* cleanup ulp */
2016 	void (*release)(struct sock *sk);
2017 
2018 	char		name[TCP_ULP_NAME_MAX];
2019 	struct module	*owner;
2020 };
2021 int tcp_register_ulp(struct tcp_ulp_ops *type);
2022 void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2023 int tcp_set_ulp(struct sock *sk, const char *name);
2024 void tcp_get_available_ulp(char *buf, size_t len);
2025 void tcp_cleanup_ulp(struct sock *sk);
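
/* Registration sketch (illustrative only): a hypothetical ULP module
 * (which would live in its own .c file) fills in tcp_ulp_ops and
 * registers it; userspace then selects it with
 * setsockopt(fd, SOL_TCP, TCP_ULP, "demo_ulp", sizeof("demo_ulp")).
 */
#if 0	/* module example, not part of this header */
static int demo_ulp_init(struct sock *sk)
{
	return 0;	/* e.g. swap in ULP-specific sk->sk_prot callbacks */
}

static void demo_ulp_release(struct sock *sk)
{
}

static struct tcp_ulp_ops demo_ulp_ops __read_mostly = {
	.name		= "demo_ulp",
	.owner		= THIS_MODULE,
	.init		= demo_ulp_init,
	.release	= demo_ulp_release,
};

/* In the module init: tcp_register_ulp(&demo_ulp_ops); */
#endif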
2026 
2027 /* Call a BPF_SOCK_OPS program, which returns an int. If the return
2028  * value is < 0, the BPF op failed (for example, the loaded BPF
2029  * program does not support the chosen operation, or no BPF program
2030  * is loaded).
2031  */
2032 #ifdef CONFIG_BPF
2033 static inline int tcp_call_bpf(struct sock *sk, int op)
2034 {
2035 	struct bpf_sock_ops_kern sock_ops;
2036 	int ret;
2037 
2038 	if (sk_fullsock(sk))
2039 		sock_owned_by_me(sk);
2040 
2041 	memset(&sock_ops, 0, sizeof(sock_ops));
2042 	sock_ops.sk = sk;
2043 	sock_ops.op = op;
2044 
2045 	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2046 	if (ret == 0)
2047 		ret = sock_ops.reply;
2048 	else
2049 		ret = -1;
2050 	return ret;
2051 }
2052 #else
2053 static inline int tcp_call_bpf(struct sock *sk, int op)
2054 {
2055 	return -EPERM;
2056 }
2057 #endif
2058 
2059 static inline u32 tcp_timeout_init(struct sock *sk)
2060 {
2061 	int timeout;
2062 
2063 	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT);
2064 
2065 	if (timeout <= 0)
2066 		timeout = TCP_TIMEOUT_INIT;
2067 	return timeout;
2068 }
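
/* BPF-side sketch (illustrative only, built separately with
 * clang -target bpf): a sockops program attached to the cgroup answers
 * BPF_SOCK_OPS_TIMEOUT_INIT by writing skops->reply and returning 1,
 * which tcp_call_bpf() above then picks up. The value 10 is a
 * hypothetical initial RTO in jiffies.
 */
#if 0	/* BPF program, not part of this header */
#include <uapi/linux/bpf.h>
#define SEC(name) __attribute__((section(name), used))

SEC("sockops")
int demo_timeout_init(struct bpf_sock_ops *skops)
{
	int rv = -1;	/* < 0: kernel falls back to TCP_TIMEOUT_INIT */

	if (skops->op == BPF_SOCK_OPS_TIMEOUT_INIT)
		rv = 10;

	skops->reply = rv;
	return 1;
}
#endif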
2069 
2070 static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2071 {
2072 	int rwnd;
2073 
2074 	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT);
2075 
2076 	if (rwnd < 0)
2077 		rwnd = 0;
2078 	return rwnd;
2079 }
2080 
2081 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2082 {
2083 	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN) == 1);
2084 }
2085 
2086 #if IS_ENABLED(CONFIG_SMC)
2087 extern struct static_key_false tcp_have_smc;
2088 #endif
2089 #endif	/* _TCP_H */
2090