/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/bits.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/tcp_ao.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);

static inline void tcp_orphan_count_inc(void)
{
	this_cpu_inc(tcp_orphan_count);
}

static inline void tcp_orphan_count_dec(void)
{
	this_cpu_dec(tcp_orphan_count);
}

DECLARE_PER_CPU(u32, tcp_tw_isn);

void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16-bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* Probing interval, defaults to 10 minutes as per RFC 4821 */
#define TCP_PROBE_INTERVAL	600

/* Size of the MTU search range (bytes) below which TCP MTU probing stops */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this number of duplicate ACKs, fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal window scale value per RFC 1323 */
#define TCP_MAX_WSCALE		14U

/* Default sending frequency of the accurate ECN option, per RTT */
#define TCP_ACCECN_OPTION_BEACON	3

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when actively opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180 secs.  Nevertheless
				 * this value corresponds to
				 * 63 secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passively opening a connection.
				 * This corresponds to 31 secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
                                 /* BSD-style FIN_WAIT2 deadlock breaker.
				  * It used to be 3 min; the new value is 60 sec,
				  * to combine the FIN-WAIT-2 timeout with
				  * the TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);

#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX_SEC 120
#define TCP_RTO_MAX	((unsigned)(TCP_RTO_MAX_SEC * HZ))
#define TCP_RTO_MIN	((unsigned)(HZ / 5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */

#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */

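/* Illustrative sketch (not kernel code): concrete values for the jiffies-based
 * timer constants above, assuming a common HZ=1000 build; values scale with HZ.
 *
 *	TCP_DELACK_MAX  = HZ/5     = 200 jiffies ~ 200 ms
 *	TCP_DELACK_MIN  = HZ/25    =  40 jiffies ~  40 ms
 *	TCP_RTO_MIN     = HZ/5     = 200 jiffies ~ 200 ms
 *	TCP_RTO_MAX     = 120 * HZ = 120000 jiffies ~ 120 s
 *	TCP_TIMEOUT_MIN = 2 jiffies ~ 2 ms
 */
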
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
 * to avoid overflows. This assumes a clock slower than 1 MHz.
 * The default clock is 1 kHz; tcp_usec_ts uses 1 MHz.
 */
#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)

#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal to
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to that
					 * provided by the TIME-WAIT state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * the minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_AO		29	/* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_ACCECN0		172	/* 0xAC: Accurate ECN Order 0 */
#define TCPOPT_ACCECN1		174	/* 0xAE: Accurate ECN Order 1 */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 *     TCP option lengths
 */

#define TCPOLEN_MSS            4
#define TCPOLEN_WINDOW         3
#define TCPOLEN_SACK_PERM      2
#define TCPOLEN_TIMESTAMP      10
#define TCPOLEN_MD5SIG         18
#define TCPOLEN_FASTOPEN_BASE  2
#define TCPOLEN_ACCECN_BASE    2
#define TCPOLEN_EXP_FASTOPEN_BASE  4
#define TCPOLEN_EXP_SMC_BASE   6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
#define TCPOLEN_ACCECN_PERFIELD		3

/* Maximum number of byte counters in the AccECN option + size */
#define TCP_ACCECN_NUMFIELDS		3
#define TCP_ACCECN_MAXSIZE		(TCPOLEN_ACCECN_BASE + \
					 TCPOLEN_ACCECN_PERFIELD * \
					 TCP_ACCECN_NUMFIELDS)
#define TCP_ACCECN_SAFETY_SHIFT		1 /* SAFETY_FACTOR in accecn draft */
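
/* Worked example (illustrative): with the definitions above, the largest
 * AccECN option is
 *
 *	TCP_ACCECN_MAXSIZE = TCPOLEN_ACCECN_BASE +
 *			     TCPOLEN_ACCECN_PERFIELD * TCP_ACCECN_NUMFIELDS
 *			   = 2 + 3 * 3 = 11 bytes,
 *
 * i.e. kind + length plus three 3-byte (24-bit) byte counters.
 */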

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per RFC 6928 */
#define TCP_INIT_CWND		10

/* Bit flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);

extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sk_enabled(sk) &&
	    mem_cgroup_sk_under_memory_pressure(sk))
		return true;

	if (sk->sk_bypass_prot_mem)
		return false;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32-bit unsigned ints,
 * worrying about wraparound (handled automatically by unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
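
/* Illustrative sketch (not part of the kernel API): how the wraparound-safe
 * comparisons above behave across the 32-bit boundary; all values hypothetical.
 *
 *	before(0xfffffff0, 0x10);	// true: 0x10 is 0x20 bytes "later"
 *	after(0x10, 0xfffffff0);	// true, by definition of after()
 *	between(0x4, 0xfffffff0, 0x10);	// true: 0x4 lies in [0xfffffff0, 0x10]
 *
 * before() works because (__s32)(0xfffffff0 - 0x10) = (__s32)0xffffffe0 < 0.
 */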

static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, -skb->truesize);
	if (!skb_zcopy_pure(skb))
		sk_mem_uncharge(sk, skb->truesize);
	else
		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
	__kfree_skb(skb);
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(const struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

void tcp_tsq_work_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg);
void tcp_splice_eof(struct socket *sock);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);

void tcp_release_cb(struct sock *sk);

static inline bool tcp_release_cb_cond(struct sock *sk)
{
#ifdef CONFIG_INET
	if (likely(sk->sk_prot->release_cb == tcp_release_cb)) {
		if (unlikely(smp_load_acquire(&sk->sk_tsq_flags) & TCP_DEFERRED_ALL))
			tcp_release_cb(sk);
		return true;
	}
#endif
	return false;
}

void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
void tcp_twsk_purge(struct list_head *net_exit_list);
int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
			 unsigned int offset, size_t len);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule);

static inline void tcp_dec_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		/* How many ACKs S/ACKing new data have we sent? */
		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;

		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_MODE_RFC3168	BIT(0)
#define	TCP_ECN_QUEUE_CWR	BIT(1)
#define	TCP_ECN_DEMAND_CWR	BIT(2)
#define	TCP_ECN_SEEN		BIT(3)
#define	TCP_ECN_MODE_ACCECN	BIT(4)

#define	TCP_ECN_DISABLED	0
#define	TCP_ECN_MODE_PENDING	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
#define	TCP_ECN_MODE_ANY	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)

static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
{
	return tp->ecn_flags & TCP_ECN_MODE_ANY;
}

static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168;
}

static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN;
}

static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
{
	return !tcp_ecn_mode_any(tp);
}

static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING;
}

static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
{
	tp->ecn_flags &= ~TCP_ECN_MODE_ANY;
	tp->ecn_flags |= mode;
}
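
/* Illustrative sketch of the mode encoding above (hypothetical usage): the
 * RFC3168 and AccECN bits together mean the negotiation outcome is pending.
 *
 *	tcp_ecn_mode_set(tp, TCP_ECN_MODE_PENDING);
 *	tcp_ecn_mode_pending(tp);	// true: both mode bits are set
 *	tcp_ecn_mode_rfc3168(tp);	// false: not exactly the RFC3168 bit
 *	tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);
 *	tcp_ecn_mode_accecn(tp);	// true
 *	tcp_ecn_disabled(tp);		// false while any mode bit is set
 */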

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3,
	TCP_TW_ACK_OOW = 4
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th,
					      u32 *tw_isn,
					      enum skb_drop_reason *drop_reason);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race, enum skb_drop_reason *drop_reason);
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_pacing_rate(struct sock *sk);
void tcp_set_rto(struct sock *sk);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		      struct poll_table_struct *wait);
int do_tcp_getsockopt(struct sock *sk, int level,
		      int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int do_tcp_setsockopt(struct sock *sk, int level, int optname,
		      sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		int flags);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);

static inline void
tcp_update_recv_tstamps(struct sk_buff *skb,
			struct scm_timestamping_internal *tss)
{
	tss->ts[0] = skb->tstamp;
	tss->ts[2] = skb_hwtstamps(skb)->hwtstamp;
}

void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);

/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req,
				  void (*opt_child_init)(struct sock *newsk,
							 const struct sock *sk));
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
	TCP_SYNACK_RETRANS,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    struct sock *sk, struct sk_buff *skb,
					    struct tcp_options_received *tcp_opt,
					    int mss, u32 tsoff);

#if IS_ENABLED(CONFIG_BPF)
struct bpf_tcp_req_attrs {
	u32 rcv_tsval;
	u32 rcv_tsecr;
	u16 mss;
	u8 rcv_wscale;
	u8 snd_wscale;
	u8 ecn_ok;
	u8 wscale_ok;
	u8 sack_ok;
	u8 tstamp_ok;
	u8 usec_ts_ok;
	u8 reserved[3];
};
#endif

#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid for at most 2*60 seconds (or less, if the
 * counter advances immediately after the cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember the time of the last synqueue overflow,
 * but do not dirty this field too often (once per second is enough).
 * It is racy as we do not hold a lock, but the race is benign.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as the lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}
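
/* Worked example (illustrative): tcp_cookie_time() advances once per
 * TCP_SYNCOOKIE_PERIOD (60 seconds). With MAX_SYNCOOKIE_AGE == 2, a cookie
 * minted while the counter read N is accepted while the counter is N or N+1,
 * i.e. for at most 2 * 60 = 120 seconds, and possibly much less if the
 * counter ticks just after the cookie is generated.
 */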

/* Convert a 64-bit nsec timestamp to a TS value (ms or usec resolution) */
static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
{
	if (usec_ts)
		return div_u64(val, NSEC_PER_USEC);

	return div_u64(val, NSEC_PER_MSEC);
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);

static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
{
	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
		dst_feature(dst, RTAX_FEATURE_ECN);
}

#if IS_ENABLED(CONFIG_BPF)
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return skb->sk;
}

struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
#else
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return false;
}

static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
						    struct sk_buff *skb)
{
	return NULL;
}
#endif

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority,
			   enum sk_rst_reason reason);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void __tcp_check_space(struct sock *sk);
static inline void tcp_check_space(struct sock *sk)
{
	/* pairs with tcp_poll() */
	smp_mb();

	if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
		__tcp_check_space(sk);
}
void tcp_sack_compress_send_ack(struct sock *sk);

static inline void tcp_cleanup_skb(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
}

static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size by half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When the peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
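
/* Worked example (illustrative, hypothetical values; TCP_MSS_DEFAULT is 536
 * in mainline): with a peer max_window of 65535 (> TCP_MSS_DEFAULT),
 * cutoff = 65535 >> 1 = 32767, so a 64000-byte pktsize is clamped to 32767;
 * the max_t() guard only matters for pathologically small windows. With a
 * tiny max_window of 512 (<= TCP_MSS_DEFAULT), cutoff = 512 and a 1460-byte
 * pktsize is clamped to max(512, 68 - tcp_header_len) = 512.
 */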

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);
void tcp_rate_check_app_limited(struct sock *sk);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
			sk_read_actor_t recv_actor, bool noack,
			u32 *copied_seq);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline unsigned int tcp_rto_max(const struct sock *sk)
{
	return READ_ONCE(inet_csk(sk)->icsk_rto_max);
}

static inline void tcp_bound_rto(struct sock *sk)
{
	inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline unsigned long tcp_reqsk_timeout(struct request_sock *req)
{
	u64 timeout = (u64)req->timeout << req->num_timeout;

	return (unsigned long)min_t(u64, timeout,
				    tcp_rto_max(req->rsk_listener));
}
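
/* Worked example (illustrative): SYN-ACK retransmission backoff. With
 * req->timeout = TCP_TIMEOUT_INIT (1 * HZ) and num_timeout = 3, the next
 * timeout is (1 * HZ) << 3 = 8 * HZ, clamped to the listener's
 * tcp_rto_max() (120 seconds unless lowered on the socket).
 */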

u32 tcp_delack_max(const struct sock *sk);

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(const struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min);

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(const struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
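
/* Worked example (illustrative, hypothetical sequence numbers): with
 * rcv_wup = 1000, rcv_wnd = 500 and rcv_nxt = 1200, there are
 * 1000 + 500 - 1200 = 300 bytes left in the advertised window. Should a
 * misbehaving peer push past the window (say rcv_nxt = 1600), the signed
 * result is negative and is clamped to 0.
 */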

/* Compute the maximum receive window we ever advertised.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_max_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_mwnd_seq - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Check if we need to update the maximum receive window sequence number */
static inline void tcp_update_max_rcv_wnd_seq(struct tcp_sock *tp)
{
	u32 wre = tp->rcv_wup + tp->rcv_wnd;

	if (after(wre, tp->rcv_mwnd_seq))
		tp->rcv_mwnd_seq = wre;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32-bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * was historically the same until Linux 4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32-bit value for the TCP timestamp option (RFC 7323).
 * It is no longer tied to jiffies, but to a 1 ms clock.
 * Note: double-check whether you want tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

static inline u64 tcp_clock_ms(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
}

/* The TCP timestamp included in the TS option (RFC 1323) can use either ms
 * or usec resolution. Each socket carries a flag to select one or the other
 * resolution, as the route attribute could change anytime.
 * Each flow must stick to its initial resolution.
 */
static inline u32 tcp_clock_ts(bool usec_ts)
{
	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}
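
/* Illustrative sketch: both resolutions derive from the same nsec clock; at
 * the same instant a usec-resolution flow sees a TS value 1000x larger than
 * an ms-resolution flow (modulo u32 truncation). Hypothetical reading:
 *
 *	tcp_clock_ns();		// e.g. 5,000,000,000 ns
 *	tcp_clock_ts(true);	// ~5,000,000 (usec units)
 *	tcp_clock_ts(false);	// ~5,000 (msec units)
 */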

static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}

static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
	if (tp->tcp_usec_ts)
		return tp->tcp_mstamp;
	return tcp_time_stamp_ms(tp);
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

/* Provide the departure time in us units */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}

/* Provide the skb TSval in usec or ms units */
static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
{
	if (usec_ts)
		return tcp_skb_timestamp_us(skb);

	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
}

static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
{
	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
}

static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
{
	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN	BIT(0)
#define TCPHDR_SYN	BIT(1)
#define TCPHDR_RST	BIT(2)
#define TCPHDR_PSH	BIT(3)
#define TCPHDR_ACK	BIT(4)
#define TCPHDR_URG	BIT(5)
#define TCPHDR_ECE	BIT(6)
#define TCPHDR_CWR	BIT(7)
#define TCPHDR_AE	BIT(8)
#define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
			   TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \
			   TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \
			    TCPHDR_FLAGS_MASK)

#define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
#define TCPHDR_SYNACK_ACCECN (TCPHDR_SYN | TCPHDR_ACK | TCPHDR_CWR)

#define TCP_ACCECN_CEP_ACE_MASK 0x7
#define TCP_ACCECN_ACE_MAX_DELTA 6

/* To avoid/detect middlebox interference, not all counters start at 0.
 * See draft-ietf-tcpm-accurate-ecn for the latest values.
 */
#define TCP_ACCECN_CEP_INIT_OFFSET 5
#define TCP_ACCECN_E1B_INIT_OFFSET 1
#define TCP_ACCECN_E0B_INIT_OFFSET 1
#define TCP_ACCECN_CEB_INIT_OFFSET 0

/* State flags for sacked in struct tcp_skb_cb */
enum tcp_skb_cb_sacked_flags {
	TCPCB_SACKED_ACKED	= (1 << 0),	/* SKB ACK'd by a SACK block	*/
	TCPCB_SACKED_RETRANS	= (1 << 1),	/* SKB retransmitted		*/
	TCPCB_LOST		= (1 << 2),	/* SKB is lost			*/
	TCPCB_TAGBITS		= (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
				   TCPCB_LOST),	/* All tag bits			*/
	TCPCB_REPAIRED		= (1 << 4),	/* SKB repaired (no skb_mstamp_ns)	*/
	TCPCB_EVER_RETRANS	= (1 << 7),	/* Ever retransmitted frame	*/
	TCPCB_RETRANS		= (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
				   TCPCB_REPAIRED),
};

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note:
		 *	  tcp_gso_segs/size are used in the write queue only,
		 *	  cf. tcp_skb_pcount()/tcp_skb_mss()
		 */
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u16		tcp_flags;	/* TCP header flags (tcp[12-13])*/

	__u8		sacked;		/* State flags for SACK.	*/
	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
#define TSTAMP_ACK_SK	0x1
#define TSTAMP_ACK_BPF	0x2
	__u8		txstamp_ack:2,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:4;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
			/* There is space for up to 24 bytes */
			__u32 is_app_limited:1, /* cwnd not fully used? */
			      delivered_ce:20,
			      unused:11;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;   /* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[].
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* The TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));

#endif

/* The TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in the write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from) &&
		      skb_pure_zcopy_same(to, from) &&
		      skb_frags_readable(to) == skb_frags_readable(from));
}

static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
					   const struct sk_buff *from)
{
	return likely(mptcp_skb_can_collapse(to, from) &&
		      !skb_cmp_decrypted(to, from));
}

/* Events passed to the congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED		BIT(0)
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN		BIT(1)
/* Require successfully negotiated AccECN capability */
#define TCP_CONG_NEEDS_ACCECN		BIT(2)
/* Use ECT(1) instead of ECT(0) while the CA is uninitialized */
#define TCP_CONG_ECT_1_NEGOTIATION	BIT(3)
/* Cannot fall back to RFC3168 during AccECN negotiation */
#define TCP_CONG_NO_FALLBACK_RFC3168	BIT(4)
#define TCP_CONG_MASK  (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN | \
			TCP_CONG_NEEDS_ACCECN | TCP_CONG_ECT_1_NEGOTIATION | \
			TCP_CONG_NO_FALLBACK_RFC3168)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp; /* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32 snd_interval_us;	/* snd interval for delivered packets */
	u32 rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};

struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* A congestion control (CC) must provide one of either:
	 *
	 * (a) a cong_avoid function, if the CC wants to use the core TCP
	 *     stack's default functionality to implement a "classic"
	 *     (Reno/CUBIC-style) response to packet loss, RFC3168 ECN,
	 *     idle periods, pacing rate computations, etc.
	 *
	 * (b) a cong_control function, if the CC wants custom behavior and
	 *     complete control of all congestion control behaviors.
	 */
	/* (a) "classic" response: calculate new cwnd.
	 */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* (b) "custom" response: call when packets are delivered to update
	 * cwnd and pacing rate, after all the ca_state processing.
	 */
	void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when CA_EVENT_TX_START cwnd event occurs (optional) */
	void (*cwnd_event_tx_start)(struct sock *sk);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs (optional) */
	u32 (*min_tso_segs)(struct sock *sk);

	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
int tcp_update_congestion_control(struct tcp_congestion_ops *type,
				  struct tcp_congestion_ops *old_type);
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline bool tcp_ca_needs_accecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ACCECN;
}

static inline bool tcp_ca_ect_1_negotiation(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_ECT_1_NEGOTIATION;
}

static inline bool tcp_ca_no_fallback_rfc3168(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NO_FALLBACK_RFC3168;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (event == CA_EVENT_TX_START) {
		if (icsk->icsk_ca_ops->cwnd_event_tx_start)
			icsk->icsk_ca_ops->cwnd_event_tx_start(sk);
		return;
	}
	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);


static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* These functions determine how the current flow behaves with respect to SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control; use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
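
/* Worked example (illustrative, hypothetical counters): with
 * packets_out = 10, sacked_out = 2, lost_out = 1 and retrans_out = 1,
 * tcp_left_out() = 3 and tcp_packets_in_flight() = 10 - 3 + 1 = 8:
 * two segments were SACKed, one is presumed lost, and one retransmission
 * re-entered the pipe.
 */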

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
	WARN_ON_ONCE((int)val <= 0);
	tp->snd_cwnd = val;
}

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is the cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tcp_snd_cwnd(tp) >> 1) +
			    (tcp_snd_cwnd(tp) >> 2)));
}
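
/* Worked example (illustrative): outside of cwnd reduction, with cwnd = 16
 * the candidate ssthresh is (16 >> 1) + (16 >> 2) = 8 + 4 = 12, i.e. 3/4 of
 * cwnd, so tcp_current_ssthresh() returns max(tp->snd_ssthresh, 12).
 */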
1549 
1550 /* Use define here intentionally to get WARN_ON location shown at the caller */
1551 #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1552 
1553 void tcp_enter_cwr(struct sock *sk);
1554 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1555 
1556 /* The maximum number of MSS of available cwnd for which TSO defers
1557  * sending if not using sysctl_tcp_tso_win_divisor.
1558  */
1559 static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1560 {
1561 	return 3;
1562 }
1563 
1564 /* Returns end sequence number of the receiver's advertised window */
1565 static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1566 {
1567 	return tp->snd_una + tp->snd_wnd;
1568 }
1569 
1570 /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1571  * flexible approach. The RFC suggests cwnd should not be raised unless
1572  * it was fully used previously. And that's exactly what we do in
1573  * congestion avoidance mode. But in slow start we allow cwnd to grow
1574  * as long as the application has used half the cwnd.
1575  * Example:
1576  *    cwnd is 10 (IW10), but the application sends 9 frames.
1577  *    We allow cwnd to reach 18 when all frames are ACKed.
1578  * This check is safe because it's as aggressive as slow start, which already
1579  * risks 100% overshoot. The advantage is that we discourage applications
1580  * from sending filler packets or extra data to artificially inflate cwnd
1581  * usage, and allow application-limited processes to probe bw more aggressively.
1582  */
1583 static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1584 {
1585 	const struct tcp_sock *tp = tcp_sk(sk);
1586 
1587 	if (tp->is_cwnd_limited)
1588 		return true;
1589 
1590 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1591 	if (tcp_in_slow_start(tp))
1592 		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
1593 
1594 	return false;
1595 }
1596 
1597 /* BBR congestion control needs pacing.
1598  * The same applies to SO_MAX_PACING_RATE.
1599  * The sch_fq packet scheduler handles pacing efficiently,
1600  * but it is not always installed/used.
1601  * Return true if the TCP stack should pace packets itself.
1602  */
1603 static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1604 {
1605 	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1606 }
1607 
1608 /* Estimates in how many jiffies the next packet for this flow can be sent.
1609  * Scheduling a retransmit timer too early would be silly.
1610  */
1611 static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1612 {
1613 	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1614 
1615 	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1616 }
1617 
1618 static inline void tcp_reset_xmit_timer(struct sock *sk,
1619 					const int what,
1620 					unsigned long when,
1621 					bool pace_delay)
1622 {
1623 	if (pace_delay)
1624 		when += tcp_pacing_delay(sk);
1625 	inet_csk_reset_xmit_timer(sk, what, when,
1626 				  tcp_rto_max(sk));
1627 }
1628 
1629 /* Something is really bad: we could not queue an additional packet
1630  * because the qdisc is full, the receiver sent a zero window, or we are paced.
1631  * We do not want to add fuel to the fire or abort too early,
1632  * so make sure the timer we arm now fires at least 200ms in the future,
1633  * regardless of the current icsk_rto value (which could be as low as ~2ms).
1634  */
1635 static inline unsigned long tcp_probe0_base(const struct sock *sk)
1636 {
1637 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1638 }
1639 
1640 /* Variant of inet_csk_rto_backoff() used for zero window probes */
1641 static inline unsigned long tcp_probe0_when(const struct sock *sk,
1642 					    unsigned long max_when)
1643 {
1644 	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
1645 			   inet_csk(sk)->icsk_backoff);
1646 	u64 when = (u64)tcp_probe0_base(sk) << backoff;
1647 
1648 	return (unsigned long)min_t(u64, when, max_when);
1649 }
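
/* Worked example (illustrative): with icsk_rto ~2ms the base is clamped
 * to TCP_RTO_MIN (200ms); after icsk_backoff = 3 failed probes the timer
 * is armed at 200ms << 3 = 1.6s, capped at @max_when.
 */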
1650 
1651 static inline void tcp_check_probe_timer(struct sock *sk)
1652 {
1653 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1654 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1655 				     tcp_probe0_base(sk), true);
1656 }
1657 
1658 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1659 {
1660 	tp->snd_wl1 = seq;
1661 }
1662 
1663 static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1664 {
1665 	tp->snd_wl1 = seq;
1666 }
1667 
1668 /*
1669  * Calculate (or check) the TCP checksum
1670  */
1671 static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1672 				   __be32 daddr, __wsum base)
1673 {
1674 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1675 }
1676 
1677 static inline bool tcp_checksum_complete(struct sk_buff *skb)
1678 {
1679 	return !skb_csum_unnecessary(skb) &&
1680 		__skb_checksum_complete(skb);
1681 }
1682 
1683 enum skb_drop_reason tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
1684 
1685 static inline int tcp_filter(struct sock *sk, struct sk_buff *skb,
1686 			     enum skb_drop_reason *reason)
1687 {
1688 	const struct tcphdr *th = (const struct tcphdr *)skb->data;
1689 
1690 	return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th), reason);
1691 }
1692 
1693 void tcp_set_state(struct sock *sk, int state);
1694 void tcp_done(struct sock *sk);
1695 int tcp_abort(struct sock *sk, int err);
1696 
1697 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1698 {
1699 	rx_opt->dsack = 0;
1700 	rx_opt->num_sacks = 0;
1701 }
1702 
1703 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1704 
1705 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1706 {
1707 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1708 	struct tcp_sock *tp = tcp_sk(sk);
1709 	s32 delta;
1710 
1711 	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1712 	    tp->packets_out || ca_ops->cong_control)
1713 		return;
1714 	delta = tcp_jiffies32 - tp->lsndtime;
1715 	if (delta > inet_csk(sk)->icsk_rto)
1716 		tcp_cwnd_restart(sk, delta);
1717 }
1718 
1719 /* Determine a window scaling and initial window to offer. */
1720 void tcp_select_initial_window(const struct sock *sk, int __space,
1721 			       __u32 mss, __u32 *rcv_wnd,
1722 			       __u32 *window_clamp, int wscale_ok,
1723 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1724 
1725 static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
1726 {
1727 	s64 scaled_space = (s64)space * scaling_ratio;
1728 
1729 	return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
1730 }
1731 
1732 static inline int tcp_win_from_space(const struct sock *sk, int space)
1733 {
1734 	return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
1735 }
1736 
1737 /* inverse of __tcp_win_from_space() */
1738 static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
1739 {
1740 	u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
1741 
1742 	do_div(val, scaling_ratio);
1743 	return val;
1744 }
1745 
1746 static inline int tcp_space_from_win(const struct sock *sk, int win)
1747 {
1748 	return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
1749 }
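
/* Worked example (illustrative): with the default 50% scaling_ratio of
 * 1 << (TCP_RMEM_TO_WIN_SCALE - 1) (see below), a 128 KB sk_rcvbuf gives
 * __tcp_win_from_space() == 64 KB of advertisable window, and
 * __tcp_space_from_win() maps those 64 KB back to 128 KB of memory.
 */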
1750 
1751 /* Assume a 50% default for skb->len/skb->truesize ratio.
1752  * This may be adjusted later in tcp_measure_rcv_mss().
1753  */
1754 #define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))
1755 
1756 static inline void tcp_scaling_ratio_init(struct sock *sk)
1757 {
1758 	tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
1759 }
1760 
1761 /* Note: caller must be prepared to deal with negative returns */
1762 static inline int tcp_space(const struct sock *sk)
1763 {
1764 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1765 				  READ_ONCE(sk->sk_backlog.len) -
1766 				  atomic_read(&sk->sk_rmem_alloc));
1767 }
1768 
1769 static inline int tcp_full_space(const struct sock *sk)
1770 {
1771 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1772 }
1773 
1774 static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
1775 {
1776 	int unused_mem = sk_unused_reserved_mem(sk);
1777 	struct tcp_sock *tp = tcp_sk(sk);
1778 
1779 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
1780 	if (unused_mem)
1781 		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1782 					 tcp_win_from_space(sk, unused_mem));
1783 }
1784 
1785 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1786 {
1787 	__tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
1788 }
1789 
1790 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1791 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1792 
1793 
1794 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1795  * If 87.5% (7/8) of the space has been consumed, we want to override
1796  * the SO_RCVLOWAT constraint, since we are receiving skbs with a too
1797  * small len/truesize ratio.
1798  */
1799 static inline bool tcp_rmem_pressure(const struct sock *sk)
1800 {
1801 	int rcvbuf, threshold;
1802 
1803 	if (tcp_under_memory_pressure(sk))
1804 		return true;
1805 
1806 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1807 	threshold = rcvbuf - (rcvbuf >> 3);
1808 
1809 	return atomic_read(&sk->sk_rmem_alloc) > threshold;
1810 }
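
/* Worked example (illustrative): with sk_rcvbuf = 65536 the threshold
 * above is 65536 - (65536 >> 3) = 57344 bytes, so pressure is reported
 * once 7/8 of the receive buffer is consumed.
 */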
1811 
1812 static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1813 {
1814 	const struct tcp_sock *tp = tcp_sk(sk);
1815 	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1816 
1817 	if (avail <= 0)
1818 		return false;
1819 
1820 	return (avail >= target) || tcp_rmem_pressure(sk) ||
1821 	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1822 }
1823 
1824 extern void tcp_openreq_init_rwin(struct request_sock *req,
1825 				  const struct sock *sk_listener,
1826 				  const struct dst_entry *dst);
1827 
1828 void tcp_enter_memory_pressure(struct sock *sk);
1829 void tcp_leave_memory_pressure(struct sock *sk);
1830 
1831 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1832 {
1833 	struct net *net = sock_net((struct sock *)tp);
1834 	int val;
1835 
1836 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
1837 	 * and do_tcp_setsockopt().
1838 	 */
1839 	val = READ_ONCE(tp->keepalive_intvl);
1840 
1841 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1842 }
1843 
1844 static inline int keepalive_time_when(const struct tcp_sock *tp)
1845 {
1846 	struct net *net = sock_net((struct sock *)tp);
1847 	int val;
1848 
1849 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
1850 	val = READ_ONCE(tp->keepalive_time);
1851 
1852 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1853 }
1854 
1855 static inline int keepalive_probes(const struct tcp_sock *tp)
1856 {
1857 	struct net *net = sock_net((struct sock *)tp);
1858 	int val;
1859 
1860 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
1861 	 * and do_tcp_setsockopt().
1862 	 */
1863 	val = READ_ONCE(tp->keepalive_probes);
1864 
1865 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1866 }
1867 
1868 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1869 {
1870 	const struct inet_connection_sock *icsk = &tp->inet_conn;
1871 
1872 	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1873 			  tcp_jiffies32 - tp->rcv_tstamp);
1874 }
1875 
1876 static inline int tcp_fin_time(const struct sock *sk)
1877 {
1878 	int fin_timeout = tcp_sk(sk)->linger2 ? :
1879 		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1880 	const int rto = inet_csk(sk)->icsk_rto;
1881 
1882 	if (fin_timeout < (rto << 2) - (rto >> 1))
1883 		fin_timeout = (rto << 2) - (rto >> 1);
1884 
1885 	return fin_timeout;
1886 }
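
/* Note (illustrative arithmetic): (rto << 2) - (rto >> 1) equals
 * 4*rto - rto/2 = 3.5*rto, so the effective FIN timeout is never below
 * 3.5 RTOs; with rto = 200ms the floor is 700ms.
 */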
1887 
1888 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1889 				  int paws_win)
1890 {
1891 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1892 		return true;
1893 	if (unlikely(!time_before32(ktime_get_seconds(),
1894 				    rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
1895 		return true;
1896 	/*
1897 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, while
1898 	 * subsequent TCP segments carry valid values. Ignore the 0 value, or
1899 	 * else a 'negative' tsval might forbid us from accepting their packets.
1900 	 */
1901 	if (!rx_opt->ts_recent)
1902 		return true;
1903 	return false;
1904 }
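
/* Illustrative wraparound case for the s32 comparison above: with
 * ts_recent = 0xffffff00 and rcv_tsval = 0x10, the u32 difference is
 * 0xfffffef0, which as an s32 is negative, so the "newer" timestamp
 * passes the PAWS check even though the 32-bit counter has wrapped.
 */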
1905 
1906 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1907 				   int rst)
1908 {
1909 	if (tcp_paws_check(rx_opt, 0))
1910 		return false;
1911 
1912 	/* RST segments are not recommended to carry a timestamp,
1913 	   and, if they do, it is recommended to ignore PAWS because
1914 	   "their cleanup function should take precedence over timestamps."
1915 	   Certainly, that is a mistake. One must understand the reasons for
1916 	   this constraint before relaxing it: if the peer reboots, its clock
1917 	   may go out of sync and half-open connections will not be reset.
1918 	   Actually, the problem would not exist if all implementations
1919 	   followed the draft about maintaining the clock across reboots.
1920 	   Linux-2.2 DOES NOT!
1921 
1922 	   However, we can relax time bounds for RST segments to MSL.
1923 	 */
1924 	if (rst && !time_before32(ktime_get_seconds(),
1925 				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1926 		return false;
1927 	return true;
1928 }
1929 
1930 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
1931 {
1932 	u32 ace;
1933 
1934 	/* mptcp hooks are only on the slow path */
1935 	if (sk_is_mptcp((struct sock *)tp))
1936 		return;
1937 
1938 	ace = tcp_ecn_mode_accecn(tp) ?
1939 	      ((tp->delivered_ce + TCP_ACCECN_CEP_INIT_OFFSET) &
1940 	       TCP_ACCECN_CEP_ACE_MASK) : 0;
1941 
1942 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
1943 			       (ace << 22) |
1944 			       ntohl(TCP_FLAG_ACK) |
1945 			       snd_wnd);
1946 }
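
/* Illustrative breakdown of the host-order word assembled above:
 * tp->tcp_header_len << 26 lands in the data-offset position (doff =
 * header_len / 4 at bit 28), ace << 22 covers the AE/CWR/ECE bits,
 * TCP_FLAG_ACK sets the ACK bit and snd_wnd fills the low 16 bits.
 * The fast path compares pred_flags against the corresponding 32-bit
 * word of each incoming TCP header.
 */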
1947 
1948 static inline void tcp_fast_path_on(struct tcp_sock *tp)
1949 {
1950 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
1951 }
1952 
1953 static inline void tcp_fast_path_check(struct sock *sk)
1954 {
1955 	struct tcp_sock *tp = tcp_sk(sk);
1956 
1957 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
1958 	    tp->rcv_wnd &&
1959 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
1960 	    !tp->urg_data)
1961 		tcp_fast_path_on(tp);
1962 }
1963 
1964 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1965 			  int mib_idx, u32 *last_oow_ack_time);
1966 
1967 static inline void tcp_mib_init(struct net *net)
1968 {
1969 	/* See RFC 2012 */
1970 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1971 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1972 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1973 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1974 }
1975 
1976 /* from STCP */
1977 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1978 {
1979 	tp->retransmit_skb_hint = NULL;
1980 }
1981 
1982 #define tcp_md5_addr tcp_ao_addr
1983 
1984 /* - key database */
1985 struct tcp_md5sig_key {
1986 	struct hlist_node	node;
1987 	u8			keylen;
1988 	u8			family; /* AF_INET or AF_INET6 */
1989 	u8			prefixlen;
1990 	u8			flags;
1991 	union tcp_md5_addr	addr;
1992 	int			l3index; /* set if key added with L3 scope */
1993 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1994 	struct rcu_head		rcu;
1995 };
1996 
1997 /* - sock block */
1998 struct tcp_md5sig_info {
1999 	struct hlist_head	head;
2000 	struct rcu_head		rcu;
2001 };
2002 
2003 /* - pseudo header */
2004 struct tcp4_pseudohdr {
2005 	__be32		saddr;
2006 	__be32		daddr;
2007 	__u8		pad;
2008 	__u8		protocol;
2009 	__be16		len;
2010 };
2011 
2012 struct tcp6_pseudohdr {
2013 	struct in6_addr	saddr;
2014 	struct in6_addr daddr;
2015 	__be32		len;
2016 	__be32		protocol;	/* including padding */
2017 };
2018 
2019 /*
2020  * struct tcp_sigpool - per-CPU pool of ahash_requests
2021  * @scratch: per-CPU temporary area that can be used between
2022  *	     tcp_sigpool_start() and tcp_sigpool_end() to perform
2023  *	     a crypto request
2024  * @req: pre-allocated ahash request
2025  */
2026 struct tcp_sigpool {
2027 	void *scratch;
2028 	struct ahash_request *req;
2029 };
2030 
2031 int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
2032 void tcp_sigpool_get(unsigned int id);
2033 void tcp_sigpool_release(unsigned int id);
2034 int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
2035 			      const struct sk_buff *skb,
2036 			      unsigned int header_len);
2037 
2038 /**
2039  * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
2040  * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
2041  * @c: returned tcp_sigpool for usage (uninitialized on failure)
2042  *
2043  * Returns: 0 on success, error otherwise.
2044  */
2045 int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
2046 /**
2047  * tcp_sigpool_end - enable bh and stop using tcp_sigpool
2048  * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
2049  */
2050 void tcp_sigpool_end(struct tcp_sigpool *c);
2051 size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
2052 /* - functions */
2053 void tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
2054 			 const struct sock *sk, const struct sk_buff *skb);
2055 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
2056 		   int family, u8 prefixlen, int l3index, u8 flags,
2057 		   const u8 *newkey, u8 newkeylen);
2058 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
2059 		     int family, u8 prefixlen, int l3index,
2060 		     struct tcp_md5sig_key *key);
2061 
2062 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
2063 		   int family, u8 prefixlen, int l3index, u8 flags);
2064 void tcp_clear_md5_list(struct sock *sk);
2065 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
2066 					 const struct sock *addr_sk);
2067 
2068 #ifdef CONFIG_TCP_MD5SIG
2069 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
2070 					   const union tcp_md5_addr *addr,
2071 					   int family, bool any_l3index);
2072 static inline struct tcp_md5sig_key *
2073 tcp_md5_do_lookup(const struct sock *sk, int l3index,
2074 		  const union tcp_md5_addr *addr, int family)
2075 {
2076 	if (!static_branch_unlikely(&tcp_md5_needed.key))
2077 		return NULL;
2078 	return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
2079 }
2080 
2081 static inline struct tcp_md5sig_key *
2082 tcp_md5_do_lookup_any_l3index(const struct sock *sk,
2083 			      const union tcp_md5_addr *addr, int family)
2084 {
2085 	if (!static_branch_unlikely(&tcp_md5_needed.key))
2086 		return NULL;
2087 	return __tcp_md5_do_lookup(sk, 0, addr, family, true);
2088 }
2089 
2090 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
2091 void tcp_md5_destruct_sock(struct sock *sk);
2092 #else
2093 static inline struct tcp_md5sig_key *
2094 tcp_md5_do_lookup(const struct sock *sk, int l3index,
2095 		  const union tcp_md5_addr *addr, int family)
2096 {
2097 	return NULL;
2098 }
2099 
2100 static inline struct tcp_md5sig_key *
2101 tcp_md5_do_lookup_any_l3index(const struct sock *sk,
2102 			      const union tcp_md5_addr *addr, int family)
2103 {
2104 	return NULL;
2105 }
2106 
2107 #define tcp_twsk_md5_key(twsk)	NULL
2108 static inline void tcp_md5_destruct_sock(struct sock *sk)
2109 {
2110 }
2111 #endif
2112 
2113 struct md5_ctx;
2114 void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb,
2115 			   unsigned int header_len);
2116 void tcp_md5_hash_key(struct md5_ctx *ctx, const struct tcp_md5sig_key *key);
2117 
2118 /* From tcp_fastopen.c */
2119 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
2120 			    struct tcp_fastopen_cookie *cookie);
2121 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
2122 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
2123 			    u16 try_exp);
2124 struct tcp_fastopen_request {
2125 	/* Fast Open cookie. Size 0 means a cookie request */
2126 	struct tcp_fastopen_cookie	cookie;
2127 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
2128 	size_t				size;
2129 	int				copied;	/* queued in tcp_connect() */
2130 	struct ubuf_info		*uarg;
2131 };
2132 void tcp_free_fastopen_req(struct tcp_sock *tp);
2133 void tcp_fastopen_destroy_cipher(struct sock *sk);
2134 void tcp_fastopen_ctx_destroy(struct net *net);
2135 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
2136 			      void *primary_key, void *backup_key);
2137 int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
2138 			    u64 *key);
2139 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
2140 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
2141 			      struct request_sock *req,
2142 			      struct tcp_fastopen_cookie *foc,
2143 			      const struct dst_entry *dst);
2144 void tcp_fastopen_init_key_once(struct net *net);
2145 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
2146 			     struct tcp_fastopen_cookie *cookie);
2147 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
2148 #define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
2149 #define TCP_FASTOPEN_KEY_MAX 2
2150 #define TCP_FASTOPEN_KEY_BUF_LENGTH \
2151 	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
2152 
2153 /* Fastopen key context */
2154 struct tcp_fastopen_context {
2155 	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
2156 	int		num;
2157 	struct rcu_head	rcu;
2158 };
2159 
2160 void tcp_fastopen_active_disable(struct sock *sk);
2161 bool tcp_fastopen_active_should_disable(struct sock *sk);
2162 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
2163 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
2164 
2165 /* Caller needs to wrap with rcu_read_(un)lock() */
2166 static inline
2167 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
2168 {
2169 	struct tcp_fastopen_context *ctx;
2170 
2171 	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
2172 	if (!ctx)
2173 		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
2174 	return ctx;
2175 }
2176 
2177 static inline
2178 bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
2179 			       const struct tcp_fastopen_cookie *orig)
2180 {
2181 	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
2182 	    orig->len == foc->len &&
2183 	    !memcmp(orig->val, foc->val, foc->len))
2184 		return true;
2185 	return false;
2186 }
2187 
2188 static inline
2189 int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
2190 {
2191 	return ctx->num;
2192 }
2193 
2194 /* Latencies incurred by various limits for a sender. They are
2195  * chronograph-like stats that are mutually exclusive.
2196  */
2197 enum tcp_chrono {
2198 	TCP_CHRONO_UNSPEC,
2199 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
2200 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
2201 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
2202 	__TCP_CHRONO_MAX,
2203 };
2204 
2205 static inline void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
2206 {
2207 	const u32 now = tcp_jiffies32;
2208 	enum tcp_chrono old = tp->chrono_type;
2209 
2210 	if (old > TCP_CHRONO_UNSPEC)
2211 		tp->chrono_stat[old - 1] += now - tp->chrono_start;
2212 	tp->chrono_start = now;
2213 	tp->chrono_type = new;
2214 }
2215 
2216 static inline void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
2217 {
2218 	struct tcp_sock *tp = tcp_sk(sk);
2219 
2220 	/* If there are multiple conditions worthy of tracking in a
2221 	 * chronograph, the highest-priority enum takes precedence over the
2222 	 * other conditions: if something "more interesting" starts
2223 	 * happening, stop the previous chrono and start a new one.
2224 	 */
2225 	if (type > tp->chrono_type)
2226 		tcp_chrono_set(tp, type);
2227 }
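
/* Illustrative sequence: queueing data starts TCP_CHRONO_BUSY; if the
 * peer's receive window then stalls the sender, tcp_chrono_start(sk,
 * TCP_CHRONO_RWND_LIMITED) takes over because the higher-priority type
 * wins, until tcp_chrono_stop() (declared below) ends that period.
 */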
2228 
2229 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
2230 
2231 /* This helper is needed, because skb->tcp_tsorted_anchor uses
2232  * the same memory storage as skb->destructor/_skb_refdst
2233  */
2234 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
2235 {
2236 	skb->destructor = NULL;
2237 	skb->_skb_refdst = 0UL;
2238 }
2239 
2240 #define tcp_skb_tsorted_save(skb) {		\
2241 	unsigned long _save = skb->_skb_refdst;	\
2242 	skb->_skb_refdst = 0UL;
2243 
2244 #define tcp_skb_tsorted_restore(skb)		\
2245 	skb->_skb_refdst = _save;		\
2246 }
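
/* Sketch of the intended pairing of the two macros above (illustrative;
 * note that together they open and close a C block):
 *
 *	tcp_skb_tsorted_save(skb) {
 *		err = some_operation(sk, skb);
 *	} tcp_skb_tsorted_restore(skb);
 *
 * skb->_skb_refdst is zeroed inside the block so code running there
 * cannot mistake the tsorted anchor, which shares this storage, for a
 * valid dst reference. "some_operation" is a placeholder.
 */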
2247 
2248 void tcp_write_queue_purge(struct sock *sk);
2249 
2250 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
2251 {
2252 	return skb_rb_first(&sk->tcp_rtx_queue);
2253 }
2254 
2255 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
2256 {
2257 	return skb_rb_last(&sk->tcp_rtx_queue);
2258 }
2259 
2260 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
2261 {
2262 	return skb_peek_tail(&sk->sk_write_queue);
2263 }
2264 
2265 #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
2266 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
2267 
2268 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
2269 {
2270 	return skb_peek(&sk->sk_write_queue);
2271 }
2272 
2273 static inline bool tcp_skb_is_last(const struct sock *sk,
2274 				   const struct sk_buff *skb)
2275 {
2276 	return skb_queue_is_last(&sk->sk_write_queue, skb);
2277 }
2278 
2279 /**
2280  * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
2281  * @sk: socket
2282  *
2283  * Since the write queue can have a temporary empty skb in it,
2284  * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
2285  */
2286 static inline bool tcp_write_queue_empty(const struct sock *sk)
2287 {
2288 	const struct tcp_sock *tp = tcp_sk(sk);
2289 
2290 	return tp->write_seq == tp->snd_nxt;
2291 }
2292 
2293 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
2294 {
2295 	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
2296 }
2297 
2298 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
2299 {
2300 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
2301 }
2302 
2303 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
2304 {
2305 	__skb_queue_tail(&sk->sk_write_queue, skb);
2306 
2307 	/* Queue it, remembering where we must start sending. */
2308 	if (sk->sk_write_queue.next == skb)
2309 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
2310 }
2311 
2312 /* Insert new before skb on the write queue of sk.  */
2313 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
2314 						  struct sk_buff *skb,
2315 						  struct sock *sk)
2316 {
2317 	__skb_queue_before(&sk->sk_write_queue, skb, new);
2318 }
2319 
2320 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
2321 {
2322 	tcp_skb_tsorted_anchor_cleanup(skb);
2323 	__skb_unlink(skb, &sk->sk_write_queue);
2324 }
2325 
2326 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
2327 
2328 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
2329 {
2330 	tcp_skb_tsorted_anchor_cleanup(skb);
2331 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
2332 }
2333 
2334 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
2335 {
2336 	list_del(&skb->tcp_tsorted_anchor);
2337 	tcp_rtx_queue_unlink(skb, sk);
2338 	tcp_wmem_free_skb(sk, skb);
2339 }
2340 
2341 static inline void tcp_write_collapse_fence(struct sock *sk)
2342 {
2343 	struct sk_buff *skb = tcp_write_queue_tail(sk);
2344 
2345 	if (skb)
2346 		TCP_SKB_CB(skb)->eor = 1;
2347 }
2348 
2349 static inline void tcp_push_pending_frames(struct sock *sk)
2350 {
2351 	if (tcp_send_head(sk)) {
2352 		struct tcp_sock *tp = tcp_sk(sk);
2353 
2354 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
2355 	}
2356 }
2357 
2358 /* Start sequence of the skb just after the highest skb with the SACKed
2359  * bit set; valid only if sacked_out > 0 or if the caller has ensured
2360  * validity by other means.
2361  */
2362 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
2363 {
2364 	if (!tp->sacked_out)
2365 		return tp->snd_una;
2366 
2367 	if (tp->highest_sack == NULL)
2368 		return tp->snd_nxt;
2369 
2370 	return TCP_SKB_CB(tp->highest_sack)->seq;
2371 }
2372 
2373 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
2374 {
2375 	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
2376 }
2377 
2378 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
2379 {
2380 	return tcp_sk(sk)->highest_sack;
2381 }
2382 
2383 static inline void tcp_highest_sack_reset(struct sock *sk)
2384 {
2385 	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
2386 }
2387 
2388 /* Called when old skb is about to be deleted and replaced by new skb */
2389 static inline void tcp_highest_sack_replace(struct sock *sk,
2390 					    struct sk_buff *old,
2391 					    struct sk_buff *new)
2392 {
2393 	if (old == tcp_highest_sack(sk))
2394 		tcp_sk(sk)->highest_sack = new;
2395 }
2396 
2397 /* This helper checks if socket has IP_TRANSPARENT set */
2398 static inline bool inet_sk_transparent(const struct sock *sk)
2399 {
2400 	switch (sk->sk_state) {
2401 	case TCP_TIME_WAIT:
2402 		return inet_twsk(sk)->tw_transparent;
2403 	case TCP_NEW_SYN_RECV:
2404 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
2405 	}
2406 	return inet_test_bit(TRANSPARENT, sk);
2407 }
2408 
2409 /* Determines whether this is a thin stream (which may suffer from
2410  * increased latency). Used to trigger latency-reducing mechanisms.
2411  */
2412 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2413 {
2414 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2415 }
2416 
2417 /* /proc */
2418 enum tcp_seq_states {
2419 	TCP_SEQ_STATE_LISTENING,
2420 	TCP_SEQ_STATE_ESTABLISHED,
2421 };
2422 
2423 void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2424 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2425 void tcp_seq_stop(struct seq_file *seq, void *v);
2426 
2427 struct tcp_seq_afinfo {
2428 	sa_family_t			family;
2429 };
2430 
2431 struct tcp_iter_state {
2432 	struct seq_net_private	p;
2433 	enum tcp_seq_states	state;
2434 	struct sock		*syn_wait_sk;
2435 	int			bucket, offset, sbucket, num;
2436 	loff_t			last_pos;
2437 };
2438 
2439 extern struct request_sock_ops tcp_request_sock_ops;
2440 extern struct request_sock_ops tcp6_request_sock_ops;
2441 
2442 void tcp_v4_destroy_sock(struct sock *sk);
2443 
2444 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2445 				netdev_features_t features);
2446 struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
2447 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
2448 				struct tcphdr *th);
2449 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2450 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2451 #ifdef CONFIG_INET
2452 void tcp_gro_complete(struct sk_buff *skb);
2453 #else
2454 static inline void tcp_gro_complete(struct sk_buff *skb) { }
2455 #endif
2456 
2457 static inline void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
2458 				       __be32 daddr)
2459 {
2460 	struct tcphdr *th = tcp_hdr(skb);
2461 
2462 	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
2463 	skb->csum_start = skb_transport_header(skb) - skb->head;
2464 	skb->csum_offset = offsetof(struct tcphdr, check);
2465 }
2466 
2467 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2468 {
2469 	struct net *net = sock_net((struct sock *)tp);
2470 	u32 val;
2471 
2472 	val = READ_ONCE(tp->notsent_lowat);
2473 
2474 	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2475 }
2476 
2477 bool tcp_stream_memory_free(const struct sock *sk, int wake);
2478 
2479 #ifdef CONFIG_PROC_FS
2480 int tcp4_proc_init(void);
2481 void tcp4_proc_exit(void);
2482 #endif
2483 
2484 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2485 int tcp_conn_request(struct request_sock_ops *rsk_ops,
2486 		     const struct tcp_request_sock_ops *af_ops,
2487 		     struct sock *sk, struct sk_buff *skb);
2488 
2489 /* TCP af-specific functions */
2490 struct tcp_sock_af_ops {
2491 #ifdef CONFIG_TCP_MD5SIG
2492 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
2493 						const struct sock *addr_sk);
2494 	void		(*calc_md5_hash)(char *location,
2495 					 const struct tcp_md5sig_key *md5,
2496 					 const struct sock *sk,
2497 					 const struct sk_buff *skb);
2498 	int		(*md5_parse)(struct sock *sk,
2499 				     int optname,
2500 				     sockptr_t optval,
2501 				     int optlen);
2502 #endif
2503 #ifdef CONFIG_TCP_AO
2504 	int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
2505 	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2506 					struct sock *addr_sk,
2507 					int sndid, int rcvid);
2508 	int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
2509 			      const struct sock *sk,
2510 			      __be32 sisn, __be32 disn, bool send);
2511 	int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
2512 			    const struct sock *sk, const struct sk_buff *skb,
2513 			    const u8 *tkey, int hash_offset, u32 sne);
2514 #endif
2515 };
2516 
2517 struct tcp_request_sock_ops {
2518 	u16 mss_clamp;
2519 #ifdef CONFIG_TCP_MD5SIG
2520 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2521 						 const struct sock *addr_sk);
2522 	void		(*calc_md5_hash) (char *location,
2523 					  const struct tcp_md5sig_key *md5,
2524 					  const struct sock *sk,
2525 					  const struct sk_buff *skb);
2526 #endif
2527 #ifdef CONFIG_TCP_AO
2528 	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2529 					struct request_sock *req,
2530 					int sndid, int rcvid);
2531 	int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
2532 	int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
2533 			      struct request_sock *req, const struct sk_buff *skb,
2534 			      int hash_offset, u32 sne);
2535 #endif
2536 #ifdef CONFIG_SYN_COOKIES
2537 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
2538 				 __u16 *mss);
2539 #endif
2540 	struct dst_entry *(*route_req)(const struct sock *sk,
2541 				       struct sk_buff *skb,
2542 				       struct flowi *fl,
2543 				       struct request_sock *req,
2544 				       u32 tw_isn);
2545 	union tcp_seq_and_ts_off (*init_seq_and_ts_off)(
2546 					const struct net *net,
2547 					const struct sk_buff *skb);
2548 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2549 			   struct flowi *fl, struct request_sock *req,
2550 			   struct tcp_fastopen_cookie *foc,
2551 			   enum tcp_synack_type synack_type,
2552 			   struct sk_buff *syn_skb);
2553 };
2554 
2555 extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2556 #if IS_ENABLED(CONFIG_IPV6)
2557 extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2558 #endif
2559 
2560 #ifdef CONFIG_SYN_COOKIES
2561 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2562 					 const struct sock *sk, struct sk_buff *skb,
2563 					 __u16 *mss)
2564 {
2565 	tcp_synq_overflow(sk);
2566 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2567 	return ops->cookie_init_seq(skb, mss);
2568 }
2569 #else
2570 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2571 					 const struct sock *sk, struct sk_buff *skb,
2572 					 __u16 *mss)
2573 {
2574 	return 0;
2575 }
2576 #endif
2577 
2578 struct tcp_key {
2579 	union {
2580 		struct {
2581 			struct tcp_ao_key *ao_key;
2582 			char *traffic_key;
2583 			u32 sne;
2584 			u8 rcv_next;
2585 		};
2586 		struct tcp_md5sig_key *md5_key;
2587 	};
2588 	enum {
2589 		TCP_KEY_NONE = 0,
2590 		TCP_KEY_MD5,
2591 		TCP_KEY_AO,
2592 	} type;
2593 };
2594 
2595 static inline void tcp_get_current_key(const struct sock *sk,
2596 				       struct tcp_key *out)
2597 {
2598 #if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
2599 	const struct tcp_sock *tp = tcp_sk(sk);
2600 #endif
2601 
2602 #ifdef CONFIG_TCP_AO
2603 	if (static_branch_unlikely(&tcp_ao_needed.key)) {
2604 		struct tcp_ao_info *ao;
2605 
2606 		ao = rcu_dereference_protected(tp->ao_info,
2607 					       lockdep_sock_is_held(sk));
2608 		if (ao) {
2609 			out->ao_key = READ_ONCE(ao->current_key);
2610 			out->type = TCP_KEY_AO;
2611 			return;
2612 		}
2613 	}
2614 #endif
2615 #ifdef CONFIG_TCP_MD5SIG
2616 	if (static_branch_unlikely(&tcp_md5_needed.key) &&
2617 	    rcu_access_pointer(tp->md5sig_info)) {
2618 		out->md5_key = tp->af_specific->md5_lookup(sk, sk);
2619 		if (out->md5_key) {
2620 			out->type = TCP_KEY_MD5;
2621 			return;
2622 		}
2623 	}
2624 #endif
2625 	out->type = TCP_KEY_NONE;
2626 }
2627 
2628 static inline bool tcp_key_is_md5(const struct tcp_key *key)
2629 {
2630 	if (static_branch_tcp_md5())
2631 		return key->type == TCP_KEY_MD5;
2632 	return false;
2633 }
2634 
2635 static inline bool tcp_key_is_ao(const struct tcp_key *key)
2636 {
2637 	if (static_branch_tcp_ao())
2638 		return key->type == TCP_KEY_AO;
2639 	return false;
2640 }
2641 
2642 int tcpv4_offload_init(void);
2643 
2644 void tcp_v4_init(void);
2645 void tcp_init(void);
2646 
2647 /* tcp_recovery.c */
2648 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2649 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2650 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2651 				u32 reo_wnd);
2652 extern bool tcp_rack_mark_lost(struct sock *sk);
2653 extern void tcp_rack_reo_timeout(struct sock *sk);
2654 
2655 /* tcp_plb.c */
2656 
2657 /*
2658  * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
2659  * expects cong_ratio, which represents the fraction of traffic that
2660  * experienced congestion over a single RTT. To avoid floating point
2661  * operations, scale this fraction by (1 << TCP_PLB_SCALE) before passing it in.
2662  */
2663 #define TCP_PLB_SCALE 8
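
/* Worked example (illustrative): with TCP_PLB_SCALE = 8, a round in
 * which 25% of traffic saw congestion is passed as
 * cong_ratio = (1 << 8) / 4 = 64.
 */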
2664 
2665 /* State for PLB (Protective Load Balancing) for a single TCP connection. */
2666 struct tcp_plb_state {
2667 	u8	consec_cong_rounds:5, /* consecutive congested rounds */
2668 		unused:3;
2669 	u32	pause_until; /* jiffies32 when PLB can resume rerouting */
2670 };
2671 
2672 static inline void tcp_plb_init(const struct sock *sk,
2673 				struct tcp_plb_state *plb)
2674 {
2675 	plb->consec_cong_rounds = 0;
2676 	plb->pause_until = 0;
2677 }
2678 void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2679 			  const int cong_ratio);
2680 void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2681 void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2682 
2683 static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
2684 {
2685 	WARN_ONCE(cond,
2686 		  "%scwnd:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
2687 		  str,
2688 		  tcp_snd_cwnd(tcp_sk(sk)),
2689 		  tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
2690 		  tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
2691 		  tcp_sk(sk)->tlp_high_seq, sk->sk_state,
2692 		  inet_csk(sk)->icsk_ca_state,
2693 		  tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
2694 		  inet_csk(sk)->icsk_pmtu_cookie);
2695 }
2696 
2697 /* At how many usecs into the future should the RTO fire? */
2698 static inline s64 tcp_rto_delta_us(const struct sock *sk)
2699 {
2700 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2701 	u32 rto = inet_csk(sk)->icsk_rto;
2702 
2703 	if (likely(skb)) {
2704 		u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2705 
2706 		return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2707 	} else {
2708 		tcp_warn_once(sk, 1, "rtx queue empty: ");
2709 		return jiffies_to_usecs(rto);
2710 	}
2711 
2712 }
2713 
2714 /*
2715  * Save and compile IPv4 options and return a pointer to them
2716  */
2717 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2718 							 struct sk_buff *skb)
2719 {
2720 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2721 	struct ip_options_rcu *dopt = NULL;
2722 
2723 	if (opt->optlen) {
2724 		int opt_size = sizeof(*dopt) + opt->optlen;
2725 
2726 		dopt = kmalloc(opt_size, GFP_ATOMIC);
2727 		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2728 			kfree(dopt);
2729 			dopt = NULL;
2730 		}
2731 	}
2732 	return dopt;
2733 }
2734 
2735 /* Locally generated TCP pure ACKs have skb->truesize == 2
2736  * (check tcp_send_ack() in net/ipv4/tcp_output.c).
2737  * Testing truesize is much faster than dissecting the packet
2738  * (think of GRE encapsulations, IPv4, IPv6, ...).
2739  */
2740 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2741 {
2742 	return skb->truesize == 2;
2743 }
2744 
2745 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2746 {
2747 	skb->truesize = 2;
2748 }
2749 
2750 static inline int tcp_inq(struct sock *sk)
2751 {
2752 	struct tcp_sock *tp = tcp_sk(sk);
2753 	int answ;
2754 
2755 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2756 		answ = 0;
2757 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
2758 		   !tp->urg_data ||
2759 		   before(tp->urg_seq, tp->copied_seq) ||
2760 		   !before(tp->urg_seq, tp->rcv_nxt)) {
2761 
2762 		answ = tp->rcv_nxt - tp->copied_seq;
2763 
2764 		/* Subtract 1, if FIN was received */
2765 		if (answ && sock_flag(sk, SOCK_DONE))
2766 			answ--;
2767 	} else {
2768 		answ = tp->urg_seq - tp->copied_seq;
2769 	}
2770 
2771 	return answ;
2772 }
2773 
2774 int tcp_peek_len(struct socket *sock);
2775 
2776 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2777 {
2778 	u16 segs_in;
2779 
2780 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2781 
2782 	/* We update these fields while other threads might
2783 	 * read them from tcp_get_info()
2784 	 */
2785 	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
2786 	if (skb->len > tcp_hdrlen(skb))
2787 		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
2788 }
2789 
2790 /*
2791  * The TCP listen path runs lockless.
2792  * We forced "struct sock" to be const qualified to make sure
2793  * we don't modify one of its fields by mistake.
2794  * Here, we increment sk_drops, which is an atomic_t, so we can safely
2795  * make the sock writable again.
2796  */
2797 static inline void tcp_listendrop(const struct sock *sk)
2798 {
2799 	sk_drops_inc((struct sock *)sk);
2800 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2801 }
2802 
2803 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2804 
2805 /*
2806  * Interface for adding Upper Level Protocols over TCP
2807  */
2808 
2809 #define TCP_ULP_NAME_MAX	16
2810 #define TCP_ULP_MAX		128
2811 #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2812 
2813 struct tcp_ulp_ops {
2814 	struct list_head	list;
2815 
2816 	/* initialize ulp */
2817 	int (*init)(struct sock *sk);
2818 	/* update ulp */
2819 	void (*update)(struct sock *sk, struct proto *p,
2820 		       void (*write_space)(struct sock *sk));
2821 	/* cleanup ulp */
2822 	void (*release)(struct sock *sk);
2823 	/* diagnostic */
2824 	int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin);
2825 	size_t (*get_info_size)(const struct sock *sk, bool net_admin);
2826 	/* clone ulp */
2827 	void (*clone)(const struct request_sock *req, struct sock *newsk,
2828 		      const gfp_t priority);
2829 
2830 	char		name[TCP_ULP_NAME_MAX];
2831 	struct module	*owner;
2832 };
2833 int tcp_register_ulp(struct tcp_ulp_ops *type);
2834 void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2835 int tcp_set_ulp(struct sock *sk, const char *name);
2836 void tcp_get_available_ulp(char *buf, size_t len);
2837 void tcp_cleanup_ulp(struct sock *sk);
2838 void tcp_update_ulp(struct sock *sk, struct proto *p,
2839 		    void (*write_space)(struct sock *sk));
2840 
2841 #define MODULE_ALIAS_TCP_ULP(name)				\
2842 	MODULE_INFO(alias, name);		\
2843 	MODULE_INFO(alias, "tcp-ulp-" name)
2844 
2845 #ifdef CONFIG_NET_SOCK_MSG
2846 struct sk_msg;
2847 struct sk_psock;
2848 
2849 #ifdef CONFIG_BPF_SYSCALL
2850 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2851 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2852 #ifdef CONFIG_BPF_STREAM_PARSER
2853 struct strparser;
2854 int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
2855 			   sk_read_actor_t recv_actor);
2856 #endif /* CONFIG_BPF_STREAM_PARSER */
2857 #endif /* CONFIG_BPF_SYSCALL */
2858 
2859 #ifdef CONFIG_INET
2860 void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2861 #else
2862 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
2863 {
2864 }
2865 #endif
2866 
2867 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2868 			  struct sk_msg *msg, u32 bytes, int flags);
2869 #endif /* CONFIG_NET_SOCK_MSG */
2870 
2871 #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
2872 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2873 {
2874 }
2875 #endif
2876 
2877 #ifdef CONFIG_CGROUP_BPF
2878 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2879 				      struct sk_buff *skb,
2880 				      unsigned int end_offset)
2881 {
2882 	skops->skb = skb;
2883 	skops->skb_data_end = skb->data + end_offset;
2884 }
2885 #else
2886 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2887 				      struct sk_buff *skb,
2888 				      unsigned int end_offset)
2889 {
2890 }
2891 #endif
2892 
2893 /* Call a BPF_SOCK_OPS program that returns an int. If the return value
2894  * is < 0, then the BPF op failed (for example if the loaded BPF
2895  * program does not support the chosen operation or there is no BPF
2896  * program loaded).
2897  */
2898 #ifdef CONFIG_BPF
2899 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2900 {
2901 	struct bpf_sock_ops_kern sock_ops;
2902 	int ret;
2903 
2904 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2905 	if (sk_fullsock(sk)) {
2906 		sock_ops.is_fullsock = 1;
2907 		sock_ops.is_locked_tcp_sock = 1;
2908 		sock_owned_by_me(sk);
2909 	}
2910 
2911 	sock_ops.sk = sk;
2912 	sock_ops.op = op;
2913 	if (nargs > 0)
2914 		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2915 
2916 	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2917 	if (ret == 0)
2918 		ret = sock_ops.reply;
2919 	else
2920 		ret = -1;
2921 	return ret;
2922 }
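
/* Example (illustrative): query a timeout hint with no extra args and
 * fall back when no program answered:
 *
 *	int t = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
 *	if (t <= 0)
 *		t = TCP_TIMEOUT_INIT;
 *
 * tcp_timeout_init() below wraps this exact pattern.
 */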
2923 
2924 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2925 {
2926 	u32 args[2] = {arg1, arg2};
2927 
2928 	return tcp_call_bpf(sk, op, 2, args);
2929 }
2930 
2931 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2932 				    u32 arg3)
2933 {
2934 	u32 args[3] = {arg1, arg2, arg3};
2935 
2936 	return tcp_call_bpf(sk, op, 3, args);
2937 }
2938 
2939 #else
2940 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2941 {
2942 	return -EPERM;
2943 }
2944 
2945 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2946 {
2947 	return -EPERM;
2948 }
2949 
2950 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2951 				    u32 arg3)
2952 {
2953 	return -EPERM;
2954 }
2955 
2956 #endif
2957 
2958 static inline u32 tcp_timeout_init(struct sock *sk)
2959 {
2960 	int timeout;
2961 
2962 	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2963 
2964 	if (timeout <= 0)
2965 		timeout = TCP_TIMEOUT_INIT;
2966 	return min_t(int, timeout, TCP_RTO_MAX);
2967 }
2968 
2969 static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2970 {
2971 	int rwnd;
2972 
2973 	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2974 
2975 	if (rwnd < 0)
2976 		rwnd = 0;
2977 	return rwnd;
2978 }
2979 
2980 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2981 {
2982 	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2983 }
2984 
2985 static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
2986 {
2987 	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2988 		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
2989 }
2990 
2991 #if IS_ENABLED(CONFIG_SMC)
2992 extern struct static_key_false tcp_have_smc;
2993 #endif
2994 
2995 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2996 void clean_acked_data_enable(struct tcp_sock *tp,
2997 			     void (*cad)(struct sock *sk, u32 ack_seq));
2998 void clean_acked_data_disable(struct tcp_sock *tp);
2999 void clean_acked_data_flush(void);
3000 #endif
3001 
3002 DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
3003 static inline void tcp_add_tx_delay(struct sk_buff *skb,
3004 				    const struct tcp_sock *tp)
3005 {
3006 	if (static_branch_unlikely(&tcp_tx_delay_enabled))
3007 		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
3008 }
3009 
3010 /* Compute Earliest Departure Time for some control packets
3011  * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
3012  */
3013 static inline u64 tcp_transmit_time(const struct sock *sk)
3014 {
3015 	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
3016 		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
3017 			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
3018 
3019 		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
3020 	}
3021 	return 0;
3022 }
3023 
3024 static inline int tcp_parse_auth_options(const struct tcphdr *th,
3025 		const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
3026 {
3027 	const u8 *md5_tmp, *ao_tmp;
3028 	int ret;
3029 
3030 	ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
3031 	if (ret)
3032 		return ret;
3033 
3034 	if (md5_hash)
3035 		*md5_hash = md5_tmp;
3036 
3037 	if (aoh) {
3038 		if (!ao_tmp)
3039 			*aoh = NULL;
3040 		else
3041 			*aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
3042 	}
3043 
3044 	return 0;
3045 }
3046 
3047 static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
3048 				   int family, int l3index, bool stat_inc)
3049 {
3050 #ifdef CONFIG_TCP_AO
3051 	struct tcp_ao_info *ao_info;
3052 	struct tcp_ao_key *ao_key;
3053 
3054 	if (!static_branch_unlikely(&tcp_ao_needed.key))
3055 		return false;
3056 
3057 	ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
3058 					lockdep_sock_is_held(sk));
3059 	if (!ao_info)
3060 		return false;
3061 
3062 	ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
3063 	if (ao_info->ao_required || ao_key) {
3064 		if (stat_inc) {
3065 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
3066 			atomic64_inc(&ao_info->counters.ao_required);
3067 		}
3068 		return true;
3069 	}
3070 #endif
3071 	return false;
3072 }
3073 
3074 enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
3075 		const struct request_sock *req, const struct sk_buff *skb,
3076 		const void *saddr, const void *daddr,
3077 		int family, int dif, int sdif);
3078 
3079 static inline int tcp_recv_should_stop(struct sock *sk)
3080 {
3081 	return sk->sk_err ||
3082 	       sk->sk_state == TCP_CLOSE ||
3083 	       (sk->sk_shutdown & RCV_SHUTDOWN) ||
3084 	       signal_pending(current);
3085 }
3086 
3087 #endif	/* _TCP_H */
3088