xref: /linux/include/net/tcp.h (revision 2ed4b46b4fc77749cb0f8dd31a01441b82c8dbaa)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * INET		An implementation of the TCP/IP protocol suite for the LINUX
4  *		operating system.  INET is implemented using the  BSD Socket
5  *		interface as the means of communication with the user level.
6  *
7  *		Definitions for the TCP module.
8  *
9  * Version:	@(#)tcp.h	1.0.5	05/23/93
10  *
11  * Authors:	Ross Biro
12  *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13  */
14 #ifndef _TCP_H
15 #define _TCP_H
16 
17 #define FASTRETRANS_DEBUG 1
18 
19 #include <linux/list.h>
20 #include <linux/tcp.h>
21 #include <linux/bug.h>
22 #include <linux/slab.h>
23 #include <linux/cache.h>
24 #include <linux/percpu.h>
25 #include <linux/skbuff.h>
26 #include <linux/kref.h>
27 #include <linux/ktime.h>
28 #include <linux/indirect_call_wrapper.h>
29 #include <linux/bits.h>
30 
31 #include <net/inet_connection_sock.h>
32 #include <net/inet_timewait_sock.h>
33 #include <net/inet_hashtables.h>
34 #include <net/checksum.h>
35 #include <net/request_sock.h>
36 #include <net/sock_reuseport.h>
37 #include <net/sock.h>
38 #include <net/snmp.h>
39 #include <net/ip.h>
40 #include <net/tcp_states.h>
41 #include <net/tcp_ao.h>
42 #include <net/inet_ecn.h>
43 #include <net/dst.h>
44 #include <net/mptcp.h>
45 #include <net/xfrm.h>
46 #include <net/secure_seq.h>
47 
48 #include <linux/seq_file.h>
49 #include <linux/memcontrol.h>
50 #include <linux/bpf-cgroup.h>
51 #include <linux/siphash.h>
52 
53 extern struct inet_hashinfo tcp_hashinfo;
54 
55 DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
56 int tcp_orphan_count_sum(void);
57 
58 static inline void tcp_orphan_count_inc(void)
59 {
60 	this_cpu_inc(tcp_orphan_count);
61 }
62 
63 static inline void tcp_orphan_count_dec(void)
64 {
65 	this_cpu_dec(tcp_orphan_count);
66 }
67 
68 DECLARE_PER_CPU(u32, tcp_tw_isn);
69 
70 void tcp_time_wait(struct sock *sk, int state, int timeo);
71 
72 #define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
73 #define MAX_TCP_OPTION_SPACE 40
74 #define TCP_MIN_SND_MSS		48
75 #define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
76 
77 /*
78  * Never offer a window over 32767 without using window scaling. Some
79  * poor stacks do signed 16bit maths!
80  */
81 #define MAX_TCP_WINDOW		32767U
82 
83 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
84 #define TCP_MIN_MSS		88U
85 
86 /* The initial MTU to use for probing */
87 #define TCP_BASE_MSS		1024
88 
89 /* Probing interval, defaults to 10 minutes as per RFC4821 */
90 #define TCP_PROBE_INTERVAL	600
91 
92 /* Specify interval when tcp mtu probing will stop */
93 #define TCP_PROBE_THRESHOLD	8
94 
95 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
96 #define TCP_FASTRETRANS_THRESH 3
97 
98 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
99 #define TCP_MAX_QUICKACKS	16U
100 
101 /* Maximal window scale value according to RFC1323 */
102 #define TCP_MAX_WSCALE		14U
103 
104 /* Default sending frequency of accurate ECN option per RTT */
105 #define TCP_ACCECN_OPTION_BEACON	3
106 
107 /* urg_data states */
108 #define TCP_URG_VALID	0x0100
109 #define TCP_URG_NOTYET	0x0200
110 #define TCP_URG_READ	0x0400
111 
112 #define TCP_RETR1	3	/*
113 				 * This is how many retries it does before it
114 				 * tries to figure out if the gateway is
115 				 * down. Minimal RFC value is 3; it corresponds
116 				 * to ~3sec-8min depending on RTO.
117 				 */
118 
119 #define TCP_RETR2	15	/*
120 				 * This should take at least
121 				 * 90 minutes to time out.
122 				 * RFC1122 says that the limit is 100 sec.
123 				 * 15 is ~13-30min depending on RTO.
124 				 */
125 
126 #define TCP_SYN_RETRIES	 6	/* This is how many retries are done
127 				 * when actively opening a connection.
128 				 * RFC1122 says the minimum retry MUST
129 				 * be at least 180secs.  Nevertheless
130 				 * this value corresponds to
131 				 * 63secs of retransmission with the
132 				 * current initial RTO.
133 				 */
134 
135 #define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
136 				 * when passively opening a connection.
137 				 * This corresponds to 31secs of
138 				 * retransmission with the current
139 				 * initial RTO.
140 				 */
141 
142 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
143 				  * state, about 60 seconds	*/
144 #define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
145                                  /* BSD style FIN_WAIT2 deadlock breaker.
146 				  * It used to be 3min; the new value is 60sec,
147 				  * to combine the FIN-WAIT-2 timeout with
148 				  * the TIME-WAIT timer.
149 				  */
150 #define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
151 
152 #define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
153 static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
154 
155 #if HZ >= 100
156 #define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
157 #define TCP_ATO_MIN	((unsigned)(HZ/25))
158 #else
159 #define TCP_DELACK_MIN	4U
160 #define TCP_ATO_MIN	4U
161 #endif
162 #define TCP_RTO_MAX_SEC 120
163 #define TCP_RTO_MAX	((unsigned)(TCP_RTO_MAX_SEC * HZ))
164 #define TCP_RTO_MIN	((unsigned)(HZ / 5))
165 #define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
166 
167 #define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
168 
169 #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
170 #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
171 						 * used as a fallback RTO for the
172 						 * initial data transmission if no
173 						 * valid RTT sample has been acquired,
174 						 * most likely due to retrans in 3WHS.
175 						 */
176 
177 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
178 					                 * for local resources.
179 					                 */
180 #define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
181 #define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
182 #define TCP_KEEPALIVE_INTVL	(75*HZ)
183 
184 #define MAX_TCP_KEEPIDLE	32767
185 #define MAX_TCP_KEEPINTVL	32767
186 #define MAX_TCP_KEEPCNT		127
187 #define MAX_TCP_SYNCNT		127
188 
189 /* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
190  * to avoid overflows. This assumes a clock smaller than 1 MHz.
191  * Default clock is 1 kHz, tcp_usec_ts uses 1 MHz.
192  */
193 #define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)
194 
195 #define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
196 					 * after this time. It should be equal
197 					 * to (or greater than) TCP_TIMEWAIT_LEN
198 					 * to provide reliability equal to that
199 					 * provided by the timewait state.
200 					 */
201 #define TCP_PAWS_WINDOW	1		/* Replay window for per-host
202 					 * timestamps. It must be less than
203 					 * minimal timewait lifetime.
204 					 */
205 /*
206  *	TCP option
207  */
208 
209 #define TCPOPT_NOP		1	/* Padding */
210 #define TCPOPT_EOL		0	/* End of options */
211 #define TCPOPT_MSS		2	/* Segment size negotiating */
212 #define TCPOPT_WINDOW		3	/* Window scaling */
213 #define TCPOPT_SACK_PERM        4       /* SACK Permitted */
214 #define TCPOPT_SACK             5       /* SACK Block */
215 #define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
216 #define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
217 #define TCPOPT_AO		29	/* Authentication Option (RFC5925) */
218 #define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
219 #define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
220 #define TCPOPT_ACCECN0		172	/* 0xAC: Accurate ECN Order 0 */
221 #define TCPOPT_ACCECN1		174	/* 0xAE: Accurate ECN Order 1 */
222 #define TCPOPT_EXP		254	/* Experimental */
223 /* Magic number placed after the option kind/length when sharing TCP
224  * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
225  */
226 #define TCPOPT_FASTOPEN_MAGIC	0xF989
227 #define TCPOPT_SMC_MAGIC	0xE2D4C3D9
228 
229 /*
230  *     TCP option lengths
231  */
232 
233 #define TCPOLEN_MSS            4
234 #define TCPOLEN_WINDOW         3
235 #define TCPOLEN_SACK_PERM      2
236 #define TCPOLEN_TIMESTAMP      10
237 #define TCPOLEN_MD5SIG         18
238 #define TCPOLEN_FASTOPEN_BASE  2
239 #define TCPOLEN_ACCECN_BASE    2
240 #define TCPOLEN_EXP_FASTOPEN_BASE  4
241 #define TCPOLEN_EXP_SMC_BASE   6
242 
243 /* But this is what stacks really send out. */
244 #define TCPOLEN_TSTAMP_ALIGNED		12
245 #define TCPOLEN_WSCALE_ALIGNED		4
246 #define TCPOLEN_SACKPERM_ALIGNED	4
247 #define TCPOLEN_SACK_BASE		2
248 #define TCPOLEN_SACK_BASE_ALIGNED	4
249 #define TCPOLEN_SACK_PERBLOCK		8
250 #define TCPOLEN_MD5SIG_ALIGNED		20
251 #define TCPOLEN_MSS_ALIGNED		4
252 #define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
253 #define TCPOLEN_ACCECN_PERFIELD		3
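
/*
 * Editor's note: an illustrative sketch, not part of the upstream header.
 * The *_ALIGNED lengths above account for the NOP padding stacks emit so
 * that each option starts on a 32-bit boundary.  For example, the 10-byte
 * timestamp option is normally sent as two NOPs followed by the option
 * itself, which is where TCPOLEN_TSTAMP_ALIGNED (12) comes from:
 *
 *	static const u8 tsopt_wire[TCPOLEN_TSTAMP_ALIGNED] = {
 *		TCPOPT_NOP, TCPOPT_NOP,			padding
 *		TCPOPT_TIMESTAMP, TCPOLEN_TIMESTAMP,	kind, length
 *		0, 0, 0, 0,				TSval (per segment)
 *		0, 0, 0, 0,				TSecr
 *	};
 */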
254 
255 /* Maximum number of byte counters in the AccECN option, and its maximum size */
256 #define TCP_ACCECN_NUMFIELDS		3
257 #define TCP_ACCECN_MAXSIZE		(TCPOLEN_ACCECN_BASE + \
258 					 TCPOLEN_ACCECN_PERFIELD * \
259 					 TCP_ACCECN_NUMFIELDS)
260 #define TCP_ACCECN_SAFETY_SHIFT		1 /* SAFETY_FACTOR in accecn draft */
261 
262 /* Flags in tp->nonagle */
263 #define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
264 #define TCP_NAGLE_CORK		2	/* Socket is corked	    */
265 #define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
266 
267 /* TCP thin-stream limits */
268 #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
269 
270 /* TCP initial congestion window as per rfc6928 */
271 #define TCP_INIT_CWND		10
272 
273 /* Bit Flags for sysctl_tcp_fastopen */
274 #define	TFO_CLIENT_ENABLE	1
275 #define	TFO_SERVER_ENABLE	2
276 #define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
277 
278 /* Accept SYN data w/o any cookie option */
279 #define	TFO_SERVER_COOKIE_NOT_REQD	0x200
280 
281 /* Force enable TFO on all listeners, i.e., not requiring the
282  * TCP_FASTOPEN socket option.
283  */
284 #define	TFO_SERVER_WO_SOCKOPT1	0x400
285 
286 
287 /* sysctl variables for tcp */
288 extern int sysctl_tcp_max_orphans;
289 extern long sysctl_tcp_mem[3];
290 
291 #define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
292 #define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
293 #define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */
294 
295 DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
296 
297 extern struct percpu_counter tcp_sockets_allocated;
298 extern unsigned long tcp_memory_pressure;
299 
300 /* optimized version of sk_under_memory_pressure() for TCP sockets */
301 static inline bool tcp_under_memory_pressure(const struct sock *sk)
302 {
303 	if (mem_cgroup_sk_enabled(sk) &&
304 	    mem_cgroup_sk_under_memory_pressure(sk))
305 		return true;
306 
307 	if (sk->sk_bypass_prot_mem)
308 		return false;
309 
310 	return READ_ONCE(tcp_memory_pressure);
311 }
312 /*
313  * The next routines deal with comparing 32 bit unsigned ints
314  * and worry about wraparound (automatic with unsigned arithmetic).
315  */
316 
317 static inline bool before(__u32 seq1, __u32 seq2)
318 {
319         return (__s32)(seq1-seq2) < 0;
320 }
321 #define after(seq2, seq1) 	before(seq1, seq2)
322 
323 /* is s2<=s1<=s3 ? */
324 static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
325 {
326 	return seq3 - seq2 >= seq1 - seq2;
327 }
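
/*
 * Editor's note: an illustrative sketch, not part of the upstream header.
 * The unsigned subtraction above makes these helpers immune to the 32-bit
 * sequence-number wrap.  With sequence numbers straddling the wrap point:
 *
 *	before(0xfffffff0U, 0x10U)		true: (s32)0xffffffe0 < 0
 *	after(0x10U, 0xfffffff0U)		true, by definition of after()
 *	between(0x5U, 0xfffffff0U, 0x10U)	true: 0x20 >= 0x15 (mod 2^32)
 *
 * i.e. 0xfffffff0 is treated as "earlier" than 0x10 even though it is
 * numerically larger.
 */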
328 
329 static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
330 {
331 	sk_wmem_queued_add(sk, -skb->truesize);
332 	if (!skb_zcopy_pure(skb))
333 		sk_mem_uncharge(sk, skb->truesize);
334 	else
335 		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
336 	__kfree_skb(skb);
337 }
338 
339 void sk_forced_mem_schedule(struct sock *sk, int size);
340 
341 bool tcp_check_oom(const struct sock *sk, int shift);
342 
343 
344 extern struct proto tcp_prot;
345 
346 #define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
347 #define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
348 #define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
349 #define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
350 
351 /*
352  * TCP splice context
353  */
354 struct tcp_splice_state {
355 	struct pipe_inode_info *pipe;
356 	size_t len;
357 	unsigned int flags;
358 };
359 
360 void tcp_tsq_work_init(void);
361 
362 int tcp_v4_err(struct sk_buff *skb, u32);
363 
364 void tcp_shutdown(struct sock *sk, int how);
365 
366 int tcp_v4_early_demux(struct sk_buff *skb);
367 int tcp_v4_rcv(struct sk_buff *skb);
368 
369 void tcp_remove_empty_skb(struct sock *sk);
370 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
371 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
372 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
373 			 size_t size, struct ubuf_info *uarg);
374 void tcp_splice_eof(struct socket *sock);
375 int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
376 int tcp_wmem_schedule(struct sock *sk, int copy);
377 void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
378 	      int size_goal);
379 void tcp_release_cb(struct sock *sk);
380 void tcp_wfree(struct sk_buff *skb);
381 void tcp_write_timer_handler(struct sock *sk);
382 void tcp_delack_timer_handler(struct sock *sk);
383 int tcp_ioctl(struct sock *sk, int cmd, int *karg);
384 enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
385 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
386 void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
387 void tcp_rcv_space_adjust(struct sock *sk);
388 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
389 void tcp_twsk_destructor(struct sock *sk);
390 void tcp_twsk_purge(struct list_head *net_exit_list);
391 int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
392 			 unsigned int offset, size_t len);
393 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
394 			struct pipe_inode_info *pipe, size_t len,
395 			unsigned int flags);
396 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
397 				     bool force_schedule);
398 
399 static inline void tcp_dec_quickack_mode(struct sock *sk)
400 {
401 	struct inet_connection_sock *icsk = inet_csk(sk);
402 
403 	if (icsk->icsk_ack.quick) {
404 		/* How many ACKs S/ACKing new data have we sent? */
405 		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
406 
407 		if (pkts >= icsk->icsk_ack.quick) {
408 			icsk->icsk_ack.quick = 0;
409 			/* Leaving quickack mode we deflate ATO. */
410 			icsk->icsk_ack.ato   = TCP_ATO_MIN;
411 		} else
412 			icsk->icsk_ack.quick -= pkts;
413 	}
414 }
415 
416 #define	TCP_ECN_MODE_RFC3168	BIT(0)
417 #define	TCP_ECN_QUEUE_CWR	BIT(1)
418 #define	TCP_ECN_DEMAND_CWR	BIT(2)
419 #define	TCP_ECN_SEEN		BIT(3)
420 #define	TCP_ECN_MODE_ACCECN	BIT(4)
421 
422 #define	TCP_ECN_DISABLED	0
423 #define	TCP_ECN_MODE_PENDING	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
424 #define	TCP_ECN_MODE_ANY	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
425 
426 static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
427 {
428 	return tp->ecn_flags & TCP_ECN_MODE_ANY;
429 }
430 
431 static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
432 {
433 	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168;
434 }
435 
436 static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp)
437 {
438 	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN;
439 }
440 
441 static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
442 {
443 	return !tcp_ecn_mode_any(tp);
444 }
445 
446 static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp)
447 {
448 	return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING;
449 }
450 
451 static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
452 {
453 	tp->ecn_flags &= ~TCP_ECN_MODE_ANY;
454 	tp->ecn_flags |= mode;
455 }
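
/*
 * Editor's note: an illustrative summary, not part of the upstream header.
 * The two mode bits encode four states that the helpers above distinguish:
 *
 *	ecn_flags & TCP_ECN_MODE_ANY		meaning
 *	----------------------------		-------------------------------
 *	0					ECN disabled (tcp_ecn_disabled())
 *	TCP_ECN_MODE_RFC3168			classic RFC 3168 ECN
 *	TCP_ECN_MODE_ACCECN			Accurate ECN
 *	both (== TCP_ECN_MODE_PENDING)		outcome not yet determined
 */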
456 
457 enum tcp_tw_status {
458 	TCP_TW_SUCCESS = 0,
459 	TCP_TW_RST = 1,
460 	TCP_TW_ACK = 2,
461 	TCP_TW_SYN = 3,
462 	TCP_TW_ACK_OOW = 4
463 };
464 
465 
466 enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
467 					      struct sk_buff *skb,
468 					      const struct tcphdr *th,
469 					      u32 *tw_isn,
470 					      enum skb_drop_reason *drop_reason);
471 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
472 			   struct request_sock *req, bool fastopen,
473 			   bool *lost_race, enum skb_drop_reason *drop_reason);
474 enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
475 				       struct sk_buff *skb);
476 void tcp_enter_loss(struct sock *sk);
477 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
478 void tcp_clear_retrans(struct tcp_sock *tp);
479 void tcp_update_pacing_rate(struct sock *sk);
480 void tcp_set_rto(struct sock *sk);
481 void tcp_update_metrics(struct sock *sk);
482 void tcp_init_metrics(struct sock *sk);
483 void tcp_metrics_init(void);
484 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
485 void __tcp_close(struct sock *sk, long timeout);
486 void tcp_close(struct sock *sk, long timeout);
487 void tcp_init_sock(struct sock *sk);
488 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
489 __poll_t tcp_poll(struct file *file, struct socket *sock,
490 		      struct poll_table_struct *wait);
491 int do_tcp_getsockopt(struct sock *sk, int level,
492 		      int optname, sockptr_t optval, sockptr_t optlen);
493 int tcp_getsockopt(struct sock *sk, int level, int optname,
494 		   char __user *optval, int __user *optlen);
495 bool tcp_bpf_bypass_getsockopt(int level, int optname);
496 int do_tcp_setsockopt(struct sock *sk, int level, int optname,
497 		      sockptr_t optval, unsigned int optlen);
498 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
499 		   unsigned int optlen);
500 void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
501 void tcp_set_keepalive(struct sock *sk, int val);
502 void tcp_syn_ack_timeout(const struct request_sock *req);
503 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
504 		int flags);
505 int tcp_set_rcvlowat(struct sock *sk, int val);
506 int tcp_set_window_clamp(struct sock *sk, int val);
507 
508 static inline void
509 tcp_update_recv_tstamps(struct sk_buff *skb,
510 			struct scm_timestamping_internal *tss)
511 {
512 	tss->ts[0] = skb->tstamp;
513 	tss->ts[2] = skb_hwtstamps(skb)->hwtstamp;
514 }
515 
516 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
517 			struct scm_timestamping_internal *tss);
518 void tcp_data_ready(struct sock *sk);
519 #ifdef CONFIG_MMU
520 int tcp_mmap(struct file *file, struct socket *sock,
521 	     struct vm_area_struct *vma);
522 #endif
523 void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
524 		       struct tcp_options_received *opt_rx,
525 		       int estab, struct tcp_fastopen_cookie *foc);
526 
527 /*
528  *	BPF SKB-less helpers
529  */
530 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
531 			 struct tcphdr *th, u32 *cookie);
532 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
533 			 struct tcphdr *th, u32 *cookie);
534 u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
535 u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
536 			  const struct tcp_request_sock_ops *af_ops,
537 			  struct sock *sk, struct tcphdr *th);
538 /*
539  *	TCP v4 functions exported for the inet6 API
540  */
541 
542 void tcp_v4_mtu_reduced(struct sock *sk);
543 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
544 void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
545 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
546 struct sock *tcp_create_openreq_child(const struct sock *sk,
547 				      struct request_sock *req,
548 				      struct sk_buff *skb);
549 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
550 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
551 				  struct request_sock *req,
552 				  struct dst_entry *dst,
553 				  struct request_sock *req_unhash,
554 				  bool *own_req,
555 				  void (*opt_child_init)(struct sock *newsk,
556 							 const struct sock *sk));
557 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
558 int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
559 int tcp_connect(struct sock *sk);
560 enum tcp_synack_type {
561 	TCP_SYNACK_NORMAL,
562 	TCP_SYNACK_FASTOPEN,
563 	TCP_SYNACK_COOKIE,
564 	TCP_SYNACK_RETRANS,
565 };
566 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
567 				struct request_sock *req,
568 				struct tcp_fastopen_cookie *foc,
569 				enum tcp_synack_type synack_type,
570 				struct sk_buff *syn_skb);
571 int tcp_disconnect(struct sock *sk, int flags);
572 
573 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
574 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
575 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
576 
577 /* From syncookies.c */
578 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
579 				 struct request_sock *req,
580 				 struct dst_entry *dst);
581 int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
582 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
583 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
584 					    struct sock *sk, struct sk_buff *skb,
585 					    struct tcp_options_received *tcp_opt,
586 					    int mss, u32 tsoff);
587 
588 #if IS_ENABLED(CONFIG_BPF)
589 struct bpf_tcp_req_attrs {
590 	u32 rcv_tsval;
591 	u32 rcv_tsecr;
592 	u16 mss;
593 	u8 rcv_wscale;
594 	u8 snd_wscale;
595 	u8 ecn_ok;
596 	u8 wscale_ok;
597 	u8 sack_ok;
598 	u8 tstamp_ok;
599 	u8 usec_ts_ok;
600 	u8 reserved[3];
601 };
602 #endif
603 
604 #ifdef CONFIG_SYN_COOKIES
605 
606 /* Syncookies use a monotonic timer which increments every 60 seconds.
607  * This counter is used both as a hash input and partially encoded into
608  * the cookie value.  A cookie is only validated further if the delta
609  * between the current counter value and the encoded one is less than this,
610  * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
611  * the counter advances immediately after a cookie is generated).
612  */
613 #define MAX_SYNCOOKIE_AGE	2
614 #define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
615 #define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
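
/*
 * Editor's note: a worked example, not part of the upstream header.
 * With HZ = 1000, TCP_SYNCOOKIE_PERIOD is 60000 jiffies and
 * TCP_SYNCOOKIE_VALID is 120000 jiffies: a cookie minted while the
 * counter (tcp_cookie_time(), defined below) read N is still considered
 * while the counter reads N or N + 1, i.e. between 60 and 120 seconds of
 * validity depending on where in the 60 s period it was minted.  A check
 * along these lines (sketch; "encoded_count" is a hypothetical name for
 * the counter value recovered from the cookie):
 *
 *	u32 age = tcp_cookie_time() - encoded_count;
 *
 *	if (age < MAX_SYNCOOKIE_AGE)
 *		... cookie is fresh enough, go on and verify the hash ...
 */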
616 
617 /* syncookies: remember time of last synqueue overflow.
618  * But do not dirty this field too often (once per second is enough).
619  * It is racy as we do not hold a lock, but the race is very minor.
620  */
621 static inline void tcp_synq_overflow(const struct sock *sk)
622 {
623 	unsigned int last_overflow;
624 	unsigned int now = jiffies;
625 
626 	if (sk->sk_reuseport) {
627 		struct sock_reuseport *reuse;
628 
629 		reuse = rcu_dereference(sk->sk_reuseport_cb);
630 		if (likely(reuse)) {
631 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
632 			if (!time_between32(now, last_overflow,
633 					    last_overflow + HZ))
634 				WRITE_ONCE(reuse->synq_overflow_ts, now);
635 			return;
636 		}
637 	}
638 
639 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
640 	if (!time_between32(now, last_overflow, last_overflow + HZ))
641 		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
642 }
643 
644 /* syncookies: no recent synqueue overflow on this listening socket? */
645 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
646 {
647 	unsigned int last_overflow;
648 	unsigned int now = jiffies;
649 
650 	if (sk->sk_reuseport) {
651 		struct sock_reuseport *reuse;
652 
653 		reuse = rcu_dereference(sk->sk_reuseport_cb);
654 		if (likely(reuse)) {
655 			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
656 			return !time_between32(now, last_overflow - HZ,
657 					       last_overflow +
658 					       TCP_SYNCOOKIE_VALID);
659 		}
660 	}
661 
662 	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
663 
664 	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
665 	 * then we're under synflood. However, we have to use
666 	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
667 	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
668 	 * jiffies but before we store .ts_recent_stamp into last_overflow,
669 	 * which could lead to rejecting a valid syncookie.
670 	 */
671 	return !time_between32(now, last_overflow - HZ,
672 			       last_overflow + TCP_SYNCOOKIE_VALID);
673 }
674 
675 static inline u32 tcp_cookie_time(void)
676 {
677 	u64 val = get_jiffies_64();
678 
679 	do_div(val, TCP_SYNCOOKIE_PERIOD);
680 	return val;
681 }
682 
683 /* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
684 static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
685 {
686 	if (usec_ts)
687 		return div_u64(val, NSEC_PER_USEC);
688 
689 	return div_u64(val, NSEC_PER_MSEC);
690 }
691 
692 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
693 			      u16 *mssp);
694 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
695 u64 cookie_init_timestamp(struct request_sock *req, u64 now);
696 bool cookie_timestamp_decode(const struct net *net,
697 			     struct tcp_options_received *opt);
698 
699 static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
700 {
701 	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
702 		dst_feature(dst, RTAX_FEATURE_ECN);
703 }
704 
705 #if IS_ENABLED(CONFIG_BPF)
706 static inline bool cookie_bpf_ok(struct sk_buff *skb)
707 {
708 	return skb->sk;
709 }
710 
711 struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
712 #else
713 static inline bool cookie_bpf_ok(struct sk_buff *skb)
714 {
715 	return false;
716 }
717 
718 static inline struct request_sock *cookie_bpf_check(struct sock *sk,
719 						    struct sk_buff *skb)
720 {
721 	return NULL;
722 }
723 #endif
724 
725 /* From net/ipv6/syncookies.c */
726 int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
727 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
728 
729 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
730 			      const struct tcphdr *th, u16 *mssp);
731 __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
732 #endif
733 /* tcp_output.c */
734 
735 void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
736 void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
737 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
738 			       int nonagle);
739 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
740 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
741 void tcp_retransmit_timer(struct sock *sk);
742 void tcp_xmit_retransmit_queue(struct sock *);
743 void tcp_simple_retransmit(struct sock *);
744 void tcp_enter_recovery(struct sock *sk, bool ece_ack);
745 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
746 enum tcp_queue {
747 	TCP_FRAG_IN_WRITE_QUEUE,
748 	TCP_FRAG_IN_RTX_QUEUE,
749 };
750 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
751 		 struct sk_buff *skb, u32 len,
752 		 unsigned int mss_now, gfp_t gfp);
753 
754 void tcp_send_probe0(struct sock *);
755 int tcp_write_wakeup(struct sock *, int mib);
756 void tcp_send_fin(struct sock *sk);
757 void tcp_send_active_reset(struct sock *sk, gfp_t priority,
758 			   enum sk_rst_reason reason);
759 int tcp_send_synack(struct sock *);
760 void tcp_push_one(struct sock *, unsigned int mss_now);
761 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
762 void tcp_send_ack(struct sock *sk);
763 void tcp_send_delayed_ack(struct sock *sk);
764 void tcp_send_loss_probe(struct sock *sk);
765 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
766 void tcp_skb_collapse_tstamp(struct sk_buff *skb,
767 			     const struct sk_buff *next_skb);
768 
769 /* tcp_input.c */
770 void tcp_rearm_rto(struct sock *sk);
771 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
772 void tcp_done_with_error(struct sock *sk, int err);
773 void tcp_reset(struct sock *sk, struct sk_buff *skb);
774 void tcp_fin(struct sock *sk);
775 void __tcp_check_space(struct sock *sk);
776 static inline void tcp_check_space(struct sock *sk)
777 {
778 	/* pairs with tcp_poll() */
779 	smp_mb();
780 
781 	if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
782 		__tcp_check_space(sk);
783 }
784 void tcp_sack_compress_send_ack(struct sock *sk);
785 
786 static inline void tcp_cleanup_skb(struct sk_buff *skb)
787 {
788 	skb_dst_drop(skb);
789 	secpath_reset(skb);
790 }
791 
792 static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
793 {
794 	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
795 	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
796 	__skb_queue_tail(&sk->sk_receive_queue, skb);
797 }
798 
799 /* tcp_timer.c */
800 void tcp_init_xmit_timers(struct sock *);
801 static inline void tcp_clear_xmit_timers(struct sock *sk)
802 {
803 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
804 		__sock_put(sk);
805 
806 	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
807 		__sock_put(sk);
808 
809 	inet_csk_clear_xmit_timers(sk);
810 }
811 
812 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
813 unsigned int tcp_current_mss(struct sock *sk);
814 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
815 
816 /* Bound MSS / TSO packet size to half of the window */
817 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
818 {
819 	int cutoff;
820 
821 	/* When peer uses tiny windows, there is no use in packetizing
822 	 * to sub-MSS pieces for the sake of SWS or making sure there
823 	 * are enough packets in the pipe for fast recovery.
824 	 *
825 	 * On the other hand, for extremely large MSS devices, handling
826 	 * smaller than MSS windows in this way does make sense.
827 	 */
828 	if (tp->max_window > TCP_MSS_DEFAULT)
829 		cutoff = (tp->max_window >> 1);
830 	else
831 		cutoff = tp->max_window;
832 
833 	if (cutoff && pktsize > cutoff)
834 		return max_t(int, cutoff, 68U - tp->tcp_header_len);
835 	else
836 		return pktsize;
837 }
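
/*
 * Editor's note: a worked example, not part of the upstream header.
 * Assume TCP_MSS_DEFAULT is the usual 536 (RFC 1122 IPv4 default, defined
 * in the TCP headers).  If the peer has ever advertised a 64 KB window,
 * max_window (65535) exceeds that default, so cutoff is 32767 and a
 * 48 KB TSO-sized pktsize is bounded to 32767.  If the peer only ever
 * advertised a tiny 300-byte window, cutoff is the full 300 bytes (not
 * half of it), so an oversized pktsize is bounded to 300 rather than 150.
 */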
838 
839 /* tcp.c */
840 void tcp_get_info(struct sock *, struct tcp_info *);
841 void tcp_rate_check_app_limited(struct sock *sk);
842 
843 /* Read 'sendfile()'-style from a TCP socket */
844 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
845 		  sk_read_actor_t recv_actor);
846 int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
847 			sk_read_actor_t recv_actor, bool noack,
848 			u32 *copied_seq);
849 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
850 struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
851 void tcp_read_done(struct sock *sk, size_t len);
852 
853 void tcp_initialize_rcv_mss(struct sock *sk);
854 
855 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
856 int tcp_mss_to_mtu(struct sock *sk, int mss);
857 void tcp_mtup_init(struct sock *sk);
858 
859 static inline unsigned int tcp_rto_max(const struct sock *sk)
860 {
861 	return READ_ONCE(inet_csk(sk)->icsk_rto_max);
862 }
863 
864 static inline void tcp_bound_rto(struct sock *sk)
865 {
866 	inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
867 }
868 
869 static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
870 {
871 	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
872 }
873 
874 static inline unsigned long tcp_reqsk_timeout(struct request_sock *req)
875 {
876 	u64 timeout = (u64)req->timeout << req->num_timeout;
877 
878 	return (unsigned long)min_t(u64, timeout,
879 				    tcp_rto_max(req->rsk_listener));
880 }
881 
882 u32 tcp_delack_max(const struct sock *sk);
883 
884 /* Compute the actual rto_min value */
885 static inline u32 tcp_rto_min(const struct sock *sk)
886 {
887 	const struct dst_entry *dst = __sk_dst_get(sk);
888 	u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min);
889 
890 	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
891 		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
892 	return rto_min;
893 }
894 
895 static inline u32 tcp_rto_min_us(const struct sock *sk)
896 {
897 	return jiffies_to_usecs(tcp_rto_min(sk));
898 }
899 
900 static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
901 {
902 	return dst_metric_locked(dst, RTAX_CC_ALGO);
903 }
904 
905 /* Minimum RTT in usec. ~0 means not available. */
906 static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
907 {
908 	return minmax_get(&tp->rtt_min);
909 }
910 
911 /* Compute the actual receive window we are currently advertising.
912  * Rcv_nxt can be after the window if our peer pushes more data
913  * than the offered window.
914  */
915 static inline u32 tcp_receive_window(const struct tcp_sock *tp)
916 {
917 	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
918 
919 	if (win < 0)
920 		win = 0;
921 	return (u32) win;
922 }
923 
924 /* Choose a new window, without checks for shrinking, and without
925  * scaling applied to the result.  The caller does these things
926  * if necessary.  This is a "raw" window selection.
927  */
928 u32 __tcp_select_window(struct sock *sk);
929 
930 void tcp_send_window_probe(struct sock *sk);
931 
932 /* TCP uses 32bit jiffies to save some space.
933  * Note that this is different from tcp_time_stamp, which was
934  * historically the same until linux-4.13.
935  */
936 #define tcp_jiffies32 ((u32)jiffies)
937 
938 /*
939  * Deliver a 32bit value for TCP timestamp option (RFC 7323).
940  * It is no longer tied to jiffies, but to a 1 ms clock.
941  * Note: double check if you want to use tcp_jiffies32 instead of this.
942  */
943 #define TCP_TS_HZ	1000
944 
945 static inline u64 tcp_clock_ns(void)
946 {
947 	return ktime_get_ns();
948 }
949 
950 static inline u64 tcp_clock_us(void)
951 {
952 	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
953 }
954 
955 static inline u64 tcp_clock_ms(void)
956 {
957 	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
958 }
959 
960 /* TCP Timestamp included in TS option (RFC 1323) can use either ms
961  * or usec resolution. Each socket carries a flag to select one or the
962  * other resolution, as the route attribute could change anytime.
963  * Each flow must stick to its initial resolution.
964  */
965 static inline u32 tcp_clock_ts(bool usec_ts)
966 {
967 	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
968 }
969 
970 static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
971 {
972 	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
973 }
974 
975 static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
976 {
977 	if (tp->tcp_usec_ts)
978 		return tp->tcp_mstamp;
979 	return tcp_time_stamp_ms(tp);
980 }
981 
982 void tcp_mstamp_refresh(struct tcp_sock *tp);
983 
984 static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
985 {
986 	return max_t(s64, t1 - t0, 0);
987 }
988 
989 /* provide the departure time in us unit */
990 static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
991 {
992 	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
993 }
994 
995 /* Provide skb TSval in usec or ms unit */
996 static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
997 {
998 	if (usec_ts)
999 		return tcp_skb_timestamp_us(skb);
1000 
1001 	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
1002 }
1003 
1004 static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
1005 {
1006 	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
1007 }
1008 
1009 static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
1010 {
1011 	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
1012 }
1013 
1014 #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
1015 
1016 #define TCPHDR_FIN	BIT(0)
1017 #define TCPHDR_SYN	BIT(1)
1018 #define TCPHDR_RST	BIT(2)
1019 #define TCPHDR_PSH	BIT(3)
1020 #define TCPHDR_ACK	BIT(4)
1021 #define TCPHDR_URG	BIT(5)
1022 #define TCPHDR_ECE	BIT(6)
1023 #define TCPHDR_CWR	BIT(7)
1024 #define TCPHDR_AE	BIT(8)
1025 #define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
1026 			   TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \
1027 			   TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
1028 #define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \
1029 			    TCPHDR_FLAGS_MASK)
1030 
1031 #define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
1032 #define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
1033 #define TCPHDR_SYNACK_ACCECN (TCPHDR_SYN | TCPHDR_ACK | TCPHDR_CWR)
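
/*
 * Editor's note: an illustrative sketch, not part of the upstream header.
 * The eight classic flags live in byte 13 of the TCP header, so they can
 * be tested through tcp_flag_byte(); TCPHDR_AE (the AccECN AE bit) sits
 * in byte 12, which is why tcp_flags_ntohs() reads 16 bits.  E.g. to
 * check for a pure SYN (no ACK):
 *
 *	const struct tcphdr *th = tcp_hdr(skb);
 *
 *	if ((tcp_flag_byte(th) & (TCPHDR_SYN | TCPHDR_ACK)) == TCPHDR_SYN)
 *		... handle an initial SYN ...
 */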
1034 
1035 #define TCP_ACCECN_CEP_ACE_MASK 0x7
1036 #define TCP_ACCECN_ACE_MAX_DELTA 6
1037 
1038 /* To avoid/detect middlebox interference, not all counters start at 0.
1039  * See draft-ietf-tcpm-accurate-ecn for the latest values.
1040  */
1041 #define TCP_ACCECN_CEP_INIT_OFFSET 5
1042 #define TCP_ACCECN_E1B_INIT_OFFSET 1
1043 #define TCP_ACCECN_E0B_INIT_OFFSET 1
1044 #define TCP_ACCECN_CEB_INIT_OFFSET 0
1045 
1046 /* State flags for sacked in struct tcp_skb_cb */
1047 enum tcp_skb_cb_sacked_flags {
1048 	TCPCB_SACKED_ACKED	= (1 << 0),	/* SKB ACK'd by a SACK block	*/
1049 	TCPCB_SACKED_RETRANS	= (1 << 1),	/* SKB retransmitted		*/
1050 	TCPCB_LOST		= (1 << 2),	/* SKB is lost			*/
1051 	TCPCB_TAGBITS		= (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
1052 				   TCPCB_LOST),	/* All tag bits			*/
1053 	TCPCB_REPAIRED		= (1 << 4),	/* SKB repaired (no skb_mstamp_ns)	*/
1054 	TCPCB_EVER_RETRANS	= (1 << 7),	/* Ever retransmitted frame	*/
1055 	TCPCB_RETRANS		= (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
1056 				   TCPCB_REPAIRED),
1057 };
1058 
1059 /* This is what the send packet queuing engine uses to pass
1060  * TCP per-packet control information to the transmission code.
1061  * We also store the host-order sequence numbers here.
1062  * This is 44 bytes if IPV6 is enabled.
1063  * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
1064  */
1065 struct tcp_skb_cb {
1066 	__u32		seq;		/* Starting sequence number	*/
1067 	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
1068 	union {
1069 		/* Note:
1070 		 * 	  tcp_gso_segs/size are used in the write queue only,
1071 		 *	  cf. tcp_skb_pcount()/tcp_skb_mss()
1072 		 */
1073 		struct {
1074 			u16	tcp_gso_segs;
1075 			u16	tcp_gso_size;
1076 		};
1077 	};
1078 	__u16		tcp_flags;	/* TCP header flags (tcp[12-13])*/
1079 
1080 	__u8		sacked;		/* State flags for SACK.	*/
1081 	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
1082 #define TSTAMP_ACK_SK	0x1
1083 #define TSTAMP_ACK_BPF	0x2
1084 	__u8		txstamp_ack:2,	/* Record TX timestamp for ack? */
1085 			eor:1,		/* Is skb MSG_EOR marked? */
1086 			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
1087 			unused:4;
1088 	__u32		ack_seq;	/* Sequence number ACK'd	*/
1089 	union {
1090 		struct {
1091 #define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
1092 			/* There is space for up to 24 bytes */
1093 			__u32 is_app_limited:1, /* cwnd not fully used? */
1094 			      delivered_ce:20,
1095 			      unused:11;
1096 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
1097 			__u32 delivered;
1098 			/* start of send pipeline phase */
1099 			u64 first_tx_mstamp;
1100 			/* when we reached the "delivered" count */
1101 			u64 delivered_mstamp;
1102 		} tx;   /* only used for outgoing skbs */
1103 		union {
1104 			struct inet_skb_parm	h4;
1105 #if IS_ENABLED(CONFIG_IPV6)
1106 			struct inet6_skb_parm	h6;
1107 #endif
1108 		} header;	/* For incoming skbs */
1109 	};
1110 };
1111 
1112 #define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
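
/*
 * Editor's note: an illustrative sketch, not part of the upstream header.
 * TCP_SKB_CB() simply reinterprets the 48-byte skb->cb[] scratch area as
 * the control block above; nothing is allocated.  A typical write-queue
 * use looks like (cf. tcp_skb_entail() in net/ipv4/tcp.c):
 *
 *	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 *
 *	tcb->seq = tcb->end_seq = tp->write_seq;
 *	tcb->tcp_flags = TCPHDR_ACK;
 *
 * The "adjust skbuff.h" note above is enforced at boot: tcp_init() has a
 * BUILD_BUG_ON() comparing sizeof(struct tcp_skb_cb) against the size of
 * the skb->cb[] field.
 */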
1113 
1114 extern const struct inet_connection_sock_af_ops ipv4_specific;
1115 
1116 #if IS_ENABLED(CONFIG_IPV6)
1117 /* This is the variant of inet6_iif() that must be used by TCP,
1118  * as TCP moves IP6CB into a different location in skb->cb[]
1119  */
1120 static inline int tcp_v6_iif(const struct sk_buff *skb)
1121 {
1122 	return TCP_SKB_CB(skb)->header.h6.iif;
1123 }
1124 
1125 static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
1126 {
1127 	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
1128 
1129 	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
1130 }
1131 
1132 /* TCP_SKB_CB reference means this cannot be used from early demux */
1133 static inline int tcp_v6_sdif(const struct sk_buff *skb)
1134 {
1135 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
1136 	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
1137 		return TCP_SKB_CB(skb)->header.h6.iif;
1138 #endif
1139 	return 0;
1140 }
1141 
1142 extern const struct inet_connection_sock_af_ops ipv6_specific;
1143 
1144 INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
1145 
1146 #endif
1147 
1148 /* TCP_SKB_CB reference means this cannot be used from early demux */
1149 static inline int tcp_v4_sdif(struct sk_buff *skb)
1150 {
1151 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
1152 	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
1153 		return TCP_SKB_CB(skb)->header.h4.iif;
1154 #endif
1155 	return 0;
1156 }
1157 
1158 /* Due to TSO, an SKB can be composed of multiple actual
1159  * packets.  To keep these tracked properly, we use this.
1160  */
1161 static inline int tcp_skb_pcount(const struct sk_buff *skb)
1162 {
1163 	return TCP_SKB_CB(skb)->tcp_gso_segs;
1164 }
1165 
1166 static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
1167 {
1168 	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
1169 }
1170 
1171 static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
1172 {
1173 	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
1174 }
1175 
1176 /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
1177 static inline int tcp_skb_mss(const struct sk_buff *skb)
1178 {
1179 	return TCP_SKB_CB(skb)->tcp_gso_size;
1180 }
1181 
1182 static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
1183 {
1184 	return likely(!TCP_SKB_CB(skb)->eor);
1185 }
1186 
1187 static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
1188 					const struct sk_buff *from)
1189 {
1190 	/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
1191 	return likely(tcp_skb_can_collapse_to(to) &&
1192 		      mptcp_skb_can_collapse(to, from) &&
1193 		      skb_pure_zcopy_same(to, from) &&
1194 		      skb_frags_readable(to) == skb_frags_readable(from));
1195 }
1196 
1197 static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
1198 					   const struct sk_buff *from)
1199 {
1200 	return likely(mptcp_skb_can_collapse(to, from) &&
1201 		      !skb_cmp_decrypted(to, from));
1202 }
1203 
1204 /* Events passed to congestion control interface */
1205 enum tcp_ca_event {
1206 	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
1207 	CA_EVENT_CWND_RESTART,	/* congestion window restart */
1208 	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
1209 	CA_EVENT_LOSS,		/* loss timeout */
1210 	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
1211 	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
1212 };
1213 
1214 /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
1215 enum tcp_ca_ack_event_flags {
1216 	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
1217 	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
1218 	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
1219 };
1220 
1221 /*
1222  * Interface for adding new TCP congestion control handlers
1223  */
1224 #define TCP_CA_NAME_MAX	16
1225 #define TCP_CA_MAX	128
1226 #define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)
1227 
1228 #define TCP_CA_UNSPEC	0
1229 
1230 /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
1231 #define TCP_CONG_NON_RESTRICTED		BIT(0)
1232 /* Requires ECN/ECT set on all packets */
1233 #define TCP_CONG_NEEDS_ECN		BIT(1)
1234 /* Require successfully negotiated AccECN capability */
1235 #define TCP_CONG_NEEDS_ACCECN		BIT(2)
1236 /* Use ECT(1) instead of ECT(0) while the CA is uninitialized */
1237 #define TCP_CONG_ECT_1_NEGOTIATION	BIT(3)
1238 /* Cannot fallback to RFC3168 during AccECN negotiation */
1239 #define TCP_CONG_NO_FALLBACK_RFC3168	BIT(4)
1240 #define TCP_CONG_MASK  (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN | \
1241 			TCP_CONG_NEEDS_ACCECN | TCP_CONG_ECT_1_NEGOTIATION | \
1242 			TCP_CONG_NO_FALLBACK_RFC3168)
1243 
1244 union tcp_cc_info;
1245 
1246 struct ack_sample {
1247 	u32 pkts_acked;
1248 	s32 rtt_us;
1249 	u32 in_flight;
1250 };
1251 
1252 /* A rate sample measures the number of (original/retransmitted) data
1253  * packets delivered "delivered" over an interval of time "interval_us".
1254  * The tcp_rate.c code fills in the rate sample, and congestion
1255  * control modules that define a cong_control function to run at the end
1256  * of ACK processing can optionally choose to consult this sample when
1257  * setting cwnd and pacing rate.
1258  * A sample is invalid if "delivered" or "interval_us" is negative.
1259  */
1260 struct rate_sample {
1261 	u64  prior_mstamp; /* starting timestamp for interval */
1262 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
1263 	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
1264 	s32  delivered;		/* number of packets delivered over interval */
1265 	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
1266 	long interval_us;	/* time for tp->delivered to incr "delivered" */
1267 	u32 snd_interval_us;	/* snd interval for delivered packets */
1268 	u32 rcv_interval_us;	/* rcv interval for delivered packets */
1269 	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
1270 	int  losses;		/* number of packets marked lost upon ACK */
1271 	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
1272 	u32  prior_in_flight;	/* in flight before this ACK */
1273 	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
1274 	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
1275 	bool is_retrans;	/* is sample from retransmission? */
1276 	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
1277 };
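
/*
 * Editor's note: an illustrative sketch, not part of the upstream header.
 * A cong_control() implementation can derive a delivery-rate sample (in
 * bytes per second) from the fields above, roughly as follows, ignoring
 * the validity checks a real module would perform:
 *
 *	if (rs->delivered > 0 && rs->interval_us > 0 && !rs->is_app_limited) {
 *		u64 bw = (u64)rs->delivered * tp->mss_cache * USEC_PER_SEC;
 *
 *		do_div(bw, rs->interval_us);
 *		// feed bw into the module's cwnd / pacing-rate logic
 *	}
 */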
1278 
1279 struct tcp_congestion_ops {
1280 /* fast path fields are put first to fill one cache line */
1281 
1282 	/* A congestion control (CC) must provide one of either:
1283 	 *
1284 	 * (a) a cong_avoid function, if the CC wants to use the core TCP
1285 	 *     stack's default functionality to implement a "classic"
1286 	 *     (Reno/CUBIC-style) response to packet loss, RFC3168 ECN,
1287 	 *     idle periods, pacing rate computations, etc.
1288 	 *
1289 	 * (b) a cong_control function, if the CC wants custom behavior and
1290 	 *      complete control of all congestion control behaviors.
1291 	 */
1292 	/* (a) "classic" response: calculate new cwnd.
1293 	 */
1294 	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1295 	/* (b) "custom" response: call when packets are delivered to update
1296 	 * cwnd and pacing rate, after all the ca_state processing.
1297 	 */
1298 	void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);
1299 
1300 	/* return slow start threshold (required) */
1301 	u32 (*ssthresh)(struct sock *sk);
1302 
1303 	/* call before changing ca_state (optional) */
1304 	void (*set_state)(struct sock *sk, u8 new_state);
1305 
1306 	/* call when cwnd event occurs (optional) */
1307 	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1308 
1309 	/* call when ack arrives (optional) */
1310 	void (*in_ack_event)(struct sock *sk, u32 flags);
1311 
1312 	/* hook for packet ack accounting (optional) */
1313 	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1314 
1315 	/* override sysctl_tcp_min_tso_segs (optional) */
1316 	u32 (*min_tso_segs)(struct sock *sk);
1317 
1318 	/* new value of cwnd after loss (required) */
1319 	u32  (*undo_cwnd)(struct sock *sk);
1320 	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
1321 	u32 (*sndbuf_expand)(struct sock *sk);
1322 
1323 /* control/slow paths put last */
1324 	/* get info for inet_diag (optional) */
1325 	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1326 			   union tcp_cc_info *info);
1327 
1328 	char 			name[TCP_CA_NAME_MAX];
1329 	struct module		*owner;
1330 	struct list_head	list;
1331 	u32			key;
1332 	u32			flags;
1333 
1334 	/* initialize private data (optional) */
1335 	void (*init)(struct sock *sk);
1336 	/* cleanup private data  (optional) */
1337 	void (*release)(struct sock *sk);
1338 } ____cacheline_aligned_in_smp;
1339 
1340 int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1341 void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1342 int tcp_update_congestion_control(struct tcp_congestion_ops *type,
1343 				  struct tcp_congestion_ops *old_type);
1344 int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);
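
/*
 * Editor's note: a minimal sketch of an out-of-tree congestion control
 * module, not part of the upstream header.  It relies only on the
 * tcp_congestion_ops interface above and the Reno helpers declared below
 * (tcp_reno_ssthresh(), tcp_reno_undo_cwnd(), tcp_reno_cong_avoid());
 * the module name "example" is made up for illustration:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *		.owner		= THIS_MODULE,
 *		.name		= "example",
 *	};
 *
 *	static int __init tcp_example_register(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *
 *	static void __exit tcp_example_unregister(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *
 *	module_init(tcp_example_register);
 *	module_exit(tcp_example_unregister);
 *
 * Once loaded it can be selected per socket with the TCP_CONGESTION
 * socket option or system-wide via net.ipv4.tcp_congestion_control.
 */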
1345 
1346 void tcp_assign_congestion_control(struct sock *sk);
1347 void tcp_init_congestion_control(struct sock *sk);
1348 void tcp_cleanup_congestion_control(struct sock *sk);
1349 int tcp_set_default_congestion_control(struct net *net, const char *name);
1350 void tcp_get_default_congestion_control(struct net *net, char *name);
1351 void tcp_get_available_congestion_control(char *buf, size_t len);
1352 void tcp_get_allowed_congestion_control(char *buf, size_t len);
1353 int tcp_set_allowed_congestion_control(char *allowed);
1354 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1355 			       bool cap_net_admin);
1356 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1357 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1358 
1359 u32 tcp_reno_ssthresh(struct sock *sk);
1360 u32 tcp_reno_undo_cwnd(struct sock *sk);
1361 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1362 extern struct tcp_congestion_ops tcp_reno;
1363 
1364 struct tcp_congestion_ops *tcp_ca_find(const char *name);
1365 struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1366 u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
1367 #ifdef CONFIG_INET
1368 char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1369 #else
1370 static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1371 {
1372 	return NULL;
1373 }
1374 #endif
1375 
1376 static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1377 {
1378 	const struct inet_connection_sock *icsk = inet_csk(sk);
1379 
1380 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1381 }
1382 
1383 static inline bool tcp_ca_needs_accecn(const struct sock *sk)
1384 {
1385 	const struct inet_connection_sock *icsk = inet_csk(sk);
1386 
1387 	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ACCECN;
1388 }
1389 
1390 static inline bool tcp_ca_ect_1_negotiation(const struct sock *sk)
1391 {
1392 	const struct inet_connection_sock *icsk = inet_csk(sk);
1393 
1394 	return icsk->icsk_ca_ops->flags & TCP_CONG_ECT_1_NEGOTIATION;
1395 }
1396 
1397 static inline bool tcp_ca_no_fallback_rfc3168(const struct sock *sk)
1398 {
1399 	const struct inet_connection_sock *icsk = inet_csk(sk);
1400 
1401 	return icsk->icsk_ca_ops->flags & TCP_CONG_NO_FALLBACK_RFC3168;
1402 }
1403 
1404 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1405 {
1406 	const struct inet_connection_sock *icsk = inet_csk(sk);
1407 
1408 	if (icsk->icsk_ca_ops->cwnd_event)
1409 		icsk->icsk_ca_ops->cwnd_event(sk, event);
1410 }
1411 
1412 /* From tcp_cong.c */
1413 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
1414 
1415 
1416 static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
1417 {
1418 	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
1419 }
1420 
1421 /* These functions determine how the current flow behaves with respect to SACK
1422  * handling. SACK is negotiated with the peer, and therefore it can vary
1423  * between different flows.
1424  *
1425  * tcp_is_sack - SACK enabled
1426  * tcp_is_reno - No SACK
1427  */
1428 static inline int tcp_is_sack(const struct tcp_sock *tp)
1429 {
1430 	return likely(tp->rx_opt.sack_ok);
1431 }
1432 
1433 static inline bool tcp_is_reno(const struct tcp_sock *tp)
1434 {
1435 	return !tcp_is_sack(tp);
1436 }
1437 
1438 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1439 {
1440 	return tp->sacked_out + tp->lost_out;
1441 }
1442 
1443 /* This determines how many packets are "in the network" to the best
1444  * of our knowledge.  In many cases it is conservative, but where
1445  * detailed information is available from the receiver (via SACK
1446  * blocks etc.) we can make more aggressive calculations.
1447  *
1448  * Use this for decisions involving congestion control; use just
1449  * tp->packets_out to determine whether the send queue is empty.
1450  *
1451  * Read this equation as:
1452  *
1453  *	"Packets sent once on transmission queue" MINUS
1454  *	"Packets left network, but not honestly ACKed yet" PLUS
1455  *	"Packets fast retransmitted"
1456  */
1457 static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1458 {
1459 	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1460 }
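
/*
 * Editor's note: a worked example, not part of the upstream header.
 * With packets_out = 10, sacked_out = 3, lost_out = 2 and retrans_out = 2:
 *
 *	tcp_left_out()		= 3 + 2      = 5
 *	tcp_packets_in_flight()	= 10 - 5 + 2 = 7
 *
 * i.e. of the 10 segments sent once, 5 are assumed to have left the
 * network (SACKed or presumed lost) and 2 retransmissions are back in
 * flight on top of the remainder.
 */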
1461 
1462 #define TCP_INFINITE_SSTHRESH	0x7fffffff
1463 
1464 static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
1465 {
1466 	return tp->snd_cwnd;
1467 }
1468 
1469 static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
1470 {
1471 	WARN_ON_ONCE((int)val <= 0);
1472 	tp->snd_cwnd = val;
1473 }
1474 
1475 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1476 {
1477 	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
1478 }
1479 
1480 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1481 {
1482 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1483 }
1484 
1485 static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1486 {
1487 	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1488 	       (1 << inet_csk(sk)->icsk_ca_state);
1489 }
1490 
1491 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1492  * The exception is cwnd reduction phase, when cwnd is decreasing towards
1493  * ssthresh.
1494  */
1495 static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1496 {
1497 	const struct tcp_sock *tp = tcp_sk(sk);
1498 
1499 	if (tcp_in_cwnd_reduction(sk))
1500 		return tp->snd_ssthresh;
1501 	else
1502 		return max(tp->snd_ssthresh,
1503 			   ((tcp_snd_cwnd(tp) >> 1) +
1504 			    (tcp_snd_cwnd(tp) >> 2)));
1505 }
1506 
1507 /* Use define here intentionally to get WARN_ON location shown at the caller */
1508 #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1509 
1510 void tcp_enter_cwr(struct sock *sk);
1511 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1512 
1513 /* The maximum number of MSS of available cwnd for which TSO defers
1514  * sending if not using sysctl_tcp_tso_win_divisor.
1515  */
1516 static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1517 {
1518 	return 3;
1519 }
1520 
1521 /* Returns end sequence number of the receiver's advertised window */
1522 static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1523 {
1524 	return tp->snd_una + tp->snd_wnd;
1525 }
1526 
1527 /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1528  * flexible approach. The RFC suggests cwnd should not be raised unless
1529  * it was fully used previously. And that's exactly what we do in
1530  * congestion avoidance mode. But in slow start we allow cwnd to grow
1531  * as long as the application has used half the cwnd.
1532  * Example :
1533  *    cwnd is 10 (IW10), but application sends 9 frames.
1534  *    We allow cwnd to reach 18 when all frames are ACKed.
1535  * This check is safe because it's as aggressive as slow start, which already
1536  * risks 100% overshoot. The advantage is that we discourage the application
1537  * from sending more filler packets or data to artificially blow up the cwnd
1538  * usage, and allow an application-limited process to probe bw more aggressively.
1539  */
1540 static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1541 {
1542 	const struct tcp_sock *tp = tcp_sk(sk);
1543 
1544 	if (tp->is_cwnd_limited)
1545 		return true;
1546 
1547 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1548 	if (tcp_in_slow_start(tp))
1549 		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
1550 
1551 	return false;
1552 }
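/* Illustrative sketch (not part of this header): in slow start the check
 * above keeps reporting "cwnd limited" until cwnd reaches twice what the
 * application actually put in flight, matching the IW10 example above:
 *
 *	max_packets_out == 9, cwnd == 10  ->  10 < 18, still limited
 *	max_packets_out == 9, cwnd == 18  ->  18 < 18 is false, not limited
 */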
1553 
1554 /* BBR congestion control needs pacing.
1555  * Same remark for SO_MAX_PACING_RATE.
1556  * The sch_fq packet scheduler handles pacing efficiently,
1557  * but is not always installed/used.
1558  * Return true if TCP stack should pace packets itself.
1559  */
1560 static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1561 {
1562 	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1563 }
1564 
1565 /* Estimates in how many jiffies the next packet for this flow can be sent.
1566  * Scheduling a retransmit timer too early would be silly.
1567  */
1568 static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1569 {
1570 	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1571 
1572 	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1573 }
1574 
1575 static inline void tcp_reset_xmit_timer(struct sock *sk,
1576 					const int what,
1577 					unsigned long when,
1578 					bool pace_delay)
1579 {
1580 	if (pace_delay)
1581 		when += tcp_pacing_delay(sk);
1582 	inet_csk_reset_xmit_timer(sk, what, when,
1583 				  tcp_rto_max(sk));
1584 }
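/* Illustrative sketch (not part of this header): with pace_delay == true
 * the pacing gap is folded into the timer. If tcp_pacing_delay() reports
 * the next transmit slot is 3 jiffies away and "when" is 5 jiffies, the
 * timer is armed 8 jiffies from now; inet_csk_reset_xmit_timer() still
 * bounds the result by tcp_rto_max(sk).
 */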
1585 
1586 /* Something is really bad: we could not queue an additional packet
1587  * because the qdisc is full, the receiver sent a zero window, or we are paced.
1588  * We do not want to add fuel to the fire, or abort too early,
1589  * so make sure the timer we arm now is at least 200ms in the future,
1590  * regardless of the current icsk_rto value (as it could be ~2ms).
1591  */
1592 static inline unsigned long tcp_probe0_base(const struct sock *sk)
1593 {
1594 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1595 }
1596 
1597 /* Variant of inet_csk_rto_backoff() used for zero window probes */
1598 static inline unsigned long tcp_probe0_when(const struct sock *sk,
1599 					    unsigned long max_when)
1600 {
1601 	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
1602 			   inet_csk(sk)->icsk_backoff);
1603 	u64 when = (u64)tcp_probe0_base(sk) << backoff;
1604 
1605 	return (unsigned long)min_t(u64, when, max_when);
1606 }
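/* Illustrative sketch (not part of this header): the zero-window probe
 * timeout above doubles per backoff step from tcp_probe0_base(), with the
 * shift capped at ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1 and the result
 * clamped to max_when. Assuming a 200ms base:
 *
 *	icsk_backoff == 0  ->  200ms
 *	icsk_backoff == 1  ->  400ms
 *	icsk_backoff == 3  ->  1.6s (or max_when, whichever is smaller)
 */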
1607 
1608 static inline void tcp_check_probe_timer(struct sock *sk)
1609 {
1610 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1611 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1612 				     tcp_probe0_base(sk), true);
1613 }
1614 
1615 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1616 {
1617 	tp->snd_wl1 = seq;
1618 }
1619 
1620 static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1621 {
1622 	tp->snd_wl1 = seq;
1623 }
1624 
1625 /*
1626  * Calculate(/check) TCP checksum
1627  */
1628 static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1629 				   __be32 daddr, __wsum base)
1630 {
1631 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1632 }
1633 
1634 static inline bool tcp_checksum_complete(struct sk_buff *skb)
1635 {
1636 	return !skb_csum_unnecessary(skb) &&
1637 		__skb_checksum_complete(skb);
1638 }
1639 
1640 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1641 		     enum skb_drop_reason *reason);
1642 
1643 static inline int tcp_filter(struct sock *sk, struct sk_buff *skb,
1644 			     enum skb_drop_reason *reason)
1645 {
1646 	const struct tcphdr *th = (const struct tcphdr *)skb->data;
1647 
1648 	return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th), reason);
1649 }
1650 
1651 void tcp_set_state(struct sock *sk, int state);
1652 void tcp_done(struct sock *sk);
1653 int tcp_abort(struct sock *sk, int err);
1654 
1655 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1656 {
1657 	rx_opt->dsack = 0;
1658 	rx_opt->num_sacks = 0;
1659 }
1660 
1661 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1662 
1663 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1664 {
1665 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1666 	struct tcp_sock *tp = tcp_sk(sk);
1667 	s32 delta;
1668 
1669 	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1670 	    tp->packets_out || ca_ops->cong_control)
1671 		return;
1672 	delta = tcp_jiffies32 - tp->lsndtime;
1673 	if (delta > inet_csk(sk)->icsk_rto)
1674 		tcp_cwnd_restart(sk, delta);
1675 }
1676 
1677 /* Determine a window scaling and initial window to offer. */
1678 void tcp_select_initial_window(const struct sock *sk, int __space,
1679 			       __u32 mss, __u32 *rcv_wnd,
1680 			       __u32 *window_clamp, int wscale_ok,
1681 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1682 
1683 static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
1684 {
1685 	s64 scaled_space = (s64)space * scaling_ratio;
1686 
1687 	return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
1688 }
1689 
1690 static inline int tcp_win_from_space(const struct sock *sk, int space)
1691 {
1692 	return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
1693 }
1694 
1695 /* inverse of __tcp_win_from_space() */
1696 static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
1697 {
1698 	u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
1699 
1700 	do_div(val, scaling_ratio);
1701 	return val;
1702 }
1703 
1704 static inline int tcp_space_from_win(const struct sock *sk, int win)
1705 {
1706 	return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
1707 }
1708 
1709 /* Assume a 50% default for skb->len/skb->truesize ratio.
1710  * This may be adjusted later in tcp_measure_rcv_mss().
1711  */
1712 #define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))
1713 
1714 static inline void tcp_scaling_ratio_init(struct sock *sk)
1715 {
1716 	tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
1717 }
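/* Illustrative sketch (not part of this header): with the default ratio
 * (half of the full 1 << TCP_RMEM_TO_WIN_SCALE scale, per the 50% comment
 * above), the two conversions reduce to "window = space / 2" and back:
 *
 *	__tcp_win_from_space(TCP_DEFAULT_SCALING_RATIO, 65536) == 32768
 *	__tcp_space_from_win(TCP_DEFAULT_SCALING_RATIO, 32768) == 65536
 *
 * Once tcp_measure_rcv_mss() refines scaling_ratio from real skbs, the
 * ratio tracks the observed len/truesize of received packets instead.
 */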
1718 
1719 /* Note: caller must be prepared to deal with negative returns */
1720 static inline int tcp_space(const struct sock *sk)
1721 {
1722 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1723 				  READ_ONCE(sk->sk_backlog.len) -
1724 				  atomic_read(&sk->sk_rmem_alloc));
1725 }
1726 
1727 static inline int tcp_full_space(const struct sock *sk)
1728 {
1729 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1730 }
1731 
1732 static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
1733 {
1734 	int unused_mem = sk_unused_reserved_mem(sk);
1735 	struct tcp_sock *tp = tcp_sk(sk);
1736 
1737 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
1738 	if (unused_mem)
1739 		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1740 					 tcp_win_from_space(sk, unused_mem));
1741 }
1742 
1743 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1744 {
1745 	__tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
1746 }
1747 
1748 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1749 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1750 
1751 
1752 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1753  * If 87.5% (7/8) of the space has been consumed, we want to override the
1754  * SO_RCVLOWAT constraint, since we are receiving skbs with a too-small
1755  * len/truesize ratio.
1756  */
1757 static inline bool tcp_rmem_pressure(const struct sock *sk)
1758 {
1759 	int rcvbuf, threshold;
1760 
1761 	if (tcp_under_memory_pressure(sk))
1762 		return true;
1763 
1764 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1765 	threshold = rcvbuf - (rcvbuf >> 3);
1766 
1767 	return atomic_read(&sk->sk_rmem_alloc) > threshold;
1768 }
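/* Illustrative sketch (not part of this header): the 7/8 threshold above
 * in plain numbers, assuming sk_rcvbuf == 65536:
 *
 *	threshold = 65536 - (65536 >> 3) == 57344	(56 KiB)
 *
 * so once more than 56 KiB of receive memory is charged to the socket,
 * SO_RCVLOWAT no longer delays the wakeup.
 */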
1769 
1770 static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1771 {
1772 	const struct tcp_sock *tp = tcp_sk(sk);
1773 	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1774 
1775 	if (avail <= 0)
1776 		return false;
1777 
1778 	return (avail >= target) || tcp_rmem_pressure(sk) ||
1779 	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1780 }
1781 
1782 extern void tcp_openreq_init_rwin(struct request_sock *req,
1783 				  const struct sock *sk_listener,
1784 				  const struct dst_entry *dst);
1785 
1786 void tcp_enter_memory_pressure(struct sock *sk);
1787 void tcp_leave_memory_pressure(struct sock *sk);
1788 
1789 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1790 {
1791 	struct net *net = sock_net((struct sock *)tp);
1792 	int val;
1793 
1794 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
1795 	 * and do_tcp_setsockopt().
1796 	 */
1797 	val = READ_ONCE(tp->keepalive_intvl);
1798 
1799 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1800 }
1801 
1802 static inline int keepalive_time_when(const struct tcp_sock *tp)
1803 {
1804 	struct net *net = sock_net((struct sock *)tp);
1805 	int val;
1806 
1807 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
1808 	val = READ_ONCE(tp->keepalive_time);
1809 
1810 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1811 }
1812 
1813 static inline int keepalive_probes(const struct tcp_sock *tp)
1814 {
1815 	struct net *net = sock_net((struct sock *)tp);
1816 	int val;
1817 
1818 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
1819 	 * and do_tcp_setsockopt().
1820 	 */
1821 	val = READ_ONCE(tp->keepalive_probes);
1822 
1823 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1824 }
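/* Illustrative sketch (not part of this header): the three keepalive
 * helpers above share one pattern - a per-socket value set via
 * setsockopt() wins, otherwise the per-netns sysctl applies:
 *
 *	val = READ_ONCE(tp->keepalive_time);	// 0 if never set
 *	return val ? : sysctl_default;		// GNU "x ?: y" == x ? x : y
 *
 * (sysctl_default stands in here for the corresponding ipv4 sysctl.)
 */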
1825 
1826 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1827 {
1828 	const struct inet_connection_sock *icsk = &tp->inet_conn;
1829 
1830 	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1831 			  tcp_jiffies32 - tp->rcv_tstamp);
1832 }
1833 
1834 static inline int tcp_fin_time(const struct sock *sk)
1835 {
1836 	int fin_timeout = tcp_sk(sk)->linger2 ? :
1837 		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1838 	const int rto = inet_csk(sk)->icsk_rto;
1839 
1840 	if (fin_timeout < (rto << 2) - (rto >> 1))
1841 		fin_timeout = (rto << 2) - (rto >> 1);
1842 
1843 	return fin_timeout;
1844 }
1845 
1846 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1847 				  int paws_win)
1848 {
1849 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1850 		return true;
1851 	if (unlikely(!time_before32(ktime_get_seconds(),
1852 				    rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
1853 		return true;
1854 	/*
1855 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1856 	 * while the following tcp messages carry valid values. Ignore a 0 value,
1857 	 * or else a 'negative' tsval might prevent us from accepting their packets.
1858 	 */
1859 	if (!rx_opt->ts_recent)
1860 		return true;
1861 	return false;
1862 }
1863 
1864 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1865 				   int rst)
1866 {
1867 	if (tcp_paws_check(rx_opt, 0))
1868 		return false;
1869 
1870 	/* RST segments are not recommended to carry a timestamp,
1871 	   and, if they do, it is recommended to ignore PAWS because
1872 	   "their cleanup function should take precedence over timestamps."
1873 	   Certainly, this is a mistake. It is necessary to understand the
1874 	   reasons for this constraint before relaxing it: if the peer reboots,
1875 	   its clock may go out of sync and half-open connections will not be
1876 	   reset. Actually, the problem would not exist if all implementations
1877 	   followed the draft about maintaining clocks across reboots.
1878 	   Linux-2.2 DOES NOT!
1879 
1880 	   However, we can relax time bounds for RST segments to MSL.
1881 	 */
1882 	if (rst && !time_before32(ktime_get_seconds(),
1883 				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1884 		return false;
1885 	return true;
1886 }
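/* Illustrative sketch (not part of this header): the (s32) cast in
 * tcp_paws_check() keeps the comparison correct across timestamp
 * wraparound, e.g.:
 *
 *	ts_recent == 0xfffffff0, rcv_tsval == 0x00000010
 *	(s32)(0xfffffff0 - 0x00000010) == -32  ->  <= paws_win, accepted
 *
 * i.e. a tsval that is newer modulo 2^32 always passes, even across the
 * wrap.
 */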
1887 
1888 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
1889 {
1890 	u32 ace;
1891 
1892 	/* mptcp hooks are only on the slow path */
1893 	if (sk_is_mptcp((struct sock *)tp))
1894 		return;
1895 
1896 	ace = tcp_ecn_mode_accecn(tp) ?
1897 	      ((tp->delivered_ce + TCP_ACCECN_CEP_INIT_OFFSET) &
1898 	       TCP_ACCECN_CEP_ACE_MASK) : 0;
1899 
1900 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
1901 			       (ace << 22) |
1902 			       ntohl(TCP_FLAG_ACK) |
1903 			       snd_wnd);
1904 }
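/* Illustrative sketch (not part of this header): pred_flags mirrors the
 * 4th 32-bit word of the TCP header we expect next (data offset, flags,
 * window), so header prediction can compare it against an incoming
 * segment with a single 32-bit test:
 *
 *	bits 28..31	data offset (tcp_header_len in 32-bit words)
 *	bits 22..24	ACE counter when Accurate ECN is in use, else 0
 *	bit  20		ACK
 *	bits  0..15	window we expect the peer to advertise
 *			(snd_wnd already right-shifted by snd_wscale)
 */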
1905 
1906 static inline void tcp_fast_path_on(struct tcp_sock *tp)
1907 {
1908 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
1909 }
1910 
1911 static inline void tcp_fast_path_check(struct sock *sk)
1912 {
1913 	struct tcp_sock *tp = tcp_sk(sk);
1914 
1915 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
1916 	    tp->rcv_wnd &&
1917 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
1918 	    !tp->urg_data)
1919 		tcp_fast_path_on(tp);
1920 }
1921 
1922 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1923 			  int mib_idx, u32 *last_oow_ack_time);
1924 
1925 static inline void tcp_mib_init(struct net *net)
1926 {
1927 	/* See RFC 2012 */
1928 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1929 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1930 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1931 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1932 }
1933 
1934 /* from STCP */
1935 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1936 {
1937 	tp->retransmit_skb_hint = NULL;
1938 }
1939 
1940 #define tcp_md5_addr tcp_ao_addr
1941 
1942 /* - key database */
1943 struct tcp_md5sig_key {
1944 	struct hlist_node	node;
1945 	u8			keylen;
1946 	u8			family; /* AF_INET or AF_INET6 */
1947 	u8			prefixlen;
1948 	u8			flags;
1949 	union tcp_md5_addr	addr;
1950 	int			l3index; /* set if key added with L3 scope */
1951 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1952 	struct rcu_head		rcu;
1953 };
1954 
1955 /* - sock block */
1956 struct tcp_md5sig_info {
1957 	struct hlist_head	head;
1958 	struct rcu_head		rcu;
1959 };
1960 
1961 /* - pseudo header */
1962 struct tcp4_pseudohdr {
1963 	__be32		saddr;
1964 	__be32		daddr;
1965 	__u8		pad;
1966 	__u8		protocol;
1967 	__be16		len;
1968 };
1969 
1970 struct tcp6_pseudohdr {
1971 	struct in6_addr	saddr;
1972 	struct in6_addr daddr;
1973 	__be32		len;
1974 	__be32		protocol;	/* including padding */
1975 };
1976 
1977 /*
1978  * struct tcp_sigpool - per-CPU pool of ahash_requests
1979  * @scratch: per-CPU temporary area that can be used between
1980  *	     tcp_sigpool_start() and tcp_sigpool_end() to perform a
1981  *	     crypto request
1982  * @req: pre-allocated ahash request
1983  */
1984 struct tcp_sigpool {
1985 	void *scratch;
1986 	struct ahash_request *req;
1987 };
1988 
1989 int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
1990 void tcp_sigpool_get(unsigned int id);
1991 void tcp_sigpool_release(unsigned int id);
1992 int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
1993 			      const struct sk_buff *skb,
1994 			      unsigned int header_len);
1995 
1996 /**
1997  * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
1998  * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
1999  * @c: returned tcp_sigpool for usage (uninitialized on failure)
2000  *
2001  * Returns: 0 on success, error otherwise.
2002  */
2003 int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
2004 /**
2005  * tcp_sigpool_end - enable bh and stop using tcp_sigpool
2006  * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
2007  */
2008 void tcp_sigpool_end(struct tcp_sigpool *c);
2009 size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
2010 /* - functions */
2011 void tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
2012 			 const struct sock *sk, const struct sk_buff *skb);
2013 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
2014 		   int family, u8 prefixlen, int l3index, u8 flags,
2015 		   const u8 *newkey, u8 newkeylen);
2016 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
2017 		     int family, u8 prefixlen, int l3index,
2018 		     struct tcp_md5sig_key *key);
2019 
2020 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
2021 		   int family, u8 prefixlen, int l3index, u8 flags);
2022 void tcp_clear_md5_list(struct sock *sk);
2023 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
2024 					 const struct sock *addr_sk);
2025 
2026 #ifdef CONFIG_TCP_MD5SIG
2027 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
2028 					   const union tcp_md5_addr *addr,
2029 					   int family, bool any_l3index);
2030 static inline struct tcp_md5sig_key *
2031 tcp_md5_do_lookup(const struct sock *sk, int l3index,
2032 		  const union tcp_md5_addr *addr, int family)
2033 {
2034 	if (!static_branch_unlikely(&tcp_md5_needed.key))
2035 		return NULL;
2036 	return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
2037 }
2038 
2039 static inline struct tcp_md5sig_key *
2040 tcp_md5_do_lookup_any_l3index(const struct sock *sk,
2041 			      const union tcp_md5_addr *addr, int family)
2042 {
2043 	if (!static_branch_unlikely(&tcp_md5_needed.key))
2044 		return NULL;
2045 	return __tcp_md5_do_lookup(sk, 0, addr, family, true);
2046 }
2047 
2048 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
2049 void tcp_md5_destruct_sock(struct sock *sk);
2050 #else
2051 static inline struct tcp_md5sig_key *
2052 tcp_md5_do_lookup(const struct sock *sk, int l3index,
2053 		  const union tcp_md5_addr *addr, int family)
2054 {
2055 	return NULL;
2056 }
2057 
2058 static inline struct tcp_md5sig_key *
2059 tcp_md5_do_lookup_any_l3index(const struct sock *sk,
2060 			      const union tcp_md5_addr *addr, int family)
2061 {
2062 	return NULL;
2063 }
2064 
2065 #define tcp_twsk_md5_key(twsk)	NULL
2066 static inline void tcp_md5_destruct_sock(struct sock *sk)
2067 {
2068 }
2069 #endif
2070 
2071 struct md5_ctx;
2072 void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb,
2073 			   unsigned int header_len);
2074 void tcp_md5_hash_key(struct md5_ctx *ctx, const struct tcp_md5sig_key *key);
2075 
2076 /* From tcp_fastopen.c */
2077 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
2078 			    struct tcp_fastopen_cookie *cookie);
2079 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
2080 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
2081 			    u16 try_exp);
2082 struct tcp_fastopen_request {
2083 	/* Fast Open cookie. Size 0 means a cookie request */
2084 	struct tcp_fastopen_cookie	cookie;
2085 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
2086 	size_t				size;
2087 	int				copied;	/* queued in tcp_connect() */
2088 	struct ubuf_info		*uarg;
2089 };
2090 void tcp_free_fastopen_req(struct tcp_sock *tp);
2091 void tcp_fastopen_destroy_cipher(struct sock *sk);
2092 void tcp_fastopen_ctx_destroy(struct net *net);
2093 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
2094 			      void *primary_key, void *backup_key);
2095 int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
2096 			    u64 *key);
2097 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
2098 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
2099 			      struct request_sock *req,
2100 			      struct tcp_fastopen_cookie *foc,
2101 			      const struct dst_entry *dst);
2102 void tcp_fastopen_init_key_once(struct net *net);
2103 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
2104 			     struct tcp_fastopen_cookie *cookie);
2105 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
2106 #define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
2107 #define TCP_FASTOPEN_KEY_MAX 2
2108 #define TCP_FASTOPEN_KEY_BUF_LENGTH \
2109 	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
2110 
2111 /* Fastopen key context */
2112 struct tcp_fastopen_context {
2113 	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
2114 	int		num;
2115 	struct rcu_head	rcu;
2116 };
2117 
2118 void tcp_fastopen_active_disable(struct sock *sk);
2119 bool tcp_fastopen_active_should_disable(struct sock *sk);
2120 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
2121 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
2122 
2123 /* Caller needs to wrap with rcu_read_(un)lock() */
2124 static inline
2125 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
2126 {
2127 	struct tcp_fastopen_context *ctx;
2128 
2129 	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
2130 	if (!ctx)
2131 		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
2132 	return ctx;
2133 }
2134 
2135 static inline
2136 bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
2137 			       const struct tcp_fastopen_cookie *orig)
2138 {
2139 	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
2140 	    orig->len == foc->len &&
2141 	    !memcmp(orig->val, foc->val, foc->len))
2142 		return true;
2143 	return false;
2144 }
2145 
2146 static inline
2147 int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
2148 {
2149 	return ctx->num;
2150 }
2151 
2152 /* Latencies incurred by various limits for a sender. They are
2153  * chronograph-like stats that are mutually exclusive.
2154  */
2155 enum tcp_chrono {
2156 	TCP_CHRONO_UNSPEC,
2157 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
2158 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
2159 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
2160 	__TCP_CHRONO_MAX,
2161 };
2162 
2163 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
2164 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
2165 
2166 /* This helper is needed because skb->tcp_tsorted_anchor uses
2167  * the same memory storage as skb->destructor/_skb_refdst
2168  */
2169 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
2170 {
2171 	skb->destructor = NULL;
2172 	skb->_skb_refdst = 0UL;
2173 }
2174 
2175 #define tcp_skb_tsorted_save(skb) {		\
2176 	unsigned long _save = skb->_skb_refdst;	\
2177 	skb->_skb_refdst = 0UL;
2178 
2179 #define tcp_skb_tsorted_restore(skb)		\
2180 	skb->_skb_refdst = _save;		\
2181 }
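/* Illustrative sketch (not part of this header): the two macros above open
 * and close a brace-delimited scope, so they must be used as a pair, e.g.
 *
 *	tcp_skb_tsorted_save(skb) {
 *		err = transmit_helper(sk, skb);	// hypothetical callee
 *	} tcp_skb_tsorted_restore(skb);
 *
 * The body runs with _skb_refdst cleared (that storage currently holds the
 * tsorted list anchor), and the anchor is restored afterwards.
 */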
2182 
2183 void tcp_write_queue_purge(struct sock *sk);
2184 
2185 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
2186 {
2187 	return skb_rb_first(&sk->tcp_rtx_queue);
2188 }
2189 
2190 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
2191 {
2192 	return skb_rb_last(&sk->tcp_rtx_queue);
2193 }
2194 
2195 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
2196 {
2197 	return skb_peek_tail(&sk->sk_write_queue);
2198 }
2199 
2200 #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
2201 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
2202 
2203 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
2204 {
2205 	return skb_peek(&sk->sk_write_queue);
2206 }
2207 
2208 static inline bool tcp_skb_is_last(const struct sock *sk,
2209 				   const struct sk_buff *skb)
2210 {
2211 	return skb_queue_is_last(&sk->sk_write_queue, skb);
2212 }
2213 
2214 /**
2215  * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
2216  * @sk: socket
2217  *
2218  * Since the write queue can have a temporary empty skb in it,
2219  * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
2220  */
2221 static inline bool tcp_write_queue_empty(const struct sock *sk)
2222 {
2223 	const struct tcp_sock *tp = tcp_sk(sk);
2224 
2225 	return tp->write_seq == tp->snd_nxt;
2226 }
2227 
2228 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
2229 {
2230 	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
2231 }
2232 
2233 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
2234 {
2235 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
2236 }
2237 
2238 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
2239 {
2240 	__skb_queue_tail(&sk->sk_write_queue, skb);
2241 
2242 	/* Queue it, remembering where we must start sending. */
2243 	if (sk->sk_write_queue.next == skb)
2244 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
2245 }
2246 
2247 /* Insert new before skb on the write queue of sk.  */
2248 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
2249 						  struct sk_buff *skb,
2250 						  struct sock *sk)
2251 {
2252 	__skb_queue_before(&sk->sk_write_queue, skb, new);
2253 }
2254 
2255 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
2256 {
2257 	tcp_skb_tsorted_anchor_cleanup(skb);
2258 	__skb_unlink(skb, &sk->sk_write_queue);
2259 }
2260 
2261 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
2262 
2263 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
2264 {
2265 	tcp_skb_tsorted_anchor_cleanup(skb);
2266 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
2267 }
2268 
2269 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
2270 {
2271 	list_del(&skb->tcp_tsorted_anchor);
2272 	tcp_rtx_queue_unlink(skb, sk);
2273 	tcp_wmem_free_skb(sk, skb);
2274 }
2275 
2276 static inline void tcp_write_collapse_fence(struct sock *sk)
2277 {
2278 	struct sk_buff *skb = tcp_write_queue_tail(sk);
2279 
2280 	if (skb)
2281 		TCP_SKB_CB(skb)->eor = 1;
2282 }
2283 
2284 static inline void tcp_push_pending_frames(struct sock *sk)
2285 {
2286 	if (tcp_send_head(sk)) {
2287 		struct tcp_sock *tp = tcp_sk(sk);
2288 
2289 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
2290 	}
2291 }
2292 
2293 /* Start sequence of the skb just after the highest skb with SACKed
2294  * bit, valid only if sacked_out > 0 or when the caller has ensured
2295  * validity by itself.
2296  */
2297 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
2298 {
2299 	if (!tp->sacked_out)
2300 		return tp->snd_una;
2301 
2302 	if (tp->highest_sack == NULL)
2303 		return tp->snd_nxt;
2304 
2305 	return TCP_SKB_CB(tp->highest_sack)->seq;
2306 }
2307 
2308 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
2309 {
2310 	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
2311 }
2312 
2313 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
2314 {
2315 	return tcp_sk(sk)->highest_sack;
2316 }
2317 
2318 static inline void tcp_highest_sack_reset(struct sock *sk)
2319 {
2320 	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
2321 }
2322 
2323 /* Called when old skb is about to be deleted and replaced by new skb */
2324 static inline void tcp_highest_sack_replace(struct sock *sk,
2325 					    struct sk_buff *old,
2326 					    struct sk_buff *new)
2327 {
2328 	if (old == tcp_highest_sack(sk))
2329 		tcp_sk(sk)->highest_sack = new;
2330 }
2331 
2332 /* This helper checks if socket has IP_TRANSPARENT set */
2333 static inline bool inet_sk_transparent(const struct sock *sk)
2334 {
2335 	switch (sk->sk_state) {
2336 	case TCP_TIME_WAIT:
2337 		return inet_twsk(sk)->tw_transparent;
2338 	case TCP_NEW_SYN_RECV:
2339 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
2340 	}
2341 	return inet_test_bit(TRANSPARENT, sk);
2342 }
2343 
2344 /* Determines whether this is a thin stream (which may suffer from
2345  * increased latency). Used to trigger latency-reducing mechanisms.
2346  */
2347 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2348 {
2349 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2350 }
2351 
2352 /* /proc */
2353 enum tcp_seq_states {
2354 	TCP_SEQ_STATE_LISTENING,
2355 	TCP_SEQ_STATE_ESTABLISHED,
2356 };
2357 
2358 void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2359 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2360 void tcp_seq_stop(struct seq_file *seq, void *v);
2361 
2362 struct tcp_seq_afinfo {
2363 	sa_family_t			family;
2364 };
2365 
2366 struct tcp_iter_state {
2367 	struct seq_net_private	p;
2368 	enum tcp_seq_states	state;
2369 	struct sock		*syn_wait_sk;
2370 	int			bucket, offset, sbucket, num;
2371 	loff_t			last_pos;
2372 };
2373 
2374 extern struct request_sock_ops tcp_request_sock_ops;
2375 extern struct request_sock_ops tcp6_request_sock_ops;
2376 
2377 void tcp_v4_destroy_sock(struct sock *sk);
2378 
2379 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2380 				netdev_features_t features);
2381 struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
2382 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
2383 				struct tcphdr *th);
2384 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2385 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2386 #ifdef CONFIG_INET
2387 void tcp_gro_complete(struct sk_buff *skb);
2388 #else
2389 static inline void tcp_gro_complete(struct sk_buff *skb) { }
2390 #endif
2391 
2392 static inline void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
2393 				       __be32 daddr)
2394 {
2395 	struct tcphdr *th = tcp_hdr(skb);
2396 
2397 	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
2398 	skb->csum_start = skb_transport_header(skb) - skb->head;
2399 	skb->csum_offset = offsetof(struct tcphdr, check);
2400 }
2401 
2402 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2403 {
2404 	struct net *net = sock_net((struct sock *)tp);
2405 	u32 val;
2406 
2407 	val = READ_ONCE(tp->notsent_lowat);
2408 
2409 	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2410 }
2411 
2412 bool tcp_stream_memory_free(const struct sock *sk, int wake);
2413 
2414 #ifdef CONFIG_PROC_FS
2415 int tcp4_proc_init(void);
2416 void tcp4_proc_exit(void);
2417 #endif
2418 
2419 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2420 int tcp_conn_request(struct request_sock_ops *rsk_ops,
2421 		     const struct tcp_request_sock_ops *af_ops,
2422 		     struct sock *sk, struct sk_buff *skb);
2423 
2424 /* TCP af-specific functions */
2425 struct tcp_sock_af_ops {
2426 #ifdef CONFIG_TCP_MD5SIG
2427 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
2428 						const struct sock *addr_sk);
2429 	void		(*calc_md5_hash)(char *location,
2430 					 const struct tcp_md5sig_key *md5,
2431 					 const struct sock *sk,
2432 					 const struct sk_buff *skb);
2433 	int		(*md5_parse)(struct sock *sk,
2434 				     int optname,
2435 				     sockptr_t optval,
2436 				     int optlen);
2437 #endif
2438 #ifdef CONFIG_TCP_AO
2439 	int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
2440 	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2441 					struct sock *addr_sk,
2442 					int sndid, int rcvid);
2443 	int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
2444 			      const struct sock *sk,
2445 			      __be32 sisn, __be32 disn, bool send);
2446 	int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
2447 			    const struct sock *sk, const struct sk_buff *skb,
2448 			    const u8 *tkey, int hash_offset, u32 sne);
2449 #endif
2450 };
2451 
2452 struct tcp_request_sock_ops {
2453 	u16 mss_clamp;
2454 #ifdef CONFIG_TCP_MD5SIG
2455 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2456 						 const struct sock *addr_sk);
2457 	void		(*calc_md5_hash) (char *location,
2458 					  const struct tcp_md5sig_key *md5,
2459 					  const struct sock *sk,
2460 					  const struct sk_buff *skb);
2461 #endif
2462 #ifdef CONFIG_TCP_AO
2463 	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2464 					struct request_sock *req,
2465 					int sndid, int rcvid);
2466 	int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
2467 	int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
2468 			      struct request_sock *req, const struct sk_buff *skb,
2469 			      int hash_offset, u32 sne);
2470 #endif
2471 #ifdef CONFIG_SYN_COOKIES
2472 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
2473 				 __u16 *mss);
2474 #endif
2475 	struct dst_entry *(*route_req)(const struct sock *sk,
2476 				       struct sk_buff *skb,
2477 				       struct flowi *fl,
2478 				       struct request_sock *req,
2479 				       u32 tw_isn);
2480 	union tcp_seq_and_ts_off (*init_seq_and_ts_off)(
2481 					const struct net *net,
2482 					const struct sk_buff *skb);
2483 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2484 			   struct flowi *fl, struct request_sock *req,
2485 			   struct tcp_fastopen_cookie *foc,
2486 			   enum tcp_synack_type synack_type,
2487 			   struct sk_buff *syn_skb);
2488 };
2489 
2490 extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2491 #if IS_ENABLED(CONFIG_IPV6)
2492 extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2493 #endif
2494 
2495 #ifdef CONFIG_SYN_COOKIES
2496 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2497 					 const struct sock *sk, struct sk_buff *skb,
2498 					 __u16 *mss)
2499 {
2500 	tcp_synq_overflow(sk);
2501 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2502 	return ops->cookie_init_seq(skb, mss);
2503 }
2504 #else
2505 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2506 					 const struct sock *sk, struct sk_buff *skb,
2507 					 __u16 *mss)
2508 {
2509 	return 0;
2510 }
2511 #endif
2512 
2513 struct tcp_key {
2514 	union {
2515 		struct {
2516 			struct tcp_ao_key *ao_key;
2517 			char *traffic_key;
2518 			u32 sne;
2519 			u8 rcv_next;
2520 		};
2521 		struct tcp_md5sig_key *md5_key;
2522 	};
2523 	enum {
2524 		TCP_KEY_NONE = 0,
2525 		TCP_KEY_MD5,
2526 		TCP_KEY_AO,
2527 	} type;
2528 };
2529 
2530 static inline void tcp_get_current_key(const struct sock *sk,
2531 				       struct tcp_key *out)
2532 {
2533 #if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
2534 	const struct tcp_sock *tp = tcp_sk(sk);
2535 #endif
2536 
2537 #ifdef CONFIG_TCP_AO
2538 	if (static_branch_unlikely(&tcp_ao_needed.key)) {
2539 		struct tcp_ao_info *ao;
2540 
2541 		ao = rcu_dereference_protected(tp->ao_info,
2542 					       lockdep_sock_is_held(sk));
2543 		if (ao) {
2544 			out->ao_key = READ_ONCE(ao->current_key);
2545 			out->type = TCP_KEY_AO;
2546 			return;
2547 		}
2548 	}
2549 #endif
2550 #ifdef CONFIG_TCP_MD5SIG
2551 	if (static_branch_unlikely(&tcp_md5_needed.key) &&
2552 	    rcu_access_pointer(tp->md5sig_info)) {
2553 		out->md5_key = tp->af_specific->md5_lookup(sk, sk);
2554 		if (out->md5_key) {
2555 			out->type = TCP_KEY_MD5;
2556 			return;
2557 		}
2558 	}
2559 #endif
2560 	out->type = TCP_KEY_NONE;
2561 }
2562 
2563 static inline bool tcp_key_is_md5(const struct tcp_key *key)
2564 {
2565 	if (static_branch_tcp_md5())
2566 		return key->type == TCP_KEY_MD5;
2567 	return false;
2568 }
2569 
2570 static inline bool tcp_key_is_ao(const struct tcp_key *key)
2571 {
2572 	if (static_branch_tcp_ao())
2573 		return key->type == TCP_KEY_AO;
2574 	return false;
2575 }
2576 
2577 int tcpv4_offload_init(void);
2578 
2579 void tcp_v4_init(void);
2580 void tcp_init(void);
2581 
2582 /* tcp_recovery.c */
2583 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2584 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2585 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2586 				u32 reo_wnd);
2587 extern bool tcp_rack_mark_lost(struct sock *sk);
2588 extern void tcp_rack_reo_timeout(struct sock *sk);
2589 
2590 /* tcp_plb.c */
2591 
2592 /*
2593  * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
2594  * expects cong_ratio, which represents the fraction of traffic that experienced
2595  * congestion over a single RTT. In order to avoid floating point operations,
2596  * this fraction should be mapped to (1 << TCP_PLB_SCALE) and passed in.
2597  */
2598 #define TCP_PLB_SCALE 8
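/* Illustrative sketch (not part of this header): a flow that saw 50% of
 * its traffic marked as congested over the last RTT would be reported to
 * tcp_plb_update_state() as
 *
 *	cong_ratio = (1 << TCP_PLB_SCALE) / 2;	// 256 / 2 == 128
 *
 * keeping the PLB state machine free of floating point.
 */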
2599 
2600 /* State for PLB (Protective Load Balancing) for a single TCP connection. */
2601 struct tcp_plb_state {
2602 	u8	consec_cong_rounds:5, /* consecutive congested rounds */
2603 		unused:3;
2604 	u32	pause_until; /* jiffies32 when PLB can resume rerouting */
2605 };
2606 
2607 static inline void tcp_plb_init(const struct sock *sk,
2608 				struct tcp_plb_state *plb)
2609 {
2610 	plb->consec_cong_rounds = 0;
2611 	plb->pause_until = 0;
2612 }
2613 void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2614 			  const int cong_ratio);
2615 void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2616 void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2617 
2618 static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
2619 {
2620 	WARN_ONCE(cond,
2621 		  "%scwn:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
2622 		  str,
2623 		  tcp_snd_cwnd(tcp_sk(sk)),
2624 		  tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
2625 		  tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
2626 		  tcp_sk(sk)->tlp_high_seq, sk->sk_state,
2627 		  inet_csk(sk)->icsk_ca_state,
2628 		  tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
2629 		  inet_csk(sk)->icsk_pmtu_cookie);
2630 }
2631 
2632 /* At how many usecs into the future should the RTO fire? */
2633 static inline s64 tcp_rto_delta_us(const struct sock *sk)
2634 {
2635 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2636 	u32 rto = inet_csk(sk)->icsk_rto;
2637 
2638 	if (likely(skb)) {
2639 		u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2640 
2641 		return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2642 	} else {
2643 		tcp_warn_once(sk, 1, "rtx queue empty: ");
2644 		return jiffies_to_usecs(rto);
2645 	}
2646 
2647 }
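/* Illustrative sketch (not part of this header): if the head of the rtx
 * queue was timestamped 40ms ago (per tcp_skb_timestamp_us()) and
 * icsk_rto corresponds to 200ms, the helper returns about 160000 usecs,
 * i.e. the RTO still has ~160ms to run relative to tcp_mstamp.
 */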
2648 
2649 /*
2650  * Save and compile IPv4 options, return a pointer to the result
2651  */
2652 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2653 							 struct sk_buff *skb)
2654 {
2655 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2656 	struct ip_options_rcu *dopt = NULL;
2657 
2658 	if (opt->optlen) {
2659 		int opt_size = sizeof(*dopt) + opt->optlen;
2660 
2661 		dopt = kmalloc(opt_size, GFP_ATOMIC);
2662 		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2663 			kfree(dopt);
2664 			dopt = NULL;
2665 		}
2666 	}
2667 	return dopt;
2668 }
2669 
2670 /* locally generated TCP pure ACKs have skb->truesize == 2
2671  * (check tcp_send_ack() in net/ipv4/tcp_output.c)
2672  * This is much faster than dissecting the packet to find out.
2673  * (Think of GRE encapsulations, IPv4, IPv6, ...)
2674  */
2675 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2676 {
2677 	return skb->truesize == 2;
2678 }
2679 
2680 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2681 {
2682 	skb->truesize = 2;
2683 }
2684 
2685 static inline int tcp_inq(struct sock *sk)
2686 {
2687 	struct tcp_sock *tp = tcp_sk(sk);
2688 	int answ;
2689 
2690 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2691 		answ = 0;
2692 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
2693 		   !tp->urg_data ||
2694 		   before(tp->urg_seq, tp->copied_seq) ||
2695 		   !before(tp->urg_seq, tp->rcv_nxt)) {
2696 
2697 		answ = tp->rcv_nxt - tp->copied_seq;
2698 
2699 		/* Subtract 1, if FIN was received */
2700 		if (answ && sock_flag(sk, SOCK_DONE))
2701 			answ--;
2702 	} else {
2703 		answ = tp->urg_seq - tp->copied_seq;
2704 	}
2705 
2706 	return answ;
2707 }
2708 
2709 int tcp_peek_len(struct socket *sock);
2710 
2711 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2712 {
2713 	u16 segs_in;
2714 
2715 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2716 
2717 	/* We update these fields while other threads might
2718 	 * read them from tcp_get_info()
2719 	 */
2720 	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
2721 	if (skb->len > tcp_hdrlen(skb))
2722 		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
2723 }
2724 
2725 /*
2726  * TCP listen path runs lockless.
2727  * We forced "struct sock" to be const qualified to make sure
2728  * we don't modify one of its fields by mistake.
2729  * Here, we increment sk_drops which is an atomic_t, so we can safely
2730  * make sock writable again.
2731  */
2732 static inline void tcp_listendrop(const struct sock *sk)
2733 {
2734 	sk_drops_inc((struct sock *)sk);
2735 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2736 }
2737 
2738 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2739 
2740 /*
2741  * Interface for adding Upper Level Protocols over TCP
2742  */
2743 
2744 #define TCP_ULP_NAME_MAX	16
2745 #define TCP_ULP_MAX		128
2746 #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2747 
2748 struct tcp_ulp_ops {
2749 	struct list_head	list;
2750 
2751 	/* initialize ulp */
2752 	int (*init)(struct sock *sk);
2753 	/* update ulp */
2754 	void (*update)(struct sock *sk, struct proto *p,
2755 		       void (*write_space)(struct sock *sk));
2756 	/* cleanup ulp */
2757 	void (*release)(struct sock *sk);
2758 	/* diagnostic */
2759 	int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin);
2760 	size_t (*get_info_size)(const struct sock *sk, bool net_admin);
2761 	/* clone ulp */
2762 	void (*clone)(const struct request_sock *req, struct sock *newsk,
2763 		      const gfp_t priority);
2764 
2765 	char		name[TCP_ULP_NAME_MAX];
2766 	struct module	*owner;
2767 };
2768 int tcp_register_ulp(struct tcp_ulp_ops *type);
2769 void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2770 int tcp_set_ulp(struct sock *sk, const char *name);
2771 void tcp_get_available_ulp(char *buf, size_t len);
2772 void tcp_cleanup_ulp(struct sock *sk);
2773 void tcp_update_ulp(struct sock *sk, struct proto *p,
2774 		    void (*write_space)(struct sock *sk));
2775 
2776 #define MODULE_ALIAS_TCP_ULP(name)				\
2777 	MODULE_INFO(alias, name);		\
2778 	MODULE_INFO(alias, "tcp-ulp-" name)
2779 
2780 #ifdef CONFIG_NET_SOCK_MSG
2781 struct sk_msg;
2782 struct sk_psock;
2783 
2784 #ifdef CONFIG_BPF_SYSCALL
2785 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2786 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2787 #ifdef CONFIG_BPF_STREAM_PARSER
2788 struct strparser;
2789 int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
2790 			   sk_read_actor_t recv_actor);
2791 #endif /* CONFIG_BPF_STREAM_PARSER */
2792 #endif /* CONFIG_BPF_SYSCALL */
2793 
2794 #ifdef CONFIG_INET
2795 void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2796 #else
2797 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
2798 {
2799 }
2800 #endif
2801 
2802 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2803 			  struct sk_msg *msg, u32 bytes, int flags);
2804 #endif /* CONFIG_NET_SOCK_MSG */
2805 
2806 #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
2807 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2808 {
2809 }
2810 #endif
2811 
2812 #ifdef CONFIG_CGROUP_BPF
2813 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2814 				      struct sk_buff *skb,
2815 				      unsigned int end_offset)
2816 {
2817 	skops->skb = skb;
2818 	skops->skb_data_end = skb->data + end_offset;
2819 }
2820 #else
2821 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2822 				      struct sk_buff *skb,
2823 				      unsigned int end_offset)
2824 {
2825 }
2826 #endif
2827 
2828 /* Call BPF_SOCK_OPS program that returns an int. If the return value
2829  * is < 0, then the BPF op failed (for example if the loaded BPF
2830  * program does not support the chosen operation or there is no BPF
2831  * program loaded).
2832  */
2833 #ifdef CONFIG_BPF
2834 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2835 {
2836 	struct bpf_sock_ops_kern sock_ops;
2837 	int ret;
2838 
2839 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2840 	if (sk_fullsock(sk)) {
2841 		sock_ops.is_fullsock = 1;
2842 		sock_ops.is_locked_tcp_sock = 1;
2843 		sock_owned_by_me(sk);
2844 	}
2845 
2846 	sock_ops.sk = sk;
2847 	sock_ops.op = op;
2848 	if (nargs > 0)
2849 		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2850 
2851 	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2852 	if (ret == 0)
2853 		ret = sock_ops.reply;
2854 	else
2855 		ret = -1;
2856 	return ret;
2857 }
2858 
2859 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2860 {
2861 	u32 args[2] = {arg1, arg2};
2862 
2863 	return tcp_call_bpf(sk, op, 2, args);
2864 }
2865 
2866 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2867 				    u32 arg3)
2868 {
2869 	u32 args[3] = {arg1, arg2, arg3};
2870 
2871 	return tcp_call_bpf(sk, op, 3, args);
2872 }
2873 
2874 #else
2875 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2876 {
2877 	return -EPERM;
2878 }
2879 
2880 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2881 {
2882 	return -EPERM;
2883 }
2884 
2885 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2886 				    u32 arg3)
2887 {
2888 	return -EPERM;
2889 }
2890 
2891 #endif
2892 
2893 static inline u32 tcp_timeout_init(struct sock *sk)
2894 {
2895 	int timeout;
2896 
2897 	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2898 
2899 	if (timeout <= 0)
2900 		timeout = TCP_TIMEOUT_INIT;
2901 	return min_t(int, timeout, TCP_RTO_MAX);
2902 }
2903 
2904 static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2905 {
2906 	int rwnd;
2907 
2908 	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2909 
2910 	if (rwnd < 0)
2911 		rwnd = 0;
2912 	return rwnd;
2913 }
2914 
2915 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2916 {
2917 	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2918 }
2919 
2920 static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
2921 {
2922 	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2923 		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
2924 }
2925 
2926 #if IS_ENABLED(CONFIG_SMC)
2927 extern struct static_key_false tcp_have_smc;
2928 #endif
2929 
2930 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2931 void clean_acked_data_enable(struct tcp_sock *tp,
2932 			     void (*cad)(struct sock *sk, u32 ack_seq));
2933 void clean_acked_data_disable(struct tcp_sock *tp);
2934 void clean_acked_data_flush(void);
2935 #endif
2936 
2937 DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2938 static inline void tcp_add_tx_delay(struct sk_buff *skb,
2939 				    const struct tcp_sock *tp)
2940 {
2941 	if (static_branch_unlikely(&tcp_tx_delay_enabled))
2942 		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2943 }
2944 
2945 /* Compute Earliest Departure Time for some control packets
2946  * like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets.
2947  */
2948 static inline u64 tcp_transmit_time(const struct sock *sk)
2949 {
2950 	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2951 		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2952 			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2953 
2954 		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2955 	}
2956 	return 0;
2957 }
2958 
2959 static inline int tcp_parse_auth_options(const struct tcphdr *th,
2960 		const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
2961 {
2962 	const u8 *md5_tmp, *ao_tmp;
2963 	int ret;
2964 
2965 	ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
2966 	if (ret)
2967 		return ret;
2968 
2969 	if (md5_hash)
2970 		*md5_hash = md5_tmp;
2971 
2972 	if (aoh) {
2973 		if (!ao_tmp)
2974 			*aoh = NULL;
2975 		else
2976 			*aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
2977 	}
2978 
2979 	return 0;
2980 }
2981 
2982 static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
2983 				   int family, int l3index, bool stat_inc)
2984 {
2985 #ifdef CONFIG_TCP_AO
2986 	struct tcp_ao_info *ao_info;
2987 	struct tcp_ao_key *ao_key;
2988 
2989 	if (!static_branch_unlikely(&tcp_ao_needed.key))
2990 		return false;
2991 
2992 	ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
2993 					lockdep_sock_is_held(sk));
2994 	if (!ao_info)
2995 		return false;
2996 
2997 	ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
2998 	if (ao_info->ao_required || ao_key) {
2999 		if (stat_inc) {
3000 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
3001 			atomic64_inc(&ao_info->counters.ao_required);
3002 		}
3003 		return true;
3004 	}
3005 #endif
3006 	return false;
3007 }
3008 
3009 enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
3010 		const struct request_sock *req, const struct sk_buff *skb,
3011 		const void *saddr, const void *daddr,
3012 		int family, int dif, int sdif);
3013 
3014 #endif	/* _TCP_H */
3015