/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/bits.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/tcp_ao.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
#include <net/xfrm.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);

static inline void tcp_orphan_count_inc(void)
{
	this_cpu_inc(tcp_orphan_count);
}

static inline void tcp_orphan_count_dec(void)
{
	this_cpu_dec(tcp_orphan_count);
}

DECLARE_PER_CPU(u32, tcp_tw_isn);

void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* Probing interval, defaults to 10 minutes as per RFC 4821 */
#define TCP_PROBE_INTERVAL	600

/* The MTU search range interval (in bytes) below which probing stops */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this many duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal window scale value according to RFC 1323 */
#define TCP_MAX_WSCALE		14U

/* Default sending frequency of accurate ECN option per RTT */
#define TCP_ACCECN_OPTION_BEACON	3

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs.  Nevertheless
				 * this value corresponds to
				 * 63secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
                                 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);

#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX_SEC 120
#define TCP_RTO_MAX	((unsigned)(TCP_RTO_MAX_SEC * HZ))
#define TCP_RTO_MIN	((unsigned)(HZ / 5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */

#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */

#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
					                 * for local resources.
					                 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
 * to avoid overflows. This assumes a clock frequency of at most 1 MHz.
 * The default clock is 1 kHz; tcp_usec_ts uses a 1 MHz clock.
 */
#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)
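
/* Worked arithmetic: with tcp_usec_ts (a 1 MHz clock) the signed 32-bit
 * timestamp delta overflows after INT_MAX / USEC_PER_SEC =
 * 2147483647 / 1000000 ~= 2147 seconds (~36 minutes), hence the relaxed
 * PAWS checks past that age.
 */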
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * to (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to that
					 * provided by the timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * the minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_AO		29	/* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_ACCECN0		172	/* 0xAC: Accurate ECN Order 0 */
#define TCPOPT_ACCECN1		174	/* 0xAE: Accurate ECN Order 1 */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be placed after the option value when sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 *     TCP option lengths
 */

#define TCPOLEN_MSS            4
#define TCPOLEN_WINDOW         3
#define TCPOLEN_SACK_PERM      2
#define TCPOLEN_TIMESTAMP      10
#define TCPOLEN_MD5SIG         18
#define TCPOLEN_FASTOPEN_BASE  2
#define TCPOLEN_ACCECN_BASE    2
#define TCPOLEN_EXP_FASTOPEN_BASE  4
#define TCPOLEN_EXP_SMC_BASE   6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
#define TCPOLEN_ACCECN_PERFIELD		3

/* Maximum number of byte counters in the AccECN option, and its max size */
#define TCP_ACCECN_NUMFIELDS		3
#define TCP_ACCECN_MAXSIZE		(TCPOLEN_ACCECN_BASE + \
					 TCPOLEN_ACCECN_PERFIELD * \
					 TCP_ACCECN_NUMFIELDS)
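
/* Worked size: a full AccECN option is TCPOLEN_ACCECN_BASE +
 * TCP_ACCECN_NUMFIELDS * TCPOLEN_ACCECN_PERFIELD = 2 + 3 * 3 = 11 bytes
 * when all three 24-bit byte counters are carried.
 */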
#define TCP_ACCECN_SAFETY_SHIFT		1 /* SAFETY_FACTOR in accecn draft */

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400


/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);

extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sk_enabled(sk) &&
	    mem_cgroup_sk_under_memory_pressure(sk))
		return true;

	if (sk->sk_bypass_prot_mem)
		return false;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
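
/* Example: sequence arithmetic is modulo 2^32, so these comparisons stay
 * correct across wraparound:
 *
 *	before(0xfffffff0, 0x00000010)			-> true
 *	after(0x00000010, 0xfffffff0)			-> true
 *	between(0x00000005, 0xfffffff0, 0x00000010)	-> true
 */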

static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, -skb->truesize);
	if (!skb_zcopy_pure(skb))
		sk_mem_uncharge(sk, skb->truesize);
	else
		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
	__kfree_skb(skb);
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(const struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

void tcp_tsq_work_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg);
void tcp_splice_eof(struct socket *sock);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
void tcp_twsk_purge(struct list_head *net_exit_list);
int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
			 unsigned int offset, size_t len);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule);

static inline void tcp_dec_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		/* How many ACKs S/ACKing new data have we sent? */
		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;

		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_MODE_RFC3168	BIT(0)
#define	TCP_ECN_QUEUE_CWR	BIT(1)
#define	TCP_ECN_DEMAND_CWR	BIT(2)
#define	TCP_ECN_SEEN		BIT(3)
#define	TCP_ECN_MODE_ACCECN	BIT(4)

#define	TCP_ECN_DISABLED	0
#define	TCP_ECN_MODE_PENDING	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
#define	TCP_ECN_MODE_ANY	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)

static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
{
	return tp->ecn_flags & TCP_ECN_MODE_ANY;
}

static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168;
}

static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN;
}

static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
{
	return !tcp_ecn_mode_any(tp);
}

static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING;
}

static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
{
	tp->ecn_flags &= ~TCP_ECN_MODE_ANY;
	tp->ecn_flags |= mode;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3,
	TCP_TW_ACK_OOW = 4
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th,
					      u32 *tw_isn,
					      enum skb_drop_reason *drop_reason);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race, enum skb_drop_reason *drop_reason);
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_pacing_rate(struct sock *sk);
void tcp_set_rto(struct sock *sk);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		      struct poll_table_struct *wait);
int do_tcp_getsockopt(struct sock *sk, int level,
		      int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int do_tcp_setsockopt(struct sock *sk, int level, int optname,
		      sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		int flags, int *addr_len);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
void tcp_update_recv_tstamps(struct sk_buff *skb,
			     struct scm_timestamping_internal *tss);
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);

/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    struct sock *sk, struct sk_buff *skb,
					    struct tcp_options_received *tcp_opt,
					    int mss, u32 tsoff);

#if IS_ENABLED(CONFIG_BPF)
struct bpf_tcp_req_attrs {
	u32 rcv_tsval;
	u32 rcv_tsecr;
	u16 mss;
	u8 rcv_wscale;
	u8 snd_wscale;
	u8 ecn_ok;
	u8 wscale_ok;
	u8 sack_ok;
	u8 tstamp_ok;
	u8 usec_ts_ok;
	u8 reserved[3];
};
#endif

#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
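
/* Example: with MAX_SYNCOOKIE_AGE = 2 and a 60 second period, a cookie
 * minted right after a counter tick is honored for almost 120 seconds,
 * while one minted just before a tick is honored for barely over 60.
 */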

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but the race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

/* Convert a 64-bit nsec timestamp to a TS value (ms or usec resolution) */
static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
{
	if (usec_ts)
		return div_u64(val, NSEC_PER_USEC);

	return div_u64(val, NSEC_PER_MSEC);
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);

static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
{
	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
		dst_feature(dst, RTAX_FEATURE_ECN);
}

#if IS_ENABLED(CONFIG_BPF)
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return skb->sk;
}

struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
#else
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return false;
}

static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
						    struct sk_buff *skb)
{
	return NULL;
}
#endif

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority,
			   enum sk_rst_reason reason);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void tcp_check_space(struct sock *sk);
void tcp_sack_compress_send_ack(struct sock *sk);

static inline void tcp_cleanup_skb(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
}

static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size with half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
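
/* Example: if the peer ever advertised max_window = 65535 (greater than
 * TCP_MSS_DEFAULT), cutoff = 32767 and a 40000 byte TSO packet is
 * bounded to 32767 bytes; with a tiny max_window of 512, cutoff = 512
 * and only packets exceeding it are split.
 */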

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);
void tcp_rate_check_app_limited(struct sock *sk);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
			sk_read_actor_t recv_actor, bool noack,
			u32 *copied_seq);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline unsigned int tcp_rto_max(const struct sock *sk)
{
	return READ_ONCE(inet_csk(sk)->icsk_rto_max);
}

static inline void tcp_bound_rto(struct sock *sk)
{
	inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline unsigned long tcp_reqsk_timeout(struct request_sock *req)
{
	u64 timeout = (u64)req->timeout << req->num_timeout;

	return (unsigned long)min_t(u64, timeout,
				    tcp_rto_max(req->rsk_listener));
}

u32 tcp_delack_max(const struct sock *sk);

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(const struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min);

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(const struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}
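
/* Example: rcv_wup = 1000 and rcv_wnd = 500 put the advertised right
 * edge at 1500. With rcv_nxt = 1200 the remaining window is 300; if the
 * peer pushed to rcv_nxt = 1600 anyway, win computes to -100 and is
 * clamped to 0.
 */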

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically was the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to a 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

static inline u64 tcp_clock_ms(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
}

/* TCP Timestamp included in TS option (RFC 1323) can either use ms
 * or usec resolution. Each socket carries a flag to select one or the
 * other resolution, as the route attribute could change anytime.
 * Each flow must stick to its initial resolution.
 */
static inline u32 tcp_clock_ts(bool usec_ts)
{
	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}

static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}

static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
	if (tp->tcp_usec_ts)
		return tp->tcp_mstamp;
	return tcp_time_stamp_ms(tp);
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

/* Provide the departure time in usec units */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}

/* Provide skb TSval in usec or ms units */
static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
{
	if (usec_ts)
		return tcp_skb_timestamp_us(skb);

	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
}

static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
{
	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
}

static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
{
	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN	BIT(0)
#define TCPHDR_SYN	BIT(1)
#define TCPHDR_RST	BIT(2)
#define TCPHDR_PSH	BIT(3)
#define TCPHDR_ACK	BIT(4)
#define TCPHDR_URG	BIT(5)
#define TCPHDR_ECE	BIT(6)
#define TCPHDR_CWR	BIT(7)
#define TCPHDR_AE	BIT(8)
#define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
			   TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \
			   TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \
			    TCPHDR_FLAGS_MASK)

#define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
#define TCPHDR_SYNACK_ACCECN (TCPHDR_SYN | TCPHDR_ACK | TCPHDR_CWR)

#define TCP_ACCECN_CEP_ACE_MASK 0x7
#define TCP_ACCECN_ACE_MAX_DELTA 6

/* To avoid/detect middlebox interference, not all counters start at 0.
 * See draft-ietf-tcpm-accurate-ecn for the latest values.
 */
#define TCP_ACCECN_CEP_INIT_OFFSET 5
#define TCP_ACCECN_E1B_INIT_OFFSET 1
#define TCP_ACCECN_E0B_INIT_OFFSET 1
#define TCP_ACCECN_CEB_INIT_OFFSET 0

/* State flags for sacked in struct tcp_skb_cb */
enum tcp_skb_cb_sacked_flags {
	TCPCB_SACKED_ACKED	= (1 << 0),	/* SKB ACK'd by a SACK block	*/
	TCPCB_SACKED_RETRANS	= (1 << 1),	/* SKB retransmitted		*/
	TCPCB_LOST		= (1 << 2),	/* SKB is lost			*/
	TCPCB_TAGBITS		= (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
				   TCPCB_LOST),	/* All tag bits			*/
	TCPCB_REPAIRED		= (1 << 4),	/* SKB repaired (no skb_mstamp_ns)	*/
	TCPCB_EVER_RETRANS	= (1 << 7),	/* Ever retransmitted frame	*/
	TCPCB_RETRANS		= (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
				   TCPCB_REPAIRED),
};

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note :
		 * 	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u16		tcp_flags;	/* TCP header flags (tcp[12-13])*/

	__u8		sacked;		/* State flags for SACK.	*/
	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
#define TSTAMP_ACK_SK	0x1
#define TSTAMP_ACK_BPF	0x2
	__u8		txstamp_ack:2,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:4;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
			/* There is space for up to 24 bytes */
			__u32 is_app_limited:1, /* cwnd not fully used? */
			      delivered_ce:20,
			      unused:11;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;   /* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
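
/* The cast above is only safe while tcp_skb_cb fits inside skb->cb[];
 * tcp_init() enforces this at build time with a check along the lines of:
 *
 *	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
 *		     sizeof_field(struct sk_buff, cb));
 */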

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
void tcp_v6_early_demux(struct sk_buff *skb);

#endif

/* TCP_SKB_CB reference means this cannot be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from) &&
		      skb_pure_zcopy_same(to, from) &&
		      skb_frags_readable(to) == skb_frags_readable(from));
}

static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
					   const struct sk_buff *from)
{
	return likely(mptcp_skb_can_collapse(to, from) &&
		      !skb_cmp_decrypted(to, from));
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED		BIT(0)
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN		BIT(1)
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp; /* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32 snd_interval_us;	/* snd interval for delivered packets */
	u32 rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};

struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* A congestion control (CC) must provide one of either:
	 *
	 * (a) a cong_avoid function, if the CC wants to use the core TCP
	 *     stack's default functionality to implement a "classic"
	 *     (Reno/CUBIC-style) response to packet loss, RFC3168 ECN,
	 *     idle periods, pacing rate computations, etc.
	 *
	 * (b) a cong_control function, if the CC wants custom behavior and
	 *     complete control of all congestion control behaviors.
	 */
	/* (a) "classic" response: calculate new cwnd.
	 */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* (b) "custom" response: call when packets are delivered to update
	 * cwnd and pacing rate, after all the ca_state processing.
	 */
	void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs (optional) */
	u32 (*min_tso_segs)(struct sock *sk);

	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
int tcp_update_congestion_control(struct tcp_congestion_ops *type,
				  struct tcp_congestion_ops *old_type);
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
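
/* A minimal congestion control module built on this interface could look
 * like the sketch below ("tcp_example" is a made-up name, reusing the
 * Reno helpers declared above). ssthresh, undo_cwnd and one of
 * cong_avoid/cong_control are the required hooks:
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *	};
 *
 *	static int __init tcp_example_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *	module_init(tcp_example_init);
 *
 *	static void __exit tcp_example_exit(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_example);
 *	}
 *	module_exit(tcp_example_exit);
 */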

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);


static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* These functions determine how the current flow behaves with respect to
 * SACK handling. SACK is negotiated with the peer, and therefore it can
 * vary between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
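
/* Example: packets_out = 20 with sacked_out = 3, lost_out = 2 and
 * retrans_out = 1 gives left_out = 5, so packets_in_flight =
 * 20 - 5 + 1 = 16.
 */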
1416 
1417 #define TCP_INFINITE_SSTHRESH	0x7fffffff
1418 
1419 static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
1420 {
1421 	return tp->snd_cwnd;
1422 }
1423 
1424 static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
1425 {
1426 	WARN_ON_ONCE((int)val <= 0);
1427 	tp->snd_cwnd = val;
1428 }
1429 
1430 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1431 {
1432 	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
1433 }
1434 
1435 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1436 {
1437 	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1438 }
1439 
1440 static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1441 {
1442 	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1443 	       (1 << inet_csk(sk)->icsk_ca_state);
1444 }
1445 
1446 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1447  * The exception is cwnd reduction phase, when cwnd is decreasing towards
1448  * ssthresh.
1449  */
1450 static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1451 {
1452 	const struct tcp_sock *tp = tcp_sk(sk);
1453 
1454 	if (tcp_in_cwnd_reduction(sk))
1455 		return tp->snd_ssthresh;
1456 	else
1457 		return max(tp->snd_ssthresh,
1458 			   ((tcp_snd_cwnd(tp) >> 1) +
1459 			    (tcp_snd_cwnd(tp) >> 2)));
1460 }
1461 
1462 /* Use define here intentionally to get WARN_ON location shown at the caller */
1463 #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
1464 
1465 void tcp_enter_cwr(struct sock *sk);
1466 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1467 
1468 /* The maximum number of MSS of available cwnd for which TSO defers
1469  * sending if not using sysctl_tcp_tso_win_divisor.
1470  */
1471 static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1472 {
1473 	return 3;
1474 }
1475 
1476 /* Returns end sequence number of the receiver's advertised window */
1477 static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1478 {
1479 	return tp->snd_una + tp->snd_wnd;
1480 }
1481 
1482 /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1483  * flexible approach. The RFC suggests cwnd should not be raised unless
1484  * it was fully used previously. And that's exactly what we do in
1485  * congestion avoidance mode. But in slow start we allow cwnd to grow
1486  * as long as the application has used half the cwnd.
1487  * Example :
1488  *    cwnd is 10 (IW10), but application sends 9 frames.
1489  *    We allow cwnd to reach 18 when all frames are ACKed.
1490  * This check is safe because it's as aggressive as slow start which already
1491  * risks 100% overshoot. The advantage is that we discourage application to
1492  * either send more filler packets or data to artificially blow up the cwnd
1493  * usage, and allow application-limited process to probe bw more aggressively.
1494  */
1495 static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1496 {
1497 	const struct tcp_sock *tp = tcp_sk(sk);
1498 
1499 	if (tp->is_cwnd_limited)
1500 		return true;
1501 
1502 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
1503 	if (tcp_in_slow_start(tp))
1504 		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
1505 
1506 	return false;
1507 }
1508 
1509 /* BBR congestion control needs pacing.
1510  * Same remark for SO_MAX_PACING_RATE.
1511  * sch_fq packet scheduler is efficiently handling pacing,
1512  * but is not always installed/used.
1513  * Return true if TCP stack should pace packets itself.
1514  */
1515 static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1516 {
1517 	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1518 }
1519 
1520 /* Estimates in how many jiffies next packet for this flow can be sent.
1521  * Scheduling a retransmit timer too early would be silly.
1522  */
1523 static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1524 {
1525 	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1526 
1527 	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1528 }
1529 
1530 static inline void tcp_reset_xmit_timer(struct sock *sk,
1531 					const int what,
1532 					unsigned long when,
1533 					bool pace_delay)
1534 {
1535 	if (pace_delay)
1536 		when += tcp_pacing_delay(sk);
1537 	inet_csk_reset_xmit_timer(sk, what, when,
1538 				  tcp_rto_max(sk));
1539 }
1540 
1541 /* Something is really bad: we could not queue an additional packet,
1542  * because the qdisc is full, the receiver sent a zero window, or we are
1543  * paced. We do not want to add fuel to the fire, or abort too early, so
1544  * make sure the timer we arm now fires at least 200ms in the future,
1545  * regardless of the current icsk_rto value (which could be ~2ms).
1546  */
1547 static inline unsigned long tcp_probe0_base(const struct sock *sk)
1548 {
1549 	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1550 }
1551 
1552 /* Variant of inet_csk_rto_backoff() used for zero window probes */
1553 static inline unsigned long tcp_probe0_when(const struct sock *sk,
1554 					    unsigned long max_when)
1555 {
1556 	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
1557 			   inet_csk(sk)->icsk_backoff);
1558 	u64 when = (u64)tcp_probe0_base(sk) << backoff;
1559 
1560 	return (unsigned long)min_t(u64, when, max_when);
1561 }
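
/* Worked example (illustrative): with a tcp_probe0_base() of 200ms and an
 * icsk_backoff of 4, "when" is 200ms << 4 = 3.2s; it is then clamped to
 * @max_when, and the ilog2() bound above keeps the shift from exceeding
 * the TCP_RTO_MAX/TCP_RTO_MIN ratio.
 */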
1562 
1563 static inline void tcp_check_probe_timer(struct sock *sk)
1564 {
1565 	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1566 		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1567 				     tcp_probe0_base(sk), true);
1568 }
1569 
1570 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1571 {
1572 	tp->snd_wl1 = seq;
1573 }
1574 
1575 static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1576 {
1577 	tp->snd_wl1 = seq;
1578 }
1579 
1580 /*
1581  * Calculate(/check) TCP checksum
1582  */
1583 static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1584 				   __be32 daddr, __wsum base)
1585 {
1586 	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1587 }
1588 
1589 static inline bool tcp_checksum_complete(struct sk_buff *skb)
1590 {
1591 	return !skb_csum_unnecessary(skb) &&
1592 		__skb_checksum_complete(skb);
1593 }
1594 
1595 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1596 		     enum skb_drop_reason *reason);
1597 
1598 
1599 int tcp_filter(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason *reason);
1600 void tcp_set_state(struct sock *sk, int state);
1601 void tcp_done(struct sock *sk);
1602 int tcp_abort(struct sock *sk, int err);
1603 
1604 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1605 {
1606 	rx_opt->dsack = 0;
1607 	rx_opt->num_sacks = 0;
1608 }
1609 
1610 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1611 
1612 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1613 {
1614 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1615 	struct tcp_sock *tp = tcp_sk(sk);
1616 	s32 delta;
1617 
1618 	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1619 	    tp->packets_out || ca_ops->cong_control)
1620 		return;
1621 	delta = tcp_jiffies32 - tp->lsndtime;
1622 	if (delta > inet_csk(sk)->icsk_rto)
1623 		tcp_cwnd_restart(sk, delta);
1624 }
1625 
1626 /* Determine a window scaling and initial window to offer. */
1627 void tcp_select_initial_window(const struct sock *sk, int __space,
1628 			       __u32 mss, __u32 *rcv_wnd,
1629 			       __u32 *window_clamp, int wscale_ok,
1630 			       __u8 *rcv_wscale, __u32 init_rcv_wnd);
1631 
1632 static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
1633 {
1634 	s64 scaled_space = (s64)space * scaling_ratio;
1635 
1636 	return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
1637 }
1638 
1639 static inline int tcp_win_from_space(const struct sock *sk, int space)
1640 {
1641 	return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
1642 }
1643 
1644 /* inverse of __tcp_win_from_space() */
1645 static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
1646 {
1647 	u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
1648 
1649 	do_div(val, scaling_ratio);
1650 	return val;
1651 }
1652 
1653 static inline int tcp_space_from_win(const struct sock *sk, int win)
1654 {
1655 	return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
1656 }
1657 
1658 /* Assume a 50% default for skb->len/skb->truesize ratio.
1659  * This may be adjusted later in tcp_measure_rcv_mss().
1660  */
1661 #define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))
1662 
1663 static inline void tcp_scaling_ratio_init(struct sock *sk)
1664 {
1665 	tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
1666 }
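
/* Worked example (illustrative): with the default 50% scaling_ratio,
 * __tcp_win_from_space() maps 128 KB of receive buffer space to a 64 KB
 * window, and __tcp_space_from_win() performs the inverse mapping.
 */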
1667 
1668 /* Note: caller must be prepared to deal with negative returns */
1669 static inline int tcp_space(const struct sock *sk)
1670 {
1671 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1672 				  READ_ONCE(sk->sk_backlog.len) -
1673 				  atomic_read(&sk->sk_rmem_alloc));
1674 }
1675 
1676 static inline int tcp_full_space(const struct sock *sk)
1677 {
1678 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1679 }
1680 
1681 static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
1682 {
1683 	int unused_mem = sk_unused_reserved_mem(sk);
1684 	struct tcp_sock *tp = tcp_sk(sk);
1685 
1686 	tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
1687 	if (unused_mem)
1688 		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1689 					 tcp_win_from_space(sk, unused_mem));
1690 }
1691 
1692 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1693 {
1694 	__tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
1695 }
1696 
1697 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1698 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1699 
1700 
1701 /* We provision sk_rcvbuf at around 200% of sk_rcvlowat.
1702  * If 87.5% (7/8) of the space has been consumed, we want to override the
1703  * SO_RCVLOWAT constraint, since we are receiving skbs with a too-small
1704  * len/truesize ratio.
1705  */
1706 static inline bool tcp_rmem_pressure(const struct sock *sk)
1707 {
1708 	int rcvbuf, threshold;
1709 
1710 	if (tcp_under_memory_pressure(sk))
1711 		return true;
1712 
1713 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1714 	threshold = rcvbuf - (rcvbuf >> 3);
1715 
1716 	return atomic_read(&sk->sk_rmem_alloc) > threshold;
1717 }
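
/* Example (illustrative): with an sk_rcvbuf of 64 KB the threshold is
 * 64 KB - 8 KB = 56 KB, i.e. pressure is reported once 7/8 of the buffer
 * has been consumed.
 */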
1718 
1719 static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1720 {
1721 	const struct tcp_sock *tp = tcp_sk(sk);
1722 	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1723 
1724 	if (avail <= 0)
1725 		return false;
1726 
1727 	return (avail >= target) || tcp_rmem_pressure(sk) ||
1728 	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1729 }
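
/* Usage sketch (illustrative): tcp_poll() relies on this helper to decide
 * whether to report readability, roughly:
 *
 *	if (tcp_epollin_ready(sk, sock_rcvlowat(sk, 0, INT_MAX)))
 *		mask |= EPOLLIN | EPOLLRDNORM;
 */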
1730 
1731 extern void tcp_openreq_init_rwin(struct request_sock *req,
1732 				  const struct sock *sk_listener,
1733 				  const struct dst_entry *dst);
1734 
1735 void tcp_enter_memory_pressure(struct sock *sk);
1736 void tcp_leave_memory_pressure(struct sock *sk);
1737 
1738 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1739 {
1740 	struct net *net = sock_net((struct sock *)tp);
1741 	int val;
1742 
1743 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
1744 	 * and do_tcp_setsockopt().
1745 	 */
1746 	val = READ_ONCE(tp->keepalive_intvl);
1747 
1748 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1749 }
1750 
1751 static inline int keepalive_time_when(const struct tcp_sock *tp)
1752 {
1753 	struct net *net = sock_net((struct sock *)tp);
1754 	int val;
1755 
1756 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
1757 	val = READ_ONCE(tp->keepalive_time);
1758 
1759 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1760 }
1761 
1762 static inline int keepalive_probes(const struct tcp_sock *tp)
1763 {
1764 	struct net *net = sock_net((struct sock *)tp);
1765 	int val;
1766 
1767 	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
1768 	 * and do_tcp_setsockopt().
1769 	 */
1770 	val = READ_ONCE(tp->keepalive_probes);
1771 
1772 	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1773 }
1774 
1775 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1776 {
1777 	const struct inet_connection_sock *icsk = &tp->inet_conn;
1778 
1779 	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1780 			  tcp_jiffies32 - tp->rcv_tstamp);
1781 }
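
/* Usage sketch (illustrative): the keepalive timer combines the helpers
 * above roughly as follows to decide whether a probe is due:
 *
 *	u32 elapsed = keepalive_time_elapsed(tp);
 *
 *	if (elapsed >= keepalive_time_when(tp))
 *		... send a probe, up to keepalive_probes(tp) times,
 *		    spaced keepalive_intvl_when(tp) apart ...
 */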
1782 
1783 static inline int tcp_fin_time(const struct sock *sk)
1784 {
1785 	int fin_timeout = tcp_sk(sk)->linger2 ? :
1786 		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1787 	const int rto = inet_csk(sk)->icsk_rto;
1788 
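	/* Enforce a floor of 3.5 * RTO, i.e. (rto << 2) - (rto >> 1). */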
1789 	if (fin_timeout < (rto << 2) - (rto >> 1))
1790 		fin_timeout = (rto << 2) - (rto >> 1);
1791 
1792 	return fin_timeout;
1793 }
1794 
1795 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1796 				  int paws_win)
1797 {
1798 	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1799 		return true;
1800 	if (unlikely(!time_before32(ktime_get_seconds(),
1801 				    rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
1802 		return true;
1803 	/*
1804 	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, while
1805 	 * subsequent tcp messages carry valid values. Ignore a 0 value here, or
1806 	 * else a 'negative' tsval might forbid us from accepting their packets.
1807 	 */
1808 	if (!rx_opt->ts_recent)
1809 		return true;
1810 	return false;
1811 }
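
/* Example (illustrative): with paws_win == 0, a segment whose rcv_tsval is
 * not below ts_recent passes, since (s32)(ts_recent - rcv_tsval) is then
 * <= 0. The time_before32() test above lets segments through once
 * ts_recent is older than TCP_PAWS_WRAP, i.e. too stale to be trusted.
 */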
1812 
1813 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1814 				   int rst)
1815 {
1816 	if (tcp_paws_check(rx_opt, 0))
1817 		return false;
1818 
1819 	/* RST segments are not recommended to carry a timestamp, and, if
1820 	   they do, it is recommended to ignore PAWS because "their cleanup
1821 	   function should take precedence over timestamps." Certainly, this
1822 	   is a mistake. It is necessary to understand the reasons for this
1823 	   constraint before relaxing it: if the peer reboots, its clock may
1824 	   go out-of-sync and half-open connections will not be reset.
1825 	   Actually, the problem would not exist if all implementations
1826 	   followed the draft about maintaining clocks across reboots.
1827 	   Linux-2.2 DOES NOT!
1828 
1829 	   However, we can relax the time bounds for RST segments to MSL.
1830 	 */
1831 	if (rst && !time_before32(ktime_get_seconds(),
1832 				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1833 		return false;
1834 	return true;
1835 }
1836 
1837 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
1838 {
1839 	u32 ace;
1840 
1841 	/* mptcp hooks are only on the slow path */
1842 	if (sk_is_mptcp((struct sock *)tp))
1843 		return;
1844 
1845 	ace = tcp_ecn_mode_accecn(tp) ?
1846 	      ((tp->delivered_ce + TCP_ACCECN_CEP_INIT_OFFSET) &
1847 	       TCP_ACCECN_CEP_ACE_MASK) : 0;
1848 
1849 	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
1850 			       (ace << 22) |
1851 			       ntohl(TCP_FLAG_ACK) |
1852 			       snd_wnd);
1853 }
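
/* Note: pred_flags mirrors the doff/flags/window word of the expected
 * incoming header (with only ACK set, plus the ACE field when AccECN is
 * in use), so header prediction can match a received segment against it
 * with a single 32-bit compare.
 */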
1854 
1855 static inline void tcp_fast_path_on(struct tcp_sock *tp)
1856 {
1857 	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
1858 }
1859 
1860 static inline void tcp_fast_path_check(struct sock *sk)
1861 {
1862 	struct tcp_sock *tp = tcp_sk(sk);
1863 
1864 	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
1865 	    tp->rcv_wnd &&
1866 	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
1867 	    !tp->urg_data)
1868 		tcp_fast_path_on(tp);
1869 }
1870 
1871 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1872 			  int mib_idx, u32 *last_oow_ack_time);
1873 
1874 static inline void tcp_mib_init(struct net *net)
1875 {
1876 	/* See RFC 2012 */
1877 	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1878 	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1879 	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1880 	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1881 }
1882 
1883 /* from STCP */
1884 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1885 {
1886 	tp->retransmit_skb_hint = NULL;
1887 }
1888 
1889 #define tcp_md5_addr tcp_ao_addr
1890 
1891 /* - key database */
1892 struct tcp_md5sig_key {
1893 	struct hlist_node	node;
1894 	u8			keylen;
1895 	u8			family; /* AF_INET or AF_INET6 */
1896 	u8			prefixlen;
1897 	u8			flags;
1898 	union tcp_md5_addr	addr;
1899 	int			l3index; /* set if key added with L3 scope */
1900 	u8			key[TCP_MD5SIG_MAXKEYLEN];
1901 	struct rcu_head		rcu;
1902 };
1903 
1904 /* - sock block */
1905 struct tcp_md5sig_info {
1906 	struct hlist_head	head;
1907 	struct rcu_head		rcu;
1908 };
1909 
1910 /* - pseudo header */
1911 struct tcp4_pseudohdr {
1912 	__be32		saddr;
1913 	__be32		daddr;
1914 	__u8		pad;
1915 	__u8		protocol;
1916 	__be16		len;
1917 };
1918 
1919 struct tcp6_pseudohdr {
1920 	struct in6_addr	saddr;
1921 	struct in6_addr daddr;
1922 	__be32		len;
1923 	__be32		protocol;	/* including padding */
1924 };
1925 
1926 /*
1927  * struct tcp_sigpool - per-CPU pool of ahash_requests
1928  * @scratch: per-CPU temporary area that can be used between
1929  *	     tcp_sigpool_start() and tcp_sigpool_end() to perform
1930  *	     a crypto request
1931  * @req: pre-allocated ahash request
1932  */
1933 struct tcp_sigpool {
1934 	void *scratch;
1935 	struct ahash_request *req;
1936 };
1937 
1938 int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
1939 void tcp_sigpool_get(unsigned int id);
1940 void tcp_sigpool_release(unsigned int id);
1941 int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
1942 			      const struct sk_buff *skb,
1943 			      unsigned int header_len);
1944 
1945 /**
1946  * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
1947  * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
1948  * @c: returned tcp_sigpool for usage (uninitialized on failure)
1949  *
1950  * Returns: 0 on success, error otherwise.
1951  */
1952 int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
1953 /**
1954  * tcp_sigpool_end - enable bh and stop using tcp_sigpool
1955  * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
1956  */
1957 void tcp_sigpool_end(struct tcp_sigpool *c);
1958 size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
1959 /* - functions */
1960 void tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1961 			 const struct sock *sk, const struct sk_buff *skb);
1962 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1963 		   int family, u8 prefixlen, int l3index, u8 flags,
1964 		   const u8 *newkey, u8 newkeylen);
1965 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
1966 		     int family, u8 prefixlen, int l3index,
1967 		     struct tcp_md5sig_key *key);
1968 
1969 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1970 		   int family, u8 prefixlen, int l3index, u8 flags);
1971 void tcp_clear_md5_list(struct sock *sk);
1972 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1973 					 const struct sock *addr_sk);
1974 
1975 #ifdef CONFIG_TCP_MD5SIG
1976 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
1977 					   const union tcp_md5_addr *addr,
1978 					   int family, bool any_l3index);
1979 static inline struct tcp_md5sig_key *
1980 tcp_md5_do_lookup(const struct sock *sk, int l3index,
1981 		  const union tcp_md5_addr *addr, int family)
1982 {
1983 	if (!static_branch_unlikely(&tcp_md5_needed.key))
1984 		return NULL;
1985 	return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
1986 }
1987 
1988 static inline struct tcp_md5sig_key *
1989 tcp_md5_do_lookup_any_l3index(const struct sock *sk,
1990 			      const union tcp_md5_addr *addr, int family)
1991 {
1992 	if (!static_branch_unlikely(&tcp_md5_needed.key))
1993 		return NULL;
1994 	return __tcp_md5_do_lookup(sk, 0, addr, family, true);
1995 }
1996 
1997 #define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
1998 void tcp_md5_destruct_sock(struct sock *sk);
1999 #else
2000 static inline struct tcp_md5sig_key *
2001 tcp_md5_do_lookup(const struct sock *sk, int l3index,
2002 		  const union tcp_md5_addr *addr, int family)
2003 {
2004 	return NULL;
2005 }
2006 
2007 static inline struct tcp_md5sig_key *
2008 tcp_md5_do_lookup_any_l3index(const struct sock *sk,
2009 			      const union tcp_md5_addr *addr, int family)
2010 {
2011 	return NULL;
2012 }
2013 
2014 #define tcp_twsk_md5_key(twsk)	NULL
2015 static inline void tcp_md5_destruct_sock(struct sock *sk)
2016 {
2017 }
2018 #endif
2019 
2020 struct md5_ctx;
2021 void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb,
2022 			   unsigned int header_len);
2023 void tcp_md5_hash_key(struct md5_ctx *ctx, const struct tcp_md5sig_key *key);
2024 
2025 /* From tcp_fastopen.c */
2026 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
2027 			    struct tcp_fastopen_cookie *cookie);
2028 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
2029 			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
2030 			    u16 try_exp);
2031 struct tcp_fastopen_request {
2032 	/* Fast Open cookie. Size 0 means a cookie request */
2033 	struct tcp_fastopen_cookie	cookie;
2034 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
2035 	size_t				size;
2036 	int				copied;	/* queued in tcp_connect() */
2037 	struct ubuf_info		*uarg;
2038 };
2039 void tcp_free_fastopen_req(struct tcp_sock *tp);
2040 void tcp_fastopen_destroy_cipher(struct sock *sk);
2041 void tcp_fastopen_ctx_destroy(struct net *net);
2042 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
2043 			      void *primary_key, void *backup_key);
2044 int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
2045 			    u64 *key);
2046 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
2047 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
2048 			      struct request_sock *req,
2049 			      struct tcp_fastopen_cookie *foc,
2050 			      const struct dst_entry *dst);
2051 void tcp_fastopen_init_key_once(struct net *net);
2052 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
2053 			     struct tcp_fastopen_cookie *cookie);
2054 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
2055 #define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
2056 #define TCP_FASTOPEN_KEY_MAX 2
2057 #define TCP_FASTOPEN_KEY_BUF_LENGTH \
2058 	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
2059 
2060 /* Fastopen key context */
2061 struct tcp_fastopen_context {
2062 	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
2063 	int		num;
2064 	struct rcu_head	rcu;
2065 };
2066 
2067 void tcp_fastopen_active_disable(struct sock *sk);
2068 bool tcp_fastopen_active_should_disable(struct sock *sk);
2069 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
2070 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
2071 
2072 /* Caller needs to wrap with rcu_read_(un)lock() */
2073 static inline
2074 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
2075 {
2076 	struct tcp_fastopen_context *ctx;
2077 
2078 	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
2079 	if (!ctx)
2080 		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
2081 	return ctx;
2082 }
2083 
2084 static inline
2085 bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
2086 			       const struct tcp_fastopen_cookie *orig)
2087 {
2088 	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
2089 	    orig->len == foc->len &&
2090 	    !memcmp(orig->val, foc->val, foc->len))
2091 		return true;
2092 	return false;
2093 }
2094 
2095 static inline
2096 int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
2097 {
2098 	return ctx->num;
2099 }
2100 
2101 /* Latencies incurred by various limits for a sender. They are
2102  * chronograph-like stats that are mutually exclusive.
2103  */
2104 enum tcp_chrono {
2105 	TCP_CHRONO_UNSPEC,
2106 	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
2107 	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
2108 	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
2109 	__TCP_CHRONO_MAX,
2110 };
2111 
2112 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
2113 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
2114 
2115 /* This helper is needed because skb->tcp_tsorted_anchor shares
2116  * its memory storage with skb->destructor/_skb_refdst.
2117  */
2118 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
2119 {
2120 	skb->destructor = NULL;
2121 	skb->_skb_refdst = 0UL;
2122 }
2123 
2124 #define tcp_skb_tsorted_save(skb) {		\
2125 	unsigned long _save = skb->_skb_refdst;	\
2126 	skb->_skb_refdst = 0UL;
2127 
2128 #define tcp_skb_tsorted_restore(skb)		\
2129 	skb->_skb_refdst = _save;		\
2130 }
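
/* Usage sketch (illustrative): the pair brackets code that would otherwise
 * clobber the anchor through skb->_skb_refdst, e.g. when retransmitting a
 * copy of an skb:
 *
 *	tcp_skb_tsorted_save(skb) {
 *		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
 *		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : -ENOBUFS;
 *	} tcp_skb_tsorted_restore(skb);
 */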
2131 
2132 void tcp_write_queue_purge(struct sock *sk);
2133 
2134 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
2135 {
2136 	return skb_rb_first(&sk->tcp_rtx_queue);
2137 }
2138 
2139 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
2140 {
2141 	return skb_rb_last(&sk->tcp_rtx_queue);
2142 }
2143 
2144 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
2145 {
2146 	return skb_peek_tail(&sk->sk_write_queue);
2147 }
2148 
2149 #define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
2150 	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
2151 
2152 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
2153 {
2154 	return skb_peek(&sk->sk_write_queue);
2155 }
2156 
2157 static inline bool tcp_skb_is_last(const struct sock *sk,
2158 				   const struct sk_buff *skb)
2159 {
2160 	return skb_queue_is_last(&sk->sk_write_queue, skb);
2161 }
2162 
2163 /**
2164  * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
2165  * @sk: socket
2166  *
2167  * Since the write queue can have a temporary empty skb in it,
2168  * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
2169  */
2170 static inline bool tcp_write_queue_empty(const struct sock *sk)
2171 {
2172 	const struct tcp_sock *tp = tcp_sk(sk);
2173 
2174 	return tp->write_seq == tp->snd_nxt;
2175 }
2176 
2177 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
2178 {
2179 	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
2180 }
2181 
2182 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
2183 {
2184 	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
2185 }
2186 
2187 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
2188 {
2189 	__skb_queue_tail(&sk->sk_write_queue, skb);
2190 
2191 	/* Queue it, remembering where we must start sending. */
2192 	if (sk->sk_write_queue.next == skb)
2193 		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
2194 }
2195 
2196 /* Insert new before skb on the write queue of sk.  */
2197 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
2198 						  struct sk_buff *skb,
2199 						  struct sock *sk)
2200 {
2201 	__skb_queue_before(&sk->sk_write_queue, skb, new);
2202 }
2203 
2204 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
2205 {
2206 	tcp_skb_tsorted_anchor_cleanup(skb);
2207 	__skb_unlink(skb, &sk->sk_write_queue);
2208 }
2209 
2210 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
2211 
2212 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
2213 {
2214 	tcp_skb_tsorted_anchor_cleanup(skb);
2215 	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
2216 }
2217 
2218 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
2219 {
2220 	list_del(&skb->tcp_tsorted_anchor);
2221 	tcp_rtx_queue_unlink(skb, sk);
2222 	tcp_wmem_free_skb(sk, skb);
2223 }
2224 
2225 static inline void tcp_write_collapse_fence(struct sock *sk)
2226 {
2227 	struct sk_buff *skb = tcp_write_queue_tail(sk);
2228 
2229 	if (skb)
2230 		TCP_SKB_CB(skb)->eor = 1;
2231 }
2232 
2233 static inline void tcp_push_pending_frames(struct sock *sk)
2234 {
2235 	if (tcp_send_head(sk)) {
2236 		struct tcp_sock *tp = tcp_sk(sk);
2237 
2238 		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
2239 	}
2240 }
2241 
2242 /* Start sequence of the skb just after the highest skb with the SACKed
2243  * bit set; valid only if sacked_out > 0 or if the caller has ensured
2244  * validity itself.
2245  */
2246 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
2247 {
2248 	if (!tp->sacked_out)
2249 		return tp->snd_una;
2250 
2251 	if (tp->highest_sack == NULL)
2252 		return tp->snd_nxt;
2253 
2254 	return TCP_SKB_CB(tp->highest_sack)->seq;
2255 }
2256 
2257 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
2258 {
2259 	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
2260 }
2261 
2262 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
2263 {
2264 	return tcp_sk(sk)->highest_sack;
2265 }
2266 
2267 static inline void tcp_highest_sack_reset(struct sock *sk)
2268 {
2269 	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
2270 }
2271 
2272 /* Called when old skb is about to be deleted and replaced by new skb */
2273 static inline void tcp_highest_sack_replace(struct sock *sk,
2274 					    struct sk_buff *old,
2275 					    struct sk_buff *new)
2276 {
2277 	if (old == tcp_highest_sack(sk))
2278 		tcp_sk(sk)->highest_sack = new;
2279 }
2280 
2281 /* This helper checks if socket has IP_TRANSPARENT set */
2282 static inline bool inet_sk_transparent(const struct sock *sk)
2283 {
2284 	switch (sk->sk_state) {
2285 	case TCP_TIME_WAIT:
2286 		return inet_twsk(sk)->tw_transparent;
2287 	case TCP_NEW_SYN_RECV:
2288 		return inet_rsk(inet_reqsk(sk))->no_srccheck;
2289 	}
2290 	return inet_test_bit(TRANSPARENT, sk);
2291 }
2292 
2293 /* Determines whether this is a thin stream (which may suffer from
2294  * increased latency). Used to trigger latency-reducing mechanisms.
2295  */
2296 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2297 {
2298 	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2299 }
2300 
2301 /* /proc */
2302 enum tcp_seq_states {
2303 	TCP_SEQ_STATE_LISTENING,
2304 	TCP_SEQ_STATE_ESTABLISHED,
2305 };
2306 
2307 void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2308 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2309 void tcp_seq_stop(struct seq_file *seq, void *v);
2310 
2311 struct tcp_seq_afinfo {
2312 	sa_family_t			family;
2313 };
2314 
2315 struct tcp_iter_state {
2316 	struct seq_net_private	p;
2317 	enum tcp_seq_states	state;
2318 	struct sock		*syn_wait_sk;
2319 	int			bucket, offset, sbucket, num;
2320 	loff_t			last_pos;
2321 };
2322 
2323 extern struct request_sock_ops tcp_request_sock_ops;
2324 extern struct request_sock_ops tcp6_request_sock_ops;
2325 
2326 void tcp_v4_destroy_sock(struct sock *sk);
2327 
2328 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2329 				netdev_features_t features);
2330 struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
2331 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
2332 				struct tcphdr *th);
2333 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2334 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2335 #ifdef CONFIG_INET
2336 void tcp_gro_complete(struct sk_buff *skb);
2337 #else
2338 static inline void tcp_gro_complete(struct sk_buff *skb) { }
2339 #endif
2340 
2341 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
2342 
2343 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2344 {
2345 	struct net *net = sock_net((struct sock *)tp);
2346 	u32 val;
2347 
2348 	val = READ_ONCE(tp->notsent_lowat);
2349 
2350 	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2351 }
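
/* Note (illustrative): a stream is deemed writable while the amount of
 * not-yet-sent data, write_seq - snd_nxt, stays below this value; see
 * tcp_stream_memory_free(), declared below.
 */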
2352 
2353 bool tcp_stream_memory_free(const struct sock *sk, int wake);
2354 
2355 #ifdef CONFIG_PROC_FS
2356 int tcp4_proc_init(void);
2357 void tcp4_proc_exit(void);
2358 #endif
2359 
2360 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2361 int tcp_conn_request(struct request_sock_ops *rsk_ops,
2362 		     const struct tcp_request_sock_ops *af_ops,
2363 		     struct sock *sk, struct sk_buff *skb);
2364 
2365 /* TCP af-specific functions */
2366 struct tcp_sock_af_ops {
2367 #ifdef CONFIG_TCP_MD5SIG
2368 	struct tcp_md5sig_key	*(*md5_lookup) (const struct sock *sk,
2369 						const struct sock *addr_sk);
2370 	void		(*calc_md5_hash)(char *location,
2371 					 const struct tcp_md5sig_key *md5,
2372 					 const struct sock *sk,
2373 					 const struct sk_buff *skb);
2374 	int		(*md5_parse)(struct sock *sk,
2375 				     int optname,
2376 				     sockptr_t optval,
2377 				     int optlen);
2378 #endif
2379 #ifdef CONFIG_TCP_AO
2380 	int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
2381 	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2382 					struct sock *addr_sk,
2383 					int sndid, int rcvid);
2384 	int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
2385 			      const struct sock *sk,
2386 			      __be32 sisn, __be32 disn, bool send);
2387 	int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
2388 			    const struct sock *sk, const struct sk_buff *skb,
2389 			    const u8 *tkey, int hash_offset, u32 sne);
2390 #endif
2391 };
2392 
2393 struct tcp_request_sock_ops {
2394 	u16 mss_clamp;
2395 #ifdef CONFIG_TCP_MD5SIG
2396 	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2397 						 const struct sock *addr_sk);
2398 	void		(*calc_md5_hash) (char *location,
2399 					  const struct tcp_md5sig_key *md5,
2400 					  const struct sock *sk,
2401 					  const struct sk_buff *skb);
2402 #endif
2403 #ifdef CONFIG_TCP_AO
2404 	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2405 					struct request_sock *req,
2406 					int sndid, int rcvid);
2407 	int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
2408 	int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
2409 			      struct request_sock *req, const struct sk_buff *skb,
2410 			      int hash_offset, u32 sne);
2411 #endif
2412 #ifdef CONFIG_SYN_COOKIES
2413 	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
2414 				 __u16 *mss);
2415 #endif
2416 	struct dst_entry *(*route_req)(const struct sock *sk,
2417 				       struct sk_buff *skb,
2418 				       struct flowi *fl,
2419 				       struct request_sock *req,
2420 				       u32 tw_isn);
2421 	u32 (*init_seq)(const struct sk_buff *skb);
2422 	u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
2423 	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2424 			   struct flowi *fl, struct request_sock *req,
2425 			   struct tcp_fastopen_cookie *foc,
2426 			   enum tcp_synack_type synack_type,
2427 			   struct sk_buff *syn_skb);
2428 };
2429 
2430 extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2431 #if IS_ENABLED(CONFIG_IPV6)
2432 extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2433 #endif
2434 
2435 #ifdef CONFIG_SYN_COOKIES
2436 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2437 					 const struct sock *sk, struct sk_buff *skb,
2438 					 __u16 *mss)
2439 {
2440 	tcp_synq_overflow(sk);
2441 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2442 	return ops->cookie_init_seq(skb, mss);
2443 }
2444 #else
2445 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2446 					 const struct sock *sk, struct sk_buff *skb,
2447 					 __u16 *mss)
2448 {
2449 	return 0;
2450 }
2451 #endif
2452 
2453 struct tcp_key {
2454 	union {
2455 		struct {
2456 			struct tcp_ao_key *ao_key;
2457 			char *traffic_key;
2458 			u32 sne;
2459 			u8 rcv_next;
2460 		};
2461 		struct tcp_md5sig_key *md5_key;
2462 	};
2463 	enum {
2464 		TCP_KEY_NONE = 0,
2465 		TCP_KEY_MD5,
2466 		TCP_KEY_AO,
2467 	} type;
2468 };
2469 
2470 static inline void tcp_get_current_key(const struct sock *sk,
2471 				       struct tcp_key *out)
2472 {
2473 #if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
2474 	const struct tcp_sock *tp = tcp_sk(sk);
2475 #endif
2476 
2477 #ifdef CONFIG_TCP_AO
2478 	if (static_branch_unlikely(&tcp_ao_needed.key)) {
2479 		struct tcp_ao_info *ao;
2480 
2481 		ao = rcu_dereference_protected(tp->ao_info,
2482 					       lockdep_sock_is_held(sk));
2483 		if (ao) {
2484 			out->ao_key = READ_ONCE(ao->current_key);
2485 			out->type = TCP_KEY_AO;
2486 			return;
2487 		}
2488 	}
2489 #endif
2490 #ifdef CONFIG_TCP_MD5SIG
2491 	if (static_branch_unlikely(&tcp_md5_needed.key) &&
2492 	    rcu_access_pointer(tp->md5sig_info)) {
2493 		out->md5_key = tp->af_specific->md5_lookup(sk, sk);
2494 		if (out->md5_key) {
2495 			out->type = TCP_KEY_MD5;
2496 			return;
2497 		}
2498 	}
2499 #endif
2500 	out->type = TCP_KEY_NONE;
2501 }
2502 
2503 static inline bool tcp_key_is_md5(const struct tcp_key *key)
2504 {
2505 	if (static_branch_tcp_md5())
2506 		return key->type == TCP_KEY_MD5;
2507 	return false;
2508 }
2509 
2510 static inline bool tcp_key_is_ao(const struct tcp_key *key)
2511 {
2512 	if (static_branch_tcp_ao())
2513 		return key->type == TCP_KEY_AO;
2514 	return false;
2515 }
2516 
2517 int tcpv4_offload_init(void);
2518 
2519 void tcp_v4_init(void);
2520 void tcp_init(void);
2521 
2522 /* tcp_recovery.c */
2523 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2524 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2525 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2526 				u32 reo_wnd);
2527 extern bool tcp_rack_mark_lost(struct sock *sk);
2528 extern void tcp_rack_reo_timeout(struct sock *sk);
2529 
2530 /* tcp_plb.c */
2531 
2532 /*
2533  * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
2534  * expects cong_ratio, which represents the fraction of traffic that
2535  * experienced congestion over a single RTT. To avoid floating point
2536  * operations, this fraction is scaled by (1 << TCP_PLB_SCALE) and passed in.
2537  */
2538 #define TCP_PLB_SCALE 8
2539 
2540 /* State for PLB (Protective Load Balancing) for a single TCP connection. */
2541 struct tcp_plb_state {
2542 	u8	consec_cong_rounds:5, /* consecutive congested rounds */
2543 		unused:3;
2544 	u32	pause_until; /* jiffies32 when PLB can resume rerouting */
2545 };
2546 
2547 static inline void tcp_plb_init(const struct sock *sk,
2548 				struct tcp_plb_state *plb)
2549 {
2550 	plb->consec_cong_rounds = 0;
2551 	plb->pause_until = 0;
2552 }
2553 void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2554 			  const int cong_ratio);
2555 void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2556 void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2557 
2558 static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
2559 {
2560 	WARN_ONCE(cond,
2561 		  "%scwnd:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
2562 		  str,
2563 		  tcp_snd_cwnd(tcp_sk(sk)),
2564 		  tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
2565 		  tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
2566 		  tcp_sk(sk)->tlp_high_seq, sk->sk_state,
2567 		  inet_csk(sk)->icsk_ca_state,
2568 		  tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
2569 		  inet_csk(sk)->icsk_pmtu_cookie);
2570 }
2571 
2572 /* At how many usecs into the future should the RTO fire? */
2573 static inline s64 tcp_rto_delta_us(const struct sock *sk)
2574 {
2575 	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2576 	u32 rto = inet_csk(sk)->icsk_rto;
2577 
2578 	if (likely(skb)) {
2579 		u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2580 
2581 		return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2582 	} else {
2583 		tcp_warn_once(sk, 1, "rtx queue empty: ");
2584 		return jiffies_to_usecs(rto);
2585 	}
2586 
2587 }
2588 
2589 /*
2590  * Save and compile IPv4 options and return a pointer to them.
2591  */
2592 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2593 							 struct sk_buff *skb)
2594 {
2595 	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2596 	struct ip_options_rcu *dopt = NULL;
2597 
2598 	if (opt->optlen) {
2599 		int opt_size = sizeof(*dopt) + opt->optlen;
2600 
2601 		dopt = kmalloc(opt_size, GFP_ATOMIC);
2602 		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2603 			kfree(dopt);
2604 			dopt = NULL;
2605 		}
2606 	}
2607 	return dopt;
2608 }
2609 
2610 /* Locally generated TCP pure ACKs have skb->truesize == 2
2611  * (check tcp_send_ack() in net/ipv4/tcp_output.c).
2612  * Testing this is much faster than dissecting the packet to find out
2613  * (think of GRE encapsulations, IPv4, IPv6, ...).
2614  */
2615 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2616 {
2617 	return skb->truesize == 2;
2618 }
2619 
2620 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2621 {
2622 	skb->truesize = 2;
2623 }
2624 
2625 static inline int tcp_inq(struct sock *sk)
2626 {
2627 	struct tcp_sock *tp = tcp_sk(sk);
2628 	int answ;
2629 
2630 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2631 		answ = 0;
2632 	} else if (sock_flag(sk, SOCK_URGINLINE) ||
2633 		   !tp->urg_data ||
2634 		   before(tp->urg_seq, tp->copied_seq) ||
2635 		   !before(tp->urg_seq, tp->rcv_nxt)) {
2636 
2637 		answ = tp->rcv_nxt - tp->copied_seq;
2638 
2639 		/* Subtract 1, if FIN was received */
2640 		if (answ && sock_flag(sk, SOCK_DONE))
2641 			answ--;
2642 	} else {
2643 		answ = tp->urg_seq - tp->copied_seq;
2644 	}
2645 
2646 	return answ;
2647 }
2648 
2649 int tcp_peek_len(struct socket *sock);
2650 
2651 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2652 {
2653 	u16 segs_in;
2654 
2655 	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2656 
2657 	/* We update these fields while other threads might
2658 	 * read them from tcp_get_info()
2659 	 */
2660 	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
2661 	if (skb->len > tcp_hdrlen(skb))
2662 		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
2663 }
2664 
2665 /*
2666  * The TCP listen path runs lockless.
2667  * We forced "struct sock" to be const qualified to make sure
2668  * we don't modify one of its fields by mistake.
2669  * Here, we increment sk_drops, which is an atomic_t, so we can
2670  * safely make the sock writable again.
2671  */
2672 static inline void tcp_listendrop(const struct sock *sk)
2673 {
2674 	sk_drops_inc((struct sock *)sk);
2675 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2676 }
2677 
2678 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2679 
2680 /*
2681  * Interface for adding Upper Level Protocols over TCP
2682  */
2683 
2684 #define TCP_ULP_NAME_MAX	16
2685 #define TCP_ULP_MAX		128
2686 #define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2687 
2688 struct tcp_ulp_ops {
2689 	struct list_head	list;
2690 
2691 	/* initialize ulp */
2692 	int (*init)(struct sock *sk);
2693 	/* update ulp */
2694 	void (*update)(struct sock *sk, struct proto *p,
2695 		       void (*write_space)(struct sock *sk));
2696 	/* cleanup ulp */
2697 	void (*release)(struct sock *sk);
2698 	/* diagnostic */
2699 	int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin);
2700 	size_t (*get_info_size)(const struct sock *sk, bool net_admin);
2701 	/* clone ulp */
2702 	void (*clone)(const struct request_sock *req, struct sock *newsk,
2703 		      const gfp_t priority);
2704 
2705 	char		name[TCP_ULP_NAME_MAX];
2706 	struct module	*owner;
2707 };
2708 int tcp_register_ulp(struct tcp_ulp_ops *type);
2709 void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2710 int tcp_set_ulp(struct sock *sk, const char *name);
2711 void tcp_get_available_ulp(char *buf, size_t len);
2712 void tcp_cleanup_ulp(struct sock *sk);
2713 void tcp_update_ulp(struct sock *sk, struct proto *p,
2714 		    void (*write_space)(struct sock *sk));
2715 
2716 #define MODULE_ALIAS_TCP_ULP(name)				\
2717 	MODULE_INFO(alias, name);		\
2718 	MODULE_INFO(alias, "tcp-ulp-" name)
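
/* Registration sketch (illustrative): a ULP such as kTLS registers itself
 * roughly like this (see net/tls/tls_main.c for the real ops table):
 *
 *	static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
 *		.name	= "tls",
 *		.owner	= THIS_MODULE,
 *		.init	= tls_init,
 *	};
 *
 *	tcp_register_ulp(&tcp_tls_ulp_ops);
 *
 * after which userspace can select it via setsockopt(TCP_ULP, "tls").
 */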
2719 
2720 #ifdef CONFIG_NET_SOCK_MSG
2721 struct sk_msg;
2722 struct sk_psock;
2723 
2724 #ifdef CONFIG_BPF_SYSCALL
2725 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2726 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2727 #ifdef CONFIG_BPF_STREAM_PARSER
2728 struct strparser;
2729 int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
2730 			   sk_read_actor_t recv_actor);
2731 #endif /* CONFIG_BPF_STREAM_PARSER */
2732 #endif /* CONFIG_BPF_SYSCALL */
2733 
2734 #ifdef CONFIG_INET
2735 void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2736 #else
2737 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
2738 {
2739 }
2740 #endif
2741 
2742 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2743 			  struct sk_msg *msg, u32 bytes, int flags);
2744 #endif /* CONFIG_NET_SOCK_MSG */
2745 
2746 #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
2747 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2748 {
2749 }
2750 #endif
2751 
2752 #ifdef CONFIG_CGROUP_BPF
2753 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2754 				      struct sk_buff *skb,
2755 				      unsigned int end_offset)
2756 {
2757 	skops->skb = skb;
2758 	skops->skb_data_end = skb->data + end_offset;
2759 }
2760 #else
2761 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2762 				      struct sk_buff *skb,
2763 				      unsigned int end_offset)
2764 {
2765 }
2766 #endif
2767 
2768 /* Call a BPF_SOCK_OPS program, which returns an int. If the return
2769  * value is < 0, then the BPF op failed (for example, the loaded BPF
2770  * program does not support the chosen operation, or no BPF program is
2771  * loaded).
2772  */
2773 #ifdef CONFIG_BPF
2774 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2775 {
2776 	struct bpf_sock_ops_kern sock_ops;
2777 	int ret;
2778 
2779 	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2780 	if (sk_fullsock(sk)) {
2781 		sock_ops.is_fullsock = 1;
2782 		sock_ops.is_locked_tcp_sock = 1;
2783 		sock_owned_by_me(sk);
2784 	}
2785 
2786 	sock_ops.sk = sk;
2787 	sock_ops.op = op;
2788 	if (nargs > 0)
2789 		memcpy(sock_ops.args, args, nargs * sizeof(*args));
2790 
2791 	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2792 	if (ret == 0)
2793 		ret = sock_ops.reply;
2794 	else
2795 		ret = -1;
2796 	return ret;
2797 }
2798 
2799 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2800 {
2801 	u32 args[2] = {arg1, arg2};
2802 
2803 	return tcp_call_bpf(sk, op, 2, args);
2804 }
2805 
2806 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2807 				    u32 arg3)
2808 {
2809 	u32 args[3] = {arg1, arg2, arg3};
2810 
2811 	return tcp_call_bpf(sk, op, 3, args);
2812 }
2813 
2814 #else
2815 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2816 {
2817 	return -EPERM;
2818 }
2819 
2820 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2821 {
2822 	return -EPERM;
2823 }
2824 
2825 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2826 				    u32 arg3)
2827 {
2828 	return -EPERM;
2829 }
2830 
2831 #endif
2832 
2833 static inline u32 tcp_timeout_init(struct sock *sk)
2834 {
2835 	int timeout;
2836 
2837 	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2838 
2839 	if (timeout <= 0)
2840 		timeout = TCP_TIMEOUT_INIT;
2841 	return min_t(int, timeout, TCP_RTO_MAX);
2842 }
2843 
2844 static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2845 {
2846 	int rwnd;
2847 
2848 	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2849 
2850 	if (rwnd < 0)
2851 		rwnd = 0;
2852 	return rwnd;
2853 }
2854 
2855 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2856 {
2857 	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2858 }
2859 
2860 static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
2861 {
2862 	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2863 		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
2864 }
2865 
2866 #if IS_ENABLED(CONFIG_SMC)
2867 extern struct static_key_false tcp_have_smc;
2868 #endif
2869 
2870 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2871 void clean_acked_data_enable(struct tcp_sock *tp,
2872 			     void (*cad)(struct sock *sk, u32 ack_seq));
2873 void clean_acked_data_disable(struct tcp_sock *tp);
2874 void clean_acked_data_flush(void);
2875 #endif
2876 
2877 DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2878 static inline void tcp_add_tx_delay(struct sk_buff *skb,
2879 				    const struct tcp_sock *tp)
2880 {
2881 	if (static_branch_unlikely(&tcp_tx_delay_enabled))
2882 		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2883 }
2884 
2885 /* Compute the Earliest Departure Time for some control packets,
2886  * like ACKs or RSTs, for TIME_WAIT or non-ESTABLISHED sockets.
2887  */
2888 static inline u64 tcp_transmit_time(const struct sock *sk)
2889 {
2890 	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2891 		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2892 			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2893 
2894 		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2895 	}
2896 	return 0;
2897 }
2898 
2899 static inline int tcp_parse_auth_options(const struct tcphdr *th,
2900 		const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
2901 {
2902 	const u8 *md5_tmp, *ao_tmp;
2903 	int ret;
2904 
2905 	ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
2906 	if (ret)
2907 		return ret;
2908 
2909 	if (md5_hash)
2910 		*md5_hash = md5_tmp;
2911 
2912 	if (aoh) {
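		/* ao_tmp points at the AO option payload; the tcp_ao_hdr
		 * itself begins two bytes earlier, at the kind/length octets.
		 */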
2913 		if (!ao_tmp)
2914 			*aoh = NULL;
2915 		else
2916 			*aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
2917 	}
2918 
2919 	return 0;
2920 }
2921 
2922 static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
2923 				   int family, int l3index, bool stat_inc)
2924 {
2925 #ifdef CONFIG_TCP_AO
2926 	struct tcp_ao_info *ao_info;
2927 	struct tcp_ao_key *ao_key;
2928 
2929 	if (!static_branch_unlikely(&tcp_ao_needed.key))
2930 		return false;
2931 
2932 	ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
2933 					lockdep_sock_is_held(sk));
2934 	if (!ao_info)
2935 		return false;
2936 
2937 	ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
2938 	if (ao_info->ao_required || ao_key) {
2939 		if (stat_inc) {
2940 			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
2941 			atomic64_inc(&ao_info->counters.ao_required);
2942 		}
2943 		return true;
2944 	}
2945 #endif
2946 	return false;
2947 }
2948 
2949 enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
2950 		const struct request_sock *req, const struct sk_buff *skb,
2951 		const void *saddr, const void *daddr,
2952 		int family, int dif, int sdif);
2953 
2954 #endif	/* _TCP_H */
2955