1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * INET An implementation of the TCP/IP protocol suite for the LINUX
4 * operating system. INET is implemented using the BSD Socket
5 * interface as the means of communication with the user level.
6 *
7 * Definitions for the TCP module.
8 *
9 * Version: @(#)tcp.h 1.0.5 05/23/93
10 *
11 * Authors: Ross Biro
12 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 */
14 #ifndef _TCP_H
15 #define _TCP_H
16
17 #define FASTRETRANS_DEBUG 1
18
19 #include <linux/list.h>
20 #include <linux/tcp.h>
21 #include <linux/bug.h>
22 #include <linux/slab.h>
23 #include <linux/cache.h>
24 #include <linux/percpu.h>
25 #include <linux/skbuff.h>
26 #include <linux/kref.h>
27 #include <linux/ktime.h>
28 #include <linux/indirect_call_wrapper.h>
29 #include <linux/bits.h>
30
31 #include <net/inet_connection_sock.h>
32 #include <net/inet_timewait_sock.h>
33 #include <net/inet_hashtables.h>
34 #include <net/checksum.h>
35 #include <net/request_sock.h>
36 #include <net/sock_reuseport.h>
37 #include <net/sock.h>
38 #include <net/snmp.h>
39 #include <net/ip.h>
40 #include <net/tcp_states.h>
41 #include <net/tcp_ao.h>
42 #include <net/inet_ecn.h>
43 #include <net/dst.h>
44 #include <net/mptcp.h>
45 #include <net/xfrm.h>
46 #include <net/secure_seq.h>
47
48 #include <linux/seq_file.h>
49 #include <linux/memcontrol.h>
50 #include <linux/bpf-cgroup.h>
51 #include <linux/siphash.h>
52
53 extern struct inet_hashinfo tcp_hashinfo;
54
55 DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
56 int tcp_orphan_count_sum(void);
57
58 static inline void tcp_orphan_count_inc(void)
59 {
60 this_cpu_inc(tcp_orphan_count);
61 }
62
63 static inline void tcp_orphan_count_dec(void)
64 {
65 this_cpu_dec(tcp_orphan_count);
66 }
67
68 DECLARE_PER_CPU(u32, tcp_tw_isn);
69
70 void tcp_time_wait(struct sock *sk, int state, int timeo);
71
72 #define MAX_TCP_HEADER L1_CACHE_ALIGN(128 + MAX_HEADER)
73 #define MAX_TCP_OPTION_SPACE 40
74 #define TCP_MIN_SND_MSS 48
75 #define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
76
77 /*
78 * Never offer a window over 32767 without using window scaling. Some
79 * poor stacks do signed 16bit maths!
80 */
81 #define MAX_TCP_WINDOW 32767U
82
83 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
84 #define TCP_MIN_MSS 88U
85
86 /* The initial MTU to use for probing */
87 #define TCP_BASE_MSS 1024
88
89 /* probing interval, default to 10 minutes as per RFC4821 */
90 #define TCP_PROBE_INTERVAL 600
91
92 /* Specify interval when tcp mtu probing will stop */
93 #define TCP_PROBE_THRESHOLD 8
94
95 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
96 #define TCP_FASTRETRANS_THRESH 3
97
98 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
99 #define TCP_MAX_QUICKACKS 16U
100
101 /* Maximal number of window scale according to RFC1323 */
102 #define TCP_MAX_WSCALE 14U
103
104 /* Default sending frequency of accurate ECN option per RTT */
105 #define TCP_ACCECN_OPTION_BEACON 3
106
107 /* urg_data states */
108 #define TCP_URG_VALID 0x0100
109 #define TCP_URG_NOTYET 0x0200
110 #define TCP_URG_READ 0x0400
111
112 #define TCP_RETR1 3 /*
113 * This is how many retries it does before it
114 * tries to figure out if the gateway is
115 * down. Minimal RFC value is 3; it corresponds
116 * to ~3sec-8min depending on RTO.
117 */
118
119 #define TCP_RETR2 15 /*
120 * This should take at least
121 * 90 minutes to time out.
122 * RFC1122 says that the limit is 100 sec.
123 * 15 is ~13-30min depending on RTO.
124 */
125
126 #define TCP_SYN_RETRIES 6 /* This is how many retries are done
127 * when active opening a connection.
128 * RFC1122 says the minimum retry MUST
129 * be at least 180secs. Nevertheless
130 * this value is corresponding to
131 * 63secs of retransmission with the
132 * current initial RTO.
133 */
134
135 #define TCP_SYNACK_RETRIES 5 /* This is how many retries are done
136 * when passive opening a connection.
137 * This is corresponding to 31secs of
138 * retransmission with the current
139 * initial RTO.
140 */
141
142 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
143 * state, about 60 seconds */
144 #define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
145 /* BSD style FIN_WAIT2 deadlock breaker.
146 * It used to be 3min, new value is 60sec,
147 * to combine FIN-WAIT-2 timeout with
148 * TIME-WAIT timer.
149 */
150 #define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */
151
152 #define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
153 static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);
154
155 #if HZ >= 100
156 #define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
157 #define TCP_ATO_MIN ((unsigned)(HZ/25))
158 #else
159 #define TCP_DELACK_MIN 4U
160 #define TCP_ATO_MIN 4U
161 #endif
162 #define TCP_RTO_MAX_SEC 120
163 #define TCP_RTO_MAX ((unsigned)(TCP_RTO_MAX_SEC * HZ))
164 #define TCP_RTO_MIN ((unsigned)(HZ / 5))
165 #define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */
166
167 #define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
168
169 #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
170 #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
171 * used as a fallback RTO for the
172 * initial data transmission if no
173 * valid RTT sample has been acquired,
174 * most likely due to retrans in 3WHS.
175 */
176
177 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
178 * for local resources.
179 */
180 #define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
181 #define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
182 #define TCP_KEEPALIVE_INTVL (75*HZ)
183
184 #define MAX_TCP_KEEPIDLE 32767
185 #define MAX_TCP_KEEPINTVL 32767
186 #define MAX_TCP_KEEPCNT 127
187 #define MAX_TCP_SYNCNT 127
188
189 /* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
190 * to avoid overflows. This assumes a clock smaller than 1 MHz.
191 * Default clock is 1 kHz, tcp_usec_ts uses 1 MHz.
192 */
193 #define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)
194
195 #define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
196 * after this time. It should be equal
197 * (or greater than) TCP_TIMEWAIT_LEN
198 * to provide reliability equal to one
199 * provided by timewait state.
200 */
201 #define TCP_PAWS_WINDOW 1 /* Replay window for per-host
202 * timestamps. It must be less than
203 * minimal timewait lifetime.
204 */
205 /*
206 * TCP option
207 */
208
209 #define TCPOPT_NOP 1 /* Padding */
210 #define TCPOPT_EOL 0 /* End of options */
211 #define TCPOPT_MSS 2 /* Segment size negotiating */
212 #define TCPOPT_WINDOW 3 /* Window scaling */
213 #define TCPOPT_SACK_PERM 4 /* SACK Permitted */
214 #define TCPOPT_SACK 5 /* SACK Block */
215 #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
216 #define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
217 #define TCPOPT_AO 29 /* Authentication Option (RFC5925) */
218 #define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
219 #define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
220 #define TCPOPT_ACCECN0 172 /* 0xAC: Accurate ECN Order 0 */
221 #define TCPOPT_ACCECN1 174 /* 0xAE: Accurate ECN Order 1 */
222 #define TCPOPT_EXP 254 /* Experimental */
223 /* Magic number to be after the option value for sharing TCP
224 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
225 */
226 #define TCPOPT_FASTOPEN_MAGIC 0xF989
227 #define TCPOPT_SMC_MAGIC 0xE2D4C3D9
228
229 /*
230 * TCP option lengths
231 */
232
233 #define TCPOLEN_MSS 4
234 #define TCPOLEN_WINDOW 3
235 #define TCPOLEN_SACK_PERM 2
236 #define TCPOLEN_TIMESTAMP 10
237 #define TCPOLEN_MD5SIG 18
238 #define TCPOLEN_FASTOPEN_BASE 2
239 #define TCPOLEN_ACCECN_BASE 2
240 #define TCPOLEN_EXP_FASTOPEN_BASE 4
241 #define TCPOLEN_EXP_SMC_BASE 6
242
243 /* But this is what stacks really send out. */
244 #define TCPOLEN_TSTAMP_ALIGNED 12
245 #define TCPOLEN_WSCALE_ALIGNED 4
246 #define TCPOLEN_SACKPERM_ALIGNED 4
247 #define TCPOLEN_SACK_BASE 2
248 #define TCPOLEN_SACK_BASE_ALIGNED 4
249 #define TCPOLEN_SACK_PERBLOCK 8
250 #define TCPOLEN_MD5SIG_ALIGNED 20
251 #define TCPOLEN_MSS_ALIGNED 4
252 #define TCPOLEN_EXP_SMC_BASE_ALIGNED 8
253 #define TCPOLEN_ACCECN_PERFIELD 3
254
255 /* Maximum number of byte counters in AccECN option + size */
256 #define TCP_ACCECN_NUMFIELDS 3
257 #define TCP_ACCECN_MAXSIZE (TCPOLEN_ACCECN_BASE + \
258 TCPOLEN_ACCECN_PERFIELD * \
259 TCP_ACCECN_NUMFIELDS)
260 #define TCP_ACCECN_SAFETY_SHIFT 1 /* SAFETY_FACTOR in accecn draft */
261
262 /* Flags in tp->nonagle */
263 #define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
264 #define TCP_NAGLE_CORK 2 /* Socket is corked */
265 #define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
266
267 /* TCP thin-stream limits */
268 #define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */
269
270 /* TCP initial congestion window as per rfc6928 */
271 #define TCP_INIT_CWND 10
272
273 /* Bit Flags for sysctl_tcp_fastopen */
274 #define TFO_CLIENT_ENABLE 1
275 #define TFO_SERVER_ENABLE 2
276 #define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
277
278 /* Accept SYN data w/o any cookie option */
279 #define TFO_SERVER_COOKIE_NOT_REQD 0x200
280
281 /* Force enable TFO on all listeners, i.e., not requiring the
282 * TCP_FASTOPEN socket option.
283 */
284 #define TFO_SERVER_WO_SOCKOPT1 0x400
285
286
287 /* sysctl variables for tcp */
288 extern int sysctl_tcp_max_orphans;
289 extern long sysctl_tcp_mem[3];
290
291 #define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
292 #define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */
293 #define TCP_RACK_NO_DUPTHRESH 0x4 /* Do not use DUPACK threshold in RACK */
294
295 DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);
296
297 extern struct percpu_counter tcp_sockets_allocated;
298 extern unsigned long tcp_memory_pressure;
299
300 /* optimized version of sk_under_memory_pressure() for TCP sockets */
301 static inline bool tcp_under_memory_pressure(const struct sock *sk)
302 {
303 if (mem_cgroup_sk_enabled(sk) &&
304 mem_cgroup_sk_under_memory_pressure(sk))
305 return true;
306
307 if (sk->sk_bypass_prot_mem)
308 return false;
309
310 return READ_ONCE(tcp_memory_pressure);
311 }
312 /*
313 * The next routines deal with comparing 32 bit unsigned ints
314 * and worry about wraparound (automatic with unsigned arithmetic).
315 */
316
317 static inline bool before(__u32 seq1, __u32 seq2)
318 {
319 return (__s32)(seq1-seq2) < 0;
320 }
321 #define after(seq2, seq1) before(seq1, seq2)
322
323 /* is s2<=s1<=s3 ? */
324 static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
325 {
326 return seq3 - seq2 >= seq1 - seq2;
327 }
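/* Illustrative sketch (not used by the stack itself): unsigned wraparound
 * keeps these helpers correct across sequence-number rollover, e.g.
 *
 *	before(0xfffffff0U, 0x00000010U)			-> true
 *	after(0x00000010U, 0xfffffff0U)				-> true
 *	between(0x00000000U, 0xfffffff0U, 0x00000010U)		-> true (s2 <= s1 <= s3)
 */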
328
329 static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
330 {
331 sk_wmem_queued_add(sk, -skb->truesize);
332 if (!skb_zcopy_pure(skb))
333 sk_mem_uncharge(sk, skb->truesize);
334 else
335 sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
336 __kfree_skb(skb);
337 }
338
339 void sk_forced_mem_schedule(struct sock *sk, int size);
340
341 bool tcp_check_oom(const struct sock *sk, int shift);
342
343
344 extern struct proto tcp_prot;
345
346 #define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
347 #define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
348 #define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
349 #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
350
351 /*
352 * TCP splice context
353 */
354 struct tcp_splice_state {
355 struct pipe_inode_info *pipe;
356 size_t len;
357 unsigned int flags;
358 };
359
360 void tcp_tsq_work_init(void);
361
362 int tcp_v4_err(struct sk_buff *skb, u32);
363
364 void tcp_shutdown(struct sock *sk, int how);
365
366 int tcp_v4_early_demux(struct sk_buff *skb);
367 int tcp_v4_rcv(struct sk_buff *skb);
368
369 void tcp_remove_empty_skb(struct sock *sk);
370 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
371 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
372 int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
373 size_t size, struct ubuf_info *uarg);
374 void tcp_splice_eof(struct socket *sock);
375 int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
376 int tcp_wmem_schedule(struct sock *sk, int copy);
377 void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
378 int size_goal);
379 void tcp_release_cb(struct sock *sk);
380 void tcp_wfree(struct sk_buff *skb);
381 void tcp_write_timer_handler(struct sock *sk);
382 void tcp_delack_timer_handler(struct sock *sk);
383 int tcp_ioctl(struct sock *sk, int cmd, int *karg);
384 enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
385 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
386 void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
387 void tcp_rcv_space_adjust(struct sock *sk);
388 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
389 void tcp_twsk_destructor(struct sock *sk);
390 void tcp_twsk_purge(struct list_head *net_exit_list);
391 int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
392 unsigned int offset, size_t len);
393 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
394 struct pipe_inode_info *pipe, size_t len,
395 unsigned int flags);
396 struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
397 bool force_schedule);
398
399 static inline void tcp_dec_quickack_mode(struct sock *sk)
400 {
401 struct inet_connection_sock *icsk = inet_csk(sk);
402
403 if (icsk->icsk_ack.quick) {
404 /* How many ACKs S/ACKing new data have we sent? */
405 const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
406
407 if (pkts >= icsk->icsk_ack.quick) {
408 icsk->icsk_ack.quick = 0;
409 /* Leaving quickack mode we deflate ATO. */
410 icsk->icsk_ack.ato = TCP_ATO_MIN;
411 } else
412 icsk->icsk_ack.quick -= pkts;
413 }
414 }
415
416 #define TCP_ECN_MODE_RFC3168 BIT(0)
417 #define TCP_ECN_QUEUE_CWR BIT(1)
418 #define TCP_ECN_DEMAND_CWR BIT(2)
419 #define TCP_ECN_SEEN BIT(3)
420 #define TCP_ECN_MODE_ACCECN BIT(4)
421
422 #define TCP_ECN_DISABLED 0
423 #define TCP_ECN_MODE_PENDING (TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
424 #define TCP_ECN_MODE_ANY (TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
425
426 static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
427 {
428 return tp->ecn_flags & TCP_ECN_MODE_ANY;
429 }
430
431 static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
432 {
433 return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168;
434 }
435
436 static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp)
437 {
438 return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN;
439 }
440
441 static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
442 {
443 return !tcp_ecn_mode_any(tp);
444 }
445
446 static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp)
447 {
448 return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING;
449 }
450
451 static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
452 {
453 tp->ecn_flags &= ~TCP_ECN_MODE_ANY;
454 tp->ecn_flags |= mode;
455 }
456
457 enum tcp_tw_status {
458 TCP_TW_SUCCESS = 0,
459 TCP_TW_RST = 1,
460 TCP_TW_ACK = 2,
461 TCP_TW_SYN = 3,
462 TCP_TW_ACK_OOW = 4
463 };
464
465
466 enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
467 struct sk_buff *skb,
468 const struct tcphdr *th,
469 u32 *tw_isn,
470 enum skb_drop_reason *drop_reason);
471 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
472 struct request_sock *req, bool fastopen,
473 bool *lost_race, enum skb_drop_reason *drop_reason);
474 enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
475 struct sk_buff *skb);
476 void tcp_enter_loss(struct sock *sk);
477 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
478 void tcp_clear_retrans(struct tcp_sock *tp);
479 void tcp_update_pacing_rate(struct sock *sk);
480 void tcp_set_rto(struct sock *sk);
481 void tcp_update_metrics(struct sock *sk);
482 void tcp_init_metrics(struct sock *sk);
483 void tcp_metrics_init(void);
484 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
485 void __tcp_close(struct sock *sk, long timeout);
486 void tcp_close(struct sock *sk, long timeout);
487 void tcp_init_sock(struct sock *sk);
488 void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
489 __poll_t tcp_poll(struct file *file, struct socket *sock,
490 struct poll_table_struct *wait);
491 int do_tcp_getsockopt(struct sock *sk, int level,
492 int optname, sockptr_t optval, sockptr_t optlen);
493 int tcp_getsockopt(struct sock *sk, int level, int optname,
494 char __user *optval, int __user *optlen);
495 bool tcp_bpf_bypass_getsockopt(int level, int optname);
496 int do_tcp_setsockopt(struct sock *sk, int level, int optname,
497 sockptr_t optval, unsigned int optlen);
498 int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
499 unsigned int optlen);
500 void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
501 void tcp_set_keepalive(struct sock *sk, int val);
502 void tcp_syn_ack_timeout(const struct request_sock *req);
503 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
504 int flags, int *addr_len);
505 int tcp_set_rcvlowat(struct sock *sk, int val);
506 int tcp_set_window_clamp(struct sock *sk, int val);
507 void tcp_update_recv_tstamps(struct sk_buff *skb,
508 struct scm_timestamping_internal *tss);
509 void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
510 struct scm_timestamping_internal *tss);
511 void tcp_data_ready(struct sock *sk);
512 #ifdef CONFIG_MMU
513 int tcp_mmap(struct file *file, struct socket *sock,
514 struct vm_area_struct *vma);
515 #endif
516 void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
517 struct tcp_options_received *opt_rx,
518 int estab, struct tcp_fastopen_cookie *foc);
519
520 /*
521 * BPF SKB-less helpers
522 */
523 u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
524 struct tcphdr *th, u32 *cookie);
525 u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
526 struct tcphdr *th, u32 *cookie);
527 u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
528 u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
529 const struct tcp_request_sock_ops *af_ops,
530 struct sock *sk, struct tcphdr *th);
531 /*
532 * TCP v4 functions exported for the inet6 API
533 */
534
535 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
536 void tcp_v4_mtu_reduced(struct sock *sk);
537 void tcp_req_err(struct sock *sk, u32 seq, bool abort);
538 void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
539 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
540 struct sock *tcp_create_openreq_child(const struct sock *sk,
541 struct request_sock *req,
542 struct sk_buff *skb);
543 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
544 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
545 struct request_sock *req,
546 struct dst_entry *dst,
547 struct request_sock *req_unhash,
548 bool *own_req,
549 void (*opt_child_init)(struct sock *newsk,
550 const struct sock *sk));
551 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
552 int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
553 int tcp_connect(struct sock *sk);
554 enum tcp_synack_type {
555 TCP_SYNACK_NORMAL,
556 TCP_SYNACK_FASTOPEN,
557 TCP_SYNACK_COOKIE,
558 TCP_SYNACK_RETRANS,
559 };
560 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
561 struct request_sock *req,
562 struct tcp_fastopen_cookie *foc,
563 enum tcp_synack_type synack_type,
564 struct sk_buff *syn_skb);
565 int tcp_disconnect(struct sock *sk, int flags);
566
567 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
568 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
569 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
570
571 /* From syncookies.c */
572 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
573 struct request_sock *req,
574 struct dst_entry *dst);
575 int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
576 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
577 struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
578 struct sock *sk, struct sk_buff *skb,
579 struct tcp_options_received *tcp_opt,
580 int mss, u32 tsoff);
581
582 #if IS_ENABLED(CONFIG_BPF)
583 struct bpf_tcp_req_attrs {
584 u32 rcv_tsval;
585 u32 rcv_tsecr;
586 u16 mss;
587 u8 rcv_wscale;
588 u8 snd_wscale;
589 u8 ecn_ok;
590 u8 wscale_ok;
591 u8 sack_ok;
592 u8 tstamp_ok;
593 u8 usec_ts_ok;
594 u8 reserved[3];
595 };
596 #endif
597
598 #ifdef CONFIG_SYN_COOKIES
599
600 /* Syncookies use a monotonic timer which increments every 60 seconds.
601 * This counter is used both as a hash input and partially encoded into
602 * the cookie value. A cookie is only validated further if the delta
603 * between the current counter value and the encoded one is less than this,
604 * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
605 * the counter advances immediately after a cookie is generated).
606 */
607 #define MAX_SYNCOOKIE_AGE 2
608 #define TCP_SYNCOOKIE_PERIOD (60 * HZ)
609 #define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)
610
611 /* syncookies: remember time of last synqueue overflow
612 * But do not dirty this field too often (once per second is enough).
613 * It is racy as we do not hold a lock, but race is very minor.
614 */
615 static inline void tcp_synq_overflow(const struct sock *sk)
616 {
617 unsigned int last_overflow;
618 unsigned int now = jiffies;
619
620 if (sk->sk_reuseport) {
621 struct sock_reuseport *reuse;
622
623 reuse = rcu_dereference(sk->sk_reuseport_cb);
624 if (likely(reuse)) {
625 last_overflow = READ_ONCE(reuse->synq_overflow_ts);
626 if (!time_between32(now, last_overflow,
627 last_overflow + HZ))
628 WRITE_ONCE(reuse->synq_overflow_ts, now);
629 return;
630 }
631 }
632
633 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
634 if (!time_between32(now, last_overflow, last_overflow + HZ))
635 WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
636 }
637
638 /* syncookies: no recent synqueue overflow on this listening socket? */
639 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
640 {
641 unsigned int last_overflow;
642 unsigned int now = jiffies;
643
644 if (sk->sk_reuseport) {
645 struct sock_reuseport *reuse;
646
647 reuse = rcu_dereference(sk->sk_reuseport_cb);
648 if (likely(reuse)) {
649 last_overflow = READ_ONCE(reuse->synq_overflow_ts);
650 return !time_between32(now, last_overflow - HZ,
651 last_overflow +
652 TCP_SYNCOOKIE_VALID);
653 }
654 }
655
656 last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
657
658 /* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
659 * then we're under synflood. However, we have to use
660 * 'last_overflow - HZ' as lower bound. That's because a concurrent
661 * tcp_synq_overflow() could update .ts_recent_stamp after we read
662 * jiffies but before we store .ts_recent_stamp into last_overflow,
663 * which could lead to rejecting a valid syncookie.
664 */
665 return !time_between32(now, last_overflow - HZ,
666 last_overflow + TCP_SYNCOOKIE_VALID);
667 }
668
669 static inline u32 tcp_cookie_time(void)
670 {
671 u64 val = get_jiffies_64();
672
673 do_div(val, TCP_SYNCOOKIE_PERIOD);
674 return val;
675 }
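/* Hedged usage sketch (the real validation lives in __cookie_v4_check() and
 * friends; "minted" stands for the counter value recovered from a received
 * cookie and is purely illustrative):
 *
 *	u32 now = tcp_cookie_time();
 *	u32 minted = ...;			// value encoded in the cookie
 *
 *	if (now - minted >= MAX_SYNCOOKIE_AGE)
 *		;				// cookie too old, reject it
 */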
676
677 /* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
678 static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
679 {
680 if (usec_ts)
681 return div_u64(val, NSEC_PER_USEC);
682
683 return div_u64(val, NSEC_PER_MSEC);
684 }
685
686 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
687 u16 *mssp);
688 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
689 u64 cookie_init_timestamp(struct request_sock *req, u64 now);
690 bool cookie_timestamp_decode(const struct net *net,
691 struct tcp_options_received *opt);
692
693 static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
694 {
695 return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
696 dst_feature(dst, RTAX_FEATURE_ECN);
697 }
698
699 #if IS_ENABLED(CONFIG_BPF)
700 static inline bool cookie_bpf_ok(struct sk_buff *skb)
701 {
702 return skb->sk;
703 }
704
705 struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
706 #else
707 static inline bool cookie_bpf_ok(struct sk_buff *skb)
708 {
709 return false;
710 }
711
712 static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
713 struct sk_buff *skb)
714 {
715 return NULL;
716 }
717 #endif
718
719 /* From net/ipv6/syncookies.c */
720 int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
721 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
722
723 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
724 const struct tcphdr *th, u16 *mssp);
725 __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
726 #endif
727 /* tcp_output.c */
728
729 void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
730 void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
731 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
732 int nonagle);
733 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
734 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
735 void tcp_retransmit_timer(struct sock *sk);
736 void tcp_xmit_retransmit_queue(struct sock *);
737 void tcp_simple_retransmit(struct sock *);
738 void tcp_enter_recovery(struct sock *sk, bool ece_ack);
739 int tcp_trim_head(struct sock *, struct sk_buff *, u32);
740 enum tcp_queue {
741 TCP_FRAG_IN_WRITE_QUEUE,
742 TCP_FRAG_IN_RTX_QUEUE,
743 };
744 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
745 struct sk_buff *skb, u32 len,
746 unsigned int mss_now, gfp_t gfp);
747
748 void tcp_send_probe0(struct sock *);
749 int tcp_write_wakeup(struct sock *, int mib);
750 void tcp_send_fin(struct sock *sk);
751 void tcp_send_active_reset(struct sock *sk, gfp_t priority,
752 enum sk_rst_reason reason);
753 int tcp_send_synack(struct sock *);
754 void tcp_push_one(struct sock *, unsigned int mss_now);
755 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
756 void tcp_send_ack(struct sock *sk);
757 void tcp_send_delayed_ack(struct sock *sk);
758 void tcp_send_loss_probe(struct sock *sk);
759 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
760 void tcp_skb_collapse_tstamp(struct sk_buff *skb,
761 const struct sk_buff *next_skb);
762
763 /* tcp_input.c */
764 void tcp_rearm_rto(struct sock *sk);
765 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
766 void tcp_done_with_error(struct sock *sk, int err);
767 void tcp_reset(struct sock *sk, struct sk_buff *skb);
768 void tcp_fin(struct sock *sk);
769 void __tcp_check_space(struct sock *sk);
770 static inline void tcp_check_space(struct sock *sk)
771 {
772 /* pairs with tcp_poll() */
773 smp_mb();
774
775 if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
776 __tcp_check_space(sk);
777 }
778 void tcp_sack_compress_send_ack(struct sock *sk);
779
780 static inline void tcp_cleanup_skb(struct sk_buff *skb)
781 {
782 skb_dst_drop(skb);
783 secpath_reset(skb);
784 }
785
786 static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
787 {
788 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
789 DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
790 __skb_queue_tail(&sk->sk_receive_queue, skb);
791 }
792
793 /* tcp_timer.c */
794 void tcp_init_xmit_timers(struct sock *);
795 static inline void tcp_clear_xmit_timers(struct sock *sk)
796 {
797 if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
798 __sock_put(sk);
799
800 if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
801 __sock_put(sk);
802
803 inet_csk_clear_xmit_timers(sk);
804 }
805
806 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
807 unsigned int tcp_current_mss(struct sock *sk);
808 u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);
809
810 /* Bound MSS / TSO packet size with the half of the window */
811 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
812 {
813 int cutoff;
814
815 /* When peer uses tiny windows, there is no use in packetizing
816 * to sub-MSS pieces for the sake of SWS or making sure there
817 * are enough packets in the pipe for fast recovery.
818 *
819 * On the other hand, for extremely large MSS devices, handling
820 * smaller than MSS windows in this way does make sense.
821 */
822 if (tp->max_window > TCP_MSS_DEFAULT)
823 cutoff = (tp->max_window >> 1);
824 else
825 cutoff = tp->max_window;
826
827 if (cutoff && pktsize > cutoff)
828 return max_t(int, cutoff, 68U - tp->tcp_header_len);
829 else
830 return pktsize;
831 }
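/* Worked example (illustrative, assuming TCP_MSS_DEFAULT is 536): a peer
 * advertising max_window = 4000 yields cutoff = 2000, so a pktsize of 3000
 * is bounded to 2000; with a tiny max_window of 300, pktsize is only
 * clamped (to 300) once it exceeds 300.
 */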
832
833 /* tcp.c */
834 void tcp_get_info(struct sock *, struct tcp_info *);
835 void tcp_rate_check_app_limited(struct sock *sk);
836
837 /* Read 'sendfile()'-style from a TCP socket */
838 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
839 sk_read_actor_t recv_actor);
840 int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
841 sk_read_actor_t recv_actor, bool noack,
842 u32 *copied_seq);
843 int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
844 struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
845 void tcp_read_done(struct sock *sk, size_t len);
846
847 void tcp_initialize_rcv_mss(struct sock *sk);
848
849 int tcp_mtu_to_mss(struct sock *sk, int pmtu);
850 int tcp_mss_to_mtu(struct sock *sk, int mss);
851 void tcp_mtup_init(struct sock *sk);
852
853 static inline unsigned int tcp_rto_max(const struct sock *sk)
854 {
855 return READ_ONCE(inet_csk(sk)->icsk_rto_max);
856 }
857
858 static inline void tcp_bound_rto(struct sock *sk)
859 {
860 inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
861 }
862
863 static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
864 {
865 return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
866 }
867
868 static inline unsigned long tcp_reqsk_timeout(struct request_sock *req)
869 {
870 u64 timeout = (u64)req->timeout << req->num_timeout;
871
872 return (unsigned long)min_t(u64, timeout,
873 tcp_rto_max(req->rsk_listener));
874 }
875
876 u32 tcp_delack_max(const struct sock *sk);
877
878 /* Compute the actual rto_min value */
879 static inline u32 tcp_rto_min(const struct sock *sk)
880 {
881 const struct dst_entry *dst = __sk_dst_get(sk);
882 u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min);
883
884 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
885 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
886 return rto_min;
887 }
888
889 static inline u32 tcp_rto_min_us(const struct sock *sk)
890 {
891 return jiffies_to_usecs(tcp_rto_min(sk));
892 }
893
894 static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
895 {
896 return dst_metric_locked(dst, RTAX_CC_ALGO);
897 }
898
899 /* Minimum RTT in usec. ~0 means not available. */
900 static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
901 {
902 return minmax_get(&tp->rtt_min);
903 }
904
905 /* Compute the actual receive window we are currently advertising.
906 * Rcv_nxt can be after the window if our peer pushes more data
907 * than the offered window.
908 */
909 static inline u32 tcp_receive_window(const struct tcp_sock *tp)
910 {
911 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
912
913 if (win < 0)
914 win = 0;
915 return (u32) win;
916 }
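/* Worked example (illustrative): rcv_wup = 1000, rcv_wnd = 65535 and
 * rcv_nxt = 1500 give an advertised window of 1000 + 65535 - 1500 = 65035
 * bytes; had the peer pushed beyond the offered window, the negative
 * intermediate result would be clamped to 0.
 */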
917
918 /* Choose a new window, without checks for shrinking, and without
919 * scaling applied to the result. The caller does these things
920 * if necessary. This is a "raw" window selection.
921 */
922 u32 __tcp_select_window(struct sock *sk);
923
924 void tcp_send_window_probe(struct sock *sk);
925
926 /* TCP uses 32bit jiffies to save some space.
927 * Note that this is different from tcp_time_stamp, which
928 * historically has been the same until linux-4.13.
929 */
930 #define tcp_jiffies32 ((u32)jiffies)
931
932 /*
933 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
934 * It is no longer tied to jiffies, but to a 1 ms clock.
935 * Note: double check if you want to use tcp_jiffies32 instead of this.
936 */
937 #define TCP_TS_HZ 1000
938
939 static inline u64 tcp_clock_ns(void)
940 {
941 return ktime_get_ns();
942 }
943
944 static inline u64 tcp_clock_us(void)
945 {
946 return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
947 }
948
949 static inline u64 tcp_clock_ms(void)
950 {
951 return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
952 }
953
954 /* TCP Timestamp included in TS option (RFC 1323) can either use ms
955 * or usec resolution. Each socket carries a flag to select one or the other
956 * resolution, as the route attribute could change anytime.
957 * Each flow must stick to initial resolution.
958 */
959 static inline u32 tcp_clock_ts(bool usec_ts)
960 {
961 return usec_ts ? tcp_clock_us() : tcp_clock_ms();
962 }
963
964 static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
965 {
966 return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
967 }
968
969 static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
970 {
971 if (tp->tcp_usec_ts)
972 return tp->tcp_mstamp;
973 return tcp_time_stamp_ms(tp);
974 }
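/* Usage sketch (illustrative only): the resolution chosen at connection
 * setup must be used consistently for the whole flow, so a TSval is built
 * along the lines of
 *
 *	u32 tsval = tcp_time_stamp_ts(tp) + tp->tsoffset;
 *
 * where tp->tsoffset is the per-connection timestamp offset.
 */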
975
976 void tcp_mstamp_refresh(struct tcp_sock *tp);
977
978 static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
979 {
980 return max_t(s64, t1 - t0, 0);
981 }
982
983 /* provide the departure time in us unit */
984 static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
985 {
986 return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
987 }
988
989 /* Provide skb TSval in usec or ms unit */
990 static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
991 {
992 if (usec_ts)
993 return tcp_skb_timestamp_us(skb);
994
995 return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
996 }
997
998 static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
999 {
1000 return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
1001 }
1002
1003 static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
1004 {
1005 return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
1006 }
1007
1008 #define tcp_flag_byte(th) (((u_int8_t *)th)[13])
1009
1010 #define TCPHDR_FIN BIT(0)
1011 #define TCPHDR_SYN BIT(1)
1012 #define TCPHDR_RST BIT(2)
1013 #define TCPHDR_PSH BIT(3)
1014 #define TCPHDR_ACK BIT(4)
1015 #define TCPHDR_URG BIT(5)
1016 #define TCPHDR_ECE BIT(6)
1017 #define TCPHDR_CWR BIT(7)
1018 #define TCPHDR_AE BIT(8)
1019 #define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
1020 TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \
1021 TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
1022 #define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \
1023 TCPHDR_FLAGS_MASK)
1024
1025 #define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
1026 #define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
1027 #define TCPHDR_SYNACK_ACCECN (TCPHDR_SYN | TCPHDR_ACK | TCPHDR_CWR)
1028
1029 #define TCP_ACCECN_CEP_ACE_MASK 0x7
1030 #define TCP_ACCECN_ACE_MAX_DELTA 6
1031
1032 /* To avoid/detect middlebox interference, not all counters start at 0.
1033 * See draft-ietf-tcpm-accurate-ecn for the latest values.
1034 */
1035 #define TCP_ACCECN_CEP_INIT_OFFSET 5
1036 #define TCP_ACCECN_E1B_INIT_OFFSET 1
1037 #define TCP_ACCECN_E0B_INIT_OFFSET 1
1038 #define TCP_ACCECN_CEB_INIT_OFFSET 0
1039
1040 /* State flags for sacked in struct tcp_skb_cb */
1041 enum tcp_skb_cb_sacked_flags {
1042 TCPCB_SACKED_ACKED = (1 << 0), /* SKB ACK'd by a SACK block */
1043 TCPCB_SACKED_RETRANS = (1 << 1), /* SKB retransmitted */
1044 TCPCB_LOST = (1 << 2), /* SKB is lost */
1045 TCPCB_TAGBITS = (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
1046 TCPCB_LOST), /* All tag bits */
1047 TCPCB_REPAIRED = (1 << 4), /* SKB repaired (no skb_mstamp_ns) */
1048 TCPCB_EVER_RETRANS = (1 << 7), /* Ever retransmitted frame */
1049 TCPCB_RETRANS = (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
1050 TCPCB_REPAIRED),
1051 };
1052
1053 /* This is what the send packet queuing engine uses to pass
1054 * TCP per-packet control information to the transmission code.
1055 * We also store the host-order sequence numbers in here too.
1056 * This is 44 bytes if IPV6 is enabled.
1057 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
1058 */
1059 struct tcp_skb_cb {
1060 __u32 seq; /* Starting sequence number */
1061 __u32 end_seq; /* SEQ + FIN + SYN + datalen */
1062 union {
1063 /* Note :
1064 * tcp_gso_segs/size are used in write queue only,
1065 * cf tcp_skb_pcount()/tcp_skb_mss()
1066 */
1067 struct {
1068 u16 tcp_gso_segs;
1069 u16 tcp_gso_size;
1070 };
1071 };
1072 __u16 tcp_flags; /* TCP header flags (tcp[12-13])*/
1073
1074 __u8 sacked; /* State flags for SACK. */
1075 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
1076 #define TSTAMP_ACK_SK 0x1
1077 #define TSTAMP_ACK_BPF 0x2
1078 __u8 txstamp_ack:2, /* Record TX timestamp for ack? */
1079 eor:1, /* Is skb MSG_EOR marked? */
1080 has_rxtstamp:1, /* SKB has a RX timestamp */
1081 unused:4;
1082 __u32 ack_seq; /* Sequence number ACK'd */
1083 union {
1084 struct {
1085 #define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
1086 /* There is space for up to 24 bytes */
1087 __u32 is_app_limited:1, /* cwnd not fully used? */
1088 delivered_ce:20,
1089 unused:11;
1090 /* pkts S/ACKed so far upon tx of skb, incl retrans: */
1091 __u32 delivered;
1092 /* start of send pipeline phase */
1093 u64 first_tx_mstamp;
1094 /* when we reached the "delivered" count */
1095 u64 delivered_mstamp;
1096 } tx; /* only used for outgoing skbs */
1097 union {
1098 struct inet_skb_parm h4;
1099 #if IS_ENABLED(CONFIG_IPV6)
1100 struct inet6_skb_parm h6;
1101 #endif
1102 } header; /* For incoming skbs */
1103 };
1104 };
1105
1106 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
1107
1108 extern const struct inet_connection_sock_af_ops ipv4_specific;
1109
1110 #if IS_ENABLED(CONFIG_IPV6)
1111 /* This is the variant of inet6_iif() that must be used by TCP,
1112 * as TCP moves IP6CB into a different location in skb->cb[]
1113 */
1114 static inline int tcp_v6_iif(const struct sk_buff *skb)
1115 {
1116 return TCP_SKB_CB(skb)->header.h6.iif;
1117 }
1118
1119 static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
1120 {
1121 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
1122
1123 return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
1124 }
1125
1126 /* TCP_SKB_CB reference means this can not be used from early demux */
1127 static inline int tcp_v6_sdif(const struct sk_buff *skb)
1128 {
1129 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
1130 if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
1131 return TCP_SKB_CB(skb)->header.h6.iif;
1132 #endif
1133 return 0;
1134 }
1135
1136 extern const struct inet_connection_sock_af_ops ipv6_specific;
1137
1138 INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
1139 INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
1140 void tcp_v6_early_demux(struct sk_buff *skb);
1141
1142 #endif
1143
1144 /* TCP_SKB_CB reference means this can not be used from early demux */
1145 static inline int tcp_v4_sdif(struct sk_buff *skb)
1146 {
1147 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
1148 if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
1149 return TCP_SKB_CB(skb)->header.h4.iif;
1150 #endif
1151 return 0;
1152 }
1153
1154 /* Due to TSO, an SKB can be composed of multiple actual
1155 * packets. To keep these tracked properly, we use this.
1156 */
1157 static inline int tcp_skb_pcount(const struct sk_buff *skb)
1158 {
1159 return TCP_SKB_CB(skb)->tcp_gso_segs;
1160 }
1161
1162 static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
1163 {
1164 TCP_SKB_CB(skb)->tcp_gso_segs = segs;
1165 }
1166
1167 static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
1168 {
1169 TCP_SKB_CB(skb)->tcp_gso_segs += segs;
1170 }
1171
1172 /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
1173 static inline int tcp_skb_mss(const struct sk_buff *skb)
1174 {
1175 return TCP_SKB_CB(skb)->tcp_gso_size;
1176 }
1177
1178 static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
1179 {
1180 return likely(!TCP_SKB_CB(skb)->eor);
1181 }
1182
1183 static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
1184 const struct sk_buff *from)
1185 {
1186 /* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
1187 return likely(tcp_skb_can_collapse_to(to) &&
1188 mptcp_skb_can_collapse(to, from) &&
1189 skb_pure_zcopy_same(to, from) &&
1190 skb_frags_readable(to) == skb_frags_readable(from));
1191 }
1192
1193 static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
1194 const struct sk_buff *from)
1195 {
1196 return likely(mptcp_skb_can_collapse(to, from) &&
1197 !skb_cmp_decrypted(to, from));
1198 }
1199
1200 /* Events passed to congestion control interface */
1201 enum tcp_ca_event {
1202 CA_EVENT_TX_START, /* first transmit when no packets in flight */
1203 CA_EVENT_CWND_RESTART, /* congestion window restart */
1204 CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
1205 CA_EVENT_LOSS, /* loss timeout */
1206 CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
1207 CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
1208 };
1209
1210 /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
1211 enum tcp_ca_ack_event_flags {
1212 CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */
1213 CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */
1214 CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */
1215 };
1216
1217 /*
1218 * Interface for adding new TCP congestion control handlers
1219 */
1220 #define TCP_CA_NAME_MAX 16
1221 #define TCP_CA_MAX 128
1222 #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
1223
1224 #define TCP_CA_UNSPEC 0
1225
1226 /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
1227 #define TCP_CONG_NON_RESTRICTED BIT(0)
1228 /* Requires ECN/ECT set on all packets */
1229 #define TCP_CONG_NEEDS_ECN BIT(1)
1230 /* Require successfully negotiated AccECN capability */
1231 #define TCP_CONG_NEEDS_ACCECN BIT(2)
1232 /* Use ECT(1) instead of ECT(0) while the CA is uninitialized */
1233 #define TCP_CONG_ECT_1_NEGOTIATION BIT(3)
1234 /* Cannot fallback to RFC3168 during AccECN negotiation */
1235 #define TCP_CONG_NO_FALLBACK_RFC3168 BIT(4)
1236 #define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN | \
1237 TCP_CONG_NEEDS_ACCECN | TCP_CONG_ECT_1_NEGOTIATION | \
1238 TCP_CONG_NO_FALLBACK_RFC3168)
1239
1240 union tcp_cc_info;
1241
1242 struct ack_sample {
1243 u32 pkts_acked;
1244 s32 rtt_us;
1245 u32 in_flight;
1246 };
1247
1248 /* A rate sample measures the number of (original/retransmitted) data
1249 * packets delivered "delivered" over an interval of time "interval_us".
1250 * The tcp_rate.c code fills in the rate sample, and congestion
1251 * control modules that define a cong_control function to run at the end
1252 * of ACK processing can optionally choose to consult this sample when
1253 * setting cwnd and pacing rate.
1254 * A sample is invalid if "delivered" or "interval_us" is negative.
1255 */
1256 struct rate_sample {
1257 u64 prior_mstamp; /* starting timestamp for interval */
1258 u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
1259 u32 prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
1260 s32 delivered; /* number of packets delivered over interval */
1261 s32 delivered_ce; /* number of packets delivered w/ CE marks*/
1262 long interval_us; /* time for tp->delivered to incr "delivered" */
1263 u32 snd_interval_us; /* snd interval for delivered packets */
1264 u32 rcv_interval_us; /* rcv interval for delivered packets */
1265 long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
1266 int losses; /* number of packets marked lost upon ACK */
1267 u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
1268 u32 prior_in_flight; /* in flight before this ACK */
1269 u32 last_end_seq; /* end_seq of most recently ACKed packet */
1270 bool is_app_limited; /* is sample from packet with bubble in pipe? */
1271 bool is_retrans; /* is sample from retransmission? */
1272 bool is_ack_delayed; /* is this (likely) a delayed ACK? */
1273 };
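/* Hedged sketch of a cong_control hook consulting the sample (the function
 * name and its policy are made up for illustration; a real module would set
 * cwnd and pacing rate here):
 *
 *	static void my_cc_cong_control(struct sock *sk, u32 ack, int flag,
 *				       const struct rate_sample *rs)
 *	{
 *		if (rs->delivered < 0 || rs->interval_us <= 0)
 *			return;		// invalid sample, nothing to learn
 *		// e.g. estimate delivery rate from rs->delivered over
 *		// rs->interval_us, then adjust cwnd / pacing accordingly
 *	}
 */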
1274
1275 struct tcp_congestion_ops {
1276 /* fast path fields are put first to fill one cache line */
1277
1278 /* A congestion control (CC) must provide one of either:
1279 *
1280 * (a) a cong_avoid function, if the CC wants to use the core TCP
1281 * stack's default functionality to implement a "classic"
1282 * (Reno/CUBIC-style) response to packet loss, RFC3168 ECN,
1283 * idle periods, pacing rate computations, etc.
1284 *
1285 * (b) a cong_control function, if the CC wants custom behavior and
1286 * complete control of all congestion control behaviors.
1287 */
1288 /* (a) "classic" response: calculate new cwnd.
1289 */
1290 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
1291 /* (b) "custom" response: call when packets are delivered to update
1292 * cwnd and pacing rate, after all the ca_state processing.
1293 */
1294 void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);
1295
1296 /* return slow start threshold (required) */
1297 u32 (*ssthresh)(struct sock *sk);
1298
1299 /* call before changing ca_state (optional) */
1300 void (*set_state)(struct sock *sk, u8 new_state);
1301
1302 /* call when cwnd event occurs (optional) */
1303 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
1304
1305 /* call when ack arrives (optional) */
1306 void (*in_ack_event)(struct sock *sk, u32 flags);
1307
1308 /* hook for packet ack accounting (optional) */
1309 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
1310
1311 /* override sysctl_tcp_min_tso_segs (optional) */
1312 u32 (*min_tso_segs)(struct sock *sk);
1313
1314 /* new value of cwnd after loss (required) */
1315 u32 (*undo_cwnd)(struct sock *sk);
1316 /* returns the multiplier used in tcp_sndbuf_expand (optional) */
1317 u32 (*sndbuf_expand)(struct sock *sk);
1318
1319 /* control/slow paths put last */
1320 /* get info for inet_diag (optional) */
1321 size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
1322 union tcp_cc_info *info);
1323
1324 char name[TCP_CA_NAME_MAX];
1325 struct module *owner;
1326 struct list_head list;
1327 u32 key;
1328 u32 flags;
1329
1330 /* initialize private data (optional) */
1331 void (*init)(struct sock *sk);
1332 /* cleanup private data (optional) */
1333 void (*release)(struct sock *sk);
1334 } ____cacheline_aligned_in_smp;
1335
1336 int tcp_register_congestion_control(struct tcp_congestion_ops *type);
1337 void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
1338 int tcp_update_congestion_control(struct tcp_congestion_ops *type,
1339 struct tcp_congestion_ops *old_type);
1340 int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);
1341
1342 void tcp_assign_congestion_control(struct sock *sk);
1343 void tcp_init_congestion_control(struct sock *sk);
1344 void tcp_cleanup_congestion_control(struct sock *sk);
1345 int tcp_set_default_congestion_control(struct net *net, const char *name);
1346 void tcp_get_default_congestion_control(struct net *net, char *name);
1347 void tcp_get_available_congestion_control(char *buf, size_t len);
1348 void tcp_get_allowed_congestion_control(char *buf, size_t len);
1349 int tcp_set_allowed_congestion_control(char *allowed);
1350 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1351 bool cap_net_admin);
1352 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
1353 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
1354
1355 u32 tcp_reno_ssthresh(struct sock *sk);
1356 u32 tcp_reno_undo_cwnd(struct sock *sk);
1357 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
1358 extern struct tcp_congestion_ops tcp_reno;
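/* Minimal "classic" (cong_avoid based) congestion control skeleton, as a
 * hedged sketch only: module boilerplate is omitted and the name is made up,
 * but the required callbacks mirror how tcp_reno itself is wired up.
 *
 *	static struct tcp_congestion_ops my_reno_like __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.undo_cwnd	= tcp_reno_undo_cwnd,
 *		.name		= "my_reno_like",
 *		.owner		= THIS_MODULE,
 *	};
 *
 * A module would pass this to tcp_register_congestion_control() from its
 * init function and to tcp_unregister_congestion_control() on exit.
 */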
1359
1360 struct tcp_congestion_ops *tcp_ca_find(const char *name);
1361 struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
1362 u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
1363 #ifdef CONFIG_INET
1364 char *tcp_ca_get_name_by_key(u32 key, char *buffer);
1365 #else
1366 static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
1367 {
1368 return NULL;
1369 }
1370 #endif
1371
1372 static inline bool tcp_ca_needs_ecn(const struct sock *sk)
1373 {
1374 const struct inet_connection_sock *icsk = inet_csk(sk);
1375
1376 return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
1377 }
1378
1379 static inline bool tcp_ca_needs_accecn(const struct sock *sk)
1380 {
1381 const struct inet_connection_sock *icsk = inet_csk(sk);
1382
1383 return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ACCECN;
1384 }
1385
1386 static inline bool tcp_ca_ect_1_negotiation(const struct sock *sk)
1387 {
1388 const struct inet_connection_sock *icsk = inet_csk(sk);
1389
1390 return icsk->icsk_ca_ops->flags & TCP_CONG_ECT_1_NEGOTIATION;
1391 }
1392
1393 static inline bool tcp_ca_no_fallback_rfc3168(const struct sock *sk)
1394 {
1395 const struct inet_connection_sock *icsk = inet_csk(sk);
1396
1397 return icsk->icsk_ca_ops->flags & TCP_CONG_NO_FALLBACK_RFC3168;
1398 }
1399
1400 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
1401 {
1402 const struct inet_connection_sock *icsk = inet_csk(sk);
1403
1404 if (icsk->icsk_ca_ops->cwnd_event)
1405 icsk->icsk_ca_ops->cwnd_event(sk, event);
1406 }
1407
1408 /* From tcp_cong.c */
1409 void tcp_set_ca_state(struct sock *sk, const u8 ca_state);
1410
1411
1412 static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
1413 {
1414 return t1 > t2 || (t1 == t2 && after(seq1, seq2));
1415 }
1416
1417 /* These functions determine how the current flow behaves in respect of SACK
1418 * handling. SACK is negotiated with the peer, and therefore it can vary
1419 * between different flows.
1420 *
1421 * tcp_is_sack - SACK enabled
1422 * tcp_is_reno - No SACK
1423 */
1424 static inline int tcp_is_sack(const struct tcp_sock *tp)
1425 {
1426 return likely(tp->rx_opt.sack_ok);
1427 }
1428
1429 static inline bool tcp_is_reno(const struct tcp_sock *tp)
1430 {
1431 return !tcp_is_sack(tp);
1432 }
1433
1434 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
1435 {
1436 return tp->sacked_out + tp->lost_out;
1437 }
1438
1439 /* This determines how many packets are "in the network" to the best
1440 * of our knowledge. In many cases it is conservative, but where
1441 * detailed information is available from the receiver (via SACK
1442 * blocks etc.) we can make more aggressive calculations.
1443 *
1444 * Use this for decisions involving congestion control; use just
1445 * tp->packets_out to determine if the send queue is empty or not.
1446 *
1447 * Read this equation as:
1448 *
1449 * "Packets sent once on transmission queue" MINUS
1450 * "Packets left network, but not honestly ACKed yet" PLUS
1451 * "Packets fast retransmitted"
1452 */
1453 static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
1454 {
1455 return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
1456 }
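/* Worked example (illustrative): packets_out = 10, sacked_out = 3,
 * lost_out = 2 and retrans_out = 1 give 10 - (3 + 2) + 1 = 6 packets
 * believed to still be in the network.
 */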
1457
1458 #define TCP_INFINITE_SSTHRESH 0x7fffffff
1459
1460 static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
1461 {
1462 return tp->snd_cwnd;
1463 }
1464
1465 static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
1466 {
1467 WARN_ON_ONCE((int)val <= 0);
1468 tp->snd_cwnd = val;
1469 }
1470
1471 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
1472 {
1473 return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
1474 }
1475
1476 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
1477 {
1478 return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
1479 }
1480
1481 static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
1482 {
1483 return (TCPF_CA_CWR | TCPF_CA_Recovery) &
1484 (1 << inet_csk(sk)->icsk_ca_state);
1485 }
1486
1487 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1488 * The exception is cwnd reduction phase, when cwnd is decreasing towards
1489 * ssthresh.
1490 */
1491 static inline __u32 tcp_current_ssthresh(const struct sock *sk)
1492 {
1493 const struct tcp_sock *tp = tcp_sk(sk);
1494
1495 if (tcp_in_cwnd_reduction(sk))
1496 return tp->snd_ssthresh;
1497 else
1498 return max(tp->snd_ssthresh,
1499 ((tcp_snd_cwnd(tp) >> 1) +
1500 (tcp_snd_cwnd(tp) >> 2)));
1501 }
1502
1503 /* Use define here intentionally to get WARN_ON location shown at the caller */
1504 #define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
1505
1506 void tcp_enter_cwr(struct sock *sk);
1507 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
1508
1509 /* The maximum number of MSS of available cwnd for which TSO defers
1510 * sending if not using sysctl_tcp_tso_win_divisor.
1511 */
1512 static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
1513 {
1514 return 3;
1515 }
1516
1517 /* Returns end sequence number of the receiver's advertised window */
1518 static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
1519 {
1520 return tp->snd_una + tp->snd_wnd;
1521 }
1522
1523 /* We follow the spirit of RFC2861 to validate cwnd but implement a more
1524 * flexible approach. The RFC suggests cwnd should not be raised unless
1525 * it was fully used previously. And that's exactly what we do in
1526 * congestion avoidance mode. But in slow start we allow cwnd to grow
1527 * as long as the application has used half the cwnd.
1528 * Example :
1529 * cwnd is 10 (IW10), but application sends 9 frames.
1530 * We allow cwnd to reach 18 when all frames are ACKed.
1531 * This check is safe because it's as aggressive as slow start which already
1532 * risks 100% overshoot. The advantage is that we discourage the application
1533 * from sending filler packets or extra data to artificially blow up cwnd
1534 * usage, and allow an application-limited process to probe bw more aggressively.
1535 */
1536 static inline bool tcp_is_cwnd_limited(const struct sock *sk)
1537 {
1538 const struct tcp_sock *tp = tcp_sk(sk);
1539
1540 if (tp->is_cwnd_limited)
1541 return true;
1542
1543 /* If in slow start, ensure cwnd grows to twice what was ACKed. */
1544 if (tcp_in_slow_start(tp))
1545 return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
1546
1547 return false;
1548 }
1549
1550 /* BBR congestion control needs pacing.
1551 * Same remark for SO_MAX_PACING_RATE.
1552 * The sch_fq packet scheduler handles pacing efficiently,
1553 * but it is not always installed/used.
1554 * Return true if TCP stack should pace packets itself.
1555 */
1556 static inline bool tcp_needs_internal_pacing(const struct sock *sk)
1557 {
1558 return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
1559 }
1560
1561 /* Estimates how many jiffies from now the next packet for this flow can be sent.
1562 * Scheduling a retransmit timer too early would be silly.
1563 */
1564 static inline unsigned long tcp_pacing_delay(const struct sock *sk)
1565 {
1566 s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;
1567
1568 return delay > 0 ? nsecs_to_jiffies(delay) : 0;
1569 }
1570
1571 static inline void tcp_reset_xmit_timer(struct sock *sk,
1572 const int what,
1573 unsigned long when,
1574 bool pace_delay)
1575 {
1576 if (pace_delay)
1577 when += tcp_pacing_delay(sk);
1578 inet_csk_reset_xmit_timer(sk, what, when,
1579 tcp_rto_max(sk));
1580 }
1581
1582 /* Something is really bad: we could not queue an additional packet
1583 * because the qdisc is full, the receiver sent a zero window, or we are paced.
1584 * We do not want to add fuel to the fire, or abort too early,
1585 * so make sure the timer we arm now is at least 200ms in the future,
1586 * regardless of current icsk_rto value (as it could be ~2ms)
1587 */
1588 static inline unsigned long tcp_probe0_base(const struct sock *sk)
1589 {
1590 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
1591 }
1592
1593 /* Variant of inet_csk_rto_backoff() used for zero window probes */
1594 static inline unsigned long tcp_probe0_when(const struct sock *sk,
1595 unsigned long max_when)
1596 {
1597 u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
1598 inet_csk(sk)->icsk_backoff);
1599 u64 when = (u64)tcp_probe0_base(sk) << backoff;
1600
1601 return (unsigned long)min_t(u64, when, max_when);
1602 }
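/* Illustration of the clamping above (numbers are only an example): with a
 * probe0 base of 200 ms and icsk_backoff == 3, when = 200 ms << 3 = 1.6 s,
 * further capped by @max_when. The shift itself can never exceed
 * ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1, so the result stays bounded even
 * for very large icsk_backoff values.
 */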
1603
1604 static inline void tcp_check_probe_timer(struct sock *sk)
1605 {
1606 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
1607 tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
1608 tcp_probe0_base(sk), true);
1609 }
1610
1611 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
1612 {
1613 tp->snd_wl1 = seq;
1614 }
1615
1616 static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
1617 {
1618 tp->snd_wl1 = seq;
1619 }
1620
1621 /*
1622 * Calculate(/check) TCP checksum
1623 */
1624 static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1625 __be32 daddr, __wsum base)
1626 {
1627 return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
1628 }
1629
1630 static inline bool tcp_checksum_complete(struct sk_buff *skb)
1631 {
1632 return !skb_csum_unnecessary(skb) &&
1633 __skb_checksum_complete(skb);
1634 }
1635
1636 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
1637 enum skb_drop_reason *reason);
1638
1639 static inline int tcp_filter(struct sock *sk, struct sk_buff *skb,
1640 enum skb_drop_reason *reason)
1641 {
1642 const struct tcphdr *th = (const struct tcphdr *)skb->data;
1643
1644 return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th), reason);
1645 }
1646
1647 void tcp_set_state(struct sock *sk, int state);
1648 void tcp_done(struct sock *sk);
1649 int tcp_abort(struct sock *sk, int err);
1650
1651 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
1652 {
1653 rx_opt->dsack = 0;
1654 rx_opt->num_sacks = 0;
1655 }
1656
1657 void tcp_cwnd_restart(struct sock *sk, s32 delta);
1658
1659 static inline void tcp_slow_start_after_idle_check(struct sock *sk)
1660 {
1661 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1662 struct tcp_sock *tp = tcp_sk(sk);
1663 s32 delta;
1664
1665 if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
1666 tp->packets_out || ca_ops->cong_control)
1667 return;
1668 delta = tcp_jiffies32 - tp->lsndtime;
1669 if (delta > inet_csk(sk)->icsk_rto)
1670 tcp_cwnd_restart(sk, delta);
1671 }
1672
1673 /* Determine a window scaling and initial window to offer. */
1674 void tcp_select_initial_window(const struct sock *sk, int __space,
1675 __u32 mss, __u32 *rcv_wnd,
1676 __u32 *window_clamp, int wscale_ok,
1677 __u8 *rcv_wscale, __u32 init_rcv_wnd);
1678
1679 static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
1680 {
1681 s64 scaled_space = (s64)space * scaling_ratio;
1682
1683 return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
1684 }
1685
1686 static inline int tcp_win_from_space(const struct sock *sk, int space)
1687 {
1688 return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
1689 }
1690
1691 /* inverse of __tcp_win_from_space() */
1692 static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
1693 {
1694 u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;
1695
1696 do_div(val, scaling_ratio);
1697 return val;
1698 }
1699
1700 static inline int tcp_space_from_win(const struct sock *sk, int win)
1701 {
1702 return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
1703 }
1704
1705 /* Assume a 50% default for skb->len/skb->truesize ratio.
1706 * This may be adjusted later in tcp_measure_rcv_mss().
1707 */
1708 #define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))
1709
1710 static inline void tcp_scaling_ratio_init(struct sock *sk)
1711 {
1712 tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
1713 }
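/* A rough sketch of the math above: __tcp_win_from_space() computes
 * space * scaling_ratio / (1 << TCP_RMEM_TO_WIN_SCALE). With the default
 * ratio of (1 << (TCP_RMEM_TO_WIN_SCALE - 1)) this is space / 2, matching
 * the assumed 50% skb->len/skb->truesize ratio; __tcp_space_from_win() is
 * its inverse, up to integer rounding.
 */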
1714
1715 /* Note: caller must be prepared to deal with negative returns */
1716 static inline int tcp_space(const struct sock *sk)
1717 {
1718 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
1719 READ_ONCE(sk->sk_backlog.len) -
1720 atomic_read(&sk->sk_rmem_alloc));
1721 }
1722
1723 static inline int tcp_full_space(const struct sock *sk)
1724 {
1725 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1726 }
1727
1728 static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
1729 {
1730 int unused_mem = sk_unused_reserved_mem(sk);
1731 struct tcp_sock *tp = tcp_sk(sk);
1732
1733 tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
1734 if (unused_mem)
1735 tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
1736 tcp_win_from_space(sk, unused_mem));
1737 }
1738
1739 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
1740 {
1741 __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
1742 }
1743
1744 void tcp_cleanup_rbuf(struct sock *sk, int copied);
1745 void __tcp_cleanup_rbuf(struct sock *sk, int copied);
1746
1747
1748 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
1749 * If 87.5% (7/8) of the space has been consumed, we want to override the
1750 * SO_RCVLOWAT constraint, since we are receiving skbs with too small a
1751 * len/truesize ratio.
1752 */
1753 static inline bool tcp_rmem_pressure(const struct sock *sk)
1754 {
1755 int rcvbuf, threshold;
1756
1757 if (tcp_under_memory_pressure(sk))
1758 return true;
1759
1760 rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1761 threshold = rcvbuf - (rcvbuf >> 3);
1762
1763 return atomic_read(&sk->sk_rmem_alloc) > threshold;
1764 }
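/* The threshold above is rcvbuf - rcvbuf/8, i.e. 7/8 of sk_rcvbuf: for a
 * hypothetical 128 KB receive buffer, pressure is reported once more than
 * 112 KB of it is consumed by queued skbs.
 */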
1765
1766 static inline bool tcp_epollin_ready(const struct sock *sk, int target)
1767 {
1768 const struct tcp_sock *tp = tcp_sk(sk);
1769 int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
1770
1771 if (avail <= 0)
1772 return false;
1773
1774 return (avail >= target) || tcp_rmem_pressure(sk) ||
1775 (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
1776 }
1777
1778 extern void tcp_openreq_init_rwin(struct request_sock *req,
1779 const struct sock *sk_listener,
1780 const struct dst_entry *dst);
1781
1782 void tcp_enter_memory_pressure(struct sock *sk);
1783 void tcp_leave_memory_pressure(struct sock *sk);
1784
1785 static inline int keepalive_intvl_when(const struct tcp_sock *tp)
1786 {
1787 struct net *net = sock_net((struct sock *)tp);
1788 int val;
1789
1790 /* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
1791 * and do_tcp_setsockopt().
1792 */
1793 val = READ_ONCE(tp->keepalive_intvl);
1794
1795 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
1796 }
1797
1798 static inline int keepalive_time_when(const struct tcp_sock *tp)
1799 {
1800 struct net *net = sock_net((struct sock *)tp);
1801 int val;
1802
1803 /* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
1804 val = READ_ONCE(tp->keepalive_time);
1805
1806 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
1807 }
1808
1809 static inline int keepalive_probes(const struct tcp_sock *tp)
1810 {
1811 struct net *net = sock_net((struct sock *)tp);
1812 int val;
1813
1814 /* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
1815 * and do_tcp_setsockopt().
1816 */
1817 val = READ_ONCE(tp->keepalive_probes);
1818
1819 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
1820 }
1821
1822 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
1823 {
1824 const struct inet_connection_sock *icsk = &tp->inet_conn;
1825
1826 return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
1827 tcp_jiffies32 - tp->rcv_tstamp);
1828 }
1829
1830 static inline int tcp_fin_time(const struct sock *sk)
1831 {
1832 int fin_timeout = tcp_sk(sk)->linger2 ? :
1833 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
1834 const int rto = inet_csk(sk)->icsk_rto;
1835
1836 if (fin_timeout < (rto << 2) - (rto >> 1))
1837 fin_timeout = (rto << 2) - (rto >> 1);
1838
1839 return fin_timeout;
1840 }
1841
1842 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
1843 int paws_win)
1844 {
1845 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
1846 return true;
1847 if (unlikely(!time_before32(ktime_get_seconds(),
1848 rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
1849 return true;
1850 /*
1851 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
1852 * then the following TCP messages have valid values. Ignore the 0 value,
1853 * or else a 'negative' tsval might forbid us from accepting their packets.
1854 */
1855 if (!rx_opt->ts_recent)
1856 return true;
1857 return false;
1858 }
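/* As a sketch of how this is used: tcp_paws_reject() below calls this with
 * paws_win == 0, so the first test passes only when rcv_tsval is not older
 * than the cached ts_recent; failing that, the segment is still accepted if
 * ts_recent is stale (older than TCP_PAWS_WRAP) or was never set.
 */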
1859
1860 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
1861 int rst)
1862 {
1863 if (tcp_paws_check(rx_opt, 0))
1864 return false;
1865
1866 /* RST segments are not recommended to carry timestamps,
1867 and, if they do, it is recommended to ignore PAWS because
1868 "their cleanup function should take precedence over timestamps."
1869 Certainly, this is a mistake. It is necessary to understand the reasons
1870 of this constraint to relax it: if the peer reboots, its clock may go
1871 out-of-sync and half-open connections will not be reset.
1872 Actually, the problem would not exist if all
1873 the implementations followed the draft about maintaining clocks
1874 across reboots. Linux-2.2 DOES NOT!
1875
1876 However, we can relax time bounds for RST segments to MSL.
1877 */
1878 if (rst && !time_before32(ktime_get_seconds(),
1879 rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
1880 return false;
1881 return true;
1882 }
1883
1884 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
1885 {
1886 u32 ace;
1887
1888 /* mptcp hooks are only on the slow path */
1889 if (sk_is_mptcp((struct sock *)tp))
1890 return;
1891
1892 ace = tcp_ecn_mode_accecn(tp) ?
1893 ((tp->delivered_ce + TCP_ACCECN_CEP_INIT_OFFSET) &
1894 TCP_ACCECN_CEP_ACE_MASK) : 0;
1895
1896 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
1897 (ace << 22) |
1898 ntohl(TCP_FLAG_ACK) |
1899 snd_wnd);
1900 }
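/* A sketch of what the word built above encodes: tp->tcp_header_len is a byte
 * count and always a multiple of 4, so (tcp_header_len << 26) equals
 * ((tcp_header_len / 4) << 28), i.e. the TCP data-offset nibble; the ACE bits,
 * the ACK flag and the expected receive window fill the rest. The result
 * mirrors the data-offset/flags/window word of an expected incoming header,
 * letting header prediction compare it in a single 32-bit operation.
 */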
1901
1902 static inline void tcp_fast_path_on(struct tcp_sock *tp)
1903 {
1904 __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
1905 }
1906
1907 static inline void tcp_fast_path_check(struct sock *sk)
1908 {
1909 struct tcp_sock *tp = tcp_sk(sk);
1910
1911 if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
1912 tp->rcv_wnd &&
1913 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
1914 !tp->urg_data)
1915 tcp_fast_path_on(tp);
1916 }
1917
1918 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
1919 int mib_idx, u32 *last_oow_ack_time);
1920
1921 static inline void tcp_mib_init(struct net *net)
1922 {
1923 /* See RFC 2012 */
1924 TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
1925 TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
1926 TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
1927 TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
1928 }
1929
1930 /* from STCP */
1931 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1932 {
1933 tp->retransmit_skb_hint = NULL;
1934 }
1935
1936 #define tcp_md5_addr tcp_ao_addr
1937
1938 /* - key database */
1939 struct tcp_md5sig_key {
1940 struct hlist_node node;
1941 u8 keylen;
1942 u8 family; /* AF_INET or AF_INET6 */
1943 u8 prefixlen;
1944 u8 flags;
1945 union tcp_md5_addr addr;
1946 int l3index; /* set if key added with L3 scope */
1947 u8 key[TCP_MD5SIG_MAXKEYLEN];
1948 struct rcu_head rcu;
1949 };
1950
1951 /* - sock block */
1952 struct tcp_md5sig_info {
1953 struct hlist_head head;
1954 struct rcu_head rcu;
1955 };
1956
1957 /* - pseudo header */
1958 struct tcp4_pseudohdr {
1959 __be32 saddr;
1960 __be32 daddr;
1961 __u8 pad;
1962 __u8 protocol;
1963 __be16 len;
1964 };
1965
1966 struct tcp6_pseudohdr {
1967 struct in6_addr saddr;
1968 struct in6_addr daddr;
1969 __be32 len;
1970 __be32 protocol; /* including padding */
1971 };
1972
1973 /*
1974 * struct tcp_sigpool - per-CPU pool of ahash_requests
1975 * @scratch: per-CPU temporary area, that can be used between
1976 * tcp_sigpool_start() and tcp_sigpool_end() to perform
1977 * crypto request
1978 * @req: pre-allocated ahash request
1979 */
1980 struct tcp_sigpool {
1981 void *scratch;
1982 struct ahash_request *req;
1983 };
1984
1985 int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
1986 void tcp_sigpool_get(unsigned int id);
1987 void tcp_sigpool_release(unsigned int id);
1988 int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
1989 const struct sk_buff *skb,
1990 unsigned int header_len);
1991
1992 /**
1993 * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
1994 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
1995 * @c: returned tcp_sigpool for usage (uninitialized on failure)
1996 *
1997 * Returns: 0 on success, error otherwise.
1998 */
1999 int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
2000 /**
2001 * tcp_sigpool_end - enable bh and stop using tcp_sigpool
2002 * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
2003 */
2004 void tcp_sigpool_end(struct tcp_sigpool *c);
2005 size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
2006 /* - functions */
2007 void tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
2008 const struct sock *sk, const struct sk_buff *skb);
2009 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
2010 int family, u8 prefixlen, int l3index, u8 flags,
2011 const u8 *newkey, u8 newkeylen);
2012 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
2013 int family, u8 prefixlen, int l3index,
2014 struct tcp_md5sig_key *key);
2015
2016 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
2017 int family, u8 prefixlen, int l3index, u8 flags);
2018 void tcp_clear_md5_list(struct sock *sk);
2019 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
2020 const struct sock *addr_sk);
2021
2022 #ifdef CONFIG_TCP_MD5SIG
2023 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
2024 const union tcp_md5_addr *addr,
2025 int family, bool any_l3index);
2026 static inline struct tcp_md5sig_key *
2027 tcp_md5_do_lookup(const struct sock *sk, int l3index,
2028 const union tcp_md5_addr *addr, int family)
2029 {
2030 if (!static_branch_unlikely(&tcp_md5_needed.key))
2031 return NULL;
2032 return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
2033 }
2034
2035 static inline struct tcp_md5sig_key *
2036 tcp_md5_do_lookup_any_l3index(const struct sock *sk,
2037 const union tcp_md5_addr *addr, int family)
2038 {
2039 if (!static_branch_unlikely(&tcp_md5_needed.key))
2040 return NULL;
2041 return __tcp_md5_do_lookup(sk, 0, addr, family, true);
2042 }
2043
2044 #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
2045 void tcp_md5_destruct_sock(struct sock *sk);
2046 #else
2047 static inline struct tcp_md5sig_key *
2048 tcp_md5_do_lookup(const struct sock *sk, int l3index,
2049 const union tcp_md5_addr *addr, int family)
2050 {
2051 return NULL;
2052 }
2053
2054 static inline struct tcp_md5sig_key *
2055 tcp_md5_do_lookup_any_l3index(const struct sock *sk,
2056 const union tcp_md5_addr *addr, int family)
2057 {
2058 return NULL;
2059 }
2060
2061 #define tcp_twsk_md5_key(twsk) NULL
2062 static inline void tcp_md5_destruct_sock(struct sock *sk)
2063 {
2064 }
2065 #endif
2066
2067 struct md5_ctx;
2068 void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb,
2069 unsigned int header_len);
2070 void tcp_md5_hash_key(struct md5_ctx *ctx, const struct tcp_md5sig_key *key);
2071
2072 /* From tcp_fastopen.c */
2073 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
2074 struct tcp_fastopen_cookie *cookie);
2075 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
2076 struct tcp_fastopen_cookie *cookie, bool syn_lost,
2077 u16 try_exp);
2078 struct tcp_fastopen_request {
2079 /* Fast Open cookie. Size 0 means a cookie request */
2080 struct tcp_fastopen_cookie cookie;
2081 struct msghdr *data; /* data in MSG_FASTOPEN */
2082 size_t size;
2083 int copied; /* queued in tcp_connect() */
2084 struct ubuf_info *uarg;
2085 };
2086 void tcp_free_fastopen_req(struct tcp_sock *tp);
2087 void tcp_fastopen_destroy_cipher(struct sock *sk);
2088 void tcp_fastopen_ctx_destroy(struct net *net);
2089 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
2090 void *primary_key, void *backup_key);
2091 int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
2092 u64 *key);
2093 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
2094 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
2095 struct request_sock *req,
2096 struct tcp_fastopen_cookie *foc,
2097 const struct dst_entry *dst);
2098 void tcp_fastopen_init_key_once(struct net *net);
2099 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
2100 struct tcp_fastopen_cookie *cookie);
2101 bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
2102 #define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
2103 #define TCP_FASTOPEN_KEY_MAX 2
2104 #define TCP_FASTOPEN_KEY_BUF_LENGTH \
2105 (TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)
2106
2107 /* Fastopen key context */
2108 struct tcp_fastopen_context {
2109 siphash_key_t key[TCP_FASTOPEN_KEY_MAX];
2110 int num;
2111 struct rcu_head rcu;
2112 };
2113
2114 void tcp_fastopen_active_disable(struct sock *sk);
2115 bool tcp_fastopen_active_should_disable(struct sock *sk);
2116 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
2117 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
2118
2119 /* Caller needs to wrap with rcu_read_(un)lock() */
2120 static inline
2121 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
2122 {
2123 struct tcp_fastopen_context *ctx;
2124
2125 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
2126 if (!ctx)
2127 ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
2128 return ctx;
2129 }
2130
2131 static inline
2132 bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
2133 const struct tcp_fastopen_cookie *orig)
2134 {
2135 if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
2136 orig->len == foc->len &&
2137 !memcmp(orig->val, foc->val, foc->len))
2138 return true;
2139 return false;
2140 }
2141
2142 static inline
2143 int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
2144 {
2145 return ctx->num;
2146 }
2147
2148 /* Latencies incurred by various limits for a sender. They are
2149 * chronograph-like stats that are mutually exclusive.
2150 */
2151 enum tcp_chrono {
2152 TCP_CHRONO_UNSPEC,
2153 TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
2154 TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
2155 TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
2156 __TCP_CHRONO_MAX,
2157 };
2158
2159 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
2160 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
2161
2162 /* This helper is needed, because skb->tcp_tsorted_anchor uses
2163 * the same memory storage as skb->destructor/_skb_refdst
2164 */
2165 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
2166 {
2167 skb->destructor = NULL;
2168 skb->_skb_refdst = 0UL;
2169 }
2170
2171 #define tcp_skb_tsorted_save(skb) { \
2172 unsigned long _save = skb->_skb_refdst; \
2173 skb->_skb_refdst = 0UL;
2174
2175 #define tcp_skb_tsorted_restore(skb) \
2176 skb->_skb_refdst = _save; \
2177 }
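/* These macros are meant to bracket a code region that must run with
 * skb->_skb_refdst temporarily cleared, roughly like this sketch (the
 * skb_clone() body is only an illustration):
 *
 *	tcp_skb_tsorted_save(skb) {
 *		nskb = skb_clone(skb, GFP_ATOMIC);
 *	} tcp_skb_tsorted_restore(skb);
 *
 * The saved _skb_refdst value is put back when the block closes.
 */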
2178
2179 void tcp_write_queue_purge(struct sock *sk);
2180
2181 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
2182 {
2183 return skb_rb_first(&sk->tcp_rtx_queue);
2184 }
2185
2186 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
2187 {
2188 return skb_rb_last(&sk->tcp_rtx_queue);
2189 }
2190
2191 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
2192 {
2193 return skb_peek_tail(&sk->sk_write_queue);
2194 }
2195
2196 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \
2197 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
2198
2199 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
2200 {
2201 return skb_peek(&sk->sk_write_queue);
2202 }
2203
2204 static inline bool tcp_skb_is_last(const struct sock *sk,
2205 const struct sk_buff *skb)
2206 {
2207 return skb_queue_is_last(&sk->sk_write_queue, skb);
2208 }
2209
2210 /**
2211 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
2212 * @sk: socket
2213 *
2214 * Since the write queue can have a temporary empty skb in it,
2215 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
2216 */
2217 static inline bool tcp_write_queue_empty(const struct sock *sk)
2218 {
2219 const struct tcp_sock *tp = tcp_sk(sk);
2220
2221 return tp->write_seq == tp->snd_nxt;
2222 }
2223
2224 static inline bool tcp_rtx_queue_empty(const struct sock *sk)
2225 {
2226 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
2227 }
2228
2229 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
2230 {
2231 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
2232 }
2233
2234 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
2235 {
2236 __skb_queue_tail(&sk->sk_write_queue, skb);
2237
2238 /* Queue it, remembering where we must start sending. */
2239 if (sk->sk_write_queue.next == skb)
2240 tcp_chrono_start(sk, TCP_CHRONO_BUSY);
2241 }
2242
2243 /* Insert new before skb on the write queue of sk. */
2244 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
2245 struct sk_buff *skb,
2246 struct sock *sk)
2247 {
2248 __skb_queue_before(&sk->sk_write_queue, skb, new);
2249 }
2250
2251 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
2252 {
2253 tcp_skb_tsorted_anchor_cleanup(skb);
2254 __skb_unlink(skb, &sk->sk_write_queue);
2255 }
2256
2257 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);
2258
2259 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
2260 {
2261 tcp_skb_tsorted_anchor_cleanup(skb);
2262 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
2263 }
2264
2265 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
2266 {
2267 list_del(&skb->tcp_tsorted_anchor);
2268 tcp_rtx_queue_unlink(skb, sk);
2269 tcp_wmem_free_skb(sk, skb);
2270 }
2271
2272 static inline void tcp_write_collapse_fence(struct sock *sk)
2273 {
2274 struct sk_buff *skb = tcp_write_queue_tail(sk);
2275
2276 if (skb)
2277 TCP_SKB_CB(skb)->eor = 1;
2278 }
2279
2280 static inline void tcp_push_pending_frames(struct sock *sk)
2281 {
2282 if (tcp_send_head(sk)) {
2283 struct tcp_sock *tp = tcp_sk(sk);
2284
2285 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
2286 }
2287 }
2288
2289 /* Start sequence of the skb just after the highest skb with SACKed
2290 * bit, valid only if sacked_out > 0 or when the caller has ensured
2291 * validity by itself.
2292 */
2293 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
2294 {
2295 if (!tp->sacked_out)
2296 return tp->snd_una;
2297
2298 if (tp->highest_sack == NULL)
2299 return tp->snd_nxt;
2300
2301 return TCP_SKB_CB(tp->highest_sack)->seq;
2302 }
2303
2304 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
2305 {
2306 tcp_sk(sk)->highest_sack = skb_rb_next(skb);
2307 }
2308
2309 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
2310 {
2311 return tcp_sk(sk)->highest_sack;
2312 }
2313
2314 static inline void tcp_highest_sack_reset(struct sock *sk)
2315 {
2316 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
2317 }
2318
2319 /* Called when old skb is about to be deleted and replaced by new skb */
2320 static inline void tcp_highest_sack_replace(struct sock *sk,
2321 struct sk_buff *old,
2322 struct sk_buff *new)
2323 {
2324 if (old == tcp_highest_sack(sk))
2325 tcp_sk(sk)->highest_sack = new;
2326 }
2327
2328 /* This helper checks if socket has IP_TRANSPARENT set */
2329 static inline bool inet_sk_transparent(const struct sock *sk)
2330 {
2331 switch (sk->sk_state) {
2332 case TCP_TIME_WAIT:
2333 return inet_twsk(sk)->tw_transparent;
2334 case TCP_NEW_SYN_RECV:
2335 return inet_rsk(inet_reqsk(sk))->no_srccheck;
2336 }
2337 return inet_test_bit(TRANSPARENT, sk);
2338 }
2339
2340 /* Determines whether this is a thin stream (which may suffer from
2341 * increased latency). Used to trigger latency-reducing mechanisms.
2342 */
2343 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
2344 {
2345 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
2346 }
2347
2348 /* /proc */
2349 enum tcp_seq_states {
2350 TCP_SEQ_STATE_LISTENING,
2351 TCP_SEQ_STATE_ESTABLISHED,
2352 };
2353
2354 void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
2355 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2356 void tcp_seq_stop(struct seq_file *seq, void *v);
2357
2358 struct tcp_seq_afinfo {
2359 sa_family_t family;
2360 };
2361
2362 struct tcp_iter_state {
2363 struct seq_net_private p;
2364 enum tcp_seq_states state;
2365 struct sock *syn_wait_sk;
2366 int bucket, offset, sbucket, num;
2367 loff_t last_pos;
2368 };
2369
2370 extern struct request_sock_ops tcp_request_sock_ops;
2371 extern struct request_sock_ops tcp6_request_sock_ops;
2372
2373 void tcp_v4_destroy_sock(struct sock *sk);
2374
2375 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
2376 netdev_features_t features);
2377 struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
2378 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
2379 struct tcphdr *th);
2380 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
2381 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
2382 #ifdef CONFIG_INET
2383 void tcp_gro_complete(struct sk_buff *skb);
2384 #else
2385 static inline void tcp_gro_complete(struct sk_buff *skb) { }
2386 #endif
2387
2388 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
2389
2390 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
2391 {
2392 struct net *net = sock_net((struct sock *)tp);
2393 u32 val;
2394
2395 val = READ_ONCE(tp->notsent_lowat);
2396
2397 return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
2398 }
2399
2400 bool tcp_stream_memory_free(const struct sock *sk, int wake);
2401
2402 #ifdef CONFIG_PROC_FS
2403 int tcp4_proc_init(void);
2404 void tcp4_proc_exit(void);
2405 #endif
2406
2407 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
2408 int tcp_conn_request(struct request_sock_ops *rsk_ops,
2409 const struct tcp_request_sock_ops *af_ops,
2410 struct sock *sk, struct sk_buff *skb);
2411
2412 /* TCP af-specific functions */
2413 struct tcp_sock_af_ops {
2414 #ifdef CONFIG_TCP_MD5SIG
2415 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk,
2416 const struct sock *addr_sk);
2417 void (*calc_md5_hash)(char *location,
2418 const struct tcp_md5sig_key *md5,
2419 const struct sock *sk,
2420 const struct sk_buff *skb);
2421 int (*md5_parse)(struct sock *sk,
2422 int optname,
2423 sockptr_t optval,
2424 int optlen);
2425 #endif
2426 #ifdef CONFIG_TCP_AO
2427 int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
2428 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2429 struct sock *addr_sk,
2430 int sndid, int rcvid);
2431 int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
2432 const struct sock *sk,
2433 __be32 sisn, __be32 disn, bool send);
2434 int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
2435 const struct sock *sk, const struct sk_buff *skb,
2436 const u8 *tkey, int hash_offset, u32 sne);
2437 #endif
2438 };
2439
2440 struct tcp_request_sock_ops {
2441 u16 mss_clamp;
2442 #ifdef CONFIG_TCP_MD5SIG
2443 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
2444 const struct sock *addr_sk);
2445 void (*calc_md5_hash) (char *location,
2446 const struct tcp_md5sig_key *md5,
2447 const struct sock *sk,
2448 const struct sk_buff *skb);
2449 #endif
2450 #ifdef CONFIG_TCP_AO
2451 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
2452 struct request_sock *req,
2453 int sndid, int rcvid);
2454 int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
2455 int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
2456 struct request_sock *req, const struct sk_buff *skb,
2457 int hash_offset, u32 sne);
2458 #endif
2459 #ifdef CONFIG_SYN_COOKIES
2460 __u32 (*cookie_init_seq)(const struct sk_buff *skb,
2461 __u16 *mss);
2462 #endif
2463 struct dst_entry *(*route_req)(const struct sock *sk,
2464 struct sk_buff *skb,
2465 struct flowi *fl,
2466 struct request_sock *req,
2467 u32 tw_isn);
2468 union tcp_seq_and_ts_off (*init_seq_and_ts_off)(
2469 const struct net *net,
2470 const struct sk_buff *skb);
2471 int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
2472 struct flowi *fl, struct request_sock *req,
2473 struct tcp_fastopen_cookie *foc,
2474 enum tcp_synack_type synack_type,
2475 struct sk_buff *syn_skb);
2476 };
2477
2478 extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
2479 #if IS_ENABLED(CONFIG_IPV6)
2480 extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
2481 #endif
2482
2483 #ifdef CONFIG_SYN_COOKIES
2484 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2485 const struct sock *sk, struct sk_buff *skb,
2486 __u16 *mss)
2487 {
2488 tcp_synq_overflow(sk);
2489 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
2490 return ops->cookie_init_seq(skb, mss);
2491 }
2492 #else
2493 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
2494 const struct sock *sk, struct sk_buff *skb,
2495 __u16 *mss)
2496 {
2497 return 0;
2498 }
2499 #endif
2500
2501 struct tcp_key {
2502 union {
2503 struct {
2504 struct tcp_ao_key *ao_key;
2505 char *traffic_key;
2506 u32 sne;
2507 u8 rcv_next;
2508 };
2509 struct tcp_md5sig_key *md5_key;
2510 };
2511 enum {
2512 TCP_KEY_NONE = 0,
2513 TCP_KEY_MD5,
2514 TCP_KEY_AO,
2515 } type;
2516 };
2517
2518 static inline void tcp_get_current_key(const struct sock *sk,
2519 struct tcp_key *out)
2520 {
2521 #if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
2522 const struct tcp_sock *tp = tcp_sk(sk);
2523 #endif
2524
2525 #ifdef CONFIG_TCP_AO
2526 if (static_branch_unlikely(&tcp_ao_needed.key)) {
2527 struct tcp_ao_info *ao;
2528
2529 ao = rcu_dereference_protected(tp->ao_info,
2530 lockdep_sock_is_held(sk));
2531 if (ao) {
2532 out->ao_key = READ_ONCE(ao->current_key);
2533 out->type = TCP_KEY_AO;
2534 return;
2535 }
2536 }
2537 #endif
2538 #ifdef CONFIG_TCP_MD5SIG
2539 if (static_branch_unlikely(&tcp_md5_needed.key) &&
2540 rcu_access_pointer(tp->md5sig_info)) {
2541 out->md5_key = tp->af_specific->md5_lookup(sk, sk);
2542 if (out->md5_key) {
2543 out->type = TCP_KEY_MD5;
2544 return;
2545 }
2546 }
2547 #endif
2548 out->type = TCP_KEY_NONE;
2549 }
2550
2551 static inline bool tcp_key_is_md5(const struct tcp_key *key)
2552 {
2553 if (static_branch_tcp_md5())
2554 return key->type == TCP_KEY_MD5;
2555 return false;
2556 }
2557
2558 static inline bool tcp_key_is_ao(const struct tcp_key *key)
2559 {
2560 if (static_branch_tcp_ao())
2561 return key->type == TCP_KEY_AO;
2562 return false;
2563 }
2564
2565 int tcpv4_offload_init(void);
2566
2567 void tcp_v4_init(void);
2568 void tcp_init(void);
2569
2570 /* tcp_recovery.c */
2571 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
2572 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
2573 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
2574 u32 reo_wnd);
2575 extern bool tcp_rack_mark_lost(struct sock *sk);
2576 extern void tcp_rack_reo_timeout(struct sock *sk);
2577
2578 /* tcp_plb.c */
2579
2580 /*
2581 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
2582 * expects cong_ratio which represents fraction of traffic that experienced
2583 * congestion over a single RTT. In order to avoid floating point operations,
2584 * this fraction should be mapped to (1 << TCP_PLB_SCALE) and passed in.
2585 */
2586 #define TCP_PLB_SCALE 8
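/* For instance, if 25% of the traffic in the last RTT was CE-marked, the
 * caller would pass cong_ratio = (25 * (1 << TCP_PLB_SCALE)) / 100 = 64
 * rather than the floating point value 0.25.
 */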
2587
2588 /* State for PLB (Protective Load Balancing) for a single TCP connection. */
2589 struct tcp_plb_state {
2590 u8 consec_cong_rounds:5, /* consecutive congested rounds */
2591 unused:3;
2592 u32 pause_until; /* jiffies32 when PLB can resume rerouting */
2593 };
2594
2595 static inline void tcp_plb_init(const struct sock *sk,
2596 struct tcp_plb_state *plb)
2597 {
2598 plb->consec_cong_rounds = 0;
2599 plb->pause_until = 0;
2600 }
2601 void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
2602 const int cong_ratio);
2603 void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
2604 void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);
2605
2606 static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
2607 {
2608 WARN_ONCE(cond,
2609 "%scwn:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
2610 str,
2611 tcp_snd_cwnd(tcp_sk(sk)),
2612 tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
2613 tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
2614 tcp_sk(sk)->tlp_high_seq, sk->sk_state,
2615 inet_csk(sk)->icsk_ca_state,
2616 tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
2617 inet_csk(sk)->icsk_pmtu_cookie);
2618 }
2619
2620 /* At how many usecs into the future should the RTO fire? */
2621 static inline s64 tcp_rto_delta_us(const struct sock *sk)
2622 {
2623 const struct sk_buff *skb = tcp_rtx_queue_head(sk);
2624 u32 rto = inet_csk(sk)->icsk_rto;
2625
2626 if (likely(skb)) {
2627 u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);
2628
2629 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
2630 } else {
2631 tcp_warn_once(sk, 1, "rtx queue empty: ");
2632 return jiffies_to_usecs(rto);
2633 }
2634
2635 }
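/* Example of the computation above (timings are purely illustrative): if the
 * head of the rtx queue was last (re)transmitted 300 ms ago and icsk_rto is
 * 500 ms, the RTO should fire in roughly 200000 us; a negative result means
 * the timeout is already overdue.
 */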
2636
2637 /*
2638 * Save and compile IPv4 options, return a pointer to it
2639 */
2640 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
2641 struct sk_buff *skb)
2642 {
2643 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
2644 struct ip_options_rcu *dopt = NULL;
2645
2646 if (opt->optlen) {
2647 int opt_size = sizeof(*dopt) + opt->optlen;
2648
2649 dopt = kmalloc(opt_size, GFP_ATOMIC);
2650 if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
2651 kfree(dopt);
2652 dopt = NULL;
2653 }
2654 }
2655 return dopt;
2656 }
2657
2658 /* locally generated TCP pure ACKs have skb->truesize == 2
2659 * (check tcp_send_ack() in net/ipv4/tcp_output.c )
2660 * This is much faster than dissecting the packet to find out.
2661 * (Think of GRE encapsulations, IPv4, IPv6, ...)
2662 */
2663 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
2664 {
2665 return skb->truesize == 2;
2666 }
2667
2668 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
2669 {
2670 skb->truesize = 2;
2671 }
2672
2673 static inline int tcp_inq(struct sock *sk)
2674 {
2675 struct tcp_sock *tp = tcp_sk(sk);
2676 int answ;
2677
2678 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
2679 answ = 0;
2680 } else if (sock_flag(sk, SOCK_URGINLINE) ||
2681 !tp->urg_data ||
2682 before(tp->urg_seq, tp->copied_seq) ||
2683 !before(tp->urg_seq, tp->rcv_nxt)) {
2684
2685 answ = tp->rcv_nxt - tp->copied_seq;
2686
2687 /* Subtract 1, if FIN was received */
2688 if (answ && sock_flag(sk, SOCK_DONE))
2689 answ--;
2690 } else {
2691 answ = tp->urg_seq - tp->copied_seq;
2692 }
2693
2694 return answ;
2695 }
2696
2697 int tcp_peek_len(struct socket *sock);
2698
2699 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
2700 {
2701 u16 segs_in;
2702
2703 segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
2704
2705 /* We update these fields while other threads might
2706 * read them from tcp_get_info()
2707 */
2708 WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
2709 if (skb->len > tcp_hdrlen(skb))
2710 WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
2711 }
2712
2713 /*
2714 * TCP listen path runs lockless.
2715 * We forced "struct sock" to be const qualified to make sure
2716 * we don't modify one of its fields by mistake.
2717 * Here, we increment sk_drops which is an atomic_t, so we can safely
2718 * make sock writable again.
2719 */
2720 static inline void tcp_listendrop(const struct sock *sk)
2721 {
2722 sk_drops_inc((struct sock *)sk);
2723 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
2724 }
2725
2726 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);
2727
2728 /*
2729 * Interface for adding Upper Level Protocols over TCP
2730 */
2731
2732 #define TCP_ULP_NAME_MAX 16
2733 #define TCP_ULP_MAX 128
2734 #define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX)
2735
2736 struct tcp_ulp_ops {
2737 struct list_head list;
2738
2739 /* initialize ulp */
2740 int (*init)(struct sock *sk);
2741 /* update ulp */
2742 void (*update)(struct sock *sk, struct proto *p,
2743 void (*write_space)(struct sock *sk));
2744 /* cleanup ulp */
2745 void (*release)(struct sock *sk);
2746 /* diagnostic */
2747 int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin);
2748 size_t (*get_info_size)(const struct sock *sk, bool net_admin);
2749 /* clone ulp */
2750 void (*clone)(const struct request_sock *req, struct sock *newsk,
2751 const gfp_t priority);
2752
2753 char name[TCP_ULP_NAME_MAX];
2754 struct module *owner;
2755 };
2756 int tcp_register_ulp(struct tcp_ulp_ops *type);
2757 void tcp_unregister_ulp(struct tcp_ulp_ops *type);
2758 int tcp_set_ulp(struct sock *sk, const char *name);
2759 void tcp_get_available_ulp(char *buf, size_t len);
2760 void tcp_cleanup_ulp(struct sock *sk);
2761 void tcp_update_ulp(struct sock *sk, struct proto *p,
2762 void (*write_space)(struct sock *sk));
2763
2764 #define MODULE_ALIAS_TCP_ULP(name) \
2765 MODULE_INFO(alias, name); \
2766 MODULE_INFO(alias, "tcp-ulp-" name)
2767
2768 #ifdef CONFIG_NET_SOCK_MSG
2769 struct sk_msg;
2770 struct sk_psock;
2771
2772 #ifdef CONFIG_BPF_SYSCALL
2773 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
2774 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
2775 #ifdef CONFIG_BPF_STREAM_PARSER
2776 struct strparser;
2777 int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
2778 sk_read_actor_t recv_actor);
2779 #endif /* CONFIG_BPF_STREAM_PARSER */
2780 #endif /* CONFIG_BPF_SYSCALL */
2781
2782 #ifdef CONFIG_INET
2783 void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
2784 #else
2785 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
2786 {
2787 }
2788 #endif
2789
2790 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
2791 struct sk_msg *msg, u32 bytes, int flags);
2792 #endif /* CONFIG_NET_SOCK_MSG */
2793
2794 #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
2795 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
2796 {
2797 }
2798 #endif
2799
2800 #ifdef CONFIG_CGROUP_BPF
2801 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2802 struct sk_buff *skb,
2803 unsigned int end_offset)
2804 {
2805 skops->skb = skb;
2806 skops->skb_data_end = skb->data + end_offset;
2807 }
2808 #else
2809 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
2810 struct sk_buff *skb,
2811 unsigned int end_offset)
2812 {
2813 }
2814 #endif
2815
2816 /* Call BPF_SOCK_OPS program that returns an int. If the return value
2817 * is < 0, then the BPF op failed (for example if the loaded BPF
2818 * program does not support the chosen operation or there is no BPF
2819 * program loaded).
2820 */
2821 #ifdef CONFIG_BPF
2822 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2823 {
2824 struct bpf_sock_ops_kern sock_ops;
2825 int ret;
2826
2827 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
2828 if (sk_fullsock(sk)) {
2829 sock_ops.is_fullsock = 1;
2830 sock_ops.is_locked_tcp_sock = 1;
2831 sock_owned_by_me(sk);
2832 }
2833
2834 sock_ops.sk = sk;
2835 sock_ops.op = op;
2836 if (nargs > 0)
2837 memcpy(sock_ops.args, args, nargs * sizeof(*args));
2838
2839 ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
2840 if (ret == 0)
2841 ret = sock_ops.reply;
2842 else
2843 ret = -1;
2844 return ret;
2845 }
2846
2847 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2848 {
2849 u32 args[2] = {arg1, arg2};
2850
2851 return tcp_call_bpf(sk, op, 2, args);
2852 }
2853
2854 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2855 u32 arg3)
2856 {
2857 u32 args[3] = {arg1, arg2, arg3};
2858
2859 return tcp_call_bpf(sk, op, 3, args);
2860 }
2861
2862 #else
2863 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
2864 {
2865 return -EPERM;
2866 }
2867
2868 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
2869 {
2870 return -EPERM;
2871 }
2872
2873 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
2874 u32 arg3)
2875 {
2876 return -EPERM;
2877 }
2878
2879 #endif
2880
2881 static inline u32 tcp_timeout_init(struct sock *sk)
2882 {
2883 int timeout;
2884
2885 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
2886
2887 if (timeout <= 0)
2888 timeout = TCP_TIMEOUT_INIT;
2889 return min_t(int, timeout, TCP_RTO_MAX);
2890 }
2891
2892 static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
2893 {
2894 int rwnd;
2895
2896 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
2897
2898 if (rwnd < 0)
2899 rwnd = 0;
2900 return rwnd;
2901 }
2902
2903 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
2904 {
2905 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
2906 }
2907
2908 static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
2909 {
2910 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
2911 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
2912 }
2913
2914 #if IS_ENABLED(CONFIG_SMC)
2915 extern struct static_key_false tcp_have_smc;
2916 #endif
2917
2918 #if IS_ENABLED(CONFIG_TLS_DEVICE)
2919 void clean_acked_data_enable(struct tcp_sock *tp,
2920 void (*cad)(struct sock *sk, u32 ack_seq));
2921 void clean_acked_data_disable(struct tcp_sock *tp);
2922 void clean_acked_data_flush(void);
2923 #endif
2924
2925 DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2926 static inline void tcp_add_tx_delay(struct sk_buff *skb,
2927 const struct tcp_sock *tp)
2928 {
2929 if (static_branch_unlikely(&tcp_tx_delay_enabled))
2930 skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
2931 }
2932
2933 /* Compute Earliest Departure Time for some control packets
2934 * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets.
2935 */
2936 static inline u64 tcp_transmit_time(const struct sock *sk)
2937 {
2938 if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
2939 u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
2940 tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;
2941
2942 return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
2943 }
2944 return 0;
2945 }
2946
2947 static inline int tcp_parse_auth_options(const struct tcphdr *th,
2948 const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
2949 {
2950 const u8 *md5_tmp, *ao_tmp;
2951 int ret;
2952
2953 ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
2954 if (ret)
2955 return ret;
2956
2957 if (md5_hash)
2958 *md5_hash = md5_tmp;
2959
2960 if (aoh) {
2961 if (!ao_tmp)
2962 *aoh = NULL;
2963 else
2964 *aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
2965 }
2966
2967 return 0;
2968 }
2969
2970 static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
2971 int family, int l3index, bool stat_inc)
2972 {
2973 #ifdef CONFIG_TCP_AO
2974 struct tcp_ao_info *ao_info;
2975 struct tcp_ao_key *ao_key;
2976
2977 if (!static_branch_unlikely(&tcp_ao_needed.key))
2978 return false;
2979
2980 ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
2981 lockdep_sock_is_held(sk));
2982 if (!ao_info)
2983 return false;
2984
2985 ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
2986 if (ao_info->ao_required || ao_key) {
2987 if (stat_inc) {
2988 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
2989 atomic64_inc(&ao_info->counters.ao_required);
2990 }
2991 return true;
2992 }
2993 #endif
2994 return false;
2995 }
2996
2997 enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
2998 const struct request_sock *req, const struct sk_buff *skb,
2999 const void *saddr, const void *daddr,
3000 int family, int dif, int sdif);
3001
3002 #endif /* _TCP_H */
3003