/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/bits.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/tcp_ao.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
#include <net/xfrm.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);

static inline void tcp_orphan_count_inc(void)
{
	this_cpu_inc(tcp_orphan_count);
}

static inline void tcp_orphan_count_dec(void)
{
	this_cpu_dec(tcp_orphan_count);
}

DECLARE_PER_CPU(u32, tcp_tw_isn);

void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal number of window scale according to RFC1323 */
#define TCP_MAX_WSCALE		14U

/* Default sending frequency of accurate ECN option per RTT */
#define TCP_ACCECN_OPTION_BEACON	3

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs.  Nevertheless
				 * this value corresponds to
				 * 63secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);

#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX_SEC	120
#define TCP_RTO_MAX	((unsigned)(TCP_RTO_MAX_SEC * HZ))
#define TCP_RTO_MIN	((unsigned)(HZ / 5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */

#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */

#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
 * to avoid overflows. This assumes a clock smaller than 1 MHz.
 * Default clock is 1 kHz, tcp_usec_ts uses 1 MHz.
 */
#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)

#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
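
/* Illustrative sketch only, not part of the kernel API: how the timer
 * constants above combine in the classic retransmission backoff.  Starting
 * from TCP_TIMEOUT_INIT (1s per RFC 6298), each unanswered retransmission
 * doubles the timeout; the real stack clamps against the per-socket
 * icsk_rto_max rather than the raw TCP_RTO_MAX used here, so treat this as
 * a sketch of the arithmetic only.
 */
static inline unsigned long tcp_rto_backoff_sketch(unsigned int retries)
{
	u64 timeout = TCP_TIMEOUT_INIT;

	timeout <<= min(retries, 16U);		/* exponential backoff */
	return min_t(u64, timeout, TCP_RTO_MAX);	/* never beyond 120s */
}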

/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_AO		29	/* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_ACCECN0		172	/* 0xAC: Accurate ECN Order 0 */
#define TCPOPT_ACCECN1		174	/* 0xAE: Accurate ECN Order 1 */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS			4
#define TCPOLEN_WINDOW			3
#define TCPOLEN_SACK_PERM		2
#define TCPOLEN_TIMESTAMP		10
#define TCPOLEN_MD5SIG			18
#define TCPOLEN_FASTOPEN_BASE		2
#define TCPOLEN_ACCECN_BASE		2
#define TCPOLEN_EXP_FASTOPEN_BASE	4
#define TCPOLEN_EXP_SMC_BASE		6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
#define TCPOLEN_ACCECN_PERFIELD		3

/* Maximum number of byte counters in AccECN option + size */
#define TCP_ACCECN_NUMFIELDS		3
#define TCP_ACCECN_MAXSIZE		(TCPOLEN_ACCECN_BASE + \
					 TCPOLEN_ACCECN_PERFIELD * \
					 TCP_ACCECN_NUMFIELDS)
#define TCP_ACCECN_SAFETY_SHIFT		1 /* SAFETY_FACTOR in accecn draft */

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400

/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);

extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sk_enabled(sk) &&
	    mem_cgroup_sk_under_memory_pressure(sk))
		return true;

	if (sk->sk_bypass_prot_mem)
		return false;

	return READ_ONCE(tcp_memory_pressure);
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
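
/* Illustrative only: how the wraparound-safe helpers above behave across the
 * 32-bit sequence number wrap.  The values are arbitrary examples, not
 * anything the stack relies on.
 */
static inline void tcp_seq_wrap_example(void)
{
	u32 old_seq = 0xfffffff0U;	/* just before the wrap */
	u32 new_seq = 0x00000010U;	/* 0x20 bytes later, after the wrap */

	/* Signed difference keeps ordering despite old_seq > new_seq. */
	WARN_ON_ONCE(!before(old_seq, new_seq));
	WARN_ON_ONCE(!after(new_seq, old_seq));
	/* 0xfffffff8 lies in [old_seq, new_seq] even though it wraps. */
	WARN_ON_ONCE(!between(0xfffffff8U, old_seq, new_seq));
}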

static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, -skb->truesize);
	if (!skb_zcopy_pure(skb))
		sk_mem_uncharge(sk, skb->truesize);
	else
		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
	__kfree_skb(skb);
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(const struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

void tcp_tsq_work_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg);
void tcp_splice_eof(struct socket *sock);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
void tcp_twsk_purge(struct list_head *net_exit_list);
int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
			 unsigned int offset, size_t len);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule);

static inline void tcp_dec_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		/* How many ACKs S/ACKing new data have we sent? */
		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;

		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_MODE_RFC3168	BIT(0)
#define	TCP_ECN_QUEUE_CWR	BIT(1)
#define	TCP_ECN_DEMAND_CWR	BIT(2)
#define	TCP_ECN_SEEN		BIT(3)
#define	TCP_ECN_MODE_ACCECN	BIT(4)

#define	TCP_ECN_DISABLED	0
#define	TCP_ECN_MODE_PENDING	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
#define	TCP_ECN_MODE_ANY	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)

static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
{
	return tp->ecn_flags & TCP_ECN_MODE_ANY;
}

static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168;
}

static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN;
}

static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
{
	return !tcp_ecn_mode_any(tp);
}

static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING;
}

static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
{
	tp->ecn_flags &= ~TCP_ECN_MODE_ANY;
	tp->ecn_flags |= mode;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3,
	TCP_TW_ACK_OOW = 4
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th,
					      u32 *tw_isn,
					      enum skb_drop_reason *drop_reason);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race, enum skb_drop_reason *drop_reason);
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_pacing_rate(struct sock *sk);
void tcp_set_rto(struct sock *sk);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		  struct poll_table_struct *wait);
int do_tcp_getsockopt(struct sock *sk, int level,
		      int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int do_tcp_setsockopt(struct sock *sk, int level, int optname,
		      sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		int flags);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);
void tcp_update_recv_tstamps(struct sk_buff *skb,
			     struct scm_timestamping_internal *tss);
void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);

/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req,
				  void (*opt_child_init)(struct sock *newsk,
							 const struct sock *sk));
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
	TCP_SYNACK_RETRANS,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    struct sock *sk, struct sk_buff *skb,
					    struct tcp_options_received *tcp_opt,
					    int mss, u32 tsoff);

#if IS_ENABLED(CONFIG_BPF)
struct bpf_tcp_req_attrs {
	u32 rcv_tsval;
	u32 rcv_tsecr;
	u16 mss;
	u8 rcv_wscale;
	u8 snd_wscale;
	u8 ecn_ok;
	u8 wscale_ok;
	u8 sack_ok;
	u8 tstamp_ok;
	u8 usec_ts_ok;
	u8 reserved[3];
};
#endif

#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood.  However, we have to use
	 * 'last_overflow - HZ' as lower bound.  That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}
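
/* Illustrative flow only, loosely following tcp_input.c/syncookies.c and not
 * a drop-in helper: a listener records SYN-queue overflows with
 * tcp_synq_overflow(), and later only treats an incoming ACK as a possible
 * syncookie ACK if an overflow happened within the last TCP_SYNCOOKIE_VALID
 * jiffies.  The "syn_queue_full" parameter is hypothetical.
 */
static inline bool tcp_syncookie_flow_sketch(struct sock *sk, bool syn_queue_full)
{
	if (syn_queue_full)
		tcp_synq_overflow(sk);	/* remembered at most once per second */

	/* Without a recent overflow, a cookie-looking ACK is not plausible. */
	return !tcp_synq_no_recent_overflow(sk);
}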

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
{
	if (usec_ts)
		return div_u64(val, NSEC_PER_USEC);

	return div_u64(val, NSEC_PER_MSEC);
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);

static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
{
	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
		dst_feature(dst, RTAX_FEATURE_ECN);
}

#if IS_ENABLED(CONFIG_BPF)
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return skb->sk;
}

struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
#else
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return false;
}

static inline struct request_sock *cookie_bpf_check(struct net *net, struct sock *sk,
						    struct sk_buff *skb)
{
	return NULL;
}
#endif

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority,
			   enum sk_rst_reason reason);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void __tcp_check_space(struct sock *sk);
static inline void tcp_check_space(struct sock *sk)
{
	/* pairs with tcp_poll() */
	smp_mb();

	if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
		__tcp_check_space(sk);
}
void tcp_sack_compress_send_ack(struct sock *sk);

static inline void tcp_cleanup_skb(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
}

static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);
void tcp_rate_check_app_limited(struct sock *sk);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
			sk_read_actor_t recv_actor, bool noack,
			u32 *copied_seq);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline unsigned int tcp_rto_max(const struct sock *sk)
{
	return READ_ONCE(inet_csk(sk)->icsk_rto_max);
}

static inline void tcp_bound_rto(struct sock *sk)
{
	inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline unsigned long tcp_reqsk_timeout(struct request_sock *req)
{
	u64 timeout = (u64)req->timeout << req->num_timeout;

	return (unsigned long)min_t(u64, timeout,
				    tcp_rto_max(req->rsk_listener));
}

u32 tcp_delack_max(const struct sock *sk);

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(const struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min);

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(const struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically has been the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

static inline u64 tcp_clock_ms(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
}

/* TCP Timestamp included in TS option (RFC 1323) can either use ms
 * or usec resolution. Each socket carries a flag to select one or other
 * resolution, as the route attribute could change anytime.
 * Each flow must stick to initial resolution.
 */
static inline u32 tcp_clock_ts(bool usec_ts)
{
	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}

static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}

static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
	if (tp->tcp_usec_ts)
		return tp->tcp_mstamp;
	return tcp_time_stamp_ms(tp);
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}

/* Provide skb TSval in usec or ms unit */
static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
{
	if (usec_ts)
		return tcp_skb_timestamp_us(skb);

	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
}

static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
{
	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
}

static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
{
	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN	BIT(0)
#define TCPHDR_SYN	BIT(1)
#define TCPHDR_RST	BIT(2)
#define TCPHDR_PSH	BIT(3)
#define TCPHDR_ACK	BIT(4)
#define TCPHDR_URG	BIT(5)
#define TCPHDR_ECE	BIT(6)
#define TCPHDR_CWR	BIT(7)
#define TCPHDR_AE	BIT(8)
#define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
			   TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \
			   TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \
			     TCPHDR_FLAGS_MASK)

#define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
#define TCPHDR_SYNACK_ACCECN	(TCPHDR_SYN | TCPHDR_ACK | TCPHDR_CWR)

#define TCP_ACCECN_CEP_ACE_MASK 0x7
#define TCP_ACCECN_ACE_MAX_DELTA 6

/* To avoid/detect middlebox interference, not all counters start at 0.
 * See draft-ietf-tcpm-accurate-ecn for the latest values.
 */
#define TCP_ACCECN_CEP_INIT_OFFSET 5
#define TCP_ACCECN_E1B_INIT_OFFSET 1
#define TCP_ACCECN_E0B_INIT_OFFSET 1
#define TCP_ACCECN_CEB_INIT_OFFSET 0

/* State flags for sacked in struct tcp_skb_cb */
enum tcp_skb_cb_sacked_flags {
	TCPCB_SACKED_ACKED	= (1 << 0),	/* SKB ACK'd by a SACK block	*/
	TCPCB_SACKED_RETRANS	= (1 << 1),	/* SKB retransmitted		*/
	TCPCB_LOST		= (1 << 2),	/* SKB is lost			*/
	TCPCB_TAGBITS		= (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
				   TCPCB_LOST),	/* All tag bits			*/
	TCPCB_REPAIRED		= (1 << 4),	/* SKB repaired (no skb_mstamp_ns)	*/
	TCPCB_EVER_RETRANS	= (1 << 7),	/* Ever retransmitted frame	*/
	TCPCB_RETRANS		= (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
				   TCPCB_REPAIRED),
};

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note :
		 *	tcp_gso_segs/size are used in write queue only,
		 *	cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u16		tcp_flags;	/* TCP header flags (tcp[12-13])*/

	__u8		sacked;		/* State flags for SACK.	*/
	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
#define TSTAMP_ACK_SK	0x1
#define TSTAMP_ACK_BPF	0x2
	__u8		txstamp_ack:2,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:4;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
			/* There is space for up to 24 bytes */
			__u32 is_app_limited:1, /* cwnd not fully used? */
			      delivered_ce:20,
			      unused:11;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;   /* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

extern const struct inet_connection_sock_af_ops ipv4_specific;

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
void tcp_v6_early_demux(struct sk_buff *skb);

#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from) &&
		      skb_pure_zcopy_same(to, from) &&
		      skb_frags_readable(to) == skb_frags_readable(from));
}

static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
					   const struct sk_buff *from)
{
	return likely(mptcp_skb_can_collapse(to, from) &&
		      !skb_cmp_decrypted(to, from));
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED		BIT(0)
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN		BIT(1)
/* Require successfully negotiated AccECN capability */
#define TCP_CONG_NEEDS_ACCECN		BIT(2)
/* Use ECT(1) instead of ECT(0) while the CA is uninitialized */
#define TCP_CONG_ECT_1_NEGOTIATION	BIT(3)
/* Cannot fallback to RFC3168 during AccECN negotiation */
#define TCP_CONG_NO_FALLBACK_RFC3168	BIT(4)
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN | \
			 TCP_CONG_NEEDS_ACCECN | TCP_CONG_ECT_1_NEGOTIATION | \
			 TCP_CONG_NO_FALLBACK_RFC3168)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32  snd_interval_us;	/* snd interval for delivered packets */
	u32  rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};
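
/* Illustrative only: the kind of computation a cong_control module (a
 * BBR-like one, for instance) might do with a rate_sample.  This helper is a
 * sketch, not a kernel API: it converts "delivered packets over interval_us"
 * into packets per second, skipping app-limited and invalid samples.
 */
static inline u64 tcp_rate_sample_pps_sketch(const struct rate_sample *rs)
{
	if (rs->delivered <= 0 || rs->interval_us <= 0 || rs->is_app_limited)
		return 0;	/* invalid or unrepresentative sample */

	/* interval_us is far below 2^32 in practice, so the cast is safe here */
	return div_u64((u64)rs->delivered * USEC_PER_SEC, (u32)rs->interval_us);
}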

struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* A congestion control (CC) must provide one of either:
	 *
	 * (a) a cong_avoid function, if the CC wants to use the core TCP
	 *     stack's default functionality to implement a "classic"
	 *     (Reno/CUBIC-style) response to packet loss, RFC3168 ECN,
	 *     idle periods, pacing rate computations, etc.
	 *
	 * (b) a cong_control function, if the CC wants custom behavior and
	 *     complete control of all congestion control behaviors.
	 */
	/* (a) "classic" response: calculate new cwnd.
	 */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* (b) "custom" response: call when packets are delivered to update
	 * cwnd and pacing rate, after all the ca_state processing.
	 */
	void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs (optional) */
	u32 (*min_tso_segs)(struct sock *sk);

	/* new value of cwnd after loss (required) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
int tcp_update_congestion_control(struct tcp_congestion_ops *type,
				  struct tcp_congestion_ops *old_type);
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
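
/* Minimal sketch of a "classic" congestion control module wired into the
 * hooks above.  It simply reuses the Reno helpers exported by this header;
 * the name "example" and the module boilerplate are illustrative, and such
 * code would live in its own module .c file (with <linux/module.h>), which
 * is why it is guarded out here.
 */
#if 0	/* illustration only */
static struct tcp_congestion_ops tcp_example_cc __read_mostly = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* option (a): classic response */
};

static int __init tcp_example_cc_init(void)
{
	return tcp_register_congestion_control(&tcp_example_cc);
}
module_init(tcp_example_cc_init);

static void __exit tcp_example_cc_exit(void)
{
	tcp_unregister_congestion_control(&tcp_example_cc);
}
module_exit(tcp_example_cc_exit);
#endif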

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline bool tcp_ca_needs_accecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ACCECN;
}

static inline bool tcp_ca_ect_1_negotiation(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_ECT_1_NEGOTIATION;
}

static inline bool tcp_ca_no_fallback_rfc3168(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NO_FALLBACK_RFC3168;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);


static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* These functions determine how the current flow behaves with respect to SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
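
/* Worked example (illustrative numbers only): with packets_out = 100,
 * sacked_out = 10, lost_out = 5 and retrans_out = 3, the estimate is
 *
 *	in_flight = 100 - (10 + 5) + 3 = 88
 *
 * i.e. 88 segments are assumed to still be in the network, which is the
 * value congestion control compares against cwnd.
 */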

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
	WARN_ON_ONCE((int)val <= 0);
	tp->snd_cwnd = val;
}

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tcp_snd_cwnd(tp) >> 1) +
			    (tcp_snd_cwnd(tp) >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage application to
 * either send more filler packets or data to artificially blow up the cwnd
 * usage, and allow application-limited process to probe bw more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tp->is_cwnd_limited)
		return true;

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;

	return false;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimates in how many jiffies next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					bool pace_delay)
{
	if (pace_delay)
		when += tcp_pacing_delay(sk);
	inet_csk_reset_xmit_timer(sk, what, when,
				  tcp_rto_max(sk));
}

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
			   inet_csk(sk)->icsk_backoff);
	u64 when = (u64)tcp_probe0_base(sk) << backoff;

	return (unsigned long)min_t(u64, when, max_when);
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), true);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
		     enum skb_drop_reason *reason);

static inline int tcp_filter(struct sock *sk, struct sk_buff *skb,
			     enum skb_drop_reason *reason)
{
	const struct tcphdr *th = (const struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th), reason);
}

void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
	    tp->packets_out || ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}
*/ 1671 void tcp_select_initial_window(const struct sock *sk, int __space, 1672 __u32 mss, __u32 *rcv_wnd, 1673 __u32 *window_clamp, int wscale_ok, 1674 __u8 *rcv_wscale, __u32 init_rcv_wnd); 1675 1676 static inline int __tcp_win_from_space(u8 scaling_ratio, int space) 1677 { 1678 s64 scaled_space = (s64)space * scaling_ratio; 1679 1680 return scaled_space >> TCP_RMEM_TO_WIN_SCALE; 1681 } 1682 1683 static inline int tcp_win_from_space(const struct sock *sk, int space) 1684 { 1685 return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space); 1686 } 1687 1688 /* inverse of __tcp_win_from_space() */ 1689 static inline int __tcp_space_from_win(u8 scaling_ratio, int win) 1690 { 1691 u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE; 1692 1693 do_div(val, scaling_ratio); 1694 return val; 1695 } 1696 1697 static inline int tcp_space_from_win(const struct sock *sk, int win) 1698 { 1699 return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win); 1700 } 1701 1702 /* Assume a 50% default for skb->len/skb->truesize ratio. 1703 * This may be adjusted later in tcp_measure_rcv_mss(). 1704 */ 1705 #define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1)) 1706 1707 static inline void tcp_scaling_ratio_init(struct sock *sk) 1708 { 1709 tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; 1710 } 1711 1712 /* Note: caller must be prepared to deal with negative returns */ 1713 static inline int tcp_space(const struct sock *sk) 1714 { 1715 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - 1716 READ_ONCE(sk->sk_backlog.len) - 1717 atomic_read(&sk->sk_rmem_alloc)); 1718 } 1719 1720 static inline int tcp_full_space(const struct sock *sk) 1721 { 1722 return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf)); 1723 } 1724 1725 static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh) 1726 { 1727 int unused_mem = sk_unused_reserved_mem(sk); 1728 struct tcp_sock *tp = tcp_sk(sk); 1729 1730 tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh); 1731 if (unused_mem) 1732 tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh, 1733 tcp_win_from_space(sk, unused_mem)); 1734 } 1735 1736 static inline void tcp_adjust_rcv_ssthresh(struct sock *sk) 1737 { 1738 __tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss); 1739 } 1740 1741 void tcp_cleanup_rbuf(struct sock *sk, int copied); 1742 void __tcp_cleanup_rbuf(struct sock *sk, int copied); 1743 1744 1745 /* We provision sk_rcvbuf around 200% of sk_rcvlowat. 1746 * If 87.5 % (7/8) of the space has been consumed, we want to override 1747 * SO_RCVLOWAT constraint, since we are receiving skbs with too small 1748 * len/truesize ratio. 
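 * Worked example with illustrative numbers: for sk_rcvbuf == 1 MB the
 * threshold computed below is 1 MB - 1 MB/8 = 896 KB of sk_rmem_alloc;
 * above that the socket is treated as under receive-memory pressure even
 * though SO_RCVLOWAT may not have been met.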
1749 */ 1750 static inline bool tcp_rmem_pressure(const struct sock *sk) 1751 { 1752 int rcvbuf, threshold; 1753 1754 if (tcp_under_memory_pressure(sk)) 1755 return true; 1756 1757 rcvbuf = READ_ONCE(sk->sk_rcvbuf); 1758 threshold = rcvbuf - (rcvbuf >> 3); 1759 1760 return atomic_read(&sk->sk_rmem_alloc) > threshold; 1761 } 1762 1763 static inline bool tcp_epollin_ready(const struct sock *sk, int target) 1764 { 1765 const struct tcp_sock *tp = tcp_sk(sk); 1766 int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq); 1767 1768 if (avail <= 0) 1769 return false; 1770 1771 return (avail >= target) || tcp_rmem_pressure(sk) || 1772 (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss); 1773 } 1774 1775 extern void tcp_openreq_init_rwin(struct request_sock *req, 1776 const struct sock *sk_listener, 1777 const struct dst_entry *dst); 1778 1779 void tcp_enter_memory_pressure(struct sock *sk); 1780 void tcp_leave_memory_pressure(struct sock *sk); 1781 1782 static inline int keepalive_intvl_when(const struct tcp_sock *tp) 1783 { 1784 struct net *net = sock_net((struct sock *)tp); 1785 int val; 1786 1787 /* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl() 1788 * and do_tcp_setsockopt(). 1789 */ 1790 val = READ_ONCE(tp->keepalive_intvl); 1791 1792 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl); 1793 } 1794 1795 static inline int keepalive_time_when(const struct tcp_sock *tp) 1796 { 1797 struct net *net = sock_net((struct sock *)tp); 1798 int val; 1799 1800 /* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */ 1801 val = READ_ONCE(tp->keepalive_time); 1802 1803 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time); 1804 } 1805 1806 static inline int keepalive_probes(const struct tcp_sock *tp) 1807 { 1808 struct net *net = sock_net((struct sock *)tp); 1809 int val; 1810 1811 /* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt() 1812 * and do_tcp_setsockopt(). 1813 */ 1814 val = READ_ONCE(tp->keepalive_probes); 1815 1816 return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes); 1817 } 1818 1819 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) 1820 { 1821 const struct inet_connection_sock *icsk = &tp->inet_conn; 1822 1823 return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime, 1824 tcp_jiffies32 - tp->rcv_tstamp); 1825 } 1826 1827 static inline int tcp_fin_time(const struct sock *sk) 1828 { 1829 int fin_timeout = tcp_sk(sk)->linger2 ? : 1830 READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout); 1831 const int rto = inet_csk(sk)->icsk_rto; 1832 1833 if (fin_timeout < (rto << 2) - (rto >> 1)) 1834 fin_timeout = (rto << 2) - (rto >> 1); 1835 1836 return fin_timeout; 1837 } 1838 1839 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, 1840 int paws_win) 1841 { 1842 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) 1843 return true; 1844 if (unlikely(!time_before32(ktime_get_seconds(), 1845 rx_opt->ts_recent_stamp + TCP_PAWS_WRAP))) 1846 return true; 1847 /* 1848 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, 1849 * then following tcp messages have valid values. Ignore 0 value, 1850 * or else 'negative' tsval might forbid us to accept their packets. 
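 * (Illustrative corner case: with ts_recent still 0, a peer whose first
 *  real tsval happens to have its top bit set would make
 *  (s32)(ts_recent - rcv_tsval) positive, failing the window test above;
 *  the explicit check below keeps such peers acceptable.)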
1851 */ 1852 if (!rx_opt->ts_recent) 1853 return true; 1854 return false; 1855 } 1856 1857 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, 1858 int rst) 1859 { 1860 if (tcp_paws_check(rx_opt, 0)) 1861 return false; 1862 1863 /* RST segments are not recommended to carry a timestamp, 1864 and, if they do, it is recommended to ignore PAWS because 1865 "their cleanup function should take precedence over timestamps." 1866 That advice is certainly a mistake. It is necessary to understand the 1867 reason for this constraint before relaxing it: if the peer reboots, its 1868 clock may go out of sync and half-open connections will then never be reset. 1869 The problem would not exist if all 1870 implementations followed the draft about maintaining the clock 1871 across reboots. Linux-2.2 DOES NOT! 1872 1873 However, we can relax the time bounds for RST segments to MSL. 1874 */ 1875 if (rst && !time_before32(ktime_get_seconds(), 1876 rx_opt->ts_recent_stamp + TCP_PAWS_MSL)) 1877 return false; 1878 return true; 1879 } 1880 1881 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) 1882 { 1883 u32 ace; 1884 1885 /* mptcp hooks are only on the slow path */ 1886 if (sk_is_mptcp((struct sock *)tp)) 1887 return; 1888 1889 ace = tcp_ecn_mode_accecn(tp) ? 1890 ((tp->delivered_ce + TCP_ACCECN_CEP_INIT_OFFSET) & 1891 TCP_ACCECN_CEP_ACE_MASK) : 0; 1892 1893 tp->pred_flags = htonl((tp->tcp_header_len << 26) | 1894 (ace << 22) | 1895 ntohl(TCP_FLAG_ACK) | 1896 snd_wnd); 1897 } 1898 1899 static inline void tcp_fast_path_on(struct tcp_sock *tp) 1900 { 1901 __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale); 1902 } 1903 1904 static inline void tcp_fast_path_check(struct sock *sk) 1905 { 1906 struct tcp_sock *tp = tcp_sk(sk); 1907 1908 if (RB_EMPTY_ROOT(&tp->out_of_order_queue) && 1909 tp->rcv_wnd && 1910 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && 1911 !tp->urg_data) 1912 tcp_fast_path_on(tp); 1913 } 1914 1915 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, 1916 int mib_idx, u32 *last_oow_ack_time); 1917 1918 static inline void tcp_mib_init(struct net *net) 1919 { 1920 /* See RFC 2012 */ 1921 TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1); 1922 TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ); 1923 TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ); 1924 TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1); 1925 } 1926 1927 /* from STCP */ 1928 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) 1929 { 1930 tp->retransmit_skb_hint = NULL; 1931 } 1932 1933 #define tcp_md5_addr tcp_ao_addr 1934 1935 /* - key database */ 1936 struct tcp_md5sig_key { 1937 struct hlist_node node; 1938 u8 keylen; 1939 u8 family; /* AF_INET or AF_INET6 */ 1940 u8 prefixlen; 1941 u8 flags; 1942 union tcp_md5_addr addr; 1943 int l3index; /* set if key added with L3 scope */ 1944 u8 key[TCP_MD5SIG_MAXKEYLEN]; 1945 struct rcu_head rcu; 1946 }; 1947 1948 /* - sock block */ 1949 struct tcp_md5sig_info { 1950 struct hlist_head head; 1951 struct rcu_head rcu; 1952 }; 1953 1954 /* - pseudo header */ 1955 struct tcp4_pseudohdr { 1956 __be32 saddr; 1957 __be32 daddr; 1958 __u8 pad; 1959 __u8 protocol; 1960 __be16 len; 1961 }; 1962 1963 struct tcp6_pseudohdr { 1964 struct in6_addr saddr; 1965 struct in6_addr daddr; 1966 __be32 len; 1967 __be32 protocol; /* including padding */ 1968 }; 1969 1970 /* 1971 * struct tcp_sigpool - per-CPU pool of ahash_requests 1972 * @scratch: per-CPU temporary area, that can be used between 1973 * tcp_sigpool_start() and tcp_sigpool_end() to perform
1974 * crypto request 1975 * @req: pre-allocated ahash request 1976 */ 1977 struct tcp_sigpool { 1978 void *scratch; 1979 struct ahash_request *req; 1980 }; 1981 1982 int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size); 1983 void tcp_sigpool_get(unsigned int id); 1984 void tcp_sigpool_release(unsigned int id); 1985 int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp, 1986 const struct sk_buff *skb, 1987 unsigned int header_len); 1988 1989 /** 1990 * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash 1991 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash() 1992 * @c: returned tcp_sigpool for usage (uninitialized on failure) 1993 * 1994 * Returns: 0 on success, error otherwise. 1995 */ 1996 int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c); 1997 /** 1998 * tcp_sigpool_end - enable bh and stop using tcp_sigpool 1999 * @c: tcp_sigpool context that was returned by tcp_sigpool_start() 2000 */ 2001 void tcp_sigpool_end(struct tcp_sigpool *c); 2002 size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len); 2003 /* - functions */ 2004 void tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, 2005 const struct sock *sk, const struct sk_buff *skb); 2006 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, 2007 int family, u8 prefixlen, int l3index, u8 flags, 2008 const u8 *newkey, u8 newkeylen); 2009 int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr, 2010 int family, u8 prefixlen, int l3index, 2011 struct tcp_md5sig_key *key); 2012 2013 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, 2014 int family, u8 prefixlen, int l3index, u8 flags); 2015 void tcp_clear_md5_list(struct sock *sk); 2016 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, 2017 const struct sock *addr_sk); 2018 2019 #ifdef CONFIG_TCP_MD5SIG 2020 struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index, 2021 const union tcp_md5_addr *addr, 2022 int family, bool any_l3index); 2023 static inline struct tcp_md5sig_key * 2024 tcp_md5_do_lookup(const struct sock *sk, int l3index, 2025 const union tcp_md5_addr *addr, int family) 2026 { 2027 if (!static_branch_unlikely(&tcp_md5_needed.key)) 2028 return NULL; 2029 return __tcp_md5_do_lookup(sk, l3index, addr, family, false); 2030 } 2031 2032 static inline struct tcp_md5sig_key * 2033 tcp_md5_do_lookup_any_l3index(const struct sock *sk, 2034 const union tcp_md5_addr *addr, int family) 2035 { 2036 if (!static_branch_unlikely(&tcp_md5_needed.key)) 2037 return NULL; 2038 return __tcp_md5_do_lookup(sk, 0, addr, family, true); 2039 } 2040 2041 #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) 2042 void tcp_md5_destruct_sock(struct sock *sk); 2043 #else 2044 static inline struct tcp_md5sig_key * 2045 tcp_md5_do_lookup(const struct sock *sk, int l3index, 2046 const union tcp_md5_addr *addr, int family) 2047 { 2048 return NULL; 2049 } 2050 2051 static inline struct tcp_md5sig_key * 2052 tcp_md5_do_lookup_any_l3index(const struct sock *sk, 2053 const union tcp_md5_addr *addr, int family) 2054 { 2055 return NULL; 2056 } 2057 2058 #define tcp_twsk_md5_key(twsk) NULL 2059 static inline void tcp_md5_destruct_sock(struct sock *sk) 2060 { 2061 } 2062 #endif 2063 2064 struct md5_ctx; 2065 void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb, 2066 unsigned int header_len); 2067 void tcp_md5_hash_key(struct md5_ctx *ctx, const struct tcp_md5sig_key *key); 2068 2069 /* From tcp_fastopen.c */ 2070 void 
tcp_fastopen_cache_get(struct sock *sk, u16 *mss, 2071 struct tcp_fastopen_cookie *cookie); 2072 void tcp_fastopen_cache_set(struct sock *sk, u16 mss, 2073 struct tcp_fastopen_cookie *cookie, bool syn_lost, 2074 u16 try_exp); 2075 struct tcp_fastopen_request { 2076 /* Fast Open cookie. Size 0 means a cookie request */ 2077 struct tcp_fastopen_cookie cookie; 2078 struct msghdr *data; /* data in MSG_FASTOPEN */ 2079 size_t size; 2080 int copied; /* queued in tcp_connect() */ 2081 struct ubuf_info *uarg; 2082 }; 2083 void tcp_free_fastopen_req(struct tcp_sock *tp); 2084 void tcp_fastopen_destroy_cipher(struct sock *sk); 2085 void tcp_fastopen_ctx_destroy(struct net *net); 2086 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk, 2087 void *primary_key, void *backup_key); 2088 int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk, 2089 u64 *key); 2090 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb); 2091 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, 2092 struct request_sock *req, 2093 struct tcp_fastopen_cookie *foc, 2094 const struct dst_entry *dst); 2095 void tcp_fastopen_init_key_once(struct net *net); 2096 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, 2097 struct tcp_fastopen_cookie *cookie); 2098 bool tcp_fastopen_defer_connect(struct sock *sk, int *err); 2099 #define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t) 2100 #define TCP_FASTOPEN_KEY_MAX 2 2101 #define TCP_FASTOPEN_KEY_BUF_LENGTH \ 2102 (TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX) 2103 2104 /* Fastopen key context */ 2105 struct tcp_fastopen_context { 2106 siphash_key_t key[TCP_FASTOPEN_KEY_MAX]; 2107 int num; 2108 struct rcu_head rcu; 2109 }; 2110 2111 void tcp_fastopen_active_disable(struct sock *sk); 2112 bool tcp_fastopen_active_should_disable(struct sock *sk); 2113 void tcp_fastopen_active_disable_ofo_check(struct sock *sk); 2114 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired); 2115 2116 /* Caller needs to wrap with rcu_read_(un)lock() */ 2117 static inline 2118 struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk) 2119 { 2120 struct tcp_fastopen_context *ctx; 2121 2122 ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx); 2123 if (!ctx) 2124 ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx); 2125 return ctx; 2126 } 2127 2128 static inline 2129 bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc, 2130 const struct tcp_fastopen_cookie *orig) 2131 { 2132 if (orig->len == TCP_FASTOPEN_COOKIE_SIZE && 2133 orig->len == foc->len && 2134 !memcmp(orig->val, foc->val, foc->len)) 2135 return true; 2136 return false; 2137 } 2138 2139 static inline 2140 int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx) 2141 { 2142 return ctx->num; 2143 } 2144 2145 /* Latencies incurred by various limits for a sender. They are 2146 * chronograph-like stats that are mutually exclusive. 
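 * (Hedged usage note: tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED) begins
 *  accounting time stalled on the peer's receive window and
 *  tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED) ends it; because the states
 *  are mutually exclusive, only one chrono type is charged at any instant.)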
2147 */ 2148 enum tcp_chrono { 2149 TCP_CHRONO_UNSPEC, 2150 TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */ 2151 TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */ 2152 TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */ 2153 __TCP_CHRONO_MAX, 2154 }; 2155 2156 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type); 2157 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type); 2158 2159 /* This helper is needed, because skb->tcp_tsorted_anchor uses 2160 * the same memory storage than skb->destructor/_skb_refdst 2161 */ 2162 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb) 2163 { 2164 skb->destructor = NULL; 2165 skb->_skb_refdst = 0UL; 2166 } 2167 2168 #define tcp_skb_tsorted_save(skb) { \ 2169 unsigned long _save = skb->_skb_refdst; \ 2170 skb->_skb_refdst = 0UL; 2171 2172 #define tcp_skb_tsorted_restore(skb) \ 2173 skb->_skb_refdst = _save; \ 2174 } 2175 2176 void tcp_write_queue_purge(struct sock *sk); 2177 2178 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk) 2179 { 2180 return skb_rb_first(&sk->tcp_rtx_queue); 2181 } 2182 2183 static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk) 2184 { 2185 return skb_rb_last(&sk->tcp_rtx_queue); 2186 } 2187 2188 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) 2189 { 2190 return skb_peek_tail(&sk->sk_write_queue); 2191 } 2192 2193 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ 2194 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp) 2195 2196 static inline struct sk_buff *tcp_send_head(const struct sock *sk) 2197 { 2198 return skb_peek(&sk->sk_write_queue); 2199 } 2200 2201 static inline bool tcp_skb_is_last(const struct sock *sk, 2202 const struct sk_buff *skb) 2203 { 2204 return skb_queue_is_last(&sk->sk_write_queue, skb); 2205 } 2206 2207 /** 2208 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue 2209 * @sk: socket 2210 * 2211 * Since the write queue can have a temporary empty skb in it, 2212 * we must not use "return skb_queue_empty(&sk->sk_write_queue)" 2213 */ 2214 static inline bool tcp_write_queue_empty(const struct sock *sk) 2215 { 2216 const struct tcp_sock *tp = tcp_sk(sk); 2217 2218 return tp->write_seq == tp->snd_nxt; 2219 } 2220 2221 static inline bool tcp_rtx_queue_empty(const struct sock *sk) 2222 { 2223 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue); 2224 } 2225 2226 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk) 2227 { 2228 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk); 2229 } 2230 2231 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) 2232 { 2233 __skb_queue_tail(&sk->sk_write_queue, skb); 2234 2235 /* Queue it, remembering where we must start sending. */ 2236 if (sk->sk_write_queue.next == skb) 2237 tcp_chrono_start(sk, TCP_CHRONO_BUSY); 2238 } 2239 2240 /* Insert new before skb on the write queue of sk. 
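 * (Hedged example: MTU probing in tcp_output.c builds a probe segment nskb
 *  covering the first skbs of the queue and links it ahead of them with
 *  tcp_insert_write_queue_before(nskb, skb, sk).)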
*/ 2241 static inline void tcp_insert_write_queue_before(struct sk_buff *new, 2242 struct sk_buff *skb, 2243 struct sock *sk) 2244 { 2245 __skb_queue_before(&sk->sk_write_queue, skb, new); 2246 } 2247 2248 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) 2249 { 2250 tcp_skb_tsorted_anchor_cleanup(skb); 2251 __skb_unlink(skb, &sk->sk_write_queue); 2252 } 2253 2254 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb); 2255 2256 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk) 2257 { 2258 tcp_skb_tsorted_anchor_cleanup(skb); 2259 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue); 2260 } 2261 2262 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk) 2263 { 2264 list_del(&skb->tcp_tsorted_anchor); 2265 tcp_rtx_queue_unlink(skb, sk); 2266 tcp_wmem_free_skb(sk, skb); 2267 } 2268 2269 static inline void tcp_write_collapse_fence(struct sock *sk) 2270 { 2271 struct sk_buff *skb = tcp_write_queue_tail(sk); 2272 2273 if (skb) 2274 TCP_SKB_CB(skb)->eor = 1; 2275 } 2276 2277 static inline void tcp_push_pending_frames(struct sock *sk) 2278 { 2279 if (tcp_send_head(sk)) { 2280 struct tcp_sock *tp = tcp_sk(sk); 2281 2282 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); 2283 } 2284 } 2285 2286 /* Start sequence of the skb just after the highest skb with SACKed 2287 * bit, valid only if sacked_out > 0 or when the caller has ensured 2288 * validity by itself. 2289 */ 2290 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp) 2291 { 2292 if (!tp->sacked_out) 2293 return tp->snd_una; 2294 2295 if (tp->highest_sack == NULL) 2296 return tp->snd_nxt; 2297 2298 return TCP_SKB_CB(tp->highest_sack)->seq; 2299 } 2300 2301 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) 2302 { 2303 tcp_sk(sk)->highest_sack = skb_rb_next(skb); 2304 } 2305 2306 static inline struct sk_buff *tcp_highest_sack(struct sock *sk) 2307 { 2308 return tcp_sk(sk)->highest_sack; 2309 } 2310 2311 static inline void tcp_highest_sack_reset(struct sock *sk) 2312 { 2313 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk); 2314 } 2315 2316 /* Called when old skb is about to be deleted and replaced by new skb */ 2317 static inline void tcp_highest_sack_replace(struct sock *sk, 2318 struct sk_buff *old, 2319 struct sk_buff *new) 2320 { 2321 if (old == tcp_highest_sack(sk)) 2322 tcp_sk(sk)->highest_sack = new; 2323 } 2324 2325 /* This helper checks if socket has IP_TRANSPARENT set */ 2326 static inline bool inet_sk_transparent(const struct sock *sk) 2327 { 2328 switch (sk->sk_state) { 2329 case TCP_TIME_WAIT: 2330 return inet_twsk(sk)->tw_transparent; 2331 case TCP_NEW_SYN_RECV: 2332 return inet_rsk(inet_reqsk(sk))->no_srccheck; 2333 } 2334 return inet_test_bit(TRANSPARENT, sk); 2335 } 2336 2337 /* Determines whether this is a thin stream (which may suffer from 2338 * increased latency). Used to trigger latency-reducing mechanisms. 
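 * (Illustrative reasoning: with fewer than 4 packets in flight there are
 *  rarely enough duplicate ACKs to trigger fast retransmit, so such flows
 *  may instead rely on mechanisms like thin-stream linear timeouts.)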
2339 */ 2340 static inline bool tcp_stream_is_thin(struct tcp_sock *tp) 2341 { 2342 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp); 2343 } 2344 2345 /* /proc */ 2346 enum tcp_seq_states { 2347 TCP_SEQ_STATE_LISTENING, 2348 TCP_SEQ_STATE_ESTABLISHED, 2349 }; 2350 2351 void *tcp_seq_start(struct seq_file *seq, loff_t *pos); 2352 void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos); 2353 void tcp_seq_stop(struct seq_file *seq, void *v); 2354 2355 struct tcp_seq_afinfo { 2356 sa_family_t family; 2357 }; 2358 2359 struct tcp_iter_state { 2360 struct seq_net_private p; 2361 enum tcp_seq_states state; 2362 struct sock *syn_wait_sk; 2363 int bucket, offset, sbucket, num; 2364 loff_t last_pos; 2365 }; 2366 2367 extern struct request_sock_ops tcp_request_sock_ops; 2368 extern struct request_sock_ops tcp6_request_sock_ops; 2369 2370 void tcp_v4_destroy_sock(struct sock *sk); 2371 2372 struct sk_buff *tcp_gso_segment(struct sk_buff *skb, 2373 netdev_features_t features); 2374 struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th); 2375 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb, 2376 struct tcphdr *th); 2377 INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff)); 2378 INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)); 2379 #ifdef CONFIG_INET 2380 void tcp_gro_complete(struct sk_buff *skb); 2381 #else 2382 static inline void tcp_gro_complete(struct sk_buff *skb) { } 2383 #endif 2384 2385 static inline void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, 2386 __be32 daddr) 2387 { 2388 struct tcphdr *th = tcp_hdr(skb); 2389 2390 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0); 2391 skb->csum_start = skb_transport_header(skb) - skb->head; 2392 skb->csum_offset = offsetof(struct tcphdr, check); 2393 } 2394 2395 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) 2396 { 2397 struct net *net = sock_net((struct sock *)tp); 2398 u32 val; 2399 2400 val = READ_ONCE(tp->notsent_lowat); 2401 2402 return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat); 2403 } 2404 2405 bool tcp_stream_memory_free(const struct sock *sk, int wake); 2406 2407 #ifdef CONFIG_PROC_FS 2408 int tcp4_proc_init(void); 2409 void tcp4_proc_exit(void); 2410 #endif 2411 2412 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req); 2413 int tcp_conn_request(struct request_sock_ops *rsk_ops, 2414 const struct tcp_request_sock_ops *af_ops, 2415 struct sock *sk, struct sk_buff *skb); 2416 2417 /* TCP af-specific functions */ 2418 struct tcp_sock_af_ops { 2419 #ifdef CONFIG_TCP_MD5SIG 2420 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk, 2421 const struct sock *addr_sk); 2422 void (*calc_md5_hash)(char *location, 2423 const struct tcp_md5sig_key *md5, 2424 const struct sock *sk, 2425 const struct sk_buff *skb); 2426 int (*md5_parse)(struct sock *sk, 2427 int optname, 2428 sockptr_t optval, 2429 int optlen); 2430 #endif 2431 #ifdef CONFIG_TCP_AO 2432 int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen); 2433 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk, 2434 struct sock *addr_sk, 2435 int sndid, int rcvid); 2436 int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key, 2437 const struct sock *sk, 2438 __be32 sisn, __be32 disn, bool send); 2439 int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao, 2440 const struct sock *sk, const struct sk_buff *skb, 2441 const u8 *tkey, int hash_offset, u32 sne); 2442 #endif 2443 
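	/* (Hedged note, not part of the original text: per-family instances of
	 * this table, e.g. ipv4_specific in tcp_ipv4.c, supply these callbacks;
	 * the request-sock counterpart is declared right below.)
	 */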
}; 2444 2445 struct tcp_request_sock_ops { 2446 u16 mss_clamp; 2447 #ifdef CONFIG_TCP_MD5SIG 2448 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk, 2449 const struct sock *addr_sk); 2450 void (*calc_md5_hash) (char *location, 2451 const struct tcp_md5sig_key *md5, 2452 const struct sock *sk, 2453 const struct sk_buff *skb); 2454 #endif 2455 #ifdef CONFIG_TCP_AO 2456 struct tcp_ao_key *(*ao_lookup)(const struct sock *sk, 2457 struct request_sock *req, 2458 int sndid, int rcvid); 2459 int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk); 2460 int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt, 2461 struct request_sock *req, const struct sk_buff *skb, 2462 int hash_offset, u32 sne); 2463 #endif 2464 #ifdef CONFIG_SYN_COOKIES 2465 __u32 (*cookie_init_seq)(const struct sk_buff *skb, 2466 __u16 *mss); 2467 #endif 2468 struct dst_entry *(*route_req)(const struct sock *sk, 2469 struct sk_buff *skb, 2470 struct flowi *fl, 2471 struct request_sock *req, 2472 u32 tw_isn); 2473 u32 (*init_seq)(const struct sk_buff *skb); 2474 u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb); 2475 int (*send_synack)(const struct sock *sk, struct dst_entry *dst, 2476 struct flowi *fl, struct request_sock *req, 2477 struct tcp_fastopen_cookie *foc, 2478 enum tcp_synack_type synack_type, 2479 struct sk_buff *syn_skb); 2480 }; 2481 2482 extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops; 2483 #if IS_ENABLED(CONFIG_IPV6) 2484 extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops; 2485 #endif 2486 2487 #ifdef CONFIG_SYN_COOKIES 2488 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, 2489 const struct sock *sk, struct sk_buff *skb, 2490 __u16 *mss) 2491 { 2492 tcp_synq_overflow(sk); 2493 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); 2494 return ops->cookie_init_seq(skb, mss); 2495 } 2496 #else 2497 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, 2498 const struct sock *sk, struct sk_buff *skb, 2499 __u16 *mss) 2500 { 2501 return 0; 2502 } 2503 #endif 2504 2505 struct tcp_key { 2506 union { 2507 struct { 2508 struct tcp_ao_key *ao_key; 2509 char *traffic_key; 2510 u32 sne; 2511 u8 rcv_next; 2512 }; 2513 struct tcp_md5sig_key *md5_key; 2514 }; 2515 enum { 2516 TCP_KEY_NONE = 0, 2517 TCP_KEY_MD5, 2518 TCP_KEY_AO, 2519 } type; 2520 }; 2521 2522 static inline void tcp_get_current_key(const struct sock *sk, 2523 struct tcp_key *out) 2524 { 2525 #if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG) 2526 const struct tcp_sock *tp = tcp_sk(sk); 2527 #endif 2528 2529 #ifdef CONFIG_TCP_AO 2530 if (static_branch_unlikely(&tcp_ao_needed.key)) { 2531 struct tcp_ao_info *ao; 2532 2533 ao = rcu_dereference_protected(tp->ao_info, 2534 lockdep_sock_is_held(sk)); 2535 if (ao) { 2536 out->ao_key = READ_ONCE(ao->current_key); 2537 out->type = TCP_KEY_AO; 2538 return; 2539 } 2540 } 2541 #endif 2542 #ifdef CONFIG_TCP_MD5SIG 2543 if (static_branch_unlikely(&tcp_md5_needed.key) && 2544 rcu_access_pointer(tp->md5sig_info)) { 2545 out->md5_key = tp->af_specific->md5_lookup(sk, sk); 2546 if (out->md5_key) { 2547 out->type = TCP_KEY_MD5; 2548 return; 2549 } 2550 } 2551 #endif 2552 out->type = TCP_KEY_NONE; 2553 } 2554 2555 static inline bool tcp_key_is_md5(const struct tcp_key *key) 2556 { 2557 if (static_branch_tcp_md5()) 2558 return key->type == TCP_KEY_MD5; 2559 return false; 2560 } 2561 2562 static inline bool tcp_key_is_ao(const struct tcp_key *key) 2563 { 2564 if 
(static_branch_tcp_ao()) 2565 return key->type == TCP_KEY_AO; 2566 return false; 2567 } 2568 2569 int tcpv4_offload_init(void); 2570 2571 void tcp_v4_init(void); 2572 void tcp_init(void); 2573 2574 /* tcp_recovery.c */ 2575 void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb); 2576 void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced); 2577 extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, 2578 u32 reo_wnd); 2579 extern bool tcp_rack_mark_lost(struct sock *sk); 2580 extern void tcp_rack_reo_timeout(struct sock *sk); 2581 2582 /* tcp_plb.c */ 2583 2584 /* 2585 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state 2586 * expects cong_ratio which represents fraction of traffic that experienced 2587 * congestion over a single RTT. In order to avoid floating point operations, 2588 * this fraction should be mapped to (1 << TCP_PLB_SCALE) and passed in. 2589 */ 2590 #define TCP_PLB_SCALE 8 2591 2592 /* State for PLB (Protective Load Balancing) for a single TCP connection. */ 2593 struct tcp_plb_state { 2594 u8 consec_cong_rounds:5, /* consecutive congested rounds */ 2595 unused:3; 2596 u32 pause_until; /* jiffies32 when PLB can resume rerouting */ 2597 }; 2598 2599 static inline void tcp_plb_init(const struct sock *sk, 2600 struct tcp_plb_state *plb) 2601 { 2602 plb->consec_cong_rounds = 0; 2603 plb->pause_until = 0; 2604 } 2605 void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb, 2606 const int cong_ratio); 2607 void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb); 2608 void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb); 2609 2610 static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str) 2611 { 2612 WARN_ONCE(cond, 2613 "%scwn:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n", 2614 str, 2615 tcp_snd_cwnd(tcp_sk(sk)), 2616 tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out, 2617 tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out, 2618 tcp_sk(sk)->tlp_high_seq, sk->sk_state, 2619 inet_csk(sk)->icsk_ca_state, 2620 tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache, 2621 inet_csk(sk)->icsk_pmtu_cookie); 2622 } 2623 2624 /* At how many usecs into the future should the RTO fire? */ 2625 static inline s64 tcp_rto_delta_us(const struct sock *sk) 2626 { 2627 const struct sk_buff *skb = tcp_rtx_queue_head(sk); 2628 u32 rto = inet_csk(sk)->icsk_rto; 2629 2630 if (likely(skb)) { 2631 u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto); 2632 2633 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; 2634 } else { 2635 tcp_warn_once(sk, 1, "rtx queue empty: "); 2636 return jiffies_to_usecs(rto); 2637 } 2638 2639 } 2640 2641 /* 2642 * Save and compile IPv4 options, return a pointer to it 2643 */ 2644 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net, 2645 struct sk_buff *skb) 2646 { 2647 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt; 2648 struct ip_options_rcu *dopt = NULL; 2649 2650 if (opt->optlen) { 2651 int opt_size = sizeof(*dopt) + opt->optlen; 2652 2653 dopt = kmalloc(opt_size, GFP_ATOMIC); 2654 if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) { 2655 kfree(dopt); 2656 dopt = NULL; 2657 } 2658 } 2659 return dopt; 2660 } 2661 2662 /* locally generated TCP pure ACKs have skb->truesize == 2 2663 * (check tcp_send_ack() in net/ipv4/tcp_output.c ) 2664 * This is much faster than dissecting the packet to find out. 
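 * (Hedged note: a legitimately built skb never has truesize == 2, since real
 *  packets account for at least sizeof(struct sk_buff) plus their data, so
 *  the value doubles as an unambiguous marker set by skb_set_tcp_pure_ack().)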
2665 * (Think of GRE encapsulations, IPv4, IPv6, ...) 2666 */ 2667 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb) 2668 { 2669 return skb->truesize == 2; 2670 } 2671 2672 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb) 2673 { 2674 skb->truesize = 2; 2675 } 2676 2677 static inline int tcp_inq(struct sock *sk) 2678 { 2679 struct tcp_sock *tp = tcp_sk(sk); 2680 int answ; 2681 2682 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 2683 answ = 0; 2684 } else if (sock_flag(sk, SOCK_URGINLINE) || 2685 !tp->urg_data || 2686 before(tp->urg_seq, tp->copied_seq) || 2687 !before(tp->urg_seq, tp->rcv_nxt)) { 2688 2689 answ = tp->rcv_nxt - tp->copied_seq; 2690 2691 /* Subtract 1, if FIN was received */ 2692 if (answ && sock_flag(sk, SOCK_DONE)) 2693 answ--; 2694 } else { 2695 answ = tp->urg_seq - tp->copied_seq; 2696 } 2697 2698 return answ; 2699 } 2700 2701 int tcp_peek_len(struct socket *sock); 2702 2703 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb) 2704 { 2705 u16 segs_in; 2706 2707 segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs); 2708 2709 /* We update these fields while other threads might 2710 * read them from tcp_get_info() 2711 */ 2712 WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in); 2713 if (skb->len > tcp_hdrlen(skb)) 2714 WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in); 2715 } 2716 2717 /* 2718 * TCP listen path runs lockless. 2719 * We forced "struct sock" to be const qualified to make sure 2720 * we don't modify one of its field by mistake. 2721 * Here, we increment sk_drops which is an atomic_t, so we can safely 2722 * make sock writable again. 2723 */ 2724 static inline void tcp_listendrop(const struct sock *sk) 2725 { 2726 sk_drops_inc((struct sock *)sk); 2727 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); 2728 } 2729 2730 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer); 2731 2732 /* 2733 * Interface for adding Upper Level Protocols over TCP 2734 */ 2735 2736 #define TCP_ULP_NAME_MAX 16 2737 #define TCP_ULP_MAX 128 2738 #define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX) 2739 2740 struct tcp_ulp_ops { 2741 struct list_head list; 2742 2743 /* initialize ulp */ 2744 int (*init)(struct sock *sk); 2745 /* update ulp */ 2746 void (*update)(struct sock *sk, struct proto *p, 2747 void (*write_space)(struct sock *sk)); 2748 /* cleanup ulp */ 2749 void (*release)(struct sock *sk); 2750 /* diagnostic */ 2751 int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin); 2752 size_t (*get_info_size)(const struct sock *sk, bool net_admin); 2753 /* clone ulp */ 2754 void (*clone)(const struct request_sock *req, struct sock *newsk, 2755 const gfp_t priority); 2756 2757 char name[TCP_ULP_NAME_MAX]; 2758 struct module *owner; 2759 }; 2760 int tcp_register_ulp(struct tcp_ulp_ops *type); 2761 void tcp_unregister_ulp(struct tcp_ulp_ops *type); 2762 int tcp_set_ulp(struct sock *sk, const char *name); 2763 void tcp_get_available_ulp(char *buf, size_t len); 2764 void tcp_cleanup_ulp(struct sock *sk); 2765 void tcp_update_ulp(struct sock *sk, struct proto *p, 2766 void (*write_space)(struct sock *sk)); 2767 2768 #define MODULE_ALIAS_TCP_ULP(name) \ 2769 MODULE_INFO(alias, name); \ 2770 MODULE_INFO(alias, "tcp-ulp-" name) 2771 2772 #ifdef CONFIG_NET_SOCK_MSG 2773 struct sk_msg; 2774 struct sk_psock; 2775 2776 #ifdef CONFIG_BPF_SYSCALL 2777 int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); 2778 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); 2779 #ifdef 
CONFIG_BPF_STREAM_PARSER 2780 struct strparser; 2781 int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc, 2782 sk_read_actor_t recv_actor); 2783 #endif /* CONFIG_BPF_STREAM_PARSER */ 2784 #endif /* CONFIG_BPF_SYSCALL */ 2785 2786 #ifdef CONFIG_INET 2787 void tcp_eat_skb(struct sock *sk, struct sk_buff *skb); 2788 #else 2789 static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb) 2790 { 2791 } 2792 #endif 2793 2794 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress, 2795 struct sk_msg *msg, u32 bytes, int flags); 2796 #endif /* CONFIG_NET_SOCK_MSG */ 2797 2798 #if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG) 2799 static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk) 2800 { 2801 } 2802 #endif 2803 2804 #ifdef CONFIG_CGROUP_BPF 2805 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops, 2806 struct sk_buff *skb, 2807 unsigned int end_offset) 2808 { 2809 skops->skb = skb; 2810 skops->skb_data_end = skb->data + end_offset; 2811 } 2812 #else 2813 static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops, 2814 struct sk_buff *skb, 2815 unsigned int end_offset) 2816 { 2817 } 2818 #endif 2819 2820 /* Call BPF_SOCK_OPS program that returns an int. If the return value 2821 * is < 0, then the BPF op failed (for example if the loaded BPF 2822 * program does not support the chosen operation or there is no BPF 2823 * program loaded). 2824 */ 2825 #ifdef CONFIG_BPF 2826 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) 2827 { 2828 struct bpf_sock_ops_kern sock_ops; 2829 int ret; 2830 2831 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); 2832 if (sk_fullsock(sk)) { 2833 sock_ops.is_fullsock = 1; 2834 sock_ops.is_locked_tcp_sock = 1; 2835 sock_owned_by_me(sk); 2836 } 2837 2838 sock_ops.sk = sk; 2839 sock_ops.op = op; 2840 if (nargs > 0) 2841 memcpy(sock_ops.args, args, nargs * sizeof(*args)); 2842 2843 ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops); 2844 if (ret == 0) 2845 ret = sock_ops.reply; 2846 else 2847 ret = -1; 2848 return ret; 2849 } 2850 2851 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) 2852 { 2853 u32 args[2] = {arg1, arg2}; 2854 2855 return tcp_call_bpf(sk, op, 2, args); 2856 } 2857 2858 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, 2859 u32 arg3) 2860 { 2861 u32 args[3] = {arg1, arg2, arg3}; 2862 2863 return tcp_call_bpf(sk, op, 3, args); 2864 } 2865 2866 #else 2867 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) 2868 { 2869 return -EPERM; 2870 } 2871 2872 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) 2873 { 2874 return -EPERM; 2875 } 2876 2877 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, 2878 u32 arg3) 2879 { 2880 return -EPERM; 2881 } 2882 2883 #endif 2884 2885 static inline u32 tcp_timeout_init(struct sock *sk) 2886 { 2887 int timeout; 2888 2889 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL); 2890 2891 if (timeout <= 0) 2892 timeout = TCP_TIMEOUT_INIT; 2893 return min_t(int, timeout, TCP_RTO_MAX); 2894 } 2895 2896 static inline u32 tcp_rwnd_init_bpf(struct sock *sk) 2897 { 2898 int rwnd; 2899 2900 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL); 2901 2902 if (rwnd < 0) 2903 rwnd = 0; 2904 return rwnd; 2905 } 2906 2907 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) 2908 { 2909 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1); 
2910 } 2911 2912 static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt) 2913 { 2914 if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG)) 2915 tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt); 2916 } 2917 2918 #if IS_ENABLED(CONFIG_SMC) 2919 extern struct static_key_false tcp_have_smc; 2920 #endif 2921 2922 #if IS_ENABLED(CONFIG_TLS_DEVICE) 2923 void clean_acked_data_enable(struct tcp_sock *tp, 2924 void (*cad)(struct sock *sk, u32 ack_seq)); 2925 void clean_acked_data_disable(struct tcp_sock *tp); 2926 void clean_acked_data_flush(void); 2927 #endif 2928 2929 DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled); 2930 static inline void tcp_add_tx_delay(struct sk_buff *skb, 2931 const struct tcp_sock *tp) 2932 { 2933 if (static_branch_unlikely(&tcp_tx_delay_enabled)) 2934 skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC; 2935 } 2936 2937 /* Compute Earliest Departure Time for some control packets 2938 * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets. 2939 */ 2940 static inline u64 tcp_transmit_time(const struct sock *sk) 2941 { 2942 if (static_branch_unlikely(&tcp_tx_delay_enabled)) { 2943 u32 delay = (sk->sk_state == TCP_TIME_WAIT) ? 2944 tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay; 2945 2946 return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC; 2947 } 2948 return 0; 2949 } 2950 2951 static inline int tcp_parse_auth_options(const struct tcphdr *th, 2952 const u8 **md5_hash, const struct tcp_ao_hdr **aoh) 2953 { 2954 const u8 *md5_tmp, *ao_tmp; 2955 int ret; 2956 2957 ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp); 2958 if (ret) 2959 return ret; 2960 2961 if (md5_hash) 2962 *md5_hash = md5_tmp; 2963 2964 if (aoh) { 2965 if (!ao_tmp) 2966 *aoh = NULL; 2967 else 2968 *aoh = (struct tcp_ao_hdr *)(ao_tmp - 2); 2969 } 2970 2971 return 0; 2972 } 2973 2974 static inline bool tcp_ao_required(struct sock *sk, const void *saddr, 2975 int family, int l3index, bool stat_inc) 2976 { 2977 #ifdef CONFIG_TCP_AO 2978 struct tcp_ao_info *ao_info; 2979 struct tcp_ao_key *ao_key; 2980 2981 if (!static_branch_unlikely(&tcp_ao_needed.key)) 2982 return false; 2983 2984 ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info, 2985 lockdep_sock_is_held(sk)); 2986 if (!ao_info) 2987 return false; 2988 2989 ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1); 2990 if (ao_info->ao_required || ao_key) { 2991 if (stat_inc) { 2992 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED); 2993 atomic64_inc(&ao_info->counters.ao_required); 2994 } 2995 return true; 2996 } 2997 #endif 2998 return false; 2999 } 3000 3001 enum skb_drop_reason tcp_inbound_hash(struct sock *sk, 3002 const struct request_sock *req, const struct sk_buff *skb, 3003 const void *saddr, const void *daddr, 3004 int family, int dif, int sdif); 3005 3006 #endif /* _TCP_H */ 3007
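/*
 * Hedged, illustrative sketch (not a declaration from this header): how a
 * receive-path caller might use tcp_parse_auth_options() above to classify
 * an incoming segment.  Apart from tcp_parse_auth_options() and tcp_hdr(),
 * every name below is hypothetical pseudo-usage.
 *
 *	const struct tcp_ao_hdr *aoh;
 *	const u8 *md5_loc;
 *
 *	if (tcp_parse_auth_options(tcp_hdr(skb), &md5_loc, &aoh))
 *		goto drop;		(malformed or duplicated auth options)
 *	if (aoh)
 *		(verify the TCP-AO MAC before accepting the segment)
 *	else if (md5_loc)
 *		(verify the TCP-MD5 digest found at md5_loc)
 *	else
 *		(segment carries neither authentication option)
 */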