/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/kref.h>
#include <linux/ktime.h>
#include <linux/indirect_call_wrapper.h>
#include <linux/bits.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/tcp_ao.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
#include <net/mptcp.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>

extern struct inet_hashinfo tcp_hashinfo;

DECLARE_PER_CPU(unsigned int, tcp_orphan_count);
int tcp_orphan_count_sum(void);

static inline void tcp_orphan_count_inc(void)
{
	this_cpu_inc(tcp_orphan_count);
}

static inline void tcp_orphan_count_dec(void)
{
	this_cpu_dec(tcp_orphan_count);
}

DECLARE_PER_CPU(u32, tcp_tw_isn);

void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	L1_CACHE_ALIGN(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
#define TCP_MIN_SND_MSS		48
#define TCP_MIN_GSO_SIZE	(TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The initial MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal window scale according to RFC1323 */
#define TCP_MAX_WSCALE		14U

/* Default sending frequency of accurate ECN option per RTT */
#define TCP_ACCECN_OPTION_BEACON	3

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs. Nevertheless
				 * this value corresponds to
				 * 63secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_FIN_TIMEOUT_MAX (120 * HZ) /* max TCP_LINGER2 value (two minutes) */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
static_assert((1 << ATO_BITS) > TCP_DELACK_MAX);

#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX_SEC	120
#define TCP_RTO_MAX	((unsigned)(TCP_RTO_MAX_SEC * HZ))
#define TCP_RTO_MIN	((unsigned)(HZ / 5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */

#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */

#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

/* Ensure that TCP PAWS checks are relaxed after ~2147 seconds
 * to avoid overflows. This assumes a clock smaller than 1 Mhz.
 * Default clock is 1 Khz, tcp_usec_ts uses 1 Mhz.
 */
#define TCP_PAWS_WRAP (INT_MAX / USEC_PER_SEC)

#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
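
/*
 * Illustrative example (not part of the kernel API): with the defaults
 * above, an idle connection to a dead peer is given up after roughly
 * TCP_KEEPALIVE_TIME plus TCP_KEEPALIVE_PROBES unanswered probes spaced
 * TCP_KEEPALIVE_INTVL apart, i.e. 2h + 9 * 75s = 2h 11min 15s.
 */
static inline unsigned long tcp_example_keepalive_deadline(void)
{
	return TCP_KEEPALIVE_TIME + TCP_KEEPALIVE_PROBES * TCP_KEEPALIVE_INTVL;
}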

/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_AO		29	/* Authentication Option (RFC5925) */
#define TCPOPT_MPTCP		30	/* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_ACCECN0		172	/* 0xAC: Accurate ECN Order 0 */
#define TCPOPT_ACCECN1		174	/* 0xAE: Accurate ECN Order 1 */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989
#define TCPOPT_SMC_MAGIC	0xE2D4C3D9

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_FASTOPEN_BASE	2
#define TCPOLEN_ACCECN_BASE	2
#define TCPOLEN_EXP_FASTOPEN_BASE	4
#define TCPOLEN_EXP_SMC_BASE	6

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4
#define TCPOLEN_EXP_SMC_BASE_ALIGNED	8
#define TCPOLEN_ACCECN_PERFIELD		3

/* Maximum number of byte counters in AccECN option + size */
#define TCP_ACCECN_NUMFIELDS		3
#define TCP_ACCECN_MAXSIZE		(TCPOLEN_ACCECN_BASE + \
					 TCPOLEN_ACCECN_PERFIELD * \
					 TCP_ACCECN_NUMFIELDS)
#define TCP_ACCECN_SAFETY_SHIFT		1 /* SAFETY_FACTOR in accecn draft */

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define TFO_SERVER_WO_SOCKOPT1	0x400
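
/*
 * Illustrative example (not part of the kernel API): MAX_TCP_OPTION_SPACE
 * (40 bytes) bounds how many SACK blocks fit next to timestamps:
 * 12 (TS) + 4 (aligned SACK base) + 3 * 8 (blocks) = 40, so at most
 * three SACK blocks can ride on a timestamped segment.
 */
static inline unsigned int tcp_example_max_sack_blocks_with_ts(void)
{
	return (MAX_TCP_OPTION_SPACE - TCPOLEN_TSTAMP_ALIGNED -
		TCPOLEN_SACK_BASE_ALIGNED) / TCPOLEN_SACK_PERBLOCK;
}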

/* sysctl variables for tcp */
extern int sysctl_tcp_max_orphans;
extern long sysctl_tcp_mem[3];

#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */
#define TCP_RACK_STATIC_REO_WND  0x2 /* Use static RACK reo wnd */
#define TCP_RACK_NO_DUPTHRESH    0x4 /* Do not use DUPACK threshold in RACK */

DECLARE_PER_CPU(int, tcp_memory_per_cpu_fw_alloc);

extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sk_enabled(sk) &&
	    mem_cgroup_sk_under_memory_pressure(sk))
		return true;

	if (sk->sk_bypass_prot_mem)
		return false;

	return READ_ONCE(tcp_memory_pressure);
}

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
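
/*
 * Illustrative example (not part of the kernel API): the signed-difference
 * trick keeps the ordering correct across the 2^32 wrap.
 */
static inline bool tcp_example_seq_wraparound(void)
{
	/* 0xFFFFFFF0 comes "before" 0x10: the space wrapped, not reversed. */
	return before(0xFFFFFFF0U, 0x10U) &&
	       between(0xFFFFFFFFU, 0xFFFFFFF0U, 0x10U);
}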

static inline void tcp_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, -skb->truesize);
	if (!skb_zcopy_pure(skb))
		sk_mem_uncharge(sk, skb->truesize);
	else
		sk_mem_uncharge(sk, SKB_TRUESIZE(skb_end_offset(skb)));
	__kfree_skb(skb);
}

void sk_forced_mem_schedule(struct sock *sk, int size);

bool tcp_check_oom(const struct sock *sk, int shift);

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

/*
 *	TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

void tcp_tsq_work_init(void);

int tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

int tcp_v4_rcv(struct sk_buff *skb);

void tcp_remove_empty_skb(struct sock *sk);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
			 size_t size, struct ubuf_info *uarg);
void tcp_splice_eof(struct socket *sock);
int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
int tcp_wmem_schedule(struct sock *sk, int copy);
void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
	      int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, int *karg);
enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
void tcp_twsk_purge(struct list_head *net_exit_list);
int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
			 unsigned int offset, size_t len);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
				     bool force_schedule);

static inline void tcp_dec_quickack_mode(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		/* How many ACKs S/ACKing new data have we sent? */
		const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;

		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_MODE_RFC3168	BIT(0)
#define	TCP_ECN_QUEUE_CWR	BIT(1)
#define	TCP_ECN_DEMAND_CWR	BIT(2)
#define	TCP_ECN_SEEN		BIT(3)
#define	TCP_ECN_MODE_ACCECN	BIT(4)

#define	TCP_ECN_DISABLED	0
#define	TCP_ECN_MODE_PENDING	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)
#define	TCP_ECN_MODE_ANY	(TCP_ECN_MODE_RFC3168 | TCP_ECN_MODE_ACCECN)

static inline bool tcp_ecn_mode_any(const struct tcp_sock *tp)
{
	return tp->ecn_flags & TCP_ECN_MODE_ANY;
}

static inline bool tcp_ecn_mode_rfc3168(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_RFC3168;
}

static inline bool tcp_ecn_mode_accecn(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_ANY) == TCP_ECN_MODE_ACCECN;
}

static inline bool tcp_ecn_disabled(const struct tcp_sock *tp)
{
	return !tcp_ecn_mode_any(tp);
}

static inline bool tcp_ecn_mode_pending(const struct tcp_sock *tp)
{
	return (tp->ecn_flags & TCP_ECN_MODE_PENDING) == TCP_ECN_MODE_PENDING;
}

static inline void tcp_ecn_mode_set(struct tcp_sock *tp, u8 mode)
{
	tp->ecn_flags &= ~TCP_ECN_MODE_ANY;
	tp->ecn_flags |= mode;
}

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3,
	TCP_TW_ACK_OOW = 4
};

enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th,
					      u32 *tw_isn,
					      enum skb_drop_reason *drop_reason);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen,
			   bool *lost_race, enum skb_drop_reason *drop_reason);
enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_pacing_rate(struct sock *sk);
void tcp_set_rto(struct sock *sk);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void __tcp_close(struct sock *sk, long timeout);
void tcp_close(struct sock *sk, long timeout);
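
/*
 * Illustrative example (not part of the kernel API) of the ECN mode
 * helpers above: "pending" keeps both mode bits set until negotiation
 * resolves the connection to exactly one of them.
 */
static inline bool tcp_example_ecn_mode_resolution(struct tcp_sock *tp)
{
	tcp_ecn_mode_set(tp, TCP_ECN_MODE_PENDING);	/* still undecided */
	if (!tcp_ecn_mode_pending(tp))
		return false;
	tcp_ecn_mode_set(tp, TCP_ECN_MODE_ACCECN);	/* negotiation result */
	return tcp_ecn_mode_accecn(tp) && !tcp_ecn_mode_rfc3168(tp);
}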

void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
		  struct poll_table_struct *wait);
int do_tcp_getsockopt(struct sock *sk, int level,
		      int optname, sockptr_t optval, sockptr_t optlen);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
bool tcp_bpf_bypass_getsockopt(int level, int optname);
int do_tcp_setsockopt(struct sock *sk, int level, int optname,
		      sockptr_t optval, unsigned int optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
		   unsigned int optlen);
void tcp_reset_keepalive_timer(struct sock *sk, unsigned long timeout);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		int flags);
int tcp_set_rcvlowat(struct sock *sk, int val);
int tcp_set_window_clamp(struct sock *sk, int val);

static inline void
tcp_update_recv_tstamps(struct sk_buff *skb,
			struct scm_timestamping_internal *tss)
{
	tss->ts[0] = skb->tstamp;
	tss->ts[2] = skb_hwtstamps(skb)->hwtstamp;
}

void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
			struct scm_timestamping_internal *tss);
void tcp_data_ready(struct sock *sk);
#ifdef CONFIG_MMU
int tcp_mmap(struct file *file, struct socket *sock,
	     struct vm_area_struct *vma);
#endif
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);

/*
 *	BPF SKB-less helpers
 */
u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie);
u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss);
u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
			  const struct tcp_request_sock_ops *af_ops,
			  struct sock *sk, struct tcphdr *th);
/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
void tcp_ld_RTO_revert(struct sock *sk, u32 seq);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req,
				  void (*opt_child_init)(struct sock *newsk,
							 const struct sock *sk));
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
	TCP_SYNACK_RETRANS,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type,
				struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
					    struct sock *sk, struct sk_buff *skb,
					    struct tcp_options_received *tcp_opt,
					    int mss, u32 tsoff);

#if IS_ENABLED(CONFIG_BPF)
struct bpf_tcp_req_attrs {
	u32 rcv_tsval;
	u32 rcv_tsecr;
	u16 mss;
	u8 rcv_wscale;
	u8 snd_wscale;
	u8 ecn_ok;
	u8 wscale_ok;
	u8 sack_ok;
	u8 tstamp_ok;
	u8 usec_ts_ok;
	u8 reserved[3];
};
#endif

#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			if (!time_between32(now, last_overflow,
					    last_overflow + HZ))
				WRITE_ONCE(reuse->synq_overflow_ts, now);
			return;
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
	if (!time_between32(now, last_overflow, last_overflow + HZ))
		WRITE_ONCE(tcp_sk_rw(sk)->rx_opt.ts_recent_stamp, now);
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned int last_overflow;
	unsigned int now = jiffies;

	if (sk->sk_reuseport) {
		struct sock_reuseport *reuse;

		reuse = rcu_dereference(sk->sk_reuseport_cb);
		if (likely(reuse)) {
			last_overflow = READ_ONCE(reuse->synq_overflow_ts);
			return !time_between32(now, last_overflow - HZ,
					       last_overflow +
					       TCP_SYNCOOKIE_VALID);
		}
	}

	last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);

	/* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
	 * then we're under synflood. However, we have to use
	 * 'last_overflow - HZ' as lower bound. That's because a concurrent
	 * tcp_synq_overflow() could update .ts_recent_stamp after we read
	 * jiffies but before we store .ts_recent_stamp into last_overflow,
	 * which could lead to rejecting a valid syncookie.
	 */
	return !time_between32(now, last_overflow - HZ,
			       last_overflow + TCP_SYNCOOKIE_VALID);
}
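
/*
 * Illustrative example (not part of the kernel API): per the comment on
 * MAX_SYNCOOKIE_AGE, a cookie minted at counter value "encoded" stays
 * acceptable while the counter has advanced by less than
 * MAX_SYNCOOKIE_AGE, i.e. for at most 2 * 60 seconds.
 */
static inline bool tcp_example_cookie_counter_ok(u32 encoded, u32 now_count)
{
	return now_count - encoded < MAX_SYNCOOKIE_AGE;
}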

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

/* Convert one nsec 64bit timestamp to ts (ms or usec resolution) */
static inline u64 tcp_ns_to_ts(bool usec_ts, u64 val)
{
	if (usec_ts)
		return div_u64(val, NSEC_PER_USEC);

	return div_u64(val, NSEC_PER_MSEC);
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);

static inline bool cookie_ecn_ok(const struct net *net, const struct dst_entry *dst)
{
	return READ_ONCE(net->ipv4.sysctl_tcp_ecn) ||
		dst_feature(dst, RTAX_FEATURE_ECN);
}

#if IS_ENABLED(CONFIG_BPF)
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return skb->sk;
}

struct request_sock *cookie_bpf_check(struct sock *sk, struct sk_buff *skb);
#else
static inline bool cookie_bpf_ok(struct sk_buff *skb)
{
	return false;
}

/* Stub signature matches the CONFIG_BPF declaration above */
static inline struct request_sock *cookie_bpf_check(struct sock *sk,
						    struct sk_buff *skb)
{
	return NULL;
}
#endif

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

void tcp_skb_entail(struct sock *sk, struct sk_buff *skb);
void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
enum tcp_queue {
	TCP_FRAG_IN_WRITE_QUEUE,
	TCP_FRAG_IN_RTX_QUEUE,
};
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp);

void tcp_send_probe0(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority,
			   enum sk_rst_reason reason);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt, u16 flags);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_done_with_error(struct sock *sk, int err);
void tcp_reset(struct sock *sk, struct sk_buff *skb);
void tcp_fin(struct sock *sk);
void __tcp_check_space(struct sock *sk);
static inline void tcp_check_space(struct sock *sk)
{
	/* pairs with tcp_poll() */
	smp_mb();

	if (sk->sk_socket && test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
		__tcp_check_space(sk);
}
void tcp_sack_compress_send_ack(struct sock *sk);

static inline void tcp_cleanup_skb(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
}

static inline void tcp_add_receive_queue(struct sock *sk, struct sk_buff *skb)
{
	DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
	DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb));
	__skb_queue_tail(&sk->sk_receive_queue, skb);
}

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	if (hrtimer_try_to_cancel(&tcp_sk(sk)->pacing_timer) == 1)
		__sock_put(sk);

	if (hrtimer_try_to_cancel(&tcp_sk(sk)->compressed_ack_timer) == 1)
		__sock_put(sk);

	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}
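
/*
 * Illustrative example (not part of the kernel API): a peer that has never
 * advertised more than 64KB caps a 256KB TSO packet at half its maximal
 * window via tcp_bound_to_half_wnd() above.
 */
static inline int tcp_example_half_wnd_bound(struct tcp_sock *tp)
{
	tp->max_window = 65535;		/* above TCP_MSS_DEFAULT (536) */

	return tcp_bound_to_half_wnd(tp, 262144);	/* yields 32767 */
}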

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);
void tcp_rate_check_app_limited(struct sock *sk);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);
int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc,
			sk_read_actor_t recv_actor, bool noack,
			u32 *copied_seq);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off);
void tcp_read_done(struct sock *sk, size_t len);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);

static inline unsigned int tcp_rto_max(const struct sock *sk)
{
	return READ_ONCE(inet_csk(sk)->icsk_rto_max);
}

static inline void tcp_bound_rto(struct sock *sk)
{
	inet_csk(sk)->icsk_rto = min(inet_csk(sk)->icsk_rto, tcp_rto_max(sk));
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

static inline unsigned long tcp_reqsk_timeout(struct request_sock *req)
{
	u64 timeout = (u64)req->timeout << req->num_timeout;

	return (unsigned long)min_t(u64, timeout,
				    tcp_rto_max(req->rsk_listener));
}

u32 tcp_delack_max(const struct sock *sk);

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(const struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = READ_ONCE(inet_csk(sk)->icsk_rto_min);

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(const struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically has been the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)
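
/*
 * Illustrative example (not part of the kernel API), assuming the same
 * scaling as tp->srtt_us (8x SRTT) and tp->rttvar_us (4x RTTVAR):
 * __tcp_set_rto() above computes the RFC6298 "SRTT + 4*RTTVAR", so a
 * 100ms SRTT with a 20ms RTTVAR yields a 180ms RTO.
 */
static inline u32 tcp_example_rfc6298_rto(void)
{
	u32 srtt_us = 8 * 100 * USEC_PER_MSEC;	/* SRTT = 100ms, scaled by 8 */
	u32 rttvar_us = 4 * 20 * USEC_PER_MSEC;	/* RTTVAR = 20ms, scaled by 4 */

	return usecs_to_jiffies((srtt_us >> 3) + rttvar_us);
}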

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return ktime_get_ns();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

static inline u64 tcp_clock_ms(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_MSEC);
}

/* TCP Timestamp included in TS option (RFC 1323) can either use ms
 * or usec resolution. Each socket carries a flag to select one or the
 * other resolution, as the route attribute could change anytime.
 * Each flow must stick to the initial resolution.
 */
static inline u32 tcp_clock_ts(bool usec_ts)
{
	return usec_ts ? tcp_clock_us() : tcp_clock_ms();
}

static inline u32 tcp_time_stamp_ms(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_MSEC);
}

static inline u32 tcp_time_stamp_ts(const struct tcp_sock *tp)
{
	if (tp->tcp_usec_ts)
		return tp->tcp_mstamp;
	return tcp_time_stamp_ms(tp);
}

void tcp_mstamp_refresh(struct tcp_sock *tp);

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}

/* Provide skb TSval in usec or ms unit */
static inline u32 tcp_skb_timestamp_ts(bool usec_ts, const struct sk_buff *skb)
{
	if (usec_ts)
		return tcp_skb_timestamp_us(skb);

	return div_u64(skb->skb_mstamp_ns, NSEC_PER_MSEC);
}

static inline u32 tcp_tw_tsval(const struct tcp_timewait_sock *tcptw)
{
	return tcp_clock_ts(tcptw->tw_sk.tw_usec_ts) + tcptw->tw_ts_offset;
}

static inline u32 tcp_rsk_tsval(const struct tcp_request_sock *treq)
{
	return tcp_clock_ts(treq->req_usec_ts) + treq->ts_off;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN BIT(0)
#define TCPHDR_SYN BIT(1)
#define TCPHDR_RST BIT(2)
#define TCPHDR_PSH BIT(3)
#define TCPHDR_ACK BIT(4)
#define TCPHDR_URG BIT(5)
#define TCPHDR_ECE BIT(6)
#define TCPHDR_CWR BIT(7)
#define TCPHDR_AE  BIT(8)
#define TCPHDR_FLAGS_MASK (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
			   TCPHDR_PSH | TCPHDR_ACK | TCPHDR_URG | \
			   TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define tcp_flags_ntohs(th) (ntohs(*(__be16 *)&tcp_flag_word(th)) & \
			     TCPHDR_FLAGS_MASK)

#define TCPHDR_ACE (TCPHDR_ECE | TCPHDR_CWR | TCPHDR_AE)
#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
#define TCPHDR_SYNACK_ACCECN	(TCPHDR_SYN | TCPHDR_ACK | TCPHDR_CWR)

#define TCP_ACCECN_CEP_ACE_MASK 0x7
#define TCP_ACCECN_ACE_MAX_DELTA 6
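
/*
 * Illustrative example (not part of the kernel API): the AccECN ACE field
 * is the AE/CWR/ECE bit group; dividing by TCPHDR_ECE shifts it down to
 * a plain 0..7 counter value.
 */
static inline u8 tcp_example_ace_field(u16 tcp_flags)
{
	return (tcp_flags & TCPHDR_ACE) / TCPHDR_ECE;
}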

/* To avoid/detect middlebox interference, not all counters start at 0.
 * See draft-ietf-tcpm-accurate-ecn for the latest values.
 */
#define TCP_ACCECN_CEP_INIT_OFFSET 5
#define TCP_ACCECN_E1B_INIT_OFFSET 1
#define TCP_ACCECN_E0B_INIT_OFFSET 1
#define TCP_ACCECN_CEB_INIT_OFFSET 0

/* State flags for sacked in struct tcp_skb_cb */
enum tcp_skb_cb_sacked_flags {
	TCPCB_SACKED_ACKED	= (1 << 0),	/* SKB ACK'd by a SACK block	*/
	TCPCB_SACKED_RETRANS	= (1 << 1),	/* SKB retransmitted		*/
	TCPCB_LOST		= (1 << 2),	/* SKB is lost			*/
	TCPCB_TAGBITS		= (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS |
				   TCPCB_LOST),	/* All tag bits			*/
	TCPCB_REPAIRED		= (1 << 4),	/* SKB repaired (no skb_mstamp_ns)	*/
	TCPCB_EVER_RETRANS	= (1 << 7),	/* Ever retransmitted frame	*/
	TCPCB_RETRANS		= (TCPCB_SACKED_RETRANS | TCPCB_EVER_RETRANS |
				   TCPCB_REPAIRED),
};

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note :
		 *	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};
	};
	__u16		tcp_flags;	/* TCP header flags (tcp[12-13])*/

	__u8		sacked;		/* State flags for SACK.	*/
	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
#define TSTAMP_ACK_SK	0x1
#define TSTAMP_ACK_BPF	0x2
	__u8		txstamp_ack:2,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:4;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
#define TCPCB_DELIVERED_CE_MASK ((1U<<20) - 1)
			/* There is space for up to 24 bytes */
			__u32 is_app_limited:1, /* cwnd not fully used? */
			      delivered_ce:20,
			      unused:11;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;	/* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

extern const struct inet_connection_sock_af_ops ipv4_specific;
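
/*
 * Illustrative example (not part of the kernel API): tcp_skb_cb overlays
 * skb->cb[], and all per-packet TCP state is read through the cast macro.
 */
static inline u32 tcp_example_skb_seq_len(const struct sk_buff *skb)
{
	/* end_seq also counts SYN and FIN, as noted in the struct above */
	return TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
}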

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->header.h6.iif;
}

static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}

extern const struct inet_connection_sock_af_ops ipv6_specific;

INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));

#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
					const struct sk_buff *from)
{
	/* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */
	return likely(tcp_skb_can_collapse_to(to) &&
		      mptcp_skb_can_collapse(to, from) &&
		      skb_pure_zcopy_same(to, from) &&
		      skb_frags_readable(to) == skb_frags_readable(from));
}

static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to,
					   const struct sk_buff *from)
{
	return likely(mptcp_skb_can_collapse(to, from) &&
		      !skb_cmp_decrypted(to, from));
}
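
/*
 * Illustrative example (not part of the kernel API): a single 30000 byte
 * TSO skb with a 1500 byte gso_size is accounted as 20 wire packets.
 */
static inline bool tcp_example_tso_accounting(struct sk_buff *skb)
{
	tcp_skb_pcount_set(skb, 20);
	TCP_SKB_CB(skb)->tcp_gso_size = 1500;

	return tcp_skb_pcount(skb) * tcp_skb_mss(skb) == 30000;
}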

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED		BIT(0)
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN		BIT(1)
/* Require successfully negotiated AccECN capability */
#define TCP_CONG_NEEDS_ACCECN		BIT(2)
/* Use ECT(1) instead of ECT(0) while the CA is uninitialized */
#define TCP_CONG_ECT_1_NEGOTIATION	BIT(3)
/* Cannot fallback to RFC3168 during AccECN negotiation */
#define TCP_CONG_NO_FALLBACK_RFC3168	BIT(4)
#define TCP_CONG_MASK	(TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN | \
			 TCP_CONG_NEEDS_ACCECN | TCP_CONG_ECT_1_NEGOTIATION | \
			 TCP_CONG_NO_FALLBACK_RFC3168)

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp;	/* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	u32  prior_delivered_ce;/* tp->delivered_ce at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	s32  delivered_ce;	/* number of packets delivered w/ CE marks*/
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	u32  snd_interval_us;	/* snd interval for delivered packets */
	u32  rcv_interval_us;	/* rcv interval for delivered packets */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	u32  last_end_seq;	/* end_seq of most recently ACKed packet */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
};

struct tcp_congestion_ops {
/* fast path fields are put first to fill one cache line */

	/* A congestion control (CC) must provide one of either:
	 *
	 * (a) a cong_avoid function, if the CC wants to use the core TCP
	 *     stack's default functionality to implement a "classic"
	 *     (Reno/CUBIC-style) response to packet loss, RFC3168 ECN,
	 *     idle periods, pacing rate computations, etc.
	 *
	 * (b) a cong_control function, if the CC wants custom behavior and
	 *     complete control of all congestion control behaviors.
	 */
	/* (a) "classic" response: calculate new cwnd. */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* (b) "custom" response: call when packets are delivered to update
	 * cwnd and pacing rate, after all the ca_state processing.
	 */
	void (*cong_control)(struct sock *sk, u32 ack, int flag, const struct rate_sample *rs);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);

	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);

	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);

	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);

	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);

	/* override sysctl_tcp_min_tso_segs (optional) */
	u32 (*min_tso_segs)(struct sock *sk);

	/* new value of cwnd after loss (required) */
	u32 (*undo_cwnd)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);

/* control/slow paths put last */
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char			name[TCP_CA_NAME_MAX];
	struct module		*owner;
	struct list_head	list;
	u32			key;
	u32			flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data (optional) */
	void (*release)(struct sock *sk);
} ____cacheline_aligned_in_smp;

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
int tcp_update_congestion_control(struct tcp_congestion_ops *type,
				  struct tcp_congestion_ops *old_type);
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(struct net *net, const char *name);
void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
			       bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
	return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline bool tcp_ca_needs_accecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ACCECN;
}
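
/*
 * Illustrative example (not part of the kernel API) of the contract
 * documented in struct tcp_congestion_ops above: a CC supplies the
 * required ssthresh/undo_cwnd hooks plus one of (a) cong_avoid
 * ("classic") or (b) cong_control ("custom").
 */
static inline bool tcp_example_ca_ops_complete(const struct tcp_congestion_ops *ca)
{
	return ca->ssthresh && ca->undo_cwnd &&
	       (ca->cong_avoid || ca->cong_control);
}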

static inline bool tcp_ca_ect_1_negotiation(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_ECT_1_NEGOTIATION;
}

static inline bool tcp_ca_no_fallback_rfc3168(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NO_FALLBACK_RFC3168;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_cong.c */
void tcp_set_ca_state(struct sock *sk, const u8 ca_state);

static inline bool tcp_skb_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* These functions determine how the current flow behaves with respect to
 * SACK handling. SACK is negotiated with the peer, and therefore it can
 * vary between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return likely(tp->rx_opt.sack_ok);
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge. In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
{
	WARN_ON_ONCE((int)val <= 0);
	tp->snd_cwnd = val;
}

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}
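
/*
 * Illustrative example (not part of the kernel API), plugging numbers
 * into tcp_packets_in_flight() above: 100 sent, 10 SACKed, 5 lost and
 * 3 retransmitted gives a pipe of 100 - (10 + 5) + 3 = 88 packets.
 */
static inline u32 tcp_example_packets_in_flight(struct tcp_sock *tp)
{
	tp->packets_out = 100;
	tp->sacked_out = 10;
	tp->lost_out = 5;
	tp->retrans_out = 3;

	return tcp_packets_in_flight(tp);	/* 88 */
}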

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tcp_snd_cwnd(tp) >> 1) +
			    (tcp_snd_cwnd(tp) >> 2)));
}

/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from either sending more filler packets or data to artificially blow up
 * the cwnd usage, and allow application-limited processes to probe bw
 * more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tp->is_cwnd_limited)
		return true;

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tcp_in_slow_start(tp))
		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;

	return false;
}

/* BBR congestion control needs pacing.
 * Same remark for SO_MAX_PACING_RATE.
 * sch_fq packet scheduler is efficiently handling pacing,
 * but is not always installed/used.
 * Return true if TCP stack should pace packets itself.
 */
static inline bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}

/* Estimates in how many jiffies next packet for this flow can be sent.
 * Scheduling a retransmit timer too early would be silly.
 */
static inline unsigned long tcp_pacing_delay(const struct sock *sk)
{
	s64 delay = tcp_sk(sk)->tcp_wstamp_ns - tcp_sk(sk)->tcp_clock_cache;

	return delay > 0 ? nsecs_to_jiffies(delay) : 0;
}

static inline void tcp_reset_xmit_timer(struct sock *sk,
					const int what,
					unsigned long when,
					bool pace_delay)
{
	if (pace_delay)
		when += tcp_pacing_delay(sk);
	inet_csk_reset_xmit_timer(sk, what, when,
				  tcp_rto_max(sk));
}
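
/*
 * Illustrative example (not part of the kernel API), restating the IW10
 * example in the tcp_is_cwnd_limited() comment: with at most 9 packets
 * ever in flight, a slow-start flow still counts as cwnd-limited until
 * cwnd reaches 2 * 9 = 18.
 */
static inline bool tcp_example_cwnd_limited_in_slow_start(struct tcp_sock *tp)
{
	tp->is_cwnd_limited = 0;
	tp->max_packets_out = 9;
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;	/* initial slow start */
	tcp_snd_cwnd_set(tp, 10);

	return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;	/* true */
}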

/* Something is really bad, we could not queue an additional packet,
 * because qdisc is full or receiver sent a 0 window, or we are paced.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of current icsk_rto value (as it could be ~2ms)
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
	return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
					    unsigned long max_when)
{
	u8 backoff = min_t(u8, ilog2(TCP_RTO_MAX / TCP_RTO_MIN) + 1,
			   inet_csk(sk)->icsk_backoff);
	u64 when = (u64)tcp_probe0_base(sk) << backoff;

	return (unsigned long)min_t(u64, when, max_when);
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
				     tcp_probe0_base(sk), true);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

/*
 * Calculate(/check) TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
				   __be32 daddr, __wsum base)
{
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
	return !skb_csum_unnecessary(skb) &&
		__skb_checksum_complete(skb);
}

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
		     enum skb_drop_reason *reason);

static inline int tcp_filter(struct sock *sk, struct sk_buff *skb,
			     enum skb_drop_reason *reason)
{
	const struct tcphdr *th = (const struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, __tcp_hdrlen(th), reason);
}

void tcp_set_state(struct sock *sk, int state);
void tcp_done(struct sock *sk);
int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta;

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) ||
	    tp->packets_out || ca_ops->cong_control)
		return;
	delta = tcp_jiffies32 - tp->lsndtime;
	if (delta > inet_csk(sk)->icsk_rto)
		tcp_cwnd_restart(sk, delta);
}
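
/*
 * Illustrative example (not part of the kernel API): zero-window probes
 * back off exponentially from the 200ms tcp_probe0_base() floor, so
 * with backoff 3 the next probe fires 1600ms worth of jiffies later
 * (before the tcp_probe0_when() clamp to max_when).
 */
static inline u64 tcp_example_probe0_backoff(void)
{
	return (u64)TCP_RTO_MIN << 3;	/* 200ms << 3 = 1600ms, in jiffies */
}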
/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(const struct sock *sk, int __space,
			       __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int __tcp_win_from_space(u8 scaling_ratio, int space)
{
	s64 scaled_space = (s64)space * scaling_ratio;

	return scaled_space >> TCP_RMEM_TO_WIN_SCALE;
}

static inline int tcp_win_from_space(const struct sock *sk, int space)
{
	return __tcp_win_from_space(tcp_sk(sk)->scaling_ratio, space);
}

/* inverse of __tcp_win_from_space() */
static inline int __tcp_space_from_win(u8 scaling_ratio, int win)
{
	u64 val = (u64)win << TCP_RMEM_TO_WIN_SCALE;

	do_div(val, scaling_ratio);
	return val;
}

static inline int tcp_space_from_win(const struct sock *sk, int win)
{
	return __tcp_space_from_win(tcp_sk(sk)->scaling_ratio, win);
}

/* Assume a 50% default for skb->len/skb->truesize ratio.
 * This may be adjusted later in tcp_measure_rcv_mss().
 */
#define TCP_DEFAULT_SCALING_RATIO (1 << (TCP_RMEM_TO_WIN_SCALE - 1))

static inline void tcp_scaling_ratio_init(struct sock *sk)
{
	tcp_sk(sk)->scaling_ratio = TCP_DEFAULT_SCALING_RATIO;
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
				  READ_ONCE(sk->sk_backlog.len) -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}

static inline void __tcp_adjust_rcv_ssthresh(struct sock *sk, u32 new_ssthresh)
{
	int unused_mem = sk_unused_reserved_mem(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, new_ssthresh);
	if (unused_mem)
		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
					 tcp_win_from_space(sk, unused_mem));
}

static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
{
	__tcp_adjust_rcv_ssthresh(sk, 4U * tcp_sk(sk)->advmss);
}

void tcp_cleanup_rbuf(struct sock *sk, int copied);
void __tcp_cleanup_rbuf(struct sock *sk, int copied);
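/* Worked example for the space/window helpers above, assuming
 * TCP_RMEM_TO_WIN_SCALE is 8 (so TCP_DEFAULT_SCALING_RATIO is 128,
 * i.e. a 50% len/truesize ratio): a hypothetical 1 MB receive buffer
 * yields
 *
 *	__tcp_win_from_space(128, 1048576) = (1048576 * 128) >> 8 = 524288
 *
 * and __tcp_space_from_win(128, 524288) maps the 512 KB window back to
 * the 1 MB of socket memory needed to back it.
 */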
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 * If 87.5% (7/8) of the space has been consumed, we want to override
 * SO_RCVLOWAT constraint, since we are receiving skbs with too small
 * len/truesize ratio.
 */
static inline bool tcp_rmem_pressure(const struct sock *sk)
{
	int rcvbuf, threshold;

	if (tcp_under_memory_pressure(sk))
		return true;

	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
	threshold = rcvbuf - (rcvbuf >> 3);

	return atomic_read(&sk->sk_rmem_alloc) > threshold;
}

static inline bool tcp_epollin_ready(const struct sock *sk, int target)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);

	if (avail <= 0)
		return false;

	return (avail >= target) || tcp_rmem_pressure(sk) ||
	       (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
				  const struct sock *sk_listener,
				  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);
void tcp_leave_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepintvl()
	 * and do_tcp_setsockopt().
	 */
	val = READ_ONCE(tp->keepalive_intvl);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl);
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepidle_locked() */
	val = READ_ONCE(tp->keepalive_time);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time);
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	int val;

	/* Paired with WRITE_ONCE() in tcp_sock_set_keepcnt()
	 * and do_tcp_setsockopt().
	 */
	val = READ_ONCE(tp->keepalive_probes);

	return val ? : READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes);
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
		     tcp_jiffies32 - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout);
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
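/* Worked example for tcp_fin_time() above: the floor is
 * (rto << 2) - (rto >> 1) = 3.5 * rto, so with icsk_rto worth 10s a
 * configured TCP_LINGER2/tcp_fin_timeout of 30s is raised to 35s,
 * keeping FIN-WAIT-2 alive long enough to absorb retransmitted FINs.
 */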
static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(!time_before32(ktime_get_seconds(),
				    rx_opt->ts_recent_stamp + TCP_PAWS_WRAP)))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * while subsequent TCP segments carry valid values. Ignore a 0
	 * value, or else a 'negative' tsval might make us reject their
	 * packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry timestamps, and, if
	 * they do, it is recommended to ignore PAWS because "their cleanup
	 * function should take precedence over timestamps". That is
	 * arguably a mistake, but the reason behind the constraint must be
	 * understood before relaxing it: if the peer reboots, its clock
	 * may go out of sync, and half-open connections would then never
	 * be reset. The problem would not exist if all implementations
	 * followed the draft about maintaining clock state across reboots;
	 * Linux-2.2 does not!
	 *
	 * However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && !time_before32(ktime_get_seconds(),
				  rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
		return false;
	return true;
}

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	u32 ace;

	/* mptcp hooks are only on the slow path */
	if (sk_is_mptcp((struct sock *)tp))
		return;

	ace = tcp_ecn_mode_accecn(tp) ?
		((tp->delivered_ce + TCP_ACCECN_CEP_INIT_OFFSET) &
		 TCP_ACCECN_CEP_ACE_MASK) : 0;

	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       (ace << 22) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tp->retransmit_skb_hint = NULL;
}

#define tcp_md5_addr tcp_ao_addr

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	u8			prefixlen;
	u8			flags;
	union tcp_md5_addr	addr;
	int			l3index; /* set if key added with L3 scope */
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};
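/* Illustrative sketch (hypothetical helper, not part of this header):
 * the pseudo headers above are what the TCP-MD5 code hashes ahead of
 * the TCP header and payload, e.g. for IPv4:
 *
 *	static void fill_v4_pseudohdr_example(struct tcp4_pseudohdr *bp,
 *					      __be32 saddr, __be32 daddr,
 *					      unsigned int tcplen)
 *	{
 *		bp->saddr = saddr;
 *		bp->daddr = daddr;
 *		bp->pad = 0;
 *		bp->protocol = IPPROTO_TCP;
 *		bp->len = cpu_to_be16(tcplen);
 *	}
 */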
/**
 * struct tcp_sigpool - per-CPU pool of ahash_requests
 * @scratch: per-CPU temporary area that can be used between
 *	     tcp_sigpool_start() and tcp_sigpool_end() to perform
 *	     a crypto request
 * @req: pre-allocated ahash request
 */
struct tcp_sigpool {
	void *scratch;
	struct ahash_request *req;
};

int tcp_sigpool_alloc_ahash(const char *alg, size_t scratch_size);
void tcp_sigpool_get(unsigned int id);
void tcp_sigpool_release(unsigned int id);
int tcp_sigpool_hash_skb_data(struct tcp_sigpool *hp,
			      const struct sk_buff *skb,
			      unsigned int header_len);

/**
 * tcp_sigpool_start - disable bh and start using tcp_sigpool_ahash
 * @id: tcp_sigpool that was previously allocated by tcp_sigpool_alloc_ahash()
 * @c: returned tcp_sigpool for usage (uninitialized on failure)
 *
 * Returns: 0 on success, error otherwise.
 */
int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c);
/**
 * tcp_sigpool_end - enable bh and stop using tcp_sigpool
 * @c: tcp_sigpool context that was returned by tcp_sigpool_start()
 */
void tcp_sigpool_end(struct tcp_sigpool *c);
size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len);
/* - functions */
void tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			 const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags,
		   const u8 *newkey, u8 newkeylen);
int tcp_md5_key_copy(struct sock *sk, const union tcp_md5_addr *addr,
		     int family, u8 prefixlen, int l3index,
		     struct tcp_md5sig_key *key);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, int l3index, u8 flags);
void tcp_clear_md5_list(struct sock *sk);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
					   const union tcp_md5_addr *addr,
					   int family, bool any_l3index);
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;
	return __tcp_md5_do_lookup(sk, l3index, addr, family, false);
}

static inline struct tcp_md5sig_key *
tcp_md5_do_lookup_any_l3index(const struct sock *sk,
			      const union tcp_md5_addr *addr, int family)
{
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return NULL;
	return __tcp_md5_do_lookup(sk, 0, addr, family, true);
}

#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
void tcp_md5_destruct_sock(struct sock *sk);
#else
static inline struct tcp_md5sig_key *
tcp_md5_do_lookup(const struct sock *sk, int l3index,
		  const union tcp_md5_addr *addr, int family)
{
	return NULL;
}

static inline struct tcp_md5sig_key *
tcp_md5_do_lookup_any_l3index(const struct sock *sk,
			      const union tcp_md5_addr *addr, int family)
{
	return NULL;
}

#define tcp_twsk_md5_key(twsk)	NULL
static inline void tcp_md5_destruct_sock(struct sock *sk)
{
}
#endif

struct md5_ctx;
void tcp_md5_hash_skb_data(struct md5_ctx *ctx, const struct sk_buff *skb,
			   unsigned int header_len);
void tcp_md5_hash_key(struct md5_ctx *ctx, const struct tcp_md5sig_key *key);
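/* Illustrative usage sketch for the tcp_sigpool API above (error
 * handling trimmed; "id" assumed to come from a prior successful
 * tcp_sigpool_alloc_ahash() call):
 *
 *	struct tcp_sigpool hp;
 *
 *	if (!tcp_sigpool_start(id, &hp)) {	// disables bh
 *		tcp_sigpool_hash_skb_data(&hp, skb, header_len);
 *		tcp_sigpool_end(&hp);		// re-enables bh
 *	}
 */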
/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;	/* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
	struct ubuf_info		*uarg;
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
void tcp_fastopen_destroy_cipher(struct sock *sk);
void tcp_fastopen_ctx_destroy(struct net *net);
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
			      void *primary_key, void *backup_key);
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
			    u64 *key);
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      const struct dst_entry *dst);
void tcp_fastopen_init_key_once(struct net *net);
bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
			       struct tcp_fastopen_cookie *cookie);
bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH sizeof(siphash_key_t)
#define TCP_FASTOPEN_KEY_MAX 2
#define TCP_FASTOPEN_KEY_BUF_LENGTH \
	(TCP_FASTOPEN_KEY_LENGTH * TCP_FASTOPEN_KEY_MAX)

/* Fastopen key context */
struct tcp_fastopen_context {
	siphash_key_t	key[TCP_FASTOPEN_KEY_MAX];
	int		num;
	struct rcu_head	rcu;
};

void tcp_fastopen_active_disable(struct sock *sk);
bool tcp_fastopen_active_should_disable(struct sock *sk);
void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);

/* Caller needs to wrap with rcu_read_(un)lock() */
static inline
struct tcp_fastopen_context *tcp_fastopen_get_ctx(const struct sock *sk)
{
	struct tcp_fastopen_context *ctx;

	ctx = rcu_dereference(inet_csk(sk)->icsk_accept_queue.fastopenq.ctx);
	if (!ctx)
		ctx = rcu_dereference(sock_net(sk)->ipv4.tcp_fastopen_ctx);
	return ctx;
}

static inline
bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc,
			       const struct tcp_fastopen_cookie *orig)
{
	if (orig->len == TCP_FASTOPEN_COOKIE_SIZE &&
	    orig->len == foc->len &&
	    !memcmp(orig->val, foc->val, foc->len))
		return true;
	return false;
}

static inline
int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
{
	return ctx->num;
}
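/* Illustrative sketch (hypothetical helper, not part of this header):
 * reading the number of installed fastopen keys via the helpers above,
 * with the RCU protection that tcp_fastopen_get_ctx() requires:
 *
 *	static int fastopen_key_count_example(const struct sock *sk)
 *	{
 *		struct tcp_fastopen_context *ctx;
 *		int n = 0;
 *
 *		rcu_read_lock();
 *		ctx = tcp_fastopen_get_ctx(sk);
 *		if (ctx)
 *			n = tcp_fastopen_context_len(ctx);
 *		rcu_read_unlock();
 *		return n;
 *	}
 */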
/* Latencies incurred by various limits for a sender. They are
 * chronograph-like stats that are mutually exclusive.
 */
enum tcp_chrono {
	TCP_CHRONO_UNSPEC,
	TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
	TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
	TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
	__TCP_CHRONO_MAX,
};

static inline void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
{
	const u32 now = tcp_jiffies32;
	enum tcp_chrono old = tp->chrono_type;

	if (old > TCP_CHRONO_UNSPEC)
		tp->chrono_stat[old - 1] += now - tp->chrono_start;
	tp->chrono_start = now;
	tp->chrono_type = new;
}

static inline void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* If there are multiple conditions worthy of tracking in a
	 * chronograph, the highest-priority enum takes precedence over
	 * the other conditions: if something "more interesting" starts
	 * happening, stop the previous chrono and start a new one.
	 */
	if (type > tp->chrono_type)
		tcp_chrono_set(tp, type);
}

void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);

/* This helper is needed because skb->tcp_tsorted_anchor uses
 * the same memory storage as skb->destructor/_skb_refdst
 */
static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb)
{
	skb->destructor = NULL;
	skb->_skb_refdst = 0UL;
}

#define tcp_skb_tsorted_save(skb) {		\
	unsigned long _save = skb->_skb_refdst;	\
	skb->_skb_refdst = 0UL;

#define tcp_skb_tsorted_restore(skb)		\
	skb->_skb_refdst = _save;		\
}

void tcp_write_queue_purge(struct sock *sk);

static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
{
	return skb_rb_first(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
{
	return skb_rb_last(&sk->tcp_rtx_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

/**
 * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
 * @sk: socket
 *
 * Since the write queue can have a temporary empty skb in it,
 * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
 */
static inline bool tcp_write_queue_empty(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return tp->write_seq == tp->snd_nxt;
}

static inline bool tcp_rtx_queue_empty(const struct sock *sk)
{
	return RB_EMPTY_ROOT(&sk->tcp_rtx_queue);
}

static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
{
	return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk);
}
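/* The tcp_skb_tsorted_save()/tcp_skb_tsorted_restore() macros above open
 * and close a C block; they must be used strictly as a pair, bracketing
 * code that needs skb->_skb_refdst temporarily cleared, e.g. (sketch):
 *
 *	tcp_skb_tsorted_save(skb) {
 *		... clone or otherwise hand off skb here ...
 *	} tcp_skb_tsorted_restore(skb);
 */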
static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_write_queue.next == skb)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	__skb_unlink(skb, &sk->sk_write_queue);
}

void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb);

static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
{
	tcp_skb_tsorted_anchor_cleanup(skb);
	rb_erase(&skb->rbnode, &sk->tcp_rtx_queue);
}

static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
{
	list_del(&skb->tcp_tsorted_anchor);
	tcp_rtx_queue_unlink(skb, sk);
	tcp_wmem_free_skb(sk, skb);
}

static inline void tcp_write_collapse_fence(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	if (skb)
		TCP_SKB_CB(skb)->eor = 1;
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed
 * bit, valid only if sacked_out > 0 or the caller has otherwise
 * ensured validity.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = skb_rb_next(skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk);
}

/* Called when old skb is about to be deleted and replaced by new skb */
static inline void tcp_highest_sack_replace(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (old == tcp_highest_sack(sk))
		tcp_sk(sk)->highest_sack = new;
}

/* This helper checks if socket has IP_TRANSPARENT set */
static inline bool inet_sk_transparent(const struct sock *sk)
{
	switch (sk->sk_state) {
	case TCP_TIME_WAIT:
		return inet_twsk(sk)->tw_transparent;
	case TCP_NEW_SYN_RECV:
		return inet_rsk(inet_reqsk(sk))->no_srccheck;
	}
	return inet_test_bit(TRANSPARENT, sk);
}
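/* Illustrative sketch (hypothetical helper, not part of this header):
 * unlinking from a given point of the write queue uses the _safe walk
 * macro above, since each skb is removed while iterating:
 *
 *	static void purge_from_example(struct sock *sk, struct sk_buff *skb)
 *	{
 *		struct sk_buff *tmp;
 *
 *		tcp_for_write_queue_from_safe(skb, tmp, sk) {
 *			tcp_unlink_write_queue(skb, sk);
 *			tcp_wmem_free_skb(sk, skb);
 *		}
 *	}
 */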
/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_ESTABLISHED,
};

void *tcp_seq_start(struct seq_file *seq, loff_t *pos);
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos);
void tcp_seq_stop(struct seq_file *seq, void *v);

struct tcp_seq_afinfo {
	sa_family_t			family;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num;
	loff_t			last_pos;
};

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features);
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th);
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct tcphdr *th);
INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
#ifdef CONFIG_INET
void tcp_gro_complete(struct sk_buff *skb);
#else
static inline void tcp_gro_complete(struct sk_buff *skb) { }
#endif

static inline void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
				       __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	struct net *net = sock_net((struct sock *)tp);
	u32 val;

	val = READ_ONCE(tp->notsent_lowat);

	return val ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat);
}

bool tcp_stream_memory_free(const struct sock *sk, int wake);

#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);
/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup)(const struct sock *sk,
					       const struct sock *addr_sk);
	void	(*calc_md5_hash)(char *location,
				 const struct tcp_md5sig_key *md5,
				 const struct sock *sk,
				 const struct sk_buff *skb);
	int	(*md5_parse)(struct sock *sk,
			     int optname,
			     sockptr_t optval,
			     int optlen);
#endif
#ifdef CONFIG_TCP_AO
	int (*ao_parse)(struct sock *sk, int optname, sockptr_t optval, int optlen);
	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
					struct sock *addr_sk,
					int sndid, int rcvid);
	int (*ao_calc_key_sk)(struct tcp_ao_key *mkt, u8 *key,
			      const struct sock *sk,
			      __be32 sisn, __be32 disn, bool send);
	int (*calc_ao_hash)(char *location, struct tcp_ao_key *ao,
			    const struct sock *sk, const struct sk_buff *skb,
			    const u8 *tkey, int hash_offset, u32 sne);
#endif
};

struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
						 const struct sock *addr_sk);
	void (*calc_md5_hash)(char *location,
			      const struct tcp_md5sig_key *md5,
			      const struct sock *sk,
			      const struct sk_buff *skb);
#endif
#ifdef CONFIG_TCP_AO
	struct tcp_ao_key *(*ao_lookup)(const struct sock *sk,
					struct request_sock *req,
					int sndid, int rcvid);
	int (*ao_calc_key)(struct tcp_ao_key *mkt, u8 *key, struct request_sock *sk);
	int (*ao_synack_hash)(char *ao_hash, struct tcp_ao_key *mkt,
			      struct request_sock *req, const struct sk_buff *skb,
			      int hash_offset, u32 sne);
#endif
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(const struct sock *sk,
				       struct sk_buff *skb,
				       struct flowi *fl,
				       struct request_sock *req,
				       u32 tw_isn);
	union tcp_seq_and_ts_off (*init_seq_and_ts_off)(const struct net *net,
							const struct sk_buff *skb);
	int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   struct tcp_fastopen_cookie *foc,
			   enum tcp_synack_type synack_type,
			   struct sk_buff *syn_skb);
};

extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
#if IS_ENABLED(CONFIG_IPV6)
extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
#endif

#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	return ops->cookie_init_seq(skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 const struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif

struct tcp_key {
	union {
		struct {
			struct tcp_ao_key *ao_key;
			char *traffic_key;
			u32 sne;
			u8 rcv_next;
		};
		struct tcp_md5sig_key	*md5_key;
	};
	enum {
		TCP_KEY_NONE = 0,
		TCP_KEY_MD5,
		TCP_KEY_AO,
	} type;
};

static inline void tcp_get_current_key(const struct sock *sk,
				       struct tcp_key *out)
{
#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
	const struct tcp_sock *tp = tcp_sk(sk);
#endif

#ifdef CONFIG_TCP_AO
	if (static_branch_unlikely(&tcp_ao_needed.key)) {
		struct tcp_ao_info *ao;

		ao = rcu_dereference_protected(tp->ao_info,
					       lockdep_sock_is_held(sk));
		if (ao) {
			out->ao_key = READ_ONCE(ao->current_key);
			out->type = TCP_KEY_AO;
			return;
		}
	}
#endif
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		out->md5_key = tp->af_specific->md5_lookup(sk, sk);
		if (out->md5_key) {
			out->type = TCP_KEY_MD5;
			return;
		}
	}
#endif
	out->type = TCP_KEY_NONE;
}
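/* Illustrative sketch: a transmit path would fetch the current key once
 * and dispatch on its type (hypothetical caller, error paths trimmed):
 *
 *	struct tcp_key key;
 *
 *	tcp_get_current_key(sk, &key);
 *	switch (key.type) {
 *	case TCP_KEY_MD5:
 *		... reserve MD5 option space, sign with key.md5_key ...
 *		break;
 *	case TCP_KEY_AO:
 *		... sign with key.ao_key ...
 *		break;
 *	case TCP_KEY_NONE:
 *		break;
 *	}
 */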
static inline bool tcp_key_is_md5(const struct tcp_key *key)
{
	if (static_branch_tcp_md5())
		return key->type == TCP_KEY_MD5;
	return false;
}

static inline bool tcp_key_is_ao(const struct tcp_key *key)
{
	if (static_branch_tcp_ao())
		return key->type == TCP_KEY_AO;
	return false;
}

int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

/* tcp_recovery.c */
void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb);
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced);
extern s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd);
extern bool tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_reo_timeout(struct sock *sk);

/* tcp_plb.c */

/*
 * Scaling factor for fractions in PLB. For example, tcp_plb_update_state
 * expects cong_ratio which represents fraction of traffic that experienced
 * congestion over a single RTT. In order to avoid floating point operations,
 * this fraction should be mapped to (1 << TCP_PLB_SCALE) and passed in.
 */
#define TCP_PLB_SCALE 8

/* State for PLB (Protective Load Balancing) for a single TCP connection. */
struct tcp_plb_state {
	u8	consec_cong_rounds:5, /* consecutive congested rounds */
		unused:3;
	u32	pause_until; /* jiffies32 when PLB can resume rerouting */
};

static inline void tcp_plb_init(const struct sock *sk,
				struct tcp_plb_state *plb)
{
	plb->consec_cong_rounds = 0;
	plb->pause_until = 0;
}
void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb,
			  const int cong_ratio);
void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb);
void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb);

static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str)
{
	WARN_ONCE(cond,
		  "%scwnd:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n",
		  str,
		  tcp_snd_cwnd(tcp_sk(sk)),
		  tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out,
		  tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out,
		  tcp_sk(sk)->tlp_high_seq, sk->sk_state,
		  inet_csk(sk)->icsk_ca_state,
		  tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache,
		  inet_csk(sk)->icsk_pmtu_cookie);
}

/* At how many usecs into the future should the RTO fire? */
static inline s64 tcp_rto_delta_us(const struct sock *sk)
{
	const struct sk_buff *skb = tcp_rtx_queue_head(sk);
	u32 rto = inet_csk(sk)->icsk_rto;

	if (likely(skb)) {
		u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto);

		return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp;
	} else {
		tcp_warn_once(sk, 1, "rtx queue empty: ");
		return jiffies_to_usecs(rto);
	}
}
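/* Worked example for tcp_rto_delta_us() above: if the head of the rtx
 * queue was last (re)transmitted at tcp_skb_timestamp_us() == 1.000000s
 * with icsk_rto worth 200ms, and tcp_mstamp is now 1.150000s, the RTO
 * should fire in 1.200000s - 1.150000s = 50000us. A negative result
 * means the RTO is already overdue.
 */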
/*
 * Save and compile IPv4 options, return a pointer to it
 */
static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net,
							 struct sk_buff *skb)
{
	const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
	struct ip_options_rcu *dopt = NULL;

	if (opt->optlen) {
		int opt_size = sizeof(*dopt) + opt->optlen;

		dopt = kmalloc(opt_size, GFP_ATOMIC);
		if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) {
			kfree(dopt);
			dopt = NULL;
		}
	}
	return dopt;
}

/* locally generated TCP pure ACKs have skb->truesize == 2
 * (check tcp_send_ack() in net/ipv4/tcp_output.c)
 * This is much faster than dissecting the packet to find out.
 * (Think of GRE encapsulations, IPv4, IPv6, ...)
 */
static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
{
	return skb->truesize == 2;
}

static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
{
	skb->truesize = 2;
}

static inline int tcp_inq(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		answ = 0;
	} else if (sock_flag(sk, SOCK_URGINLINE) ||
		   !tp->urg_data ||
		   before(tp->urg_seq, tp->copied_seq) ||
		   !before(tp->urg_seq, tp->rcv_nxt)) {

		answ = tp->rcv_nxt - tp->copied_seq;

		/* Subtract 1, if FIN was received */
		if (answ && sock_flag(sk, SOCK_DONE))
			answ--;
	} else {
		answ = tp->urg_seq - tp->copied_seq;
	}

	return answ;
}

int tcp_peek_len(struct socket *sock);

static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
{
	u16 segs_in;

	segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);

	/* We update these fields while other threads might
	 * read them from tcp_get_info()
	 */
	WRITE_ONCE(tp->segs_in, tp->segs_in + segs_in);
	if (skb->len > tcp_hdrlen(skb))
		WRITE_ONCE(tp->data_segs_in, tp->data_segs_in + segs_in);
}

/*
 * TCP listen path runs lockless.
 * We forced "struct sock" to be const qualified to make sure
 * we don't modify one of its fields by mistake.
 * Here, we increment sk_drops which is an atomic_t, so we can safely
 * make sock writable again.
 */
static inline void tcp_listendrop(const struct sock *sk)
{
	sk_drops_inc((struct sock *)sk);
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
}

enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer);

/*
 * Interface for adding Upper Level Protocols over TCP
 */

#define TCP_ULP_NAME_MAX	16
#define TCP_ULP_MAX		128
#define TCP_ULP_BUF_MAX		(TCP_ULP_NAME_MAX*TCP_ULP_MAX)

struct tcp_ulp_ops {
	struct list_head	list;

	/* initialize ulp */
	int (*init)(struct sock *sk);
	/* update ulp */
	void (*update)(struct sock *sk, struct proto *p,
		       void (*write_space)(struct sock *sk));
	/* cleanup ulp */
	void (*release)(struct sock *sk);
	/* diagnostic */
	int (*get_info)(struct sock *sk, struct sk_buff *skb, bool net_admin);
	size_t (*get_info_size)(const struct sock *sk, bool net_admin);
	/* clone ulp */
	void (*clone)(const struct request_sock *req, struct sock *newsk,
		      const gfp_t priority);

	char		name[TCP_ULP_NAME_MAX];
	struct module	*owner;
};
int tcp_register_ulp(struct tcp_ulp_ops *type);
void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
void tcp_update_ulp(struct sock *sk, struct proto *p,
		    void (*write_space)(struct sock *sk));

#define MODULE_ALIAS_TCP_ULP(name)	\
	MODULE_INFO(alias, name);	\
	MODULE_INFO(alias, "tcp-ulp-" name)
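/* Illustrative sketch of a minimal ULP registration (hypothetical
 * "example" module; real users such as kTLS implement more hooks):
 *
 *	static int example_ulp_init(struct sock *sk)
 *	{
 *		return 0;	// a real ULP would swap sk->sk_prot here
 *	}
 *
 *	static struct tcp_ulp_ops example_ulp_ops = {
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.init	= example_ulp_init,
 *	};
 *
 *	// module init/exit: tcp_register_ulp(&example_ulp_ops) /
 *	// tcp_unregister_ulp(&example_ulp_ops); userspace then selects it
 *	// via setsockopt(fd, SOL_TCP, TCP_ULP, "example", sizeof("example")).
 */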
#ifdef CONFIG_NET_SOCK_MSG
struct sk_msg;
struct sk_psock;

#ifdef CONFIG_BPF_SYSCALL
int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#ifdef CONFIG_BPF_STREAM_PARSER
struct strparser;
int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc,
			   sk_read_actor_t recv_actor);
#endif /* CONFIG_BPF_STREAM_PARSER */
#endif /* CONFIG_BPF_SYSCALL */

#ifdef CONFIG_INET
void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
#else
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
}
#endif

int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags);
#endif /* CONFIG_NET_SOCK_MSG */

#if !defined(CONFIG_BPF_SYSCALL) || !defined(CONFIG_NET_SOCK_MSG)
static inline void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
}
#endif

#ifdef CONFIG_CGROUP_BPF
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
	skops->skb = skb;
	skops->skb_data_end = skb->data + end_offset;
}
#else
static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
				      struct sk_buff *skb,
				      unsigned int end_offset)
{
}
#endif

/* Call BPF_SOCK_OPS program that returns an int. If the return value
 * is < 0, then the BPF op failed (for example if the loaded BPF
 * program does not support the chosen operation or there is no BPF
 * program loaded).
 */
#ifdef CONFIG_BPF
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	struct bpf_sock_ops_kern sock_ops;
	int ret;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
	if (sk_fullsock(sk)) {
		sock_ops.is_fullsock = 1;
		sock_ops.is_locked_tcp_sock = 1;
		sock_owned_by_me(sk);
	}

	sock_ops.sk = sk;
	sock_ops.op = op;
	if (nargs > 0)
		memcpy(sock_ops.args, args, nargs * sizeof(*args));

	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
		ret = sock_ops.reply;
	else
		ret = -1;
	return ret;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	u32 args[2] = {arg1, arg2};

	return tcp_call_bpf(sk, op, 2, args);
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	u32 args[3] = {arg1, arg2, arg3};

	return tcp_call_bpf(sk, op, 3, args);
}

#else
static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
{
	return -EPERM;
}

static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
{
	return -EPERM;
}

static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
				    u32 arg3)
{
	return -EPERM;
}

#endif

static inline u32 tcp_timeout_init(struct sock *sk)
{
	int timeout;

	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);

	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
	return min_t(int, timeout, TCP_RTO_MAX);
}

static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
{
	int rwnd;

	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);

	if (rwnd < 0)
		rwnd = 0;
	return rwnd;
}

static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
{
	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
}
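/* Illustrative sketch of the tcp_call_bpf() convention used by the
 * helpers above (hypothetical caller; mrtt_us/srtt_us are assumed
 * locals, not names from this header): a negative return means "no BPF
 * answer" (no program loaded, or op not handled), so each caller
 * supplies its own fallback:
 *
 *	int reply = tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB,
 *				      mrtt_us, srtt_us);
 *	if (reply < 0)
 *		reply = 0;	// no program answered: keep the default
 */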
static inline void tcp_bpf_rtt(struct sock *sk, long mrtt, u32 srtt)
{
	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_RTT_CB_FLAG))
		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_RTT_CB, mrtt, srtt);
}

#if IS_ENABLED(CONFIG_SMC)
extern struct static_key_false tcp_have_smc;
#endif

#if IS_ENABLED(CONFIG_TLS_DEVICE)
void clean_acked_data_enable(struct tcp_sock *tp,
			     void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct tcp_sock *tp);
void clean_acked_data_flush(void);
#endif

DECLARE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
static inline void tcp_add_tx_delay(struct sk_buff *skb,
				    const struct tcp_sock *tp)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled))
		skb->skb_mstamp_ns += (u64)tp->tcp_tx_delay * NSEC_PER_USEC;
}

/* Compute Earliest Departure Time for some control packets
 * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets.
 */
static inline u64 tcp_transmit_time(const struct sock *sk)
{
	if (static_branch_unlikely(&tcp_tx_delay_enabled)) {
		u32 delay = (sk->sk_state == TCP_TIME_WAIT) ?
			tcp_twsk(sk)->tw_tx_delay : tcp_sk(sk)->tcp_tx_delay;

		return tcp_clock_ns() + (u64)delay * NSEC_PER_USEC;
	}
	return 0;
}

static inline int tcp_parse_auth_options(const struct tcphdr *th,
	  const u8 **md5_hash, const struct tcp_ao_hdr **aoh)
{
	const u8 *md5_tmp, *ao_tmp;
	int ret;

	ret = tcp_do_parse_auth_options(th, &md5_tmp, &ao_tmp);
	if (ret)
		return ret;

	if (md5_hash)
		*md5_hash = md5_tmp;

	if (aoh) {
		if (!ao_tmp)
			*aoh = NULL;
		else
			*aoh = (struct tcp_ao_hdr *)(ao_tmp - 2);
	}

	return 0;
}

static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
				   int family, int l3index, bool stat_inc)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao_info;
	struct tcp_ao_key *ao_key;

	if (!static_branch_unlikely(&tcp_ao_needed.key))
		return false;

	ao_info = rcu_dereference_check(tcp_sk(sk)->ao_info,
					lockdep_sock_is_held(sk));
	if (!ao_info)
		return false;

	ao_key = tcp_ao_do_lookup(sk, l3index, saddr, family, -1, -1);
	if (ao_info->ao_required || ao_key) {
		if (stat_inc) {
			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOREQUIRED);
			atomic64_inc(&ao_info->counters.ao_required);
		}
		return true;
	}
#endif
	return false;
}

enum skb_drop_reason tcp_inbound_hash(struct sock *sk,
		const struct request_sock *req, const struct sk_buff *skb,
		const void *saddr, const void *daddr,
		int family, int dif, int sdif);

#endif /* _TCP_H */