1 /* 2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * Definitions for the TCP module. 7 * 8 * Version: @(#)tcp.h 1.0.5 05/23/93 9 * 10 * Authors: Ross Biro 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 12 * 13 * This program is free software; you can redistribute it and/or 14 * modify it under the terms of the GNU General Public License 15 * as published by the Free Software Foundation; either version 16 * 2 of the License, or (at your option) any later version. 17 */ 18 #ifndef _TCP_H 19 #define _TCP_H 20 21 #define FASTRETRANS_DEBUG 1 22 23 #include <linux/list.h> 24 #include <linux/tcp.h> 25 #include <linux/bug.h> 26 #include <linux/slab.h> 27 #include <linux/cache.h> 28 #include <linux/percpu.h> 29 #include <linux/skbuff.h> 30 #include <linux/cryptohash.h> 31 #include <linux/kref.h> 32 #include <linux/ktime.h> 33 34 #include <net/inet_connection_sock.h> 35 #include <net/inet_timewait_sock.h> 36 #include <net/inet_hashtables.h> 37 #include <net/checksum.h> 38 #include <net/request_sock.h> 39 #include <net/sock.h> 40 #include <net/snmp.h> 41 #include <net/ip.h> 42 #include <net/tcp_states.h> 43 #include <net/inet_ecn.h> 44 #include <net/dst.h> 45 46 #include <linux/seq_file.h> 47 #include <linux/memcontrol.h> 48 #include <linux/bpf-cgroup.h> 49 50 extern struct inet_hashinfo tcp_hashinfo; 51 52 extern struct percpu_counter tcp_orphan_count; 53 void tcp_time_wait(struct sock *sk, int state, int timeo); 54 55 #define MAX_TCP_HEADER (128 + MAX_HEADER) 56 #define MAX_TCP_OPTION_SPACE 40 57 58 /* 59 * Never offer a window over 32767 without using window scaling. Some 60 * poor stacks do signed 16bit maths! 61 */ 62 #define MAX_TCP_WINDOW 32767U 63 64 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */ 65 #define TCP_MIN_MSS 88U 66 67 /* The least MTU to use for probing */ 68 #define TCP_BASE_MSS 1024 69 70 /* probing interval, default to 10 minutes as per RFC4821 */ 71 #define TCP_PROBE_INTERVAL 600 72 73 /* Specify interval when tcp mtu probing will stop */ 74 #define TCP_PROBE_THRESHOLD 8 75 76 /* After receiving this amount of duplicate ACKs fast retransmit starts. */ 77 #define TCP_FASTRETRANS_THRESH 3 78 79 /* Maximal number of ACKs sent quickly to accelerate slow-start. */ 80 #define TCP_MAX_QUICKACKS 16U 81 82 /* Maximal number of window scale according to RFC1323 */ 83 #define TCP_MAX_WSCALE 14U 84 85 /* urg_data states */ 86 #define TCP_URG_VALID 0x0100 87 #define TCP_URG_NOTYET 0x0200 88 #define TCP_URG_READ 0x0400 89 90 #define TCP_RETR1 3 /* 91 * This is how many retries it does before it 92 * tries to figure out if the gateway is 93 * down. Minimal RFC value is 3; it corresponds 94 * to ~3sec-8min depending on RTO. 95 */ 96 97 #define TCP_RETR2 15 /* 98 * This should take at least 99 * 90 minutes to time out. 100 * RFC1122 says that the limit is 100 sec. 101 * 15 is ~13-30min depending on RTO. 102 */ 103 104 #define TCP_SYN_RETRIES 6 /* This is how many retries are done 105 * when active opening a connection. 106 * RFC1122 says the minimum retry MUST 107 * be at least 180secs. Nevertheless 108 * this value is corresponding to 109 * 63secs of retransmission with the 110 * current initial RTO. 111 */ 112 113 #define TCP_SYNACK_RETRIES 5 /* This is how may retries are done 114 * when passive opening a connection. 115 * This is corresponding to 31secs of 116 * retransmission with the current 117 * initial RTO. 118 */ 119 120 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT 121 * state, about 60 seconds */ 122 #define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN 123 /* BSD style FIN_WAIT2 deadlock breaker. 124 * It used to be 3min, new value is 60sec, 125 * to combine FIN-WAIT-2 timeout with 126 * TIME-WAIT timer. 127 */ 128 129 #define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */ 130 #if HZ >= 100 131 #define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */ 132 #define TCP_ATO_MIN ((unsigned)(HZ/25)) 133 #else 134 #define TCP_DELACK_MIN 4U 135 #define TCP_ATO_MIN 4U 136 #endif 137 #define TCP_RTO_MAX ((unsigned)(120*HZ)) 138 #define TCP_RTO_MIN ((unsigned)(HZ/5)) 139 #define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */ 140 #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */ 141 #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now 142 * used as a fallback RTO for the 143 * initial data transmission if no 144 * valid RTT sample has been acquired, 145 * most likely due to retrans in 3WHS. 146 */ 147 148 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes 149 * for local resources. 150 */ 151 #define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */ 152 #define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */ 153 #define TCP_KEEPALIVE_INTVL (75*HZ) 154 155 #define MAX_TCP_KEEPIDLE 32767 156 #define MAX_TCP_KEEPINTVL 32767 157 #define MAX_TCP_KEEPCNT 127 158 #define MAX_TCP_SYNCNT 127 159 160 #define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */ 161 162 #define TCP_PAWS_24DAYS (60 * 60 * 24 * 24) 163 #define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated 164 * after this time. It should be equal 165 * (or greater than) TCP_TIMEWAIT_LEN 166 * to provide reliability equal to one 167 * provided by timewait state. 168 */ 169 #define TCP_PAWS_WINDOW 1 /* Replay window for per-host 170 * timestamps. It must be less than 171 * minimal timewait lifetime. 172 */ 173 /* 174 * TCP option 175 */ 176 177 #define TCPOPT_NOP 1 /* Padding */ 178 #define TCPOPT_EOL 0 /* End of options */ 179 #define TCPOPT_MSS 2 /* Segment size negotiating */ 180 #define TCPOPT_WINDOW 3 /* Window scaling */ 181 #define TCPOPT_SACK_PERM 4 /* SACK Permitted */ 182 #define TCPOPT_SACK 5 /* SACK Block */ 183 #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ 184 #define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */ 185 #define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */ 186 #define TCPOPT_EXP 254 /* Experimental */ 187 /* Magic number to be after the option value for sharing TCP 188 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt 189 */ 190 #define TCPOPT_FASTOPEN_MAGIC 0xF989 191 #define TCPOPT_SMC_MAGIC 0xE2D4C3D9 192 193 /* 194 * TCP option lengths 195 */ 196 197 #define TCPOLEN_MSS 4 198 #define TCPOLEN_WINDOW 3 199 #define TCPOLEN_SACK_PERM 2 200 #define TCPOLEN_TIMESTAMP 10 201 #define TCPOLEN_MD5SIG 18 202 #define TCPOLEN_FASTOPEN_BASE 2 203 #define TCPOLEN_EXP_FASTOPEN_BASE 4 204 #define TCPOLEN_EXP_SMC_BASE 6 205 206 /* But this is what stacks really send out. */ 207 #define TCPOLEN_TSTAMP_ALIGNED 12 208 #define TCPOLEN_WSCALE_ALIGNED 4 209 #define TCPOLEN_SACKPERM_ALIGNED 4 210 #define TCPOLEN_SACK_BASE 2 211 #define TCPOLEN_SACK_BASE_ALIGNED 4 212 #define TCPOLEN_SACK_PERBLOCK 8 213 #define TCPOLEN_MD5SIG_ALIGNED 20 214 #define TCPOLEN_MSS_ALIGNED 4 215 #define TCPOLEN_EXP_SMC_BASE_ALIGNED 8 216 217 /* Flags in tp->nonagle */ 218 #define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */ 219 #define TCP_NAGLE_CORK 2 /* Socket is corked */ 220 #define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */ 221 222 /* TCP thin-stream limits */ 223 #define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. backoff */ 224 225 /* TCP initial congestion window as per rfc6928 */ 226 #define TCP_INIT_CWND 10 227 228 /* Bit Flags for sysctl_tcp_fastopen */ 229 #define TFO_CLIENT_ENABLE 1 230 #define TFO_SERVER_ENABLE 2 231 #define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */ 232 233 /* Accept SYN data w/o any cookie option */ 234 #define TFO_SERVER_COOKIE_NOT_REQD 0x200 235 236 /* Force enable TFO on all listeners, i.e., not requiring the 237 * TCP_FASTOPEN socket option. 238 */ 239 #define TFO_SERVER_WO_SOCKOPT1 0x400 240 241 242 /* sysctl variables for tcp */ 243 extern int sysctl_tcp_max_orphans; 244 extern long sysctl_tcp_mem[3]; 245 246 #define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */ 247 #define TCP_RACK_STATIC_REO_WND 0x2 /* Use static RACK reo wnd */ 248 249 extern atomic_long_t tcp_memory_allocated; 250 extern struct percpu_counter tcp_sockets_allocated; 251 extern unsigned long tcp_memory_pressure; 252 253 /* optimized version of sk_under_memory_pressure() for TCP sockets */ 254 static inline bool tcp_under_memory_pressure(const struct sock *sk) 255 { 256 if (mem_cgroup_sockets_enabled && sk->sk_memcg && 257 mem_cgroup_under_socket_pressure(sk->sk_memcg)) 258 return true; 259 260 return tcp_memory_pressure; 261 } 262 /* 263 * The next routines deal with comparing 32 bit unsigned ints 264 * and worry about wraparound (automatic with unsigned arithmetic). 265 */ 266 267 static inline bool before(__u32 seq1, __u32 seq2) 268 { 269 return (__s32)(seq1-seq2) < 0; 270 } 271 #define after(seq2, seq1) before(seq1, seq2) 272 273 /* is s2<=s1<=s3 ? */ 274 static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3) 275 { 276 return seq3 - seq2 >= seq1 - seq2; 277 } 278 279 static inline bool tcp_out_of_memory(struct sock *sk) 280 { 281 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && 282 sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2)) 283 return true; 284 return false; 285 } 286 287 void sk_forced_mem_schedule(struct sock *sk, int size); 288 289 static inline bool tcp_too_many_orphans(struct sock *sk, int shift) 290 { 291 struct percpu_counter *ocp = sk->sk_prot->orphan_count; 292 int orphans = percpu_counter_read_positive(ocp); 293 294 if (orphans << shift > sysctl_tcp_max_orphans) { 295 orphans = percpu_counter_sum_positive(ocp); 296 if (orphans << shift > sysctl_tcp_max_orphans) 297 return true; 298 } 299 return false; 300 } 301 302 bool tcp_check_oom(struct sock *sk, int shift); 303 304 305 extern struct proto tcp_prot; 306 307 #define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field) 308 #define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field) 309 #define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field) 310 #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val) 311 312 void tcp_tasklet_init(void); 313 314 void tcp_v4_err(struct sk_buff *skb, u32); 315 316 void tcp_shutdown(struct sock *sk, int how); 317 318 int tcp_v4_early_demux(struct sk_buff *skb); 319 int tcp_v4_rcv(struct sk_buff *skb); 320 321 int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); 322 int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); 323 int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size); 324 int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, 325 int flags); 326 int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, 327 size_t size, int flags); 328 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, 329 size_t size, int flags); 330 void tcp_release_cb(struct sock *sk); 331 void tcp_wfree(struct sk_buff *skb); 332 void tcp_write_timer_handler(struct sock *sk); 333 void tcp_delack_timer_handler(struct sock *sk); 334 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); 335 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb); 336 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 337 const struct tcphdr *th); 338 void tcp_rcv_space_adjust(struct sock *sk); 339 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); 340 void tcp_twsk_destructor(struct sock *sk); 341 ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, 342 struct pipe_inode_info *pipe, size_t len, 343 unsigned int flags); 344 345 static inline void tcp_dec_quickack_mode(struct sock *sk, 346 const unsigned int pkts) 347 { 348 struct inet_connection_sock *icsk = inet_csk(sk); 349 350 if (icsk->icsk_ack.quick) { 351 if (pkts >= icsk->icsk_ack.quick) { 352 icsk->icsk_ack.quick = 0; 353 /* Leaving quickack mode we deflate ATO. */ 354 icsk->icsk_ack.ato = TCP_ATO_MIN; 355 } else 356 icsk->icsk_ack.quick -= pkts; 357 } 358 } 359 360 #define TCP_ECN_OK 1 361 #define TCP_ECN_QUEUE_CWR 2 362 #define TCP_ECN_DEMAND_CWR 4 363 #define TCP_ECN_SEEN 8 364 365 enum tcp_tw_status { 366 TCP_TW_SUCCESS = 0, 367 TCP_TW_RST = 1, 368 TCP_TW_ACK = 2, 369 TCP_TW_SYN = 3 370 }; 371 372 373 enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw, 374 struct sk_buff *skb, 375 const struct tcphdr *th); 376 struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, 377 struct request_sock *req, bool fastopen, 378 bool *lost_race); 379 int tcp_child_process(struct sock *parent, struct sock *child, 380 struct sk_buff *skb); 381 void tcp_enter_loss(struct sock *sk); 382 void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag); 383 void tcp_clear_retrans(struct tcp_sock *tp); 384 void tcp_update_metrics(struct sock *sk); 385 void tcp_init_metrics(struct sock *sk); 386 void tcp_metrics_init(void); 387 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst); 388 void tcp_close(struct sock *sk, long timeout); 389 void tcp_init_sock(struct sock *sk); 390 void tcp_init_transfer(struct sock *sk, int bpf_op); 391 __poll_t tcp_poll_mask(struct socket *sock, __poll_t events); 392 int tcp_getsockopt(struct sock *sk, int level, int optname, 393 char __user *optval, int __user *optlen); 394 int tcp_setsockopt(struct sock *sk, int level, int optname, 395 char __user *optval, unsigned int optlen); 396 int compat_tcp_getsockopt(struct sock *sk, int level, int optname, 397 char __user *optval, int __user *optlen); 398 int compat_tcp_setsockopt(struct sock *sk, int level, int optname, 399 char __user *optval, unsigned int optlen); 400 void tcp_set_keepalive(struct sock *sk, int val); 401 void tcp_syn_ack_timeout(const struct request_sock *req); 402 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, 403 int flags, int *addr_len); 404 void tcp_parse_options(const struct net *net, const struct sk_buff *skb, 405 struct tcp_options_received *opt_rx, 406 int estab, struct tcp_fastopen_cookie *foc); 407 const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); 408 409 /* 410 * TCP v4 functions exported for the inet6 API 411 */ 412 413 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); 414 void tcp_v4_mtu_reduced(struct sock *sk); 415 void tcp_req_err(struct sock *sk, u32 seq, bool abort); 416 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); 417 struct sock *tcp_create_openreq_child(const struct sock *sk, 418 struct request_sock *req, 419 struct sk_buff *skb); 420 void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst); 421 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, 422 struct request_sock *req, 423 struct dst_entry *dst, 424 struct request_sock *req_unhash, 425 bool *own_req); 426 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb); 427 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 428 int tcp_connect(struct sock *sk); 429 enum tcp_synack_type { 430 TCP_SYNACK_NORMAL, 431 TCP_SYNACK_FASTOPEN, 432 TCP_SYNACK_COOKIE, 433 }; 434 struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, 435 struct request_sock *req, 436 struct tcp_fastopen_cookie *foc, 437 enum tcp_synack_type synack_type); 438 int tcp_disconnect(struct sock *sk, int flags); 439 440 void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); 441 int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size); 442 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); 443 444 /* From syncookies.c */ 445 struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, 446 struct request_sock *req, 447 struct dst_entry *dst, u32 tsoff); 448 int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, 449 u32 cookie); 450 struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb); 451 #ifdef CONFIG_SYN_COOKIES 452 453 /* Syncookies use a monotonic timer which increments every 60 seconds. 454 * This counter is used both as a hash input and partially encoded into 455 * the cookie value. A cookie is only validated further if the delta 456 * between the current counter value and the encoded one is less than this, 457 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if 458 * the counter advances immediately after a cookie is generated). 459 */ 460 #define MAX_SYNCOOKIE_AGE 2 461 #define TCP_SYNCOOKIE_PERIOD (60 * HZ) 462 #define TCP_SYNCOOKIE_VALID (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD) 463 464 /* syncookies: remember time of last synqueue overflow 465 * But do not dirty this field too often (once per second is enough) 466 * It is racy as we do not hold a lock, but race is very minor. 467 */ 468 static inline void tcp_synq_overflow(const struct sock *sk) 469 { 470 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; 471 unsigned long now = jiffies; 472 473 if (time_after(now, last_overflow + HZ)) 474 tcp_sk(sk)->rx_opt.ts_recent_stamp = now; 475 } 476 477 /* syncookies: no recent synqueue overflow on this listening socket? */ 478 static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) 479 { 480 unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; 481 482 return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID); 483 } 484 485 static inline u32 tcp_cookie_time(void) 486 { 487 u64 val = get_jiffies_64(); 488 489 do_div(val, TCP_SYNCOOKIE_PERIOD); 490 return val; 491 } 492 493 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, 494 u16 *mssp); 495 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss); 496 u64 cookie_init_timestamp(struct request_sock *req); 497 bool cookie_timestamp_decode(const struct net *net, 498 struct tcp_options_received *opt); 499 bool cookie_ecn_ok(const struct tcp_options_received *opt, 500 const struct net *net, const struct dst_entry *dst); 501 502 /* From net/ipv6/syncookies.c */ 503 int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th, 504 u32 cookie); 505 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); 506 507 u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, 508 const struct tcphdr *th, u16 *mssp); 509 __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss); 510 #endif 511 /* tcp_output.c */ 512 513 void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, 514 int nonagle); 515 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); 516 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs); 517 void tcp_retransmit_timer(struct sock *sk); 518 void tcp_xmit_retransmit_queue(struct sock *); 519 void tcp_simple_retransmit(struct sock *); 520 void tcp_enter_recovery(struct sock *sk, bool ece_ack); 521 int tcp_trim_head(struct sock *, struct sk_buff *, u32); 522 enum tcp_queue { 523 TCP_FRAG_IN_WRITE_QUEUE, 524 TCP_FRAG_IN_RTX_QUEUE, 525 }; 526 int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, 527 struct sk_buff *skb, u32 len, 528 unsigned int mss_now, gfp_t gfp); 529 530 void tcp_send_probe0(struct sock *); 531 void tcp_send_partial(struct sock *); 532 int tcp_write_wakeup(struct sock *, int mib); 533 void tcp_send_fin(struct sock *sk); 534 void tcp_send_active_reset(struct sock *sk, gfp_t priority); 535 int tcp_send_synack(struct sock *); 536 void tcp_push_one(struct sock *, unsigned int mss_now); 537 void tcp_send_ack(struct sock *sk); 538 void tcp_send_delayed_ack(struct sock *sk); 539 void tcp_send_loss_probe(struct sock *sk); 540 bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto); 541 void tcp_skb_collapse_tstamp(struct sk_buff *skb, 542 const struct sk_buff *next_skb); 543 544 /* tcp_input.c */ 545 void tcp_rearm_rto(struct sock *sk); 546 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); 547 void tcp_reset(struct sock *sk); 548 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); 549 void tcp_fin(struct sock *sk); 550 551 /* tcp_timer.c */ 552 void tcp_init_xmit_timers(struct sock *); 553 static inline void tcp_clear_xmit_timers(struct sock *sk) 554 { 555 hrtimer_cancel(&tcp_sk(sk)->pacing_timer); 556 inet_csk_clear_xmit_timers(sk); 557 } 558 559 unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); 560 unsigned int tcp_current_mss(struct sock *sk); 561 562 /* Bound MSS / TSO packet size with the half of the window */ 563 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) 564 { 565 int cutoff; 566 567 /* When peer uses tiny windows, there is no use in packetizing 568 * to sub-MSS pieces for the sake of SWS or making sure there 569 * are enough packets in the pipe for fast recovery. 570 * 571 * On the other hand, for extremely large MSS devices, handling 572 * smaller than MSS windows in this way does make sense. 573 */ 574 if (tp->max_window > TCP_MSS_DEFAULT) 575 cutoff = (tp->max_window >> 1); 576 else 577 cutoff = tp->max_window; 578 579 if (cutoff && pktsize > cutoff) 580 return max_t(int, cutoff, 68U - tp->tcp_header_len); 581 else 582 return pktsize; 583 } 584 585 /* tcp.c */ 586 void tcp_get_info(struct sock *, struct tcp_info *); 587 588 /* Read 'sendfile()'-style from a TCP socket */ 589 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 590 sk_read_actor_t recv_actor); 591 592 void tcp_initialize_rcv_mss(struct sock *sk); 593 594 int tcp_mtu_to_mss(struct sock *sk, int pmtu); 595 int tcp_mss_to_mtu(struct sock *sk, int mss); 596 void tcp_mtup_init(struct sock *sk); 597 void tcp_init_buffer_space(struct sock *sk); 598 599 static inline void tcp_bound_rto(const struct sock *sk) 600 { 601 if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) 602 inet_csk(sk)->icsk_rto = TCP_RTO_MAX; 603 } 604 605 static inline u32 __tcp_set_rto(const struct tcp_sock *tp) 606 { 607 return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us); 608 } 609 610 static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) 611 { 612 tp->pred_flags = htonl((tp->tcp_header_len << 26) | 613 ntohl(TCP_FLAG_ACK) | 614 snd_wnd); 615 } 616 617 static inline void tcp_fast_path_on(struct tcp_sock *tp) 618 { 619 __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale); 620 } 621 622 static inline void tcp_fast_path_check(struct sock *sk) 623 { 624 struct tcp_sock *tp = tcp_sk(sk); 625 626 if (RB_EMPTY_ROOT(&tp->out_of_order_queue) && 627 tp->rcv_wnd && 628 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && 629 !tp->urg_data) 630 tcp_fast_path_on(tp); 631 } 632 633 /* Compute the actual rto_min value */ 634 static inline u32 tcp_rto_min(struct sock *sk) 635 { 636 const struct dst_entry *dst = __sk_dst_get(sk); 637 u32 rto_min = TCP_RTO_MIN; 638 639 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) 640 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); 641 return rto_min; 642 } 643 644 static inline u32 tcp_rto_min_us(struct sock *sk) 645 { 646 return jiffies_to_usecs(tcp_rto_min(sk)); 647 } 648 649 static inline bool tcp_ca_dst_locked(const struct dst_entry *dst) 650 { 651 return dst_metric_locked(dst, RTAX_CC_ALGO); 652 } 653 654 /* Minimum RTT in usec. ~0 means not available. */ 655 static inline u32 tcp_min_rtt(const struct tcp_sock *tp) 656 { 657 return minmax_get(&tp->rtt_min); 658 } 659 660 /* Compute the actual receive window we are currently advertising. 661 * Rcv_nxt can be after the window if our peer push more data 662 * than the offered window. 663 */ 664 static inline u32 tcp_receive_window(const struct tcp_sock *tp) 665 { 666 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt; 667 668 if (win < 0) 669 win = 0; 670 return (u32) win; 671 } 672 673 /* Choose a new window, without checks for shrinking, and without 674 * scaling applied to the result. The caller does these things 675 * if necessary. This is a "raw" window selection. 676 */ 677 u32 __tcp_select_window(struct sock *sk); 678 679 void tcp_send_window_probe(struct sock *sk); 680 681 /* TCP uses 32bit jiffies to save some space. 682 * Note that this is different from tcp_time_stamp, which 683 * historically has been the same until linux-4.13. 684 */ 685 #define tcp_jiffies32 ((u32)jiffies) 686 687 /* 688 * Deliver a 32bit value for TCP timestamp option (RFC 7323) 689 * It is no longer tied to jiffies, but to 1 ms clock. 690 * Note: double check if you want to use tcp_jiffies32 instead of this. 691 */ 692 #define TCP_TS_HZ 1000 693 694 static inline u64 tcp_clock_ns(void) 695 { 696 return local_clock(); 697 } 698 699 static inline u64 tcp_clock_us(void) 700 { 701 return div_u64(tcp_clock_ns(), NSEC_PER_USEC); 702 } 703 704 /* This should only be used in contexts where tp->tcp_mstamp is up to date */ 705 static inline u32 tcp_time_stamp(const struct tcp_sock *tp) 706 { 707 return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ); 708 } 709 710 /* Could use tcp_clock_us() / 1000, but this version uses a single divide */ 711 static inline u32 tcp_time_stamp_raw(void) 712 { 713 return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ); 714 } 715 716 717 /* Refresh 1us clock of a TCP socket, 718 * ensuring monotically increasing values. 719 */ 720 static inline void tcp_mstamp_refresh(struct tcp_sock *tp) 721 { 722 u64 val = tcp_clock_us(); 723 724 if (val > tp->tcp_mstamp) 725 tp->tcp_mstamp = val; 726 } 727 728 static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) 729 { 730 return max_t(s64, t1 - t0, 0); 731 } 732 733 static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) 734 { 735 return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ); 736 } 737 738 739 #define tcp_flag_byte(th) (((u_int8_t *)th)[13]) 740 741 #define TCPHDR_FIN 0x01 742 #define TCPHDR_SYN 0x02 743 #define TCPHDR_RST 0x04 744 #define TCPHDR_PSH 0x08 745 #define TCPHDR_ACK 0x10 746 #define TCPHDR_URG 0x20 747 #define TCPHDR_ECE 0x40 748 #define TCPHDR_CWR 0x80 749 750 #define TCPHDR_SYN_ECN (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR) 751 752 /* This is what the send packet queuing engine uses to pass 753 * TCP per-packet control information to the transmission code. 754 * We also store the host-order sequence numbers in here too. 755 * This is 44 bytes if IPV6 is enabled. 756 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately. 757 */ 758 struct tcp_skb_cb { 759 __u32 seq; /* Starting sequence number */ 760 __u32 end_seq; /* SEQ + FIN + SYN + datalen */ 761 union { 762 /* Note : tcp_tw_isn is used in input path only 763 * (isn chosen by tcp_timewait_state_process()) 764 * 765 * tcp_gso_segs/size are used in write queue only, 766 * cf tcp_skb_pcount()/tcp_skb_mss() 767 */ 768 __u32 tcp_tw_isn; 769 struct { 770 u16 tcp_gso_segs; 771 u16 tcp_gso_size; 772 }; 773 }; 774 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ 775 776 __u8 sacked; /* State flags for SACK. */ 777 #define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */ 778 #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ 779 #define TCPCB_LOST 0x04 /* SKB is lost */ 780 #define TCPCB_TAGBITS 0x07 /* All tag bits */ 781 #define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp) */ 782 #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ 783 #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \ 784 TCPCB_REPAIRED) 785 786 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ 787 __u8 txstamp_ack:1, /* Record TX timestamp for ack? */ 788 eor:1, /* Is skb MSG_EOR marked? */ 789 has_rxtstamp:1, /* SKB has a RX timestamp */ 790 unused:5; 791 __u32 ack_seq; /* Sequence number ACK'd */ 792 union { 793 struct { 794 /* There is space for up to 24 bytes */ 795 __u32 in_flight:30,/* Bytes in flight at transmit */ 796 is_app_limited:1, /* cwnd not fully used? */ 797 unused:1; 798 /* pkts S/ACKed so far upon tx of skb, incl retrans: */ 799 __u32 delivered; 800 /* start of send pipeline phase */ 801 u64 first_tx_mstamp; 802 /* when we reached the "delivered" count */ 803 u64 delivered_mstamp; 804 } tx; /* only used for outgoing skbs */ 805 union { 806 struct inet_skb_parm h4; 807 #if IS_ENABLED(CONFIG_IPV6) 808 struct inet6_skb_parm h6; 809 #endif 810 } header; /* For incoming skbs */ 811 struct { 812 __u32 key; 813 __u32 flags; 814 struct bpf_map *map; 815 void *data_end; 816 } bpf; 817 }; 818 }; 819 820 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) 821 822 823 #if IS_ENABLED(CONFIG_IPV6) 824 /* This is the variant of inet6_iif() that must be used by TCP, 825 * as TCP moves IP6CB into a different location in skb->cb[] 826 */ 827 static inline int tcp_v6_iif(const struct sk_buff *skb) 828 { 829 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); 830 831 return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif; 832 } 833 834 /* TCP_SKB_CB reference means this can not be used from early demux */ 835 static inline int tcp_v6_sdif(const struct sk_buff *skb) 836 { 837 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) 838 if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags)) 839 return TCP_SKB_CB(skb)->header.h6.iif; 840 #endif 841 return 0; 842 } 843 #endif 844 845 static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) 846 { 847 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) 848 if (!net->ipv4.sysctl_tcp_l3mdev_accept && 849 skb && ipv4_l3mdev_skb(IPCB(skb)->flags)) 850 return true; 851 #endif 852 return false; 853 } 854 855 /* TCP_SKB_CB reference means this can not be used from early demux */ 856 static inline int tcp_v4_sdif(struct sk_buff *skb) 857 { 858 #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) 859 if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) 860 return TCP_SKB_CB(skb)->header.h4.iif; 861 #endif 862 return 0; 863 } 864 865 /* Due to TSO, an SKB can be composed of multiple actual 866 * packets. To keep these tracked properly, we use this. 867 */ 868 static inline int tcp_skb_pcount(const struct sk_buff *skb) 869 { 870 return TCP_SKB_CB(skb)->tcp_gso_segs; 871 } 872 873 static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs) 874 { 875 TCP_SKB_CB(skb)->tcp_gso_segs = segs; 876 } 877 878 static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs) 879 { 880 TCP_SKB_CB(skb)->tcp_gso_segs += segs; 881 } 882 883 /* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */ 884 static inline int tcp_skb_mss(const struct sk_buff *skb) 885 { 886 return TCP_SKB_CB(skb)->tcp_gso_size; 887 } 888 889 static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb) 890 { 891 return likely(!TCP_SKB_CB(skb)->eor); 892 } 893 894 /* Events passed to congestion control interface */ 895 enum tcp_ca_event { 896 CA_EVENT_TX_START, /* first transmit when no packets in flight */ 897 CA_EVENT_CWND_RESTART, /* congestion window restart */ 898 CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */ 899 CA_EVENT_LOSS, /* loss timeout */ 900 CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */ 901 CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */ 902 CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */ 903 CA_EVENT_NON_DELAYED_ACK, 904 }; 905 906 /* Information about inbound ACK, passed to cong_ops->in_ack_event() */ 907 enum tcp_ca_ack_event_flags { 908 CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */ 909 CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */ 910 CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */ 911 }; 912 913 /* 914 * Interface for adding new TCP congestion control handlers 915 */ 916 #define TCP_CA_NAME_MAX 16 917 #define TCP_CA_MAX 128 918 #define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX) 919 920 #define TCP_CA_UNSPEC 0 921 922 /* Algorithm can be set on socket without CAP_NET_ADMIN privileges */ 923 #define TCP_CONG_NON_RESTRICTED 0x1 924 /* Requires ECN/ECT set on all packets */ 925 #define TCP_CONG_NEEDS_ECN 0x2 926 927 union tcp_cc_info; 928 929 struct ack_sample { 930 u32 pkts_acked; 931 s32 rtt_us; 932 u32 in_flight; 933 }; 934 935 /* A rate sample measures the number of (original/retransmitted) data 936 * packets delivered "delivered" over an interval of time "interval_us". 937 * The tcp_rate.c code fills in the rate sample, and congestion 938 * control modules that define a cong_control function to run at the end 939 * of ACK processing can optionally chose to consult this sample when 940 * setting cwnd and pacing rate. 941 * A sample is invalid if "delivered" or "interval_us" is negative. 942 */ 943 struct rate_sample { 944 u64 prior_mstamp; /* starting timestamp for interval */ 945 u32 prior_delivered; /* tp->delivered at "prior_mstamp" */ 946 s32 delivered; /* number of packets delivered over interval */ 947 long interval_us; /* time for tp->delivered to incr "delivered" */ 948 long rtt_us; /* RTT of last (S)ACKed packet (or -1) */ 949 int losses; /* number of packets marked lost upon ACK */ 950 u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */ 951 u32 prior_in_flight; /* in flight before this ACK */ 952 bool is_app_limited; /* is sample from packet with bubble in pipe? */ 953 bool is_retrans; /* is sample from retransmission? */ 954 bool is_ack_delayed; /* is this (likely) a delayed ACK? */ 955 }; 956 957 struct tcp_congestion_ops { 958 struct list_head list; 959 u32 key; 960 u32 flags; 961 962 /* initialize private data (optional) */ 963 void (*init)(struct sock *sk); 964 /* cleanup private data (optional) */ 965 void (*release)(struct sock *sk); 966 967 /* return slow start threshold (required) */ 968 u32 (*ssthresh)(struct sock *sk); 969 /* do new cwnd calculation (required) */ 970 void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked); 971 /* call before changing ca_state (optional) */ 972 void (*set_state)(struct sock *sk, u8 new_state); 973 /* call when cwnd event occurs (optional) */ 974 void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); 975 /* call when ack arrives (optional) */ 976 void (*in_ack_event)(struct sock *sk, u32 flags); 977 /* new value of cwnd after loss (required) */ 978 u32 (*undo_cwnd)(struct sock *sk); 979 /* hook for packet ack accounting (optional) */ 980 void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); 981 /* override sysctl_tcp_min_tso_segs */ 982 u32 (*min_tso_segs)(struct sock *sk); 983 /* returns the multiplier used in tcp_sndbuf_expand (optional) */ 984 u32 (*sndbuf_expand)(struct sock *sk); 985 /* call when packets are delivered to update cwnd and pacing rate, 986 * after all the ca_state processing. (optional) 987 */ 988 void (*cong_control)(struct sock *sk, const struct rate_sample *rs); 989 /* get info for inet_diag (optional) */ 990 size_t (*get_info)(struct sock *sk, u32 ext, int *attr, 991 union tcp_cc_info *info); 992 993 char name[TCP_CA_NAME_MAX]; 994 struct module *owner; 995 }; 996 997 int tcp_register_congestion_control(struct tcp_congestion_ops *type); 998 void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); 999 1000 void tcp_assign_congestion_control(struct sock *sk); 1001 void tcp_init_congestion_control(struct sock *sk); 1002 void tcp_cleanup_congestion_control(struct sock *sk); 1003 int tcp_set_default_congestion_control(struct net *net, const char *name); 1004 void tcp_get_default_congestion_control(struct net *net, char *name); 1005 void tcp_get_available_congestion_control(char *buf, size_t len); 1006 void tcp_get_allowed_congestion_control(char *buf, size_t len); 1007 int tcp_set_allowed_congestion_control(char *allowed); 1008 int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit); 1009 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked); 1010 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked); 1011 1012 u32 tcp_reno_ssthresh(struct sock *sk); 1013 u32 tcp_reno_undo_cwnd(struct sock *sk); 1014 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); 1015 extern struct tcp_congestion_ops tcp_reno; 1016 1017 struct tcp_congestion_ops *tcp_ca_find_key(u32 key); 1018 u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca); 1019 #ifdef CONFIG_INET 1020 char *tcp_ca_get_name_by_key(u32 key, char *buffer); 1021 #else 1022 static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer) 1023 { 1024 return NULL; 1025 } 1026 #endif 1027 1028 static inline bool tcp_ca_needs_ecn(const struct sock *sk) 1029 { 1030 const struct inet_connection_sock *icsk = inet_csk(sk); 1031 1032 return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN; 1033 } 1034 1035 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) 1036 { 1037 struct inet_connection_sock *icsk = inet_csk(sk); 1038 1039 if (icsk->icsk_ca_ops->set_state) 1040 icsk->icsk_ca_ops->set_state(sk, ca_state); 1041 icsk->icsk_ca_state = ca_state; 1042 } 1043 1044 static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) 1045 { 1046 const struct inet_connection_sock *icsk = inet_csk(sk); 1047 1048 if (icsk->icsk_ca_ops->cwnd_event) 1049 icsk->icsk_ca_ops->cwnd_event(sk, event); 1050 } 1051 1052 /* From tcp_rate.c */ 1053 void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb); 1054 void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, 1055 struct rate_sample *rs); 1056 void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, 1057 bool is_sack_reneg, struct rate_sample *rs); 1058 void tcp_rate_check_app_limited(struct sock *sk); 1059 1060 /* These functions determine how the current flow behaves in respect of SACK 1061 * handling. SACK is negotiated with the peer, and therefore it can vary 1062 * between different flows. 1063 * 1064 * tcp_is_sack - SACK enabled 1065 * tcp_is_reno - No SACK 1066 */ 1067 static inline int tcp_is_sack(const struct tcp_sock *tp) 1068 { 1069 return tp->rx_opt.sack_ok; 1070 } 1071 1072 static inline bool tcp_is_reno(const struct tcp_sock *tp) 1073 { 1074 return !tcp_is_sack(tp); 1075 } 1076 1077 static inline unsigned int tcp_left_out(const struct tcp_sock *tp) 1078 { 1079 return tp->sacked_out + tp->lost_out; 1080 } 1081 1082 /* This determines how many packets are "in the network" to the best 1083 * of our knowledge. In many cases it is conservative, but where 1084 * detailed information is available from the receiver (via SACK 1085 * blocks etc.) we can make more aggressive calculations. 1086 * 1087 * Use this for decisions involving congestion control, use just 1088 * tp->packets_out to determine if the send queue is empty or not. 1089 * 1090 * Read this equation as: 1091 * 1092 * "Packets sent once on transmission queue" MINUS 1093 * "Packets left network, but not honestly ACKed yet" PLUS 1094 * "Packets fast retransmitted" 1095 */ 1096 static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp) 1097 { 1098 return tp->packets_out - tcp_left_out(tp) + tp->retrans_out; 1099 } 1100 1101 #define TCP_INFINITE_SSTHRESH 0x7fffffff 1102 1103 static inline bool tcp_in_slow_start(const struct tcp_sock *tp) 1104 { 1105 return tp->snd_cwnd < tp->snd_ssthresh; 1106 } 1107 1108 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp) 1109 { 1110 return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; 1111 } 1112 1113 static inline bool tcp_in_cwnd_reduction(const struct sock *sk) 1114 { 1115 return (TCPF_CA_CWR | TCPF_CA_Recovery) & 1116 (1 << inet_csk(sk)->icsk_ca_state); 1117 } 1118 1119 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd. 1120 * The exception is cwnd reduction phase, when cwnd is decreasing towards 1121 * ssthresh. 1122 */ 1123 static inline __u32 tcp_current_ssthresh(const struct sock *sk) 1124 { 1125 const struct tcp_sock *tp = tcp_sk(sk); 1126 1127 if (tcp_in_cwnd_reduction(sk)) 1128 return tp->snd_ssthresh; 1129 else 1130 return max(tp->snd_ssthresh, 1131 ((tp->snd_cwnd >> 1) + 1132 (tp->snd_cwnd >> 2))); 1133 } 1134 1135 /* Use define here intentionally to get WARN_ON location shown at the caller */ 1136 #define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) 1137 1138 void tcp_enter_cwr(struct sock *sk); 1139 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst); 1140 1141 /* The maximum number of MSS of available cwnd for which TSO defers 1142 * sending if not using sysctl_tcp_tso_win_divisor. 1143 */ 1144 static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp) 1145 { 1146 return 3; 1147 } 1148 1149 /* Returns end sequence number of the receiver's advertised window */ 1150 static inline u32 tcp_wnd_end(const struct tcp_sock *tp) 1151 { 1152 return tp->snd_una + tp->snd_wnd; 1153 } 1154 1155 /* We follow the spirit of RFC2861 to validate cwnd but implement a more 1156 * flexible approach. The RFC suggests cwnd should not be raised unless 1157 * it was fully used previously. And that's exactly what we do in 1158 * congestion avoidance mode. But in slow start we allow cwnd to grow 1159 * as long as the application has used half the cwnd. 1160 * Example : 1161 * cwnd is 10 (IW10), but application sends 9 frames. 1162 * We allow cwnd to reach 18 when all frames are ACKed. 1163 * This check is safe because it's as aggressive as slow start which already 1164 * risks 100% overshoot. The advantage is that we discourage application to 1165 * either send more filler packets or data to artificially blow up the cwnd 1166 * usage, and allow application-limited process to probe bw more aggressively. 1167 */ 1168 static inline bool tcp_is_cwnd_limited(const struct sock *sk) 1169 { 1170 const struct tcp_sock *tp = tcp_sk(sk); 1171 1172 /* If in slow start, ensure cwnd grows to twice what was ACKed. */ 1173 if (tcp_in_slow_start(tp)) 1174 return tp->snd_cwnd < 2 * tp->max_packets_out; 1175 1176 return tp->is_cwnd_limited; 1177 } 1178 1179 /* Something is really bad, we could not queue an additional packet, 1180 * because qdisc is full or receiver sent a 0 window. 1181 * We do not want to add fuel to the fire, or abort too early, 1182 * so make sure the timer we arm now is at least 200ms in the future, 1183 * regardless of current icsk_rto value (as it could be ~2ms) 1184 */ 1185 static inline unsigned long tcp_probe0_base(const struct sock *sk) 1186 { 1187 return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN); 1188 } 1189 1190 /* Variant of inet_csk_rto_backoff() used for zero window probes */ 1191 static inline unsigned long tcp_probe0_when(const struct sock *sk, 1192 unsigned long max_when) 1193 { 1194 u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff; 1195 1196 return (unsigned long)min_t(u64, when, max_when); 1197 } 1198 1199 static inline void tcp_check_probe_timer(struct sock *sk) 1200 { 1201 if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) 1202 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 1203 tcp_probe0_base(sk), TCP_RTO_MAX); 1204 } 1205 1206 static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq) 1207 { 1208 tp->snd_wl1 = seq; 1209 } 1210 1211 static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq) 1212 { 1213 tp->snd_wl1 = seq; 1214 } 1215 1216 /* 1217 * Calculate(/check) TCP checksum 1218 */ 1219 static inline __sum16 tcp_v4_check(int len, __be32 saddr, 1220 __be32 daddr, __wsum base) 1221 { 1222 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base); 1223 } 1224 1225 static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb) 1226 { 1227 return __skb_checksum_complete(skb); 1228 } 1229 1230 static inline bool tcp_checksum_complete(struct sk_buff *skb) 1231 { 1232 return !skb_csum_unnecessary(skb) && 1233 __tcp_checksum_complete(skb); 1234 } 1235 1236 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); 1237 int tcp_filter(struct sock *sk, struct sk_buff *skb); 1238 1239 #undef STATE_TRACE 1240 1241 #ifdef STATE_TRACE 1242 static const char *statename[]={ 1243 "Unused","Established","Syn Sent","Syn Recv", 1244 "Fin Wait 1","Fin Wait 2","Time Wait", "Close", 1245 "Close Wait","Last ACK","Listen","Closing" 1246 }; 1247 #endif 1248 void tcp_set_state(struct sock *sk, int state); 1249 1250 void tcp_done(struct sock *sk); 1251 1252 int tcp_abort(struct sock *sk, int err); 1253 1254 static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) 1255 { 1256 rx_opt->dsack = 0; 1257 rx_opt->num_sacks = 0; 1258 } 1259 1260 u32 tcp_default_init_rwnd(u32 mss); 1261 void tcp_cwnd_restart(struct sock *sk, s32 delta); 1262 1263 static inline void tcp_slow_start_after_idle_check(struct sock *sk) 1264 { 1265 const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 1266 struct tcp_sock *tp = tcp_sk(sk); 1267 s32 delta; 1268 1269 if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out || 1270 ca_ops->cong_control) 1271 return; 1272 delta = tcp_jiffies32 - tp->lsndtime; 1273 if (delta > inet_csk(sk)->icsk_rto) 1274 tcp_cwnd_restart(sk, delta); 1275 } 1276 1277 /* Determine a window scaling and initial window to offer. */ 1278 void tcp_select_initial_window(const struct sock *sk, int __space, 1279 __u32 mss, __u32 *rcv_wnd, 1280 __u32 *window_clamp, int wscale_ok, 1281 __u8 *rcv_wscale, __u32 init_rcv_wnd); 1282 1283 static inline int tcp_win_from_space(const struct sock *sk, int space) 1284 { 1285 int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale; 1286 1287 return tcp_adv_win_scale <= 0 ? 1288 (space>>(-tcp_adv_win_scale)) : 1289 space - (space>>tcp_adv_win_scale); 1290 } 1291 1292 /* Note: caller must be prepared to deal with negative returns */ 1293 static inline int tcp_space(const struct sock *sk) 1294 { 1295 return tcp_win_from_space(sk, sk->sk_rcvbuf - 1296 atomic_read(&sk->sk_rmem_alloc)); 1297 } 1298 1299 static inline int tcp_full_space(const struct sock *sk) 1300 { 1301 return tcp_win_from_space(sk, sk->sk_rcvbuf); 1302 } 1303 1304 extern void tcp_openreq_init_rwin(struct request_sock *req, 1305 const struct sock *sk_listener, 1306 const struct dst_entry *dst); 1307 1308 void tcp_enter_memory_pressure(struct sock *sk); 1309 void tcp_leave_memory_pressure(struct sock *sk); 1310 1311 static inline int keepalive_intvl_when(const struct tcp_sock *tp) 1312 { 1313 struct net *net = sock_net((struct sock *)tp); 1314 1315 return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl; 1316 } 1317 1318 static inline int keepalive_time_when(const struct tcp_sock *tp) 1319 { 1320 struct net *net = sock_net((struct sock *)tp); 1321 1322 return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time; 1323 } 1324 1325 static inline int keepalive_probes(const struct tcp_sock *tp) 1326 { 1327 struct net *net = sock_net((struct sock *)tp); 1328 1329 return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes; 1330 } 1331 1332 static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) 1333 { 1334 const struct inet_connection_sock *icsk = &tp->inet_conn; 1335 1336 return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime, 1337 tcp_jiffies32 - tp->rcv_tstamp); 1338 } 1339 1340 static inline int tcp_fin_time(const struct sock *sk) 1341 { 1342 int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout; 1343 const int rto = inet_csk(sk)->icsk_rto; 1344 1345 if (fin_timeout < (rto << 2) - (rto >> 1)) 1346 fin_timeout = (rto << 2) - (rto >> 1); 1347 1348 return fin_timeout; 1349 } 1350 1351 static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, 1352 int paws_win) 1353 { 1354 if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) 1355 return true; 1356 if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)) 1357 return true; 1358 /* 1359 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, 1360 * then following tcp messages have valid values. Ignore 0 value, 1361 * or else 'negative' tsval might forbid us to accept their packets. 1362 */ 1363 if (!rx_opt->ts_recent) 1364 return true; 1365 return false; 1366 } 1367 1368 static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, 1369 int rst) 1370 { 1371 if (tcp_paws_check(rx_opt, 0)) 1372 return false; 1373 1374 /* RST segments are not recommended to carry timestamp, 1375 and, if they do, it is recommended to ignore PAWS because 1376 "their cleanup function should take precedence over timestamps." 1377 Certainly, it is mistake. It is necessary to understand the reasons 1378 of this constraint to relax it: if peer reboots, clock may go 1379 out-of-sync and half-open connections will not be reset. 1380 Actually, the problem would be not existing if all 1381 the implementations followed draft about maintaining clock 1382 via reboots. Linux-2.2 DOES NOT! 1383 1384 However, we can relax time bounds for RST segments to MSL. 1385 */ 1386 if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) 1387 return false; 1388 return true; 1389 } 1390 1391 bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, 1392 int mib_idx, u32 *last_oow_ack_time); 1393 1394 static inline void tcp_mib_init(struct net *net) 1395 { 1396 /* See RFC 2012 */ 1397 TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1); 1398 TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ); 1399 TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ); 1400 TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1); 1401 } 1402 1403 /* from STCP */ 1404 static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp) 1405 { 1406 tp->lost_skb_hint = NULL; 1407 } 1408 1409 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) 1410 { 1411 tcp_clear_retrans_hints_partial(tp); 1412 tp->retransmit_skb_hint = NULL; 1413 } 1414 1415 union tcp_md5_addr { 1416 struct in_addr a4; 1417 #if IS_ENABLED(CONFIG_IPV6) 1418 struct in6_addr a6; 1419 #endif 1420 }; 1421 1422 /* - key database */ 1423 struct tcp_md5sig_key { 1424 struct hlist_node node; 1425 u8 keylen; 1426 u8 family; /* AF_INET or AF_INET6 */ 1427 union tcp_md5_addr addr; 1428 u8 prefixlen; 1429 u8 key[TCP_MD5SIG_MAXKEYLEN]; 1430 struct rcu_head rcu; 1431 }; 1432 1433 /* - sock block */ 1434 struct tcp_md5sig_info { 1435 struct hlist_head head; 1436 struct rcu_head rcu; 1437 }; 1438 1439 /* - pseudo header */ 1440 struct tcp4_pseudohdr { 1441 __be32 saddr; 1442 __be32 daddr; 1443 __u8 pad; 1444 __u8 protocol; 1445 __be16 len; 1446 }; 1447 1448 struct tcp6_pseudohdr { 1449 struct in6_addr saddr; 1450 struct in6_addr daddr; 1451 __be32 len; 1452 __be32 protocol; /* including padding */ 1453 }; 1454 1455 union tcp_md5sum_block { 1456 struct tcp4_pseudohdr ip4; 1457 #if IS_ENABLED(CONFIG_IPV6) 1458 struct tcp6_pseudohdr ip6; 1459 #endif 1460 }; 1461 1462 /* - pool: digest algorithm, hash description and scratch buffer */ 1463 struct tcp_md5sig_pool { 1464 struct ahash_request *md5_req; 1465 void *scratch; 1466 }; 1467 1468 /* - functions */ 1469 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, 1470 const struct sock *sk, const struct sk_buff *skb); 1471 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, 1472 int family, u8 prefixlen, const u8 *newkey, u8 newkeylen, 1473 gfp_t gfp); 1474 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, 1475 int family, u8 prefixlen); 1476 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, 1477 const struct sock *addr_sk); 1478 1479 #ifdef CONFIG_TCP_MD5SIG 1480 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, 1481 const union tcp_md5_addr *addr, 1482 int family); 1483 #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) 1484 #else 1485 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, 1486 const union tcp_md5_addr *addr, 1487 int family) 1488 { 1489 return NULL; 1490 } 1491 #define tcp_twsk_md5_key(twsk) NULL 1492 #endif 1493 1494 bool tcp_alloc_md5sig_pool(void); 1495 1496 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); 1497 static inline void tcp_put_md5sig_pool(void) 1498 { 1499 local_bh_enable(); 1500 } 1501 1502 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, 1503 unsigned int header_len); 1504 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, 1505 const struct tcp_md5sig_key *key); 1506 1507 /* From tcp_fastopen.c */ 1508 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, 1509 struct tcp_fastopen_cookie *cookie); 1510 void tcp_fastopen_cache_set(struct sock *sk, u16 mss, 1511 struct tcp_fastopen_cookie *cookie, bool syn_lost, 1512 u16 try_exp); 1513 struct tcp_fastopen_request { 1514 /* Fast Open cookie. Size 0 means a cookie request */ 1515 struct tcp_fastopen_cookie cookie; 1516 struct msghdr *data; /* data in MSG_FASTOPEN */ 1517 size_t size; 1518 int copied; /* queued in tcp_connect() */ 1519 }; 1520 void tcp_free_fastopen_req(struct tcp_sock *tp); 1521 void tcp_fastopen_destroy_cipher(struct sock *sk); 1522 void tcp_fastopen_ctx_destroy(struct net *net); 1523 int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk, 1524 void *key, unsigned int len); 1525 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb); 1526 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, 1527 struct request_sock *req, 1528 struct tcp_fastopen_cookie *foc, 1529 const struct dst_entry *dst); 1530 void tcp_fastopen_init_key_once(struct net *net); 1531 bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, 1532 struct tcp_fastopen_cookie *cookie); 1533 bool tcp_fastopen_defer_connect(struct sock *sk, int *err); 1534 #define TCP_FASTOPEN_KEY_LENGTH 16 1535 1536 /* Fastopen key context */ 1537 struct tcp_fastopen_context { 1538 struct crypto_cipher *tfm; 1539 __u8 key[TCP_FASTOPEN_KEY_LENGTH]; 1540 struct rcu_head rcu; 1541 }; 1542 1543 extern unsigned int sysctl_tcp_fastopen_blackhole_timeout; 1544 void tcp_fastopen_active_disable(struct sock *sk); 1545 bool tcp_fastopen_active_should_disable(struct sock *sk); 1546 void tcp_fastopen_active_disable_ofo_check(struct sock *sk); 1547 void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired); 1548 1549 /* Latencies incurred by various limits for a sender. They are 1550 * chronograph-like stats that are mutually exclusive. 1551 */ 1552 enum tcp_chrono { 1553 TCP_CHRONO_UNSPEC, 1554 TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */ 1555 TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */ 1556 TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */ 1557 __TCP_CHRONO_MAX, 1558 }; 1559 1560 void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type); 1561 void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type); 1562 1563 /* This helper is needed, because skb->tcp_tsorted_anchor uses 1564 * the same memory storage than skb->destructor/_skb_refdst 1565 */ 1566 static inline void tcp_skb_tsorted_anchor_cleanup(struct sk_buff *skb) 1567 { 1568 skb->destructor = NULL; 1569 skb->_skb_refdst = 0UL; 1570 } 1571 1572 #define tcp_skb_tsorted_save(skb) { \ 1573 unsigned long _save = skb->_skb_refdst; \ 1574 skb->_skb_refdst = 0UL; 1575 1576 #define tcp_skb_tsorted_restore(skb) \ 1577 skb->_skb_refdst = _save; \ 1578 } 1579 1580 void tcp_write_queue_purge(struct sock *sk); 1581 1582 static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk) 1583 { 1584 return skb_rb_first(&sk->tcp_rtx_queue); 1585 } 1586 1587 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) 1588 { 1589 return skb_peek(&sk->sk_write_queue); 1590 } 1591 1592 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) 1593 { 1594 return skb_peek_tail(&sk->sk_write_queue); 1595 } 1596 1597 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ 1598 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp) 1599 1600 static inline struct sk_buff *tcp_send_head(const struct sock *sk) 1601 { 1602 return skb_peek(&sk->sk_write_queue); 1603 } 1604 1605 static inline bool tcp_skb_is_last(const struct sock *sk, 1606 const struct sk_buff *skb) 1607 { 1608 return skb_queue_is_last(&sk->sk_write_queue, skb); 1609 } 1610 1611 static inline bool tcp_write_queue_empty(const struct sock *sk) 1612 { 1613 return skb_queue_empty(&sk->sk_write_queue); 1614 } 1615 1616 static inline bool tcp_rtx_queue_empty(const struct sock *sk) 1617 { 1618 return RB_EMPTY_ROOT(&sk->tcp_rtx_queue); 1619 } 1620 1621 static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk) 1622 { 1623 return tcp_rtx_queue_empty(sk) && tcp_write_queue_empty(sk); 1624 } 1625 1626 static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) 1627 { 1628 if (tcp_write_queue_empty(sk)) 1629 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 1630 } 1631 1632 static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) 1633 { 1634 __skb_queue_tail(&sk->sk_write_queue, skb); 1635 } 1636 1637 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) 1638 { 1639 __tcp_add_write_queue_tail(sk, skb); 1640 1641 /* Queue it, remembering where we must start sending. */ 1642 if (sk->sk_write_queue.next == skb) 1643 tcp_chrono_start(sk, TCP_CHRONO_BUSY); 1644 } 1645 1646 /* Insert new before skb on the write queue of sk. */ 1647 static inline void tcp_insert_write_queue_before(struct sk_buff *new, 1648 struct sk_buff *skb, 1649 struct sock *sk) 1650 { 1651 __skb_queue_before(&sk->sk_write_queue, skb, new); 1652 } 1653 1654 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) 1655 { 1656 tcp_skb_tsorted_anchor_cleanup(skb); 1657 __skb_unlink(skb, &sk->sk_write_queue); 1658 } 1659 1660 void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb); 1661 1662 static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk) 1663 { 1664 tcp_skb_tsorted_anchor_cleanup(skb); 1665 rb_erase(&skb->rbnode, &sk->tcp_rtx_queue); 1666 } 1667 1668 static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk) 1669 { 1670 list_del(&skb->tcp_tsorted_anchor); 1671 tcp_rtx_queue_unlink(skb, sk); 1672 sk_wmem_free_skb(sk, skb); 1673 } 1674 1675 static inline void tcp_push_pending_frames(struct sock *sk) 1676 { 1677 if (tcp_send_head(sk)) { 1678 struct tcp_sock *tp = tcp_sk(sk); 1679 1680 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); 1681 } 1682 } 1683 1684 /* Start sequence of the skb just after the highest skb with SACKed 1685 * bit, valid only if sacked_out > 0 or when the caller has ensured 1686 * validity by itself. 1687 */ 1688 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp) 1689 { 1690 if (!tp->sacked_out) 1691 return tp->snd_una; 1692 1693 if (tp->highest_sack == NULL) 1694 return tp->snd_nxt; 1695 1696 return TCP_SKB_CB(tp->highest_sack)->seq; 1697 } 1698 1699 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) 1700 { 1701 tcp_sk(sk)->highest_sack = skb_rb_next(skb); 1702 } 1703 1704 static inline struct sk_buff *tcp_highest_sack(struct sock *sk) 1705 { 1706 return tcp_sk(sk)->highest_sack; 1707 } 1708 1709 static inline void tcp_highest_sack_reset(struct sock *sk) 1710 { 1711 tcp_sk(sk)->highest_sack = tcp_rtx_queue_head(sk); 1712 } 1713 1714 /* Called when old skb is about to be deleted and replaced by new skb */ 1715 static inline void tcp_highest_sack_replace(struct sock *sk, 1716 struct sk_buff *old, 1717 struct sk_buff *new) 1718 { 1719 if (old == tcp_highest_sack(sk)) 1720 tcp_sk(sk)->highest_sack = new; 1721 } 1722 1723 /* This helper checks if socket has IP_TRANSPARENT set */ 1724 static inline bool inet_sk_transparent(const struct sock *sk) 1725 { 1726 switch (sk->sk_state) { 1727 case TCP_TIME_WAIT: 1728 return inet_twsk(sk)->tw_transparent; 1729 case TCP_NEW_SYN_RECV: 1730 return inet_rsk(inet_reqsk(sk))->no_srccheck; 1731 } 1732 return inet_sk(sk)->transparent; 1733 } 1734 1735 /* Determines whether this is a thin stream (which may suffer from 1736 * increased latency). Used to trigger latency-reducing mechanisms. 1737 */ 1738 static inline bool tcp_stream_is_thin(struct tcp_sock *tp) 1739 { 1740 return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp); 1741 } 1742 1743 /* /proc */ 1744 enum tcp_seq_states { 1745 TCP_SEQ_STATE_LISTENING, 1746 TCP_SEQ_STATE_ESTABLISHED, 1747 }; 1748 1749 int tcp_seq_open(struct inode *inode, struct file *file); 1750 1751 struct tcp_seq_afinfo { 1752 char *name; 1753 sa_family_t family; 1754 const struct file_operations *seq_fops; 1755 struct seq_operations seq_ops; 1756 }; 1757 1758 struct tcp_iter_state { 1759 struct seq_net_private p; 1760 sa_family_t family; 1761 enum tcp_seq_states state; 1762 struct sock *syn_wait_sk; 1763 int bucket, offset, sbucket, num; 1764 loff_t last_pos; 1765 }; 1766 1767 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo); 1768 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo); 1769 1770 extern struct request_sock_ops tcp_request_sock_ops; 1771 extern struct request_sock_ops tcp6_request_sock_ops; 1772 1773 void tcp_v4_destroy_sock(struct sock *sk); 1774 1775 struct sk_buff *tcp_gso_segment(struct sk_buff *skb, 1776 netdev_features_t features); 1777 struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); 1778 int tcp_gro_complete(struct sk_buff *skb); 1779 1780 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); 1781 1782 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) 1783 { 1784 struct net *net = sock_net((struct sock *)tp); 1785 return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat; 1786 } 1787 1788 static inline bool tcp_stream_memory_free(const struct sock *sk) 1789 { 1790 const struct tcp_sock *tp = tcp_sk(sk); 1791 u32 notsent_bytes = tp->write_seq - tp->snd_nxt; 1792 1793 return notsent_bytes < tcp_notsent_lowat(tp); 1794 } 1795 1796 #ifdef CONFIG_PROC_FS 1797 int tcp4_proc_init(void); 1798 void tcp4_proc_exit(void); 1799 #endif 1800 1801 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req); 1802 int tcp_conn_request(struct request_sock_ops *rsk_ops, 1803 const struct tcp_request_sock_ops *af_ops, 1804 struct sock *sk, struct sk_buff *skb); 1805 1806 /* TCP af-specific functions */ 1807 struct tcp_sock_af_ops { 1808 #ifdef CONFIG_TCP_MD5SIG 1809 struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk, 1810 const struct sock *addr_sk); 1811 int (*calc_md5_hash)(char *location, 1812 const struct tcp_md5sig_key *md5, 1813 const struct sock *sk, 1814 const struct sk_buff *skb); 1815 int (*md5_parse)(struct sock *sk, 1816 int optname, 1817 char __user *optval, 1818 int optlen); 1819 #endif 1820 }; 1821 1822 struct tcp_request_sock_ops { 1823 u16 mss_clamp; 1824 #ifdef CONFIG_TCP_MD5SIG 1825 struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk, 1826 const struct sock *addr_sk); 1827 int (*calc_md5_hash) (char *location, 1828 const struct tcp_md5sig_key *md5, 1829 const struct sock *sk, 1830 const struct sk_buff *skb); 1831 #endif 1832 void (*init_req)(struct request_sock *req, 1833 const struct sock *sk_listener, 1834 struct sk_buff *skb); 1835 #ifdef CONFIG_SYN_COOKIES 1836 __u32 (*cookie_init_seq)(const struct sk_buff *skb, 1837 __u16 *mss); 1838 #endif 1839 struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, 1840 const struct request_sock *req); 1841 u32 (*init_seq)(const struct sk_buff *skb); 1842 u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb); 1843 int (*send_synack)(const struct sock *sk, struct dst_entry *dst, 1844 struct flowi *fl, struct request_sock *req, 1845 struct tcp_fastopen_cookie *foc, 1846 enum tcp_synack_type synack_type); 1847 }; 1848 1849 #ifdef CONFIG_SYN_COOKIES 1850 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, 1851 const struct sock *sk, struct sk_buff *skb, 1852 __u16 *mss) 1853 { 1854 tcp_synq_overflow(sk); 1855 __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); 1856 return ops->cookie_init_seq(skb, mss); 1857 } 1858 #else 1859 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, 1860 const struct sock *sk, struct sk_buff *skb, 1861 __u16 *mss) 1862 { 1863 return 0; 1864 } 1865 #endif 1866 1867 int tcpv4_offload_init(void); 1868 1869 void tcp_v4_init(void); 1870 void tcp_init(void); 1871 1872 /* tcp_recovery.c */ 1873 extern void tcp_rack_mark_lost(struct sock *sk); 1874 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, 1875 u64 xmit_time); 1876 extern void tcp_rack_reo_timeout(struct sock *sk); 1877 extern void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs); 1878 1879 /* At how many usecs into the future should the RTO fire? */ 1880 static inline s64 tcp_rto_delta_us(const struct sock *sk) 1881 { 1882 const struct sk_buff *skb = tcp_rtx_queue_head(sk); 1883 u32 rto = inet_csk(sk)->icsk_rto; 1884 u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto); 1885 1886 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; 1887 } 1888 1889 /* 1890 * Save and compile IPv4 options, return a pointer to it 1891 */ 1892 static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net, 1893 struct sk_buff *skb) 1894 { 1895 const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt; 1896 struct ip_options_rcu *dopt = NULL; 1897 1898 if (opt->optlen) { 1899 int opt_size = sizeof(*dopt) + opt->optlen; 1900 1901 dopt = kmalloc(opt_size, GFP_ATOMIC); 1902 if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) { 1903 kfree(dopt); 1904 dopt = NULL; 1905 } 1906 } 1907 return dopt; 1908 } 1909 1910 /* locally generated TCP pure ACKs have skb->truesize == 2 1911 * (check tcp_send_ack() in net/ipv4/tcp_output.c ) 1912 * This is much faster than dissecting the packet to find out. 1913 * (Think of GRE encapsulations, IPv4, IPv6, ...) 1914 */ 1915 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb) 1916 { 1917 return skb->truesize == 2; 1918 } 1919 1920 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb) 1921 { 1922 skb->truesize = 2; 1923 } 1924 1925 static inline int tcp_inq(struct sock *sk) 1926 { 1927 struct tcp_sock *tp = tcp_sk(sk); 1928 int answ; 1929 1930 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 1931 answ = 0; 1932 } else if (sock_flag(sk, SOCK_URGINLINE) || 1933 !tp->urg_data || 1934 before(tp->urg_seq, tp->copied_seq) || 1935 !before(tp->urg_seq, tp->rcv_nxt)) { 1936 1937 answ = tp->rcv_nxt - tp->copied_seq; 1938 1939 /* Subtract 1, if FIN was received */ 1940 if (answ && sock_flag(sk, SOCK_DONE)) 1941 answ--; 1942 } else { 1943 answ = tp->urg_seq - tp->copied_seq; 1944 } 1945 1946 return answ; 1947 } 1948 1949 int tcp_peek_len(struct socket *sock); 1950 1951 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb) 1952 { 1953 u16 segs_in; 1954 1955 segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs); 1956 tp->segs_in += segs_in; 1957 if (skb->len > tcp_hdrlen(skb)) 1958 tp->data_segs_in += segs_in; 1959 } 1960 1961 /* 1962 * TCP listen path runs lockless. 1963 * We forced "struct sock" to be const qualified to make sure 1964 * we don't modify one of its field by mistake. 1965 * Here, we increment sk_drops which is an atomic_t, so we can safely 1966 * make sock writable again. 1967 */ 1968 static inline void tcp_listendrop(const struct sock *sk) 1969 { 1970 atomic_inc(&((struct sock *)sk)->sk_drops); 1971 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); 1972 } 1973 1974 enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer); 1975 1976 /* 1977 * Interface for adding Upper Level Protocols over TCP 1978 */ 1979 1980 #define TCP_ULP_NAME_MAX 16 1981 #define TCP_ULP_MAX 128 1982 #define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX) 1983 1984 enum { 1985 TCP_ULP_TLS, 1986 TCP_ULP_BPF, 1987 }; 1988 1989 struct tcp_ulp_ops { 1990 struct list_head list; 1991 1992 /* initialize ulp */ 1993 int (*init)(struct sock *sk); 1994 /* cleanup ulp */ 1995 void (*release)(struct sock *sk); 1996 1997 int uid; 1998 char name[TCP_ULP_NAME_MAX]; 1999 bool user_visible; 2000 struct module *owner; 2001 }; 2002 int tcp_register_ulp(struct tcp_ulp_ops *type); 2003 void tcp_unregister_ulp(struct tcp_ulp_ops *type); 2004 int tcp_set_ulp(struct sock *sk, const char *name); 2005 int tcp_set_ulp_id(struct sock *sk, const int ulp); 2006 void tcp_get_available_ulp(char *buf, size_t len); 2007 void tcp_cleanup_ulp(struct sock *sk); 2008 2009 /* Call BPF_SOCK_OPS program that returns an int. If the return value 2010 * is < 0, then the BPF op failed (for example if the loaded BPF 2011 * program does not support the chosen operation or there is no BPF 2012 * program loaded). 2013 */ 2014 #ifdef CONFIG_BPF 2015 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) 2016 { 2017 struct bpf_sock_ops_kern sock_ops; 2018 int ret; 2019 2020 memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp)); 2021 if (sk_fullsock(sk)) { 2022 sock_ops.is_fullsock = 1; 2023 sock_owned_by_me(sk); 2024 } 2025 2026 sock_ops.sk = sk; 2027 sock_ops.op = op; 2028 if (nargs > 0) 2029 memcpy(sock_ops.args, args, nargs * sizeof(*args)); 2030 2031 ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops); 2032 if (ret == 0) 2033 ret = sock_ops.reply; 2034 else 2035 ret = -1; 2036 return ret; 2037 } 2038 2039 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) 2040 { 2041 u32 args[2] = {arg1, arg2}; 2042 2043 return tcp_call_bpf(sk, op, 2, args); 2044 } 2045 2046 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, 2047 u32 arg3) 2048 { 2049 u32 args[3] = {arg1, arg2, arg3}; 2050 2051 return tcp_call_bpf(sk, op, 3, args); 2052 } 2053 2054 #else 2055 static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) 2056 { 2057 return -EPERM; 2058 } 2059 2060 static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) 2061 { 2062 return -EPERM; 2063 } 2064 2065 static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, 2066 u32 arg3) 2067 { 2068 return -EPERM; 2069 } 2070 2071 #endif 2072 2073 static inline u32 tcp_timeout_init(struct sock *sk) 2074 { 2075 int timeout; 2076 2077 timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL); 2078 2079 if (timeout <= 0) 2080 timeout = TCP_TIMEOUT_INIT; 2081 return timeout; 2082 } 2083 2084 static inline u32 tcp_rwnd_init_bpf(struct sock *sk) 2085 { 2086 int rwnd; 2087 2088 rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL); 2089 2090 if (rwnd < 0) 2091 rwnd = 0; 2092 return rwnd; 2093 } 2094 2095 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) 2096 { 2097 return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1); 2098 } 2099 2100 #if IS_ENABLED(CONFIG_SMC) 2101 extern struct static_key_false tcp_have_smc; 2102 #endif 2103 #endif /* _TCP_H */ 2104