/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/bpf-cgroup.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

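/* Illustrative only: 32768 is 0x8000, which a signed 16-bit variable reads
 * as -32768. That is the "signed 16bit maths" failure mode the cap above
 * guards against, and why windows beyond 32767 require the window scaling
 * option instead.
 */
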
/* Minimal accepted MSS. It is (60+60+8) - (20+20): an MTU of 128 bytes
 * (maximal 60 byte IP and 60 byte TCP headers plus 8 bytes of data)
 * minus the standard 20+20 bytes of headers that MSS already excludes.
 */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		1024

/* probing interval, default to 10 minutes as per RFC4821 */
#define TCP_PROBE_INTERVAL	600

/* Specify interval when tcp mtu probing will stop */
#define TCP_PROBE_THRESHOLD	8

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* Maximal number of window scale according to RFC1323 */
#define TCP_MAX_WSCALE		14U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs. Nevertheless
				 * this value corresponds to
				 * 63secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

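/* Illustrative arithmetic behind the comments above: with
 * TCP_TIMEOUT_INIT == 1 second and exponential backoff, the 6 SYN
 * retransmissions wait 1 + 2 + 4 + 8 + 16 + 32 = 63 seconds in total,
 * and the 5 SYNACK retransmissions wait 1 + 2 + 4 + 8 + 16 = 31 seconds.
 */
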
#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_MIN	(2U) /* Min timeout for TCP timers in jiffies */
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
						         * for local resources.
						         */
#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
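/* Illustrative arithmetic: TCP_PAWS_24DAYS is expressed in seconds,
 * 60 * 60 * 24 * 24 = 2073600s = 24 days. Presumably this sits just under
 * the ~24.8 days a 1 ms timestamp clock needs to advance 2^31 ticks, the
 * horizon beyond which PAWS timestamp comparisons become ambiguous.
 */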
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_FASTOPEN		34	/* Fast open (RFC7413) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989

/*
 *     TCP option lengths
 */

#define TCPOLEN_MSS            4
#define TCPOLEN_WINDOW         3
#define TCPOLEN_SACK_PERM      2
#define TCPOLEN_TIMESTAMP      10
#define TCPOLEN_MD5SIG         18
#define TCPOLEN_FASTOPEN_BASE  2
#define TCPOLEN_EXP_FASTOPEN_BASE  4

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

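/* Illustrative only: the _ALIGNED lengths pad options to 32-bit words as
 * they appear on the wire. The 10 byte timestamp option, for instance, is
 * commonly sent as
 *
 *	NOP, NOP, kind=8, len=10, TSval(4), TSecr(4)   == 12 bytes
 *
 * hence TCPOLEN_TSTAMP_ALIGNED == 12; likewise the 18 byte MD5 option
 * rounds up to 20.
 */
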
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400

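/* Illustrative only: the sysctl is a bitmap built from the flags above,
 * e.g.
 *
 *	sysctl_tcp_fastopen == 0x1	client support only
 *	sysctl_tcp_fastopen == 0x3	client + server support
 *	sysctl_tcp_fastopen == 0x203	also accept SYN data without a
 *					valid cookie option
 */
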
/* sysctl variables for tcp */
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_max_reordering;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_recovery;
#define TCP_RACK_LOSS_DETECTION  0x1 /* Use RACK to detect losses */

extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_min_rtt_wlen;
extern int sysctl_tcp_autocorking;
extern int sysctl_tcp_invalid_ratelimit;
extern int sysctl_tcp_pacing_ss_ratio;
extern int sysctl_tcp_pacing_ca_ratio;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern unsigned long tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
		return true;

	return tcp_memory_pressure;
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
        return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

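/* Illustrative only: these comparisons stay correct across 32-bit sequence
 * wraparound because the difference is evaluated as a signed value, e.g.
 *
 *	before(0xfffffff0U, 0x00000010U)		is true
 *	after(0x00000010U, 0xfffffff0U)			is true
 *	between(0x00000000U, 0xfffffff0U, 0x00000010U)	is true
 *
 * whereas a plain "<" on the raw values would give the opposite answer.
 */
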
static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

bool tcp_check_oom(struct sock *sk, int shift);


extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

void tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
			 size_t size, int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct tcphdr *th);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
unsigned int tcp_poll(struct file *file, struct socket *sock,
		      struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen);
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
		int flags, int *addr_len);
void tcp_parse_options(const struct net *net, const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
	TCP_SYNACK_NORMAL,
	TCP_SYNACK_FASTOPEN,
	TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc,
				enum tcp_synack_type synack_type);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
				 struct request_sock *req,
				 struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE	2
#define TCP_SYNCOOKIE_PERIOD	(60 * HZ)
#define TCP_SYNCOOKIE_VALID	(MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember time of last synqueue overflow
 * But do not dirty this field too often (once per second is enough)
 * It is racy as we do not hold a lock, but race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	unsigned long now = jiffies;

	if (time_after(now, last_overflow + HZ))
		tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;

	return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, TCP_SYNCOOKIE_PERIOD);
	return val;
}

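/* Illustrative only: a cookie minted while tcp_cookie_time() == N keeps
 * validating while the counter still reads N or N + 1, so its lifetime
 * is between 60 and 2 * 60 = 120 seconds depending on where in the
 * current 60 second period it was generated.
 */
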
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
u64 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(const struct net *net,
			     struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
		   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
		     int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
bool tcp_may_send_now(struct sock *sk);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
			     const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	hrtimer_cancel(&tcp_sk(sk)->pacing_timer);
	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window > TCP_MSS_DEFAULT)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

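/* Illustrative only: with tp->max_window == 65535 (above TCP_MSS_DEFAULT)
 * the cutoff is 65535 >> 1 == 32767, so a 40000 byte candidate is clamped
 * to 32767 while a 20000 byte candidate passes through unchanged.
 */
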
/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
	return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
	return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

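/* Illustrative only: if rcv_wup == 1000 and rcv_wnd == 65535 but the peer
 * has already pushed data up to rcv_nxt == 66635, then
 * 1000 + 65535 - 66635 == -100, so the remaining advertised window is
 * clamped to 0.
 */
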
/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP uses 32bit jiffies to save some space.
 * Note that this is different from tcp_time_stamp, which
 * historically has been the same until linux-4.13.
 */
#define tcp_jiffies32 ((u32)jiffies)

/*
 * Deliver a 32bit value for TCP timestamp option (RFC 7323)
 * It is no longer tied to jiffies, but to 1 ms clock.
 * Note: double check if you want to use tcp_jiffies32 instead of this.
 */
#define TCP_TS_HZ	1000

static inline u64 tcp_clock_ns(void)
{
	return local_clock();
}

static inline u64 tcp_clock_us(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
}

/* This should only be used in contexts where tp->tcp_mstamp is up to date */
static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}

/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
}


/* Refresh 1us clock of a TCP socket,
 * ensuring monotonically increasing values.
 */
static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_us();

	if (val > tp->tcp_mstamp)
		tp->tcp_mstamp = val;
}

static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
{
	return max_t(s64, t1 - t0, 0);
}

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}

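/* Illustrative only: with TCP_TS_HZ == 1000, the divisor
 * USEC_PER_SEC / TCP_TS_HZ is 1000, so a tp->tcp_mstamp of 5,000,000 us
 * yields tcp_time_stamp(tp) == 5000, i.e. a millisecond-granularity
 * timestamp.  Note also that tcp_stamp_us_delta() clamps a negative
 * difference to 0.
 */
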
#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN	(TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 * 	  tcp_gso_segs/size are used in write queue only,
		 *	  cf tcp_skb_pcount()/tcp_skb_mss()
		 */
		__u32		tcp_tw_isn;
		struct {
			u16	tcp_gso_segs;
			u16	tcp_gso_size;
		};

		/* Used to stash the receive timestamp while this skb is in the
		 * out of order queue, as skb->tstamp is overwritten by the
		 * rbnode.
		 */
		ktime_t		swtstamp;
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	__u8		txstamp_ack:1,	/* Record TX timestamp for ack? */
			eor:1,		/* Is skb MSG_EOR marked? */
			has_rxtstamp:1,	/* SKB has a RX timestamp	*/
			unused:5;
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct {
			/* There is space for up to 24 bytes */
			__u32 in_flight:30,/* Bytes in flight at transmit */
			      is_app_limited:1, /* cwnd not fully used? */
			      unused:1;
			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
			__u32 delivered;
			/* start of send pipeline phase */
			u64 first_tx_mstamp;
			/* when we reached the "delivered" count */
			u64 delivered_mstamp;
		} tx;   /* only used for outgoing skbs */
		union {
			struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_skb_parm	h6;
#endif
		} header;	/* For incoming skbs */
	};
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

	return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v6_sdif(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags))
		return TCP_SKB_CB(skb)->header.h6.iif;
#endif
	return 0;
}
#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
	    skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return true;
#endif
	return false;
}

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
	if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
		return TCP_SKB_CB(skb)->header.h4.iif;
#endif
	return 0;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
	return likely(!TCP_SKB_CB(skb)->eor);
}

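/* Illustrative only: a 30000 byte GSO skb cut into 1500 byte segments is
 * accounted as 20 packets on the wire, roughly:
 *
 *	tcp_skb_pcount_set(skb, 20);
 *	TCP_SKB_CB(skb)->tcp_gso_size = 1500;
 *
 * after which tcp_skb_pcount(skb) == 20 and tcp_skb_mss(skb) == 1500.
 */
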
/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
	CA_EVENT_NON_DELAYED_ACK,
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC	0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2

union tcp_cc_info;

struct ack_sample {
	u32 pkts_acked;
	s32 rtt_us;
	u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
	u64  prior_mstamp; /* starting timestamp for interval */
	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
	s32  delivered;		/* number of packets delivered over interval */
	long interval_us;	/* time for tp->delivered to incr "delivered" */
	long rtt_us;		/* RTT of last (S)ACKed packet (or -1) */
	int  losses;		/* number of packets marked lost upon ACK */
	u32  acked_sacked;	/* number of packets newly (S)ACKed upon ACK */
	u32  prior_in_flight;	/* in flight before this ACK */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
};

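/* A hedged sketch (not kernel code): a cong_control hook would typically
 * reduce a valid sample to a delivery rate, e.g. in packets per second:
 *
 *	if (rs->delivered > 0 && rs->interval_us > 0)
 *		rate_pps = (u64)rs->delivered * USEC_PER_SEC /
 *			   rs->interval_us;
 *
 * skipping samples whose "delivered" or "interval_us" is negative, as the
 * comment above requires.
 */
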
struct tcp_congestion_ops {
	struct list_head	list;
	u32 key;
	u32 flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);
	/* new value of cwnd after loss (required) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
	/* suggest number of segments for each skb to transmit (optional) */
	u32 (*tso_segs_goal)(struct sock *sk);
	/* returns the multiplier used in tcp_sndbuf_expand (optional) */
	u32 (*sndbuf_expand)(struct sock *sk);
	/* call when packets are delivered to update cwnd and pacing rate,
	 * after all the ca_state processing. (optional)
	 */
	void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
	/* get info for inet_diag (optional) */
	size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
			   union tcp_cc_info *info);

	char 		name[TCP_CA_NAME_MAX];
	struct module 	*owner;
};

sock *sk); 98777bfc174SYuchung Cheng /* returns the multiplier used in tcp_sndbuf_expand (optional) */ 98877bfc174SYuchung Cheng u32 (*sndbuf_expand)(struct sock *sk); 989c0402760SYuchung Cheng /* call when packets are delivered to update cwnd and pacing rate, 990c0402760SYuchung Cheng * after all the ca_state processing. (optional) 991c0402760SYuchung Cheng */ 992c0402760SYuchung Cheng void (*cong_control)(struct sock *sk, const struct rate_sample *rs); 99373c1f4a0SArnaldo Carvalho de Melo /* get info for inet_diag (optional) */ 99464f40ff5SEric Dumazet size_t (*get_info)(struct sock *sk, u32 ext, int *attr, 99564f40ff5SEric Dumazet union tcp_cc_info *info); 996317a76f9SStephen Hemminger 997317a76f9SStephen Hemminger char name[TCP_CA_NAME_MAX]; 998317a76f9SStephen Hemminger struct module *owner; 999317a76f9SStephen Hemminger }; 1000317a76f9SStephen Hemminger 10015c9f3023SJoe Perches int tcp_register_congestion_control(struct tcp_congestion_ops *type); 10025c9f3023SJoe Perches void tcp_unregister_congestion_control(struct tcp_congestion_ops *type); 1003317a76f9SStephen Hemminger 100455d8694fSFlorian Westphal void tcp_assign_congestion_control(struct sock *sk); 10055c9f3023SJoe Perches void tcp_init_congestion_control(struct sock *sk); 10065c9f3023SJoe Perches void tcp_cleanup_congestion_control(struct sock *sk); 10075c9f3023SJoe Perches int tcp_set_default_congestion_control(const char *name); 10085c9f3023SJoe Perches void tcp_get_default_congestion_control(char *name); 10095c9f3023SJoe Perches void tcp_get_available_congestion_control(char *buf, size_t len); 10105c9f3023SJoe Perches void tcp_get_allowed_congestion_control(char *buf, size_t len); 10115c9f3023SJoe Perches int tcp_set_allowed_congestion_control(char *allowed); 101291b5b21cSLawrence Brakmo int tcp_set_congestion_control(struct sock *sk, const char *name, bool load); 101391b5b21cSLawrence Brakmo void tcp_reinit_congestion_control(struct sock *sk, 101491b5b21cSLawrence Brakmo const struct tcp_congestion_ops *ca); 1015e73ebb08SNeal Cardwell u32 tcp_slow_start(struct tcp_sock *tp, u32 acked); 1016e73ebb08SNeal Cardwell void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked); 1017317a76f9SStephen Hemminger 10185c9f3023SJoe Perches u32 tcp_reno_ssthresh(struct sock *sk); 1019e9799183SFlorian Westphal u32 tcp_reno_undo_cwnd(struct sock *sk); 102024901551SEric Dumazet void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); 1021a8acfbacSDavid S. 
Miller extern struct tcp_congestion_ops tcp_reno; 1022317a76f9SStephen Hemminger 1023c5c6a8abSDaniel Borkmann struct tcp_congestion_ops *tcp_ca_find_key(u32 key); 1024c3a8d947SDaniel Borkmann u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca); 1025ea697639SDaniel Borkmann #ifdef CONFIG_INET 1026c5c6a8abSDaniel Borkmann char *tcp_ca_get_name_by_key(u32 key, char *buffer); 1027ea697639SDaniel Borkmann #else 1028ea697639SDaniel Borkmann static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer) 1029ea697639SDaniel Borkmann { 1030ea697639SDaniel Borkmann return NULL; 1031ea697639SDaniel Borkmann } 1032ea697639SDaniel Borkmann #endif 1033c5c6a8abSDaniel Borkmann 103430e502a3SDaniel Borkmann static inline bool tcp_ca_needs_ecn(const struct sock *sk) 103530e502a3SDaniel Borkmann { 103630e502a3SDaniel Borkmann const struct inet_connection_sock *icsk = inet_csk(sk); 103730e502a3SDaniel Borkmann 103830e502a3SDaniel Borkmann return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN; 103930e502a3SDaniel Borkmann } 104030e502a3SDaniel Borkmann 10416687e988SArnaldo Carvalho de Melo static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) 1042317a76f9SStephen Hemminger { 10436687e988SArnaldo Carvalho de Melo struct inet_connection_sock *icsk = inet_csk(sk); 10446687e988SArnaldo Carvalho de Melo 10456687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_ops->set_state) 10466687e988SArnaldo Carvalho de Melo icsk->icsk_ca_ops->set_state(sk, ca_state); 10476687e988SArnaldo Carvalho de Melo icsk->icsk_ca_state = ca_state; 1048317a76f9SStephen Hemminger } 1049317a76f9SStephen Hemminger 10506687e988SArnaldo Carvalho de Melo static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) 1051317a76f9SStephen Hemminger { 10526687e988SArnaldo Carvalho de Melo const struct inet_connection_sock *icsk = inet_csk(sk); 10536687e988SArnaldo Carvalho de Melo 10546687e988SArnaldo Carvalho de Melo if (icsk->icsk_ca_ops->cwnd_event) 10556687e988SArnaldo Carvalho de Melo icsk->icsk_ca_ops->cwnd_event(sk, event); 1056317a76f9SStephen Hemminger } 1057317a76f9SStephen Hemminger 1058b9f64820SYuchung Cheng /* From tcp_rate.c */ 1059b9f64820SYuchung Cheng void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb); 1060b9f64820SYuchung Cheng void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb, 1061b9f64820SYuchung Cheng struct rate_sample *rs); 1062b9f64820SYuchung Cheng void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost, 106388d5c650SEric Dumazet struct rate_sample *rs); 1064d7722e85SSoheil Hassas Yeganeh void tcp_rate_check_app_limited(struct sock *sk); 1065b9f64820SYuchung Cheng 1066e60402d0SIlpo Järvinen /* These functions determine how the current flow behaves in respect of SACK 1067e60402d0SIlpo Järvinen * handling. SACK is negotiated with the peer, and therefore it can vary 1068e60402d0SIlpo Järvinen * between different flows. 
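 * (Editor's note: the three helpers below all key off tp->rx_opt.sack_ok,
 * which is filled in while the peer's SYN options are parsed.)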
1069e60402d0SIlpo Järvinen * 1070e60402d0SIlpo Järvinen * tcp_is_sack - SACK enabled 1071e60402d0SIlpo Järvinen * tcp_is_reno - No SACK 1072e60402d0SIlpo Järvinen * tcp_is_fack - FACK enabled, implies SACK enabled 1073e60402d0SIlpo Järvinen */ 1074e60402d0SIlpo Järvinen static inline int tcp_is_sack(const struct tcp_sock *tp) 1075e60402d0SIlpo Järvinen { 1076e60402d0SIlpo Järvinen return tp->rx_opt.sack_ok; 1077e60402d0SIlpo Järvinen } 1078e60402d0SIlpo Järvinen 1079a2a385d6SEric Dumazet static inline bool tcp_is_reno(const struct tcp_sock *tp) 1080e60402d0SIlpo Järvinen { 1081e60402d0SIlpo Järvinen return !tcp_is_sack(tp); 1082e60402d0SIlpo Järvinen } 1083e60402d0SIlpo Järvinen 1084a2a385d6SEric Dumazet static inline bool tcp_is_fack(const struct tcp_sock *tp) 1085e60402d0SIlpo Järvinen { 1086ab56222aSVijay Subramanian return tp->rx_opt.sack_ok & TCP_FACK_ENABLED; 1087e60402d0SIlpo Järvinen } 1088e60402d0SIlpo Järvinen 1089e60402d0SIlpo Järvinen static inline void tcp_enable_fack(struct tcp_sock *tp) 1090e60402d0SIlpo Järvinen { 1091ab56222aSVijay Subramanian tp->rx_opt.sack_ok |= TCP_FACK_ENABLED; 1092e60402d0SIlpo Järvinen } 1093e60402d0SIlpo Järvinen 109483ae4088SIlpo Järvinen static inline unsigned int tcp_left_out(const struct tcp_sock *tp) 109583ae4088SIlpo Järvinen { 109683ae4088SIlpo Järvinen return tp->sacked_out + tp->lost_out; 109783ae4088SIlpo Järvinen } 109883ae4088SIlpo Järvinen 10991da177e4SLinus Torvalds /* This determines how many packets are "in the network" to the best 11001da177e4SLinus Torvalds * of our knowledge. In many cases it is conservative, but where 11011da177e4SLinus Torvalds * detailed information is available from the receiver (via SACK 11021da177e4SLinus Torvalds * blocks etc.) we can make more aggressive calculations. 11031da177e4SLinus Torvalds * 11041da177e4SLinus Torvalds * Use this for decisions involving congestion control, use just 11051da177e4SLinus Torvalds * tp->packets_out to determine if the send queue is empty or not. 
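 *
 * (Editor's note) In symbols, the helper below computes
 * in_flight = packets_out - (sacked_out + lost_out) + retrans_out,
 * where the parenthesized term is tcp_left_out().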
11061da177e4SLinus Torvalds * 11071da177e4SLinus Torvalds * Read this equation as: 11081da177e4SLinus Torvalds * 11091da177e4SLinus Torvalds * "Packets sent once on transmission queue" MINUS 11101da177e4SLinus Torvalds * "Packets left network, but not honestly ACKed yet" PLUS 11111da177e4SLinus Torvalds * "Packets fast retransmitted" 11121da177e4SLinus Torvalds */ 111340efc6faSStephen Hemminger static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp) 11141da177e4SLinus Torvalds { 111583ae4088SIlpo Järvinen return tp->packets_out - tcp_left_out(tp) + tp->retrans_out; 11161da177e4SLinus Torvalds } 11171da177e4SLinus Torvalds 11180b6a05c1SIlpo Järvinen #define TCP_INFINITE_SSTHRESH 0x7fffffff 11190b6a05c1SIlpo Järvinen 1120071d5080SYuchung Cheng static inline bool tcp_in_slow_start(const struct tcp_sock *tp) 1121071d5080SYuchung Cheng { 112276174004SYuchung Cheng return tp->snd_cwnd < tp->snd_ssthresh; 1123071d5080SYuchung Cheng } 1124071d5080SYuchung Cheng 11250b6a05c1SIlpo Järvinen static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp) 11260b6a05c1SIlpo Järvinen { 11270b6a05c1SIlpo Järvinen return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH; 11280b6a05c1SIlpo Järvinen } 11290b6a05c1SIlpo Järvinen 1130684bad11SYuchung Cheng static inline bool tcp_in_cwnd_reduction(const struct sock *sk) 1131684bad11SYuchung Cheng { 1132684bad11SYuchung Cheng return (TCPF_CA_CWR | TCPF_CA_Recovery) & 1133684bad11SYuchung Cheng (1 << inet_csk(sk)->icsk_ca_state); 1134684bad11SYuchung Cheng } 1135684bad11SYuchung Cheng 11361da177e4SLinus Torvalds /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd. 1137684bad11SYuchung Cheng * The exception is cwnd reduction phase, when cwnd is decreasing towards 11381da177e4SLinus Torvalds * ssthresh. 11391da177e4SLinus Torvalds */ 11406687e988SArnaldo Carvalho de Melo static inline __u32 tcp_current_ssthresh(const struct sock *sk) 11411da177e4SLinus Torvalds { 11426687e988SArnaldo Carvalho de Melo const struct tcp_sock *tp = tcp_sk(sk); 1143cf533ea5SEric Dumazet 1144684bad11SYuchung Cheng if (tcp_in_cwnd_reduction(sk)) 11451da177e4SLinus Torvalds return tp->snd_ssthresh; 11461da177e4SLinus Torvalds else 11471da177e4SLinus Torvalds return max(tp->snd_ssthresh, 11481da177e4SLinus Torvalds ((tp->snd_cwnd >> 1) + 11491da177e4SLinus Torvalds (tp->snd_cwnd >> 2))); 11501da177e4SLinus Torvalds } 11511da177e4SLinus Torvalds 1152b9c4595bSIlpo Järvinen /* Use define here intentionally to get WARN_ON location shown at the caller */ 1153b9c4595bSIlpo Järvinen #define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) 11541da177e4SLinus Torvalds 11555ee2c941SChristoph Paasch void tcp_enter_cwr(struct sock *sk); 11565c9f3023SJoe Perches __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst); 11571da177e4SLinus Torvalds 11586b5a5c0dSNeal Cardwell /* The maximum number of MSS of available cwnd for which TSO defers 11596b5a5c0dSNeal Cardwell * sending if not using sysctl_tcp_tso_win_divisor. 
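 * (Editor's note: the helper below returns a fixed 3, i.e. TSO defers for
 * at most 3 MSS worth of available cwnd in that case.)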
11606b5a5c0dSNeal Cardwell */ 11616b5a5c0dSNeal Cardwell static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp) 11626b5a5c0dSNeal Cardwell { 11636b5a5c0dSNeal Cardwell return 3; 11646b5a5c0dSNeal Cardwell } 11656b5a5c0dSNeal Cardwell 116690840defSIlpo Järvinen /* Returns end sequence number of the receiver's advertised window */ 116790840defSIlpo Järvinen static inline u32 tcp_wnd_end(const struct tcp_sock *tp) 116890840defSIlpo Järvinen { 116990840defSIlpo Järvinen return tp->snd_una + tp->snd_wnd; 117090840defSIlpo Järvinen } 1171e114a710SEric Dumazet 1172e114a710SEric Dumazet /* We follow the spirit of RFC2861 to validate cwnd but implement a more 1173e114a710SEric Dumazet * flexible approach. The RFC suggests cwnd should not be raised unless 1174ca8a2263SNeal Cardwell * it was fully used previously. And that's exactly what we do in 1175ca8a2263SNeal Cardwell * congestion avoidance mode. But in slow start we allow cwnd to grow 1176ca8a2263SNeal Cardwell * as long as the application has used half the cwnd. 1177e114a710SEric Dumazet * Example : 1178e114a710SEric Dumazet * cwnd is 10 (IW10), but application sends 9 frames. 1179e114a710SEric Dumazet * We allow cwnd to reach 18 when all frames are ACKed. 1180e114a710SEric Dumazet * This check is safe because it's as aggressive as slow start which already 1181e114a710SEric Dumazet * risks 100% overshoot. The advantage is that we discourage application to 1182e114a710SEric Dumazet * either send more filler packets or data to artificially blow up the cwnd 1183e114a710SEric Dumazet * usage, and allow application-limited process to probe bw more aggressively. 1184e114a710SEric Dumazet */ 118524901551SEric Dumazet static inline bool tcp_is_cwnd_limited(const struct sock *sk) 1186e114a710SEric Dumazet { 1187e114a710SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1188e114a710SEric Dumazet 1189ca8a2263SNeal Cardwell /* If in slow start, ensure cwnd grows to twice what was ACKed. */ 1190071d5080SYuchung Cheng if (tcp_in_slow_start(tp)) 1191ca8a2263SNeal Cardwell return tp->snd_cwnd < 2 * tp->max_packets_out; 1192ca8a2263SNeal Cardwell 1193ca8a2263SNeal Cardwell return tp->is_cwnd_limited; 1194e114a710SEric Dumazet } 1195f4805edeSStephen Hemminger 119621c8fe99SEric Dumazet /* Something is really bad, we could not queue an additional packet, 119721c8fe99SEric Dumazet * because qdisc is full or receiver sent a 0 window. 
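 * (Editor's note: a zero window means the peer's receive buffer is full,
 * so the sender can only probe until the window reopens.)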
119821c8fe99SEric Dumazet * We do not want to add fuel to the fire, or abort too early, 119921c8fe99SEric Dumazet * so make sure the timer we arm now is at least 200ms in the future, 120021c8fe99SEric Dumazet * regardless of current icsk_rto value (as it could be ~2ms) 120121c8fe99SEric Dumazet */ 120221c8fe99SEric Dumazet static inline unsigned long tcp_probe0_base(const struct sock *sk) 120321c8fe99SEric Dumazet { 120421c8fe99SEric Dumazet return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN); 120521c8fe99SEric Dumazet } 120621c8fe99SEric Dumazet 120721c8fe99SEric Dumazet /* Variant of inet_csk_rto_backoff() used for zero window probes */ 120821c8fe99SEric Dumazet static inline unsigned long tcp_probe0_when(const struct sock *sk, 120921c8fe99SEric Dumazet unsigned long max_when) 121021c8fe99SEric Dumazet { 121121c8fe99SEric Dumazet u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff; 121221c8fe99SEric Dumazet 121321c8fe99SEric Dumazet return (unsigned long)min_t(u64, when, max_when); 121421c8fe99SEric Dumazet } 121521c8fe99SEric Dumazet 12169e412ba7SIlpo Järvinen static inline void tcp_check_probe_timer(struct sock *sk) 12171da177e4SLinus Torvalds { 121821c8fe99SEric Dumazet if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending) 12193f421baaSArnaldo Carvalho de Melo inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 122021c8fe99SEric Dumazet tcp_probe0_base(sk), TCP_RTO_MAX); 12211da177e4SLinus Torvalds } 12221da177e4SLinus Torvalds 1223ee7537b6SHantzis Fotis static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq) 12241da177e4SLinus Torvalds { 12251da177e4SLinus Torvalds tp->snd_wl1 = seq; 12261da177e4SLinus Torvalds } 12271da177e4SLinus Torvalds 1228ee7537b6SHantzis Fotis static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq) 12291da177e4SLinus Torvalds { 12301da177e4SLinus Torvalds tp->snd_wl1 = seq; 12311da177e4SLinus Torvalds } 12321da177e4SLinus Torvalds 12331da177e4SLinus Torvalds /* 12341da177e4SLinus Torvalds * Calculate(/check) TCP checksum 12351da177e4SLinus Torvalds */ 1236ba7808eaSFrederik Deweerdt static inline __sum16 tcp_v4_check(int len, __be32 saddr, 1237ba7808eaSFrederik Deweerdt __be32 daddr, __wsum base) 12381da177e4SLinus Torvalds { 12391da177e4SLinus Torvalds return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base); 12401da177e4SLinus Torvalds } 12411da177e4SLinus Torvalds 1242b51655b9SAl Viro static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb) 12431da177e4SLinus Torvalds { 1244fb286bb2SHerbert Xu return __skb_checksum_complete(skb); 12451da177e4SLinus Torvalds } 12461da177e4SLinus Torvalds 1247a2a385d6SEric Dumazet static inline bool tcp_checksum_complete(struct sk_buff *skb) 12481da177e4SLinus Torvalds { 124960476372SHerbert Xu return !skb_csum_unnecessary(skb) && 12501da177e4SLinus Torvalds __tcp_checksum_complete(skb); 12511da177e4SLinus Torvalds } 12521da177e4SLinus Torvalds 1253c9c33212SEric Dumazet bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); 1254ac6e7800SEric Dumazet int tcp_filter(struct sock *sk, struct sk_buff *skb); 12551da177e4SLinus Torvalds 12561da177e4SLinus Torvalds #undef STATE_TRACE 12571da177e4SLinus Torvalds 12581da177e4SLinus Torvalds #ifdef STATE_TRACE 12591da177e4SLinus Torvalds static const char *statename[]={ 12601da177e4SLinus Torvalds "Unused","Established","Syn Sent","Syn Recv", 12611da177e4SLinus Torvalds "Fin Wait 1","Fin Wait 2","Time Wait", "Close", 12621da177e4SLinus Torvalds "Close Wait","Last ACK","Listen","Closing" 12631da177e4SLinus Torvalds }; 
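/* Editor's note: the helper below is an illustrative sketch added in
 * editing, not part of the original header. It shows how the statename[]
 * table above could be used for debug logging when STATE_TRACE is defined.
 * The name tcp_trace_state_change() is hypothetical, and the sketch assumes
 * both states index the classic table above (TCP_NEW_SYN_RECV would not).
 */
static inline void tcp_trace_state_change(const struct sock *sk, int new_state)
{
	/* sk_state and statename[] are both indexed by TCP_* state values */
	pr_debug("tcp: sk %p %s -> %s\n", sk,
		 statename[sk->sk_state], statename[new_state]);
}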
12641da177e4SLinus Torvalds #endif 12655c9f3023SJoe Perches void tcp_set_state(struct sock *sk, int state); 12661da177e4SLinus Torvalds 12675c9f3023SJoe Perches void tcp_done(struct sock *sk); 12681da177e4SLinus Torvalds 1269c1e64e29SLorenzo Colitti int tcp_abort(struct sock *sk, int err); 1270c1e64e29SLorenzo Colitti 127140efc6faSStephen Hemminger static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) 12721da177e4SLinus Torvalds { 12731da177e4SLinus Torvalds rx_opt->dsack = 0; 12741da177e4SLinus Torvalds rx_opt->num_sacks = 0; 12751da177e4SLinus Torvalds } 12761da177e4SLinus Torvalds 12775c9f3023SJoe Perches u32 tcp_default_init_rwnd(u32 mss); 12786f021c62SEric Dumazet void tcp_cwnd_restart(struct sock *sk, s32 delta); 12796f021c62SEric Dumazet 12806f021c62SEric Dumazet static inline void tcp_slow_start_after_idle_check(struct sock *sk) 12816f021c62SEric Dumazet { 12821b1fc3fdSWei Wang const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; 12836f021c62SEric Dumazet struct tcp_sock *tp = tcp_sk(sk); 12846f021c62SEric Dumazet s32 delta; 12856f021c62SEric Dumazet 12861b1fc3fdSWei Wang if (!sysctl_tcp_slow_start_after_idle || tp->packets_out || 12871b1fc3fdSWei Wang ca_ops->cong_control) 12886f021c62SEric Dumazet return; 1289d635fbe2SEric Dumazet delta = tcp_jiffies32 - tp->lsndtime; 12906f021c62SEric Dumazet if (delta > inet_csk(sk)->icsk_rto) 12916f021c62SEric Dumazet tcp_cwnd_restart(sk, delta); 12926f021c62SEric Dumazet } 129385f16525SYuchung Cheng 12941da177e4SLinus Torvalds /* Determine a window scaling and initial window to offer. */ 12955c9f3023SJoe Perches void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd, 12965c9f3023SJoe Perches __u32 *window_clamp, int wscale_ok, 12975c9f3023SJoe Perches __u8 *rcv_wscale, __u32 init_rcv_wnd); 12981da177e4SLinus Torvalds 12991da177e4SLinus Torvalds static inline int tcp_win_from_space(int space) 13001da177e4SLinus Torvalds { 1301c4836742SGao Feng int tcp_adv_win_scale = sysctl_tcp_adv_win_scale; 1302c4836742SGao Feng 1303c4836742SGao Feng return tcp_adv_win_scale <= 0 ? 
1304c4836742SGao Feng (space>>(-tcp_adv_win_scale)) : 1305c4836742SGao Feng space - (space>>tcp_adv_win_scale); 13061da177e4SLinus Torvalds } 13071da177e4SLinus Torvalds 13081da177e4SLinus Torvalds /* Note: caller must be prepared to deal with negative returns */ 13091da177e4SLinus Torvalds static inline int tcp_space(const struct sock *sk) 13101da177e4SLinus Torvalds { 13111da177e4SLinus Torvalds return tcp_win_from_space(sk->sk_rcvbuf - 13121da177e4SLinus Torvalds atomic_read(&sk->sk_rmem_alloc)); 13131da177e4SLinus Torvalds } 13141da177e4SLinus Torvalds 13151da177e4SLinus Torvalds static inline int tcp_full_space(const struct sock *sk) 13161da177e4SLinus Torvalds { 13171da177e4SLinus Torvalds return tcp_win_from_space(sk->sk_rcvbuf); 13181da177e4SLinus Torvalds } 13191da177e4SLinus Torvalds 1320843f4a55SYuchung Cheng extern void tcp_openreq_init_rwin(struct request_sock *req, 1321b1964b5fSEric Dumazet const struct sock *sk_listener, 1322b1964b5fSEric Dumazet const struct dst_entry *dst); 1323843f4a55SYuchung Cheng 13245c9f3023SJoe Perches void tcp_enter_memory_pressure(struct sock *sk); 132506044751SEric Dumazet void tcp_leave_memory_pressure(struct sock *sk); 13261da177e4SLinus Torvalds 13271da177e4SLinus Torvalds static inline int keepalive_intvl_when(const struct tcp_sock *tp) 13281da177e4SLinus Torvalds { 1329b840d15dSNikolay Borisov struct net *net = sock_net((struct sock *)tp); 1330b840d15dSNikolay Borisov 1331b840d15dSNikolay Borisov return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl; 13321da177e4SLinus Torvalds } 13331da177e4SLinus Torvalds 13341da177e4SLinus Torvalds static inline int keepalive_time_when(const struct tcp_sock *tp) 13351da177e4SLinus Torvalds { 133613b287e8SNikolay Borisov struct net *net = sock_net((struct sock *)tp); 133713b287e8SNikolay Borisov 133813b287e8SNikolay Borisov return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time; 13391da177e4SLinus Torvalds } 13401da177e4SLinus Torvalds 1341df19a626SEric Dumazet static inline int keepalive_probes(const struct tcp_sock *tp) 1342df19a626SEric Dumazet { 13439bd6861bSNikolay Borisov struct net *net = sock_net((struct sock *)tp); 13449bd6861bSNikolay Borisov 13459bd6861bSNikolay Borisov return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes; 1346df19a626SEric Dumazet } 1347df19a626SEric Dumazet 13486c37e5deSFlavio Leitner static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) 13496c37e5deSFlavio Leitner { 13506c37e5deSFlavio Leitner const struct inet_connection_sock *icsk = &tp->inet_conn; 13516c37e5deSFlavio Leitner 135270eabf0eSEric Dumazet return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime, 135370eabf0eSEric Dumazet tcp_jiffies32 - tp->rcv_tstamp); 13546c37e5deSFlavio Leitner } 13556c37e5deSFlavio Leitner 1356463c84b9SArnaldo Carvalho de Melo static inline int tcp_fin_time(const struct sock *sk) 13571da177e4SLinus Torvalds { 13581e579caaSNikolay Borisov int fin_timeout = tcp_sk(sk)->linger2 ? 
: sock_net(sk)->ipv4.sysctl_tcp_fin_timeout; 1359463c84b9SArnaldo Carvalho de Melo const int rto = inet_csk(sk)->icsk_rto; 13601da177e4SLinus Torvalds 1361463c84b9SArnaldo Carvalho de Melo if (fin_timeout < (rto << 2) - (rto >> 1)) 1362463c84b9SArnaldo Carvalho de Melo fin_timeout = (rto << 2) - (rto >> 1); 13631da177e4SLinus Torvalds 13641da177e4SLinus Torvalds return fin_timeout; 13651da177e4SLinus Torvalds } 13661da177e4SLinus Torvalds 1367a2a385d6SEric Dumazet static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, 1368c887e6d2SIlpo Järvinen int paws_win) 13691da177e4SLinus Torvalds { 1370c887e6d2SIlpo Järvinen if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) 1371a2a385d6SEric Dumazet return true; 1372c887e6d2SIlpo Järvinen if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)) 1373a2a385d6SEric Dumazet return true; 1374bc2ce894SEric Dumazet /* 1375bc2ce894SEric Dumazet * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, 1376bc2ce894SEric Dumazet * then following tcp messages have valid values. Ignore 0 value, 1377bc2ce894SEric Dumazet * or else 'negative' tsval might forbid us to accept their packets. 1378bc2ce894SEric Dumazet */ 1379bc2ce894SEric Dumazet if (!rx_opt->ts_recent) 1380a2a385d6SEric Dumazet return true; 1381a2a385d6SEric Dumazet return false; 1382c887e6d2SIlpo Järvinen } 1383c887e6d2SIlpo Järvinen 1384a2a385d6SEric Dumazet static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, 1385c887e6d2SIlpo Järvinen int rst) 1386c887e6d2SIlpo Järvinen { 1387c887e6d2SIlpo Järvinen if (tcp_paws_check(rx_opt, 0)) 1388a2a385d6SEric Dumazet return false; 13891da177e4SLinus Torvalds 13901da177e4SLinus Torvalds /* RST segments are not recommended to carry timestamp, 13911da177e4SLinus Torvalds and, if they do, it is recommended to ignore PAWS because 13921da177e4SLinus Torvalds "their cleanup function should take precedence over timestamps." 13931da177e4SLinus Torvalds Certainly, this is a mistake. It is necessary to understand the reasons 13941da177e4SLinus Torvalds for this constraint before relaxing it: if the peer reboots, its clock may go 13951da177e4SLinus Torvalds out-of-sync and half-open connections will not be reset. 13961da177e4SLinus Torvalds Actually, the problem would not exist if all 13971da177e4SLinus Torvalds the implementations followed the draft about maintaining clock state 13981da177e4SLinus Torvalds via reboots. Linux-2.2 DOES NOT! 13991da177e4SLinus Torvalds 14001da177e4SLinus Torvalds However, we can relax time bounds for RST segments to MSL. 
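   (Editor's note: TCP_PAWS_MSL in the check below encodes that MSL bound
   in seconds, measured against the last ts_recent update.)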
14011da177e4SLinus Torvalds */ 14029d729f72SJames Morris if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) 1403a2a385d6SEric Dumazet return false; 1404a2a385d6SEric Dumazet return true; 14051da177e4SLinus Torvalds } 14061da177e4SLinus Torvalds 14077970ddc8SEric Dumazet bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb, 14087970ddc8SEric Dumazet int mib_idx, u32 *last_oow_ack_time); 1409032ee423SNeal Cardwell 1410a9c19329SPavel Emelyanov static inline void tcp_mib_init(struct net *net) 14111da177e4SLinus Torvalds { 14121da177e4SLinus Torvalds /* See RFC 2012 */ 14136aef70a8SEric Dumazet TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1); 14146aef70a8SEric Dumazet TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ); 14156aef70a8SEric Dumazet TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ); 14166aef70a8SEric Dumazet TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1); 14171da177e4SLinus Torvalds } 14181da177e4SLinus Torvalds 14196a438bbeSStephen Hemminger /* from STCP */ 1420ef9da47cSIlpo Järvinen static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp) 14210800f170SDavid S. Miller { 14226a438bbeSStephen Hemminger tp->lost_skb_hint = NULL; 1423ef9da47cSIlpo Järvinen } 1424ef9da47cSIlpo Järvinen 1425ef9da47cSIlpo Järvinen static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) 1426ef9da47cSIlpo Järvinen { 1427ef9da47cSIlpo Järvinen tcp_clear_retrans_hints_partial(tp); 14286a438bbeSStephen Hemminger tp->retransmit_skb_hint = NULL; 1429b7689205SIlpo Järvinen } 1430b7689205SIlpo Järvinen 1431a915da9bSEric Dumazet union tcp_md5_addr { 1432a915da9bSEric Dumazet struct in_addr a4; 1433a915da9bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6) 1434a915da9bSEric Dumazet struct in6_addr a6; 1435a915da9bSEric Dumazet #endif 1436a915da9bSEric Dumazet }; 1437a915da9bSEric Dumazet 1438cfb6eeb4SYOSHIFUJI Hideaki /* - key database */ 1439cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_key { 1440a915da9bSEric Dumazet struct hlist_node node; 1441cfb6eeb4SYOSHIFUJI Hideaki u8 keylen; 1442a915da9bSEric Dumazet u8 family; /* AF_INET or AF_INET6 */ 1443a915da9bSEric Dumazet union tcp_md5_addr addr; 14446797318eSIvan Delalande u8 prefixlen; 1445a915da9bSEric Dumazet u8 key[TCP_MD5SIG_MAXKEYLEN]; 1446a915da9bSEric Dumazet struct rcu_head rcu; 1447cfb6eeb4SYOSHIFUJI Hideaki }; 1448cfb6eeb4SYOSHIFUJI Hideaki 1449cfb6eeb4SYOSHIFUJI Hideaki /* - sock block */ 1450cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_info { 1451a915da9bSEric Dumazet struct hlist_head head; 1452a8afca03SEric Dumazet struct rcu_head rcu; 1453cfb6eeb4SYOSHIFUJI Hideaki }; 1454cfb6eeb4SYOSHIFUJI Hideaki 1455cfb6eeb4SYOSHIFUJI Hideaki /* - pseudo header */ 1456cfb6eeb4SYOSHIFUJI Hideaki struct tcp4_pseudohdr { 1457cfb6eeb4SYOSHIFUJI Hideaki __be32 saddr; 1458cfb6eeb4SYOSHIFUJI Hideaki __be32 daddr; 1459cfb6eeb4SYOSHIFUJI Hideaki __u8 pad; 1460cfb6eeb4SYOSHIFUJI Hideaki __u8 protocol; 1461cfb6eeb4SYOSHIFUJI Hideaki __be16 len; 1462cfb6eeb4SYOSHIFUJI Hideaki }; 1463cfb6eeb4SYOSHIFUJI Hideaki 1464cfb6eeb4SYOSHIFUJI Hideaki struct tcp6_pseudohdr { 1465cfb6eeb4SYOSHIFUJI Hideaki struct in6_addr saddr; 1466cfb6eeb4SYOSHIFUJI Hideaki struct in6_addr daddr; 1467cfb6eeb4SYOSHIFUJI Hideaki __be32 len; 1468cfb6eeb4SYOSHIFUJI Hideaki __be32 protocol; /* including padding */ 1469cfb6eeb4SYOSHIFUJI Hideaki }; 1470cfb6eeb4SYOSHIFUJI Hideaki 1471cfb6eeb4SYOSHIFUJI Hideaki union tcp_md5sum_block { 1472cfb6eeb4SYOSHIFUJI Hideaki struct tcp4_pseudohdr ip4; 1473dfd56b8bSEric Dumazet #if IS_ENABLED(CONFIG_IPV6) 
1474cfb6eeb4SYOSHIFUJI Hideaki struct tcp6_pseudohdr ip6; 1475cfb6eeb4SYOSHIFUJI Hideaki #endif 1476cfb6eeb4SYOSHIFUJI Hideaki }; 1477cfb6eeb4SYOSHIFUJI Hideaki 1478cfb6eeb4SYOSHIFUJI Hideaki /* - pool: digest algorithm, hash description and scratch buffer */ 1479cfb6eeb4SYOSHIFUJI Hideaki struct tcp_md5sig_pool { 1480cf80e0e4SHerbert Xu struct ahash_request *md5_req; 148119689e38SEric Dumazet void *scratch; 1482cfb6eeb4SYOSHIFUJI Hideaki }; 1483cfb6eeb4SYOSHIFUJI Hideaki 1484cfb6eeb4SYOSHIFUJI Hideaki /* - functions */ 148539f8e58eSEric Dumazet int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, 148639f8e58eSEric Dumazet const struct sock *sk, const struct sk_buff *skb); 14875c9f3023SJoe Perches int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, 14886797318eSIvan Delalande int family, u8 prefixlen, const u8 *newkey, u8 newkeylen, 14896797318eSIvan Delalande gfp_t gfp); 14905c9f3023SJoe Perches int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, 14916797318eSIvan Delalande int family, u8 prefixlen); 1492b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk, 1493fd3a154aSEric Dumazet const struct sock *addr_sk); 1494cfb6eeb4SYOSHIFUJI Hideaki 14959501f972SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 1496b83e3debSEric Dumazet struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, 14975c9f3023SJoe Perches const union tcp_md5_addr *addr, 14985c9f3023SJoe Perches int family); 1499a915da9bSEric Dumazet #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) 15009501f972SYOSHIFUJI Hideaki #else 1501b83e3debSEric Dumazet static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, 1502a915da9bSEric Dumazet const union tcp_md5_addr *addr, 1503a915da9bSEric Dumazet int family) 1504a915da9bSEric Dumazet { 1505a915da9bSEric Dumazet return NULL; 1506a915da9bSEric Dumazet } 15079501f972SYOSHIFUJI Hideaki #define tcp_twsk_md5_key(twsk) NULL 15089501f972SYOSHIFUJI Hideaki #endif 15099501f972SYOSHIFUJI Hideaki 15105c9f3023SJoe Perches bool tcp_alloc_md5sig_pool(void); 1511cfb6eeb4SYOSHIFUJI Hideaki 15125c9f3023SJoe Perches struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); 151371cea17eSEric Dumazet static inline void tcp_put_md5sig_pool(void) 151471cea17eSEric Dumazet { 151571cea17eSEric Dumazet local_bh_enable(); 151671cea17eSEric Dumazet } 151735790c04SEric Dumazet 15185c9f3023SJoe Perches int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, 151995c96174SEric Dumazet unsigned int header_len); 15205c9f3023SJoe Perches int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, 1521cf533ea5SEric Dumazet const struct tcp_md5sig_key *key); 1522cfb6eeb4SYOSHIFUJI Hideaki 152310467163SJerry Chu /* From tcp_fastopen.c */ 15245c9f3023SJoe Perches void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, 15255c9f3023SJoe Perches struct tcp_fastopen_cookie *cookie, int *syn_loss, 15265c9f3023SJoe Perches unsigned long *last_syn_loss); 15275c9f3023SJoe Perches void tcp_fastopen_cache_set(struct sock *sk, u16 mss, 15282646c831SDaniel Lee struct tcp_fastopen_cookie *cookie, bool syn_lost, 15292646c831SDaniel Lee u16 try_exp); 1530783237e8SYuchung Cheng struct tcp_fastopen_request { 1531783237e8SYuchung Cheng /* Fast Open cookie. 
Size 0 means a cookie request */ 1532783237e8SYuchung Cheng struct tcp_fastopen_cookie cookie; 1533783237e8SYuchung Cheng struct msghdr *data; /* data in MSG_FASTOPEN */ 1534f5ddcbbbSEric Dumazet size_t size; 1535f5ddcbbbSEric Dumazet int copied; /* queued in tcp_connect() */ 1536783237e8SYuchung Cheng }; 1537783237e8SYuchung Cheng void tcp_free_fastopen_req(struct tcp_sock *tp); 1538783237e8SYuchung Cheng 153910467163SJerry Chu extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx; 154010467163SJerry Chu int tcp_fastopen_reset_cipher(void *key, unsigned int len); 154161d2bcaeSEric Dumazet void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb); 15427c85af88SEric Dumazet struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, 15435b7ed089SYuchung Cheng struct request_sock *req, 154411199369STonghao Zhang struct tcp_fastopen_cookie *foc); 1545222e83d2SHannes Frederic Sowa void tcp_fastopen_init_key_once(bool publish); 1546065263f4SWei Wang bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, 1547065263f4SWei Wang struct tcp_fastopen_cookie *cookie); 154819f6d3f3SWei Wang bool tcp_fastopen_defer_connect(struct sock *sk, int *err); 154910467163SJerry Chu #define TCP_FASTOPEN_KEY_LENGTH 16 155010467163SJerry Chu 155110467163SJerry Chu /* Fastopen key context */ 155210467163SJerry Chu struct tcp_fastopen_context { 15537ae8639cSEric Dumazet struct crypto_cipher *tfm; 155410467163SJerry Chu __u8 key[TCP_FASTOPEN_KEY_LENGTH]; 155510467163SJerry Chu struct rcu_head rcu; 155610467163SJerry Chu }; 155710467163SJerry Chu 1558cf1ef3f0SWei Wang extern unsigned int sysctl_tcp_fastopen_blackhole_timeout; 155946c2fa39SWei Wang void tcp_fastopen_active_disable(struct sock *sk); 1560cf1ef3f0SWei Wang bool tcp_fastopen_active_should_disable(struct sock *sk); 1561cf1ef3f0SWei Wang void tcp_fastopen_active_disable_ofo_check(struct sock *sk); 1562cf1ef3f0SWei Wang void tcp_fastopen_active_timeout_reset(void); 1563cf1ef3f0SWei Wang 156405b055e8SFrancis Yan /* Latencies incurred by various limits for a sender. They are 156505b055e8SFrancis Yan * chronograph-like stats that are mutually exclusive. 156605b055e8SFrancis Yan */ 156705b055e8SFrancis Yan enum tcp_chrono { 156805b055e8SFrancis Yan TCP_CHRONO_UNSPEC, 156905b055e8SFrancis Yan TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */ 157005b055e8SFrancis Yan TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */ 157105b055e8SFrancis Yan TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */ 157205b055e8SFrancis Yan __TCP_CHRONO_MAX, 157305b055e8SFrancis Yan }; 157405b055e8SFrancis Yan 157505b055e8SFrancis Yan void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type); 157605b055e8SFrancis Yan void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type); 157705b055e8SFrancis Yan 1578fe067e8aSDavid S. Miller /* write queue abstraction */ 1579fe067e8aSDavid S. Miller static inline void tcp_write_queue_purge(struct sock *sk) 1580fe067e8aSDavid S. Miller { 1581fe067e8aSDavid S. Miller struct sk_buff *skb; 1582fe067e8aSDavid S. Miller 15830f87230dSFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 1584fe067e8aSDavid S. Miller while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) 15853ab224beSHideo Aoki sk_wmem_free_skb(sk, skb); 15863ab224beSHideo Aoki sk_mem_reclaim(sk); 15878818a9d8SIlpo Järvinen tcp_clear_all_retrans_hints(tcp_sk(sk)); 1588fe067e8aSDavid S. Miller } 1589fe067e8aSDavid S. 
Miller 1590cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) 1591fe067e8aSDavid S. Miller { 1592cd07a8eaSDavid S. Miller return skb_peek(&sk->sk_write_queue); 1593fe067e8aSDavid S. Miller } 1594fe067e8aSDavid S. Miller 1595cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) 1596fe067e8aSDavid S. Miller { 1597cd07a8eaSDavid S. Miller return skb_peek_tail(&sk->sk_write_queue); 1598fe067e8aSDavid S. Miller } 1599fe067e8aSDavid S. Miller 1600cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk, 1601cf533ea5SEric Dumazet const struct sk_buff *skb) 1602fe067e8aSDavid S. Miller { 1603cd07a8eaSDavid S. Miller return skb_queue_next(&sk->sk_write_queue, skb); 1604fe067e8aSDavid S. Miller } 1605fe067e8aSDavid S. Miller 1606cf533ea5SEric Dumazet static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk, 1607cf533ea5SEric Dumazet const struct sk_buff *skb) 1608832d11c5SIlpo Järvinen { 1609832d11c5SIlpo Järvinen return skb_queue_prev(&sk->sk_write_queue, skb); 1610832d11c5SIlpo Järvinen } 1611832d11c5SIlpo Järvinen 1612fe067e8aSDavid S. Miller #define tcp_for_write_queue(skb, sk) \ 1613cd07a8eaSDavid S. Miller skb_queue_walk(&(sk)->sk_write_queue, skb) 1614fe067e8aSDavid S. Miller 1615fe067e8aSDavid S. Miller #define tcp_for_write_queue_from(skb, sk) \ 1616cd07a8eaSDavid S. Miller skb_queue_walk_from(&(sk)->sk_write_queue, skb) 1617fe067e8aSDavid S. Miller 1618234b6860SIlpo Järvinen #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ 1619cd07a8eaSDavid S. Miller skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp) 1620234b6860SIlpo Järvinen 1621cf533ea5SEric Dumazet static inline struct sk_buff *tcp_send_head(const struct sock *sk) 1622fe067e8aSDavid S. Miller { 1623fe067e8aSDavid S. Miller return sk->sk_send_head; 1624fe067e8aSDavid S. Miller } 1625fe067e8aSDavid S. Miller 1626cd07a8eaSDavid S. Miller static inline bool tcp_skb_is_last(const struct sock *sk, 1627cd07a8eaSDavid S. Miller const struct sk_buff *skb) 1628cd07a8eaSDavid S. Miller { 1629cd07a8eaSDavid S. Miller return skb_queue_is_last(&sk->sk_write_queue, skb); 1630cd07a8eaSDavid S. Miller } 1631cd07a8eaSDavid S. Miller 1632cf533ea5SEric Dumazet static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb) 1633fe067e8aSDavid S. Miller { 1634cd07a8eaSDavid S. Miller if (tcp_skb_is_last(sk, skb)) 1635fe067e8aSDavid S. Miller sk->sk_send_head = NULL; 1636cd07a8eaSDavid S. Miller else 1637cd07a8eaSDavid S. Miller sk->sk_send_head = tcp_write_queue_next(sk, skb); 1638fe067e8aSDavid S. Miller } 1639fe067e8aSDavid S. Miller 1640fe067e8aSDavid S. Miller static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) 1641fe067e8aSDavid S. Miller { 16420f87230dSFrancis Yan if (sk->sk_send_head == skb_unlinked) { 1643fe067e8aSDavid S. Miller sk->sk_send_head = NULL; 16440f87230dSFrancis Yan tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 16450f87230dSFrancis Yan } 1646bb1fcecaSEric Dumazet if (tcp_sk(sk)->highest_sack == skb_unlinked) 1647bb1fcecaSEric Dumazet tcp_sk(sk)->highest_sack = NULL; 1648fe067e8aSDavid S. Miller } 1649fe067e8aSDavid S. Miller 1650fe067e8aSDavid S. Miller static inline void tcp_init_send_head(struct sock *sk) 1651fe067e8aSDavid S. Miller { 1652fe067e8aSDavid S. Miller sk->sk_send_head = NULL; 1653fe067e8aSDavid S. Miller } 1654fe067e8aSDavid S. Miller 1655fe067e8aSDavid S. 
Miller static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) 1656fe067e8aSDavid S. Miller { 1657fe067e8aSDavid S. Miller __skb_queue_tail(&sk->sk_write_queue, skb); 1658fe067e8aSDavid S. Miller } 1659fe067e8aSDavid S. Miller 1660fe067e8aSDavid S. Miller static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) 1661fe067e8aSDavid S. Miller { 1662fe067e8aSDavid S. Miller __tcp_add_write_queue_tail(sk, skb); 1663fe067e8aSDavid S. Miller 1664fe067e8aSDavid S. Miller /* Queue it, remembering where we must start sending. */ 16656859d494SIlpo Järvinen if (sk->sk_send_head == NULL) { 1666fe067e8aSDavid S. Miller sk->sk_send_head = skb; 16670f87230dSFrancis Yan tcp_chrono_start(sk, TCP_CHRONO_BUSY); 16686859d494SIlpo Järvinen 16696859d494SIlpo Järvinen if (tcp_sk(sk)->highest_sack == NULL) 16706859d494SIlpo Järvinen tcp_sk(sk)->highest_sack = skb; 16716859d494SIlpo Järvinen } 1672fe067e8aSDavid S. Miller } 1673fe067e8aSDavid S. Miller 1674fe067e8aSDavid S. Miller static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb) 1675fe067e8aSDavid S. Miller { 1676fe067e8aSDavid S. Miller __skb_queue_head(&sk->sk_write_queue, skb); 1677fe067e8aSDavid S. Miller } 1678fe067e8aSDavid S. Miller 1679fe067e8aSDavid S. Miller /* Insert buff after skb on the write queue of sk. */ 1680fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_after(struct sk_buff *skb, 1681fe067e8aSDavid S. Miller struct sk_buff *buff, 1682fe067e8aSDavid S. Miller struct sock *sk) 1683fe067e8aSDavid S. Miller { 16847de6c033SGerrit Renker __skb_queue_after(&sk->sk_write_queue, skb, buff); 1685fe067e8aSDavid S. Miller } 1686fe067e8aSDavid S. Miller 168743f59c89SDavid S. Miller /* Insert new before skb on the write queue of sk. */ 1688fe067e8aSDavid S. Miller static inline void tcp_insert_write_queue_before(struct sk_buff *new, 1689fe067e8aSDavid S. Miller struct sk_buff *skb, 1690fe067e8aSDavid S. Miller struct sock *sk) 1691fe067e8aSDavid S. Miller { 169243f59c89SDavid S. Miller __skb_queue_before(&sk->sk_write_queue, skb, new); 16936e421410SIlpo Järvinen 16946e421410SIlpo Järvinen if (sk->sk_send_head == skb) 16956e421410SIlpo Järvinen sk->sk_send_head = new; 1696fe067e8aSDavid S. Miller } 1697fe067e8aSDavid S. Miller 1698fe067e8aSDavid S. Miller static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) 1699fe067e8aSDavid S. Miller { 1700fe067e8aSDavid S. Miller __skb_unlink(skb, &sk->sk_write_queue); 1701fe067e8aSDavid S. Miller } 1702fe067e8aSDavid S. Miller 1703a2a385d6SEric Dumazet static inline bool tcp_write_queue_empty(struct sock *sk) 1704fe067e8aSDavid S. Miller { 1705fe067e8aSDavid S. Miller return skb_queue_empty(&sk->sk_write_queue); 1706fe067e8aSDavid S. Miller } 1707fe067e8aSDavid S. Miller 170812d50c46SKrishna Kumar static inline void tcp_push_pending_frames(struct sock *sk) 170912d50c46SKrishna Kumar { 171012d50c46SKrishna Kumar if (tcp_send_head(sk)) { 171112d50c46SKrishna Kumar struct tcp_sock *tp = tcp_sk(sk); 171212d50c46SKrishna Kumar 171312d50c46SKrishna Kumar __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle); 171412d50c46SKrishna Kumar } 171512d50c46SKrishna Kumar } 171612d50c46SKrishna Kumar 1717ecb97192SNeal Cardwell /* Start sequence of the skb just after the highest skb with SACKed 1718ecb97192SNeal Cardwell * bit, valid only if sacked_out > 0 or when the caller has ensured 1719ecb97192SNeal Cardwell * validity by itself. 
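 * (Editor's note: the helper below falls back to snd_una when nothing is
 * SACKed, and to snd_nxt when the highest_sack hint is unset.)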
1720a47e5a98SIlpo Järvinen */ 1721a47e5a98SIlpo Järvinen static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp) 1722a47e5a98SIlpo Järvinen { 1723a47e5a98SIlpo Järvinen if (!tp->sacked_out) 1724a47e5a98SIlpo Järvinen return tp->snd_una; 17256859d494SIlpo Järvinen 17266859d494SIlpo Järvinen if (tp->highest_sack == NULL) 17276859d494SIlpo Järvinen return tp->snd_nxt; 17286859d494SIlpo Järvinen 1729a47e5a98SIlpo Järvinen return TCP_SKB_CB(tp->highest_sack)->seq; 1730a47e5a98SIlpo Järvinen } 1731a47e5a98SIlpo Järvinen 17326859d494SIlpo Järvinen static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) 17336859d494SIlpo Järvinen { 17346859d494SIlpo Järvinen tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL : 17356859d494SIlpo Järvinen tcp_write_queue_next(sk, skb); 17366859d494SIlpo Järvinen } 17376859d494SIlpo Järvinen 17386859d494SIlpo Järvinen static inline struct sk_buff *tcp_highest_sack(struct sock *sk) 17396859d494SIlpo Järvinen { 17406859d494SIlpo Järvinen return tcp_sk(sk)->highest_sack; 17416859d494SIlpo Järvinen } 17426859d494SIlpo Järvinen 17436859d494SIlpo Järvinen static inline void tcp_highest_sack_reset(struct sock *sk) 17446859d494SIlpo Järvinen { 17456859d494SIlpo Järvinen tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk); 17466859d494SIlpo Järvinen } 17476859d494SIlpo Järvinen 17486859d494SIlpo Järvinen /* Called when old skb is about to be deleted (to be combined with new skb) */ 17496859d494SIlpo Järvinen static inline void tcp_highest_sack_combine(struct sock *sk, 17506859d494SIlpo Järvinen struct sk_buff *old, 17516859d494SIlpo Järvinen struct sk_buff *new) 17526859d494SIlpo Järvinen { 17536859d494SIlpo Järvinen if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack)) 17546859d494SIlpo Järvinen tcp_sk(sk)->highest_sack = new; 17556859d494SIlpo Järvinen } 17566859d494SIlpo Järvinen 1757b1f0a0e9SFlorian Westphal /* This helper checks if socket has IP_TRANSPARENT set */ 1758b1f0a0e9SFlorian Westphal static inline bool inet_sk_transparent(const struct sock *sk) 1759b1f0a0e9SFlorian Westphal { 1760b1f0a0e9SFlorian Westphal switch (sk->sk_state) { 1761b1f0a0e9SFlorian Westphal case TCP_TIME_WAIT: 1762b1f0a0e9SFlorian Westphal return inet_twsk(sk)->tw_transparent; 1763b1f0a0e9SFlorian Westphal case TCP_NEW_SYN_RECV: 1764b1f0a0e9SFlorian Westphal return inet_rsk(inet_reqsk(sk))->no_srccheck; 1765b1f0a0e9SFlorian Westphal } 1766b1f0a0e9SFlorian Westphal return inet_sk(sk)->transparent; 1767b1f0a0e9SFlorian Westphal } 1768b1f0a0e9SFlorian Westphal 17695aa4b32fSAndreas Petlund /* Determines whether this is a thin stream (which may suffer from 17705aa4b32fSAndreas Petlund * increased latency). Used to trigger latency-reducing mechanisms. 
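 * (Editor's note: per the test below, a stream is "thin" while fewer than
 * four packets are in flight once initial slow start has ended.)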
17715aa4b32fSAndreas Petlund */ 1772a2a385d6SEric Dumazet static inline bool tcp_stream_is_thin(struct tcp_sock *tp) 17735aa4b32fSAndreas Petlund { 17745aa4b32fSAndreas Petlund return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp); 17755aa4b32fSAndreas Petlund } 17765aa4b32fSAndreas Petlund 17771da177e4SLinus Torvalds /* /proc */ 17781da177e4SLinus Torvalds enum tcp_seq_states { 17791da177e4SLinus Torvalds TCP_SEQ_STATE_LISTENING, 17801da177e4SLinus Torvalds TCP_SEQ_STATE_ESTABLISHED, 17811da177e4SLinus Torvalds }; 17821da177e4SLinus Torvalds 178373cb88ecSArjan van de Ven int tcp_seq_open(struct inode *inode, struct file *file); 178473cb88ecSArjan van de Ven 17851da177e4SLinus Torvalds struct tcp_seq_afinfo { 17861da177e4SLinus Torvalds char *name; 17871da177e4SLinus Torvalds sa_family_t family; 178873cb88ecSArjan van de Ven const struct file_operations *seq_fops; 17899427c4b3SDenis V. Lunev struct seq_operations seq_ops; 17901da177e4SLinus Torvalds }; 17911da177e4SLinus Torvalds 17921da177e4SLinus Torvalds struct tcp_iter_state { 1793a4146b1bSDenis V. Lunev struct seq_net_private p; 17941da177e4SLinus Torvalds sa_family_t family; 17951da177e4SLinus Torvalds enum tcp_seq_states state; 17961da177e4SLinus Torvalds struct sock *syn_wait_sk; 1797a7cb5a49SEric W. Biederman int bucket, offset, sbucket, num; 1798a8b690f9STom Herbert loff_t last_pos; 17991da177e4SLinus Torvalds }; 18001da177e4SLinus Torvalds 18015c9f3023SJoe Perches int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo); 18025c9f3023SJoe Perches void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo); 18031da177e4SLinus Torvalds 180420380731SArnaldo Carvalho de Melo extern struct request_sock_ops tcp_request_sock_ops; 1805c6aefafbSGlenn Griffin extern struct request_sock_ops tcp6_request_sock_ops; 180620380731SArnaldo Carvalho de Melo 18075c9f3023SJoe Perches void tcp_v4_destroy_sock(struct sock *sk); 180820380731SArnaldo Carvalho de Melo 180928be6e07SEric Dumazet struct sk_buff *tcp_gso_segment(struct sk_buff *skb, 1810c8f44affSMichał Mirosław netdev_features_t features); 18115c9f3023SJoe Perches struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb); 18125c9f3023SJoe Perches int tcp_gro_complete(struct sk_buff *skb); 181328850dc7SDaniel Borkmann 18145c9f3023SJoe Perches void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); 1815f4c50d99SHerbert Xu 1816c9bee3b7SEric Dumazet static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) 1817c9bee3b7SEric Dumazet { 18184979f2d9SNikolay Borisov struct net *net = sock_net((struct sock *)tp); 18194979f2d9SNikolay Borisov return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat; 1820c9bee3b7SEric Dumazet } 1821c9bee3b7SEric Dumazet 1822c9bee3b7SEric Dumazet static inline bool tcp_stream_memory_free(const struct sock *sk) 1823c9bee3b7SEric Dumazet { 1824c9bee3b7SEric Dumazet const struct tcp_sock *tp = tcp_sk(sk); 1825c9bee3b7SEric Dumazet u32 notsent_bytes = tp->write_seq - tp->snd_nxt; 1826c9bee3b7SEric Dumazet 1827c9bee3b7SEric Dumazet return notsent_bytes < tcp_notsent_lowat(tp); 1828c9bee3b7SEric Dumazet } 1829c9bee3b7SEric Dumazet 183020380731SArnaldo Carvalho de Melo #ifdef CONFIG_PROC_FS 18315c9f3023SJoe Perches int tcp4_proc_init(void); 18325c9f3023SJoe Perches void tcp4_proc_exit(void); 183320380731SArnaldo Carvalho de Melo #endif 183420380731SArnaldo Carvalho de Melo 1835ea3bea3aSEric Dumazet int tcp_rtx_synack(const struct sock *sk, struct request_sock *req); 18361fb6f159SOctavian Purdila 
int tcp_conn_request(struct request_sock_ops *rsk_ops, 18371fb6f159SOctavian Purdila const struct tcp_request_sock_ops *af_ops, 18381fb6f159SOctavian Purdila struct sock *sk, struct sk_buff *skb); 18395db92c99SOctavian Purdila 1840cfb6eeb4SYOSHIFUJI Hideaki /* TCP af-specific functions */ 1841cfb6eeb4SYOSHIFUJI Hideaki struct tcp_sock_af_ops { 1842cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 1843b83e3debSEric Dumazet struct tcp_md5sig_key *(*md5_lookup) (const struct sock *sk, 1844fd3a154aSEric Dumazet const struct sock *addr_sk); 1845cfb6eeb4SYOSHIFUJI Hideaki int (*calc_md5_hash)(char *location, 184639f8e58eSEric Dumazet const struct tcp_md5sig_key *md5, 1847318cf7aaSEric Dumazet const struct sock *sk, 1848318cf7aaSEric Dumazet const struct sk_buff *skb); 1849cfb6eeb4SYOSHIFUJI Hideaki int (*md5_parse)(struct sock *sk, 18508917a777SIvan Delalande int optname, 1851cfb6eeb4SYOSHIFUJI Hideaki char __user *optval, 1852cfb6eeb4SYOSHIFUJI Hideaki int optlen); 1853cfb6eeb4SYOSHIFUJI Hideaki #endif 1854cfb6eeb4SYOSHIFUJI Hideaki }; 1855cfb6eeb4SYOSHIFUJI Hideaki 1856cfb6eeb4SYOSHIFUJI Hideaki struct tcp_request_sock_ops { 18572aec4a29SOctavian Purdila u16 mss_clamp; 1858cfb6eeb4SYOSHIFUJI Hideaki #ifdef CONFIG_TCP_MD5SIG 1859b83e3debSEric Dumazet struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk, 1860fd3a154aSEric Dumazet const struct sock *addr_sk); 1861e3afe7b7SJohn Dykstra int (*calc_md5_hash) (char *location, 186239f8e58eSEric Dumazet const struct tcp_md5sig_key *md5, 1863318cf7aaSEric Dumazet const struct sock *sk, 1864318cf7aaSEric Dumazet const struct sk_buff *skb); 1865cfb6eeb4SYOSHIFUJI Hideaki #endif 1866b40cf18eSEric Dumazet void (*init_req)(struct request_sock *req, 1867b40cf18eSEric Dumazet const struct sock *sk_listener, 186816bea70aSOctavian Purdila struct sk_buff *skb); 1869fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES 18703f684b4bSEric Dumazet __u32 (*cookie_init_seq)(const struct sk_buff *skb, 1871fb7b37a7SOctavian Purdila __u16 *mss); 1872fb7b37a7SOctavian Purdila #endif 1873f964629eSEric Dumazet struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, 18744396e461SSoheil Hassas Yeganeh const struct request_sock *req); 187584b114b9SEric Dumazet u32 (*init_seq)(const struct sk_buff *skb); 18765d2ed052SEric Dumazet u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb); 18770f935dbeSEric Dumazet int (*send_synack)(const struct sock *sk, struct dst_entry *dst, 1878d6274bd8SOctavian Purdila struct flowi *fl, struct request_sock *req, 1879dc6ef6beSEric Dumazet struct tcp_fastopen_cookie *foc, 1880b3d05147SEric Dumazet enum tcp_synack_type synack_type); 1881cfb6eeb4SYOSHIFUJI Hideaki }; 1882cfb6eeb4SYOSHIFUJI Hideaki 1883fb7b37a7SOctavian Purdila #ifdef CONFIG_SYN_COOKIES 1884fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, 18853f684b4bSEric Dumazet const struct sock *sk, struct sk_buff *skb, 1886fb7b37a7SOctavian Purdila __u16 *mss) 1887fb7b37a7SOctavian Purdila { 18883f684b4bSEric Dumazet tcp_synq_overflow(sk); 188902a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); 18903f684b4bSEric Dumazet return ops->cookie_init_seq(skb, mss); 1891fb7b37a7SOctavian Purdila } 1892fb7b37a7SOctavian Purdila #else 1893fb7b37a7SOctavian Purdila static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, 18943f684b4bSEric Dumazet const struct sock *sk, struct sk_buff *skb, 1895fb7b37a7SOctavian Purdila __u16 *mss) 1896fb7b37a7SOctavian 
Purdila { 1897fb7b37a7SOctavian Purdila return 0; 1898fb7b37a7SOctavian Purdila } 1899fb7b37a7SOctavian Purdila #endif 1900fb7b37a7SOctavian Purdila 19015c9f3023SJoe Perches int tcpv4_offload_init(void); 190228850dc7SDaniel Borkmann 19035c9f3023SJoe Perches void tcp_v4_init(void); 19045c9f3023SJoe Perches void tcp_init(void); 190520380731SArnaldo Carvalho de Melo 1906659a8ad5SYuchung Cheng /* tcp_recovery.c */ 1907128eda86SEric Dumazet extern void tcp_rack_mark_lost(struct sock *sk); 19081d0833dfSYuchung Cheng extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq, 19099a568de4SEric Dumazet u64 xmit_time); 191057dde7f7SYuchung Cheng extern void tcp_rack_reo_timeout(struct sock *sk); 1911659a8ad5SYuchung Cheng 1912e1a10ef7SNeal Cardwell /* At how many usecs into the future should the RTO fire? */ 1913e1a10ef7SNeal Cardwell static inline s64 tcp_rto_delta_us(const struct sock *sk) 1914e1a10ef7SNeal Cardwell { 1915e1a10ef7SNeal Cardwell const struct sk_buff *skb = tcp_write_queue_head(sk); 1916e1a10ef7SNeal Cardwell u32 rto = inet_csk(sk)->icsk_rto; 1917e1a10ef7SNeal Cardwell u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto); 1918e1a10ef7SNeal Cardwell 1919e1a10ef7SNeal Cardwell return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; 1920e1a10ef7SNeal Cardwell } 1921e1a10ef7SNeal Cardwell 1922e25f866fSCong Wang /* 1923e25f866fSCong Wang * Save and compile IPv4 options, return a pointer to it 1924e25f866fSCong Wang */ 192591ed1e66SPaolo Abeni static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net, 192691ed1e66SPaolo Abeni struct sk_buff *skb) 1927e25f866fSCong Wang { 1928e25f866fSCong Wang const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt; 1929e25f866fSCong Wang struct ip_options_rcu *dopt = NULL; 1930e25f866fSCong Wang 1931461b74c3SCong Wang if (opt->optlen) { 1932e25f866fSCong Wang int opt_size = sizeof(*dopt) + opt->optlen; 1933e25f866fSCong Wang 1934e25f866fSCong Wang dopt = kmalloc(opt_size, GFP_ATOMIC); 193591ed1e66SPaolo Abeni if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) { 1936e25f866fSCong Wang kfree(dopt); 1937e25f866fSCong Wang dopt = NULL; 1938e25f866fSCong Wang } 1939e25f866fSCong Wang } 1940e25f866fSCong Wang return dopt; 1941e25f866fSCong Wang } 1942e25f866fSCong Wang 194398781965SEric Dumazet /* locally generated TCP pure ACKs have skb->truesize == 2 194498781965SEric Dumazet * (check tcp_send_ack() in net/ipv4/tcp_output.c ) 194598781965SEric Dumazet * This is much faster than dissecting the packet to find out. 194698781965SEric Dumazet * (Think of GRE encapsulations, IPv4, IPv6, ...) 
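 * (Editor's note: a real skb's truesize is at least sizeof(struct sk_buff)
 * plus buffer space, so 2 is otherwise impossible and doubles as a flag.)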
194798781965SEric Dumazet */ 194898781965SEric Dumazet static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb) 194998781965SEric Dumazet { 195098781965SEric Dumazet return skb->truesize == 2; 195198781965SEric Dumazet } 195298781965SEric Dumazet 195398781965SEric Dumazet static inline void skb_set_tcp_pure_ack(struct sk_buff *skb) 195498781965SEric Dumazet { 195598781965SEric Dumazet skb->truesize = 2; 195698781965SEric Dumazet } 195798781965SEric Dumazet 1958473bd239STom Herbert static inline int tcp_inq(struct sock *sk) 1959473bd239STom Herbert { 1960473bd239STom Herbert struct tcp_sock *tp = tcp_sk(sk); 1961473bd239STom Herbert int answ; 1962473bd239STom Herbert 1963473bd239STom Herbert if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 1964473bd239STom Herbert answ = 0; 1965473bd239STom Herbert } else if (sock_flag(sk, SOCK_URGINLINE) || 1966473bd239STom Herbert !tp->urg_data || 1967473bd239STom Herbert before(tp->urg_seq, tp->copied_seq) || 1968473bd239STom Herbert !before(tp->urg_seq, tp->rcv_nxt)) { 1969473bd239STom Herbert 1970473bd239STom Herbert answ = tp->rcv_nxt - tp->copied_seq; 1971473bd239STom Herbert 1972473bd239STom Herbert /* Subtract 1, if FIN was received */ 1973473bd239STom Herbert if (answ && sock_flag(sk, SOCK_DONE)) 1974473bd239STom Herbert answ--; 1975473bd239STom Herbert } else { 1976473bd239STom Herbert answ = tp->urg_seq - tp->copied_seq; 1977473bd239STom Herbert } 1978473bd239STom Herbert 1979473bd239STom Herbert return answ; 1980473bd239STom Herbert } 1981473bd239STom Herbert 198232035585STom Herbert int tcp_peek_len(struct socket *sock); 198332035585STom Herbert 1984a44d6eacSMartin KaFai Lau static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb) 1985a44d6eacSMartin KaFai Lau { 1986a44d6eacSMartin KaFai Lau u16 segs_in; 1987a44d6eacSMartin KaFai Lau 1988a44d6eacSMartin KaFai Lau segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs); 1989a44d6eacSMartin KaFai Lau tp->segs_in += segs_in; 1990a44d6eacSMartin KaFai Lau if (skb->len > tcp_hdrlen(skb)) 1991a44d6eacSMartin KaFai Lau tp->data_segs_in += segs_in; 1992a44d6eacSMartin KaFai Lau } 1993a44d6eacSMartin KaFai Lau 19949caad864SEric Dumazet /* 19959caad864SEric Dumazet * TCP listen path runs lockless. 19969caad864SEric Dumazet * We forced "struct sock" to be const qualified to make sure 19979caad864SEric Dumazet * we don't modify one of its field by mistake. 19989caad864SEric Dumazet * Here, we increment sk_drops which is an atomic_t, so we can safely 19999caad864SEric Dumazet * make sock writable again. 
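 * (Editor's note: the const cast below only touches sk_drops, which is
 * atomic and therefore safe to bump without owning the socket lock.)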
20009caad864SEric Dumazet */ 20019caad864SEric Dumazet static inline void tcp_listendrop(const struct sock *sk) 20029caad864SEric Dumazet { 20039caad864SEric Dumazet atomic_inc(&((struct sock *)sk)->sk_drops); 200402a1d6e7SEric Dumazet __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); 20059caad864SEric Dumazet } 20069caad864SEric Dumazet 2007218af599SEric Dumazet enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer); 2008218af599SEric Dumazet 2009734942ccSDave Watson /* 2010734942ccSDave Watson * Interface for adding Upper Level Protocols over TCP 2011734942ccSDave Watson */ 2012734942ccSDave Watson 2013734942ccSDave Watson #define TCP_ULP_NAME_MAX 16 2014734942ccSDave Watson #define TCP_ULP_MAX 128 2015734942ccSDave Watson #define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX) 2016734942ccSDave Watson 2017734942ccSDave Watson struct tcp_ulp_ops { 2018734942ccSDave Watson struct list_head list; 2019734942ccSDave Watson 2020734942ccSDave Watson /* initialize ulp */ 2021734942ccSDave Watson int (*init)(struct sock *sk); 2022734942ccSDave Watson /* cleanup ulp */ 2023734942ccSDave Watson void (*release)(struct sock *sk); 2024734942ccSDave Watson 2025734942ccSDave Watson char name[TCP_ULP_NAME_MAX]; 2026734942ccSDave Watson struct module *owner; 2027734942ccSDave Watson }; 2028734942ccSDave Watson int tcp_register_ulp(struct tcp_ulp_ops *type); 2029734942ccSDave Watson void tcp_unregister_ulp(struct tcp_ulp_ops *type); 2030734942ccSDave Watson int tcp_set_ulp(struct sock *sk, const char *name); 2031734942ccSDave Watson void tcp_get_available_ulp(char *buf, size_t len); 2032734942ccSDave Watson void tcp_cleanup_ulp(struct sock *sk); 2033734942ccSDave Watson 203440304b2aSLawrence Brakmo /* Call BPF_SOCK_OPS program that returns an int. If the return value 203540304b2aSLawrence Brakmo * is < 0, then the BPF op failed (for example if the loaded BPF 203640304b2aSLawrence Brakmo * program does not support the chosen operation or there is no BPF 203740304b2aSLawrence Brakmo * program loaded). 
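 * (Editor's note: callers therefore treat a negative return as "no answer"
 * and fall back to stack defaults, as tcp_timeout_init() below does.)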
203840304b2aSLawrence Brakmo */ 203940304b2aSLawrence Brakmo #ifdef CONFIG_BPF 204040304b2aSLawrence Brakmo static inline int tcp_call_bpf(struct sock *sk, int op) 204140304b2aSLawrence Brakmo { 204240304b2aSLawrence Brakmo struct bpf_sock_ops_kern sock_ops; 204340304b2aSLawrence Brakmo int ret; 204440304b2aSLawrence Brakmo 204540304b2aSLawrence Brakmo if (sk_fullsock(sk)) 204640304b2aSLawrence Brakmo sock_owned_by_me(sk); 204740304b2aSLawrence Brakmo 204840304b2aSLawrence Brakmo memset(&sock_ops, 0, sizeof(sock_ops)); 204940304b2aSLawrence Brakmo sock_ops.sk = sk; 205040304b2aSLawrence Brakmo sock_ops.op = op; 205140304b2aSLawrence Brakmo 205240304b2aSLawrence Brakmo ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops); 205340304b2aSLawrence Brakmo if (ret == 0) 205440304b2aSLawrence Brakmo ret = sock_ops.reply; 205540304b2aSLawrence Brakmo else 205640304b2aSLawrence Brakmo ret = -1; 205740304b2aSLawrence Brakmo return ret; 205840304b2aSLawrence Brakmo } 205940304b2aSLawrence Brakmo #else 206040304b2aSLawrence Brakmo static inline int tcp_call_bpf(struct sock *sk, int op) 206140304b2aSLawrence Brakmo { 206240304b2aSLawrence Brakmo return -EPERM; 206340304b2aSLawrence Brakmo } 206440304b2aSLawrence Brakmo #endif 206540304b2aSLawrence Brakmo 20668550f328SLawrence Brakmo static inline u32 tcp_timeout_init(struct sock *sk) 20678550f328SLawrence Brakmo { 20688550f328SLawrence Brakmo int timeout; 20698550f328SLawrence Brakmo 20708550f328SLawrence Brakmo timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT); 20718550f328SLawrence Brakmo 20728550f328SLawrence Brakmo if (timeout <= 0) 20738550f328SLawrence Brakmo timeout = TCP_TIMEOUT_INIT; 20748550f328SLawrence Brakmo return timeout; 20758550f328SLawrence Brakmo } 20768550f328SLawrence Brakmo 207713d3b1ebSLawrence Brakmo static inline u32 tcp_rwnd_init_bpf(struct sock *sk) 207813d3b1ebSLawrence Brakmo { 207913d3b1ebSLawrence Brakmo int rwnd; 208013d3b1ebSLawrence Brakmo 208113d3b1ebSLawrence Brakmo rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT); 208213d3b1ebSLawrence Brakmo 208313d3b1ebSLawrence Brakmo if (rwnd < 0) 208413d3b1ebSLawrence Brakmo rwnd = 0; 208513d3b1ebSLawrence Brakmo return rwnd; 208613d3b1ebSLawrence Brakmo } 208791b5b21cSLawrence Brakmo 208891b5b21cSLawrence Brakmo static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) 208991b5b21cSLawrence Brakmo { 209091b5b21cSLawrence Brakmo return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN) == 1); 209191b5b21cSLawrence Brakmo } 20921da177e4SLinus Torvalds #endif /* _TCP_H */ 2093
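/* Editor's note: the example below is appended in editing and is not part
 * of tcp.h. It is a minimal sketch of a congestion control module built on
 * the tcp_congestion_ops interface declared above, reusing the exported
 * Reno helpers and filling in only the hooks marked (required). The
 * algorithm name "example" and the function names are illustrative
 * assumptions, not kernel code.
 */
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* required */
	.cong_avoid	= tcp_reno_cong_avoid,	/* required */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* required */
	.name		= "example",	/* at most TCP_CA_NAME_MAX - 1 chars */
	.owner		= THIS_MODULE,
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");

/* A fuller implementation would usually also provide pkts_acked() or, for
 * rate-based control, cong_control() driven by struct rate_sample above. */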