1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2019 Joyent, Inc.
24  * Copyright (c) 2013, OmniTI Computer Consulting, Inc. All rights reserved.
25  * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
26  * Copyright 2024 Oxide Computer Company
27  */
28 
29 #ifndef	_INET_TCP_IMPL_H
30 #define	_INET_TCP_IMPL_H
31 
32 /*
33  * TCP implementation private declarations.  These interfaces are
34  * used to build the IP module and are not meant to be accessed
35  * by any modules except IP itself.  They are undocumented and are
36  * subject to change without notice.
37  */
38 
39 #ifdef	__cplusplus
40 extern "C" {
41 #endif
42 
43 #ifdef _KERNEL
44 
45 #include <sys/cpuvar.h>
46 #include <sys/clock_impl.h>	/* For LBOLT_FASTPATH{,64} */
47 #include <inet/optcom.h>
48 #include <inet/tcp.h>
49 #include <inet/tunables.h>
50 
51 #define	TCP_MOD_ID	5105
52 
53 extern struct qinit	tcp_sock_winit;
54 extern struct qinit	tcp_winit;
55 
56 extern sock_downcalls_t sock_tcp_downcalls;
57 
58 /*
59  * Note that by default, the _snd_lowat_fraction tunable controls the value of
60  * the transmit low water mark.  TCP_XMIT_LOWATER (and thus the _xmit_lowat
61  * property) is only used if the administrator has disabled _snd_lowat_fraction
62  * by setting it to 0.
63  */
64 #define	TCP_XMIT_LOWATER	4096
65 #define	TCP_XMIT_HIWATER	49152
66 #define	TCP_RECV_LOWATER	2048
67 #define	TCP_RECV_HIWATER	128000
68 
69 /*
70  * Bind hash list size and hash function.  It has to be a power of 2 for
71  * hashing.
72  */
73 #define	TCP_BIND_FANOUT_SIZE	1024
74 #define	TCP_BIND_HASH(lport) (ntohs(lport) & (TCP_BIND_FANOUT_SIZE - 1))
75 
76 /*
77  * This implementation follows the 4.3BSD interpretation of the urgent
78  * pointer and not RFC 1122. Switching to RFC 1122 behavior would cause
79  * incompatible changes in protocols like telnet and rlogin.
80  */
81 #define	TCP_OLD_URP_INTERPRETATION	1
82 
83 /* TCP option length */
84 #define	TCPOPT_NOP_LEN		1
85 #define	TCPOPT_MAXSEG_LEN	4
86 #define	TCPOPT_WS_LEN		3
87 #define	TCPOPT_REAL_WS_LEN	(TCPOPT_WS_LEN+1)
88 #define	TCPOPT_TSTAMP_LEN	10
89 #define	TCPOPT_REAL_TS_LEN	(TCPOPT_TSTAMP_LEN+2)
90 #define	TCPOPT_SACK_OK_LEN	2
91 #define	TCPOPT_REAL_SACK_OK_LEN	(TCPOPT_SACK_OK_LEN+2)
92 #define	TCPOPT_REAL_SACK_LEN	4
93 #define	TCPOPT_MAX_SACK_LEN	36
94 #define	TCPOPT_HEADER_LEN	2
95 #define	TCPOPT_MD5_LEN		18
96 #define	TCPOPT_REAL_MD5_LEN	(TCPOPT_MD5_LEN + 2)
97 
98 /* Round up the value to the nearest mss. */
99 #define	MSS_ROUNDUP(value, mss)		((((value) - 1) / (mss) + 1) * (mss))
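/*
 * For example, with an mss of 1460 bytes:
 *
 *	MSS_ROUNDUP(3000, 1460) = ((2999 / 1460) + 1) * 1460 = 4380
 *	MSS_ROUNDUP(2920, 1460) = ((2919 / 1460) + 1) * 1460 = 2920
 *
 * Exact multiples of the mss are left unchanged; all other values are
 * rounded up to the next multiple.
 */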
100 
101 /*
102  * Was this tcp created via socket() interface?
103  */
104 #define	TCP_IS_SOCKET(tcp)	((tcp)->tcp_issocket)
105 
106 /*
107  * Is this tcp not attached to any upper client?
108  */
109 #define	TCP_IS_DETACHED(tcp)	((tcp)->tcp_detached)
110 
111 /* TCP timers related data structures.  Refer to tcp_timers.c. */
112 typedef struct tcp_timer_s {
113 	conn_t	*connp;
114 	void	(*tcpt_proc)(void *);
115 	callout_id_t   tcpt_tid;
116 } tcp_timer_t;
117 
118 extern kmem_cache_t *tcp_timercache;
119 
120 /*
121  * Macro for starting various timers.  Retransmission timer has its own macro,
122  * TCP_TIMER_RESTART().  tim is in millisec.
123  */
124 #define	TCP_TIMER(tcp, f, tim)		\
125 	tcp_timeout(tcp->tcp_connp, f, tim)
126 #define	TCP_TIMER_CANCEL(tcp, id)	\
127 	tcp_timeout_cancel(tcp->tcp_connp, id)
128 
129 /*
130  * To restart the TCP retransmission timer.  intvl is in millisec.
131  */
132 #define	TCP_TIMER_RESTART(tcp, intvl) {					\
133 	if ((tcp)->tcp_timer_tid != 0)					\
134 		(void) TCP_TIMER_CANCEL((tcp), (tcp)->tcp_timer_tid);	\
135 	(tcp)->tcp_timer_tid = TCP_TIMER((tcp), tcp_timer, (intvl));	\
136 }
137 
138 
139 /*
140  * Maximum TIME_WAIT timeout.  It is defined here (instead of tcp_tunables.c)
141  * so that other parameters can be derived from it.
142  */
143 #define	TCP_TIME_WAIT_MAX	(10 * MINUTES)
144 
145 /*
146  * TCP_TIME_WAIT_DELAY governs how often the time_wait_collector runs.
147  * Running it every 5 seconds seems to yield a reasonable balance between
148  * cleanup liveliness and system load.
149  */
150 #define	TCP_TIME_WAIT_DELAY	(5 * SECONDS)
151 
152 #define	TCP_TIME_WAIT_BUCKETS	((TCP_TIME_WAIT_MAX / TCP_TIME_WAIT_DELAY) + 1)
153 
154 /*
155  * For scalability, we must not run a timer for every TCP connection
156  * in TIME_WAIT state.  To see why, consider (for time wait interval of
157  *	1 minute):
158  *	10,000 connections/sec * 60 seconds/time wait = 600,000 active conn's
159  *
160  * Since TIME_WAIT expiration occurs on a per-squeue basis, handling
161  * connections from all netstacks on the system, a simple queue is inadequate
162  * for pending entries.  This is because tcp_time_wait_interval may differ
163  * between connections, causing tail insertion to violate expiration order.
164  *
165  * Instead of performing expensive sorting or unnecessary list traversal to
166  * counteract interval variance between netstacks, a timing wheel structure is
167  * used.  The duration covered by each bucket in the wheel is determined by the
168  * TCP_TIME_WAIT_DELAY (5 seconds).  The number of buckets in the wheel is
169  * determined by dividing the maximum TIME_WAIT interval (10 minutes) by
170  * TCP_TIME_WAIT_DELAY, with one added bucket for rollover protection.
171  * (Yielding 121 buckets with the current parameters)  When items are inserted
172  * into the set of buckets, they are indexed by using their expiration time
173  * divided by the bucket size, modulo the number of buckets.  This means that
174  * when each bucket is processed, all items within should have expired within
175  * the last TCP_TIME_WAIT_DELAY interval.
176  *
177  * Since bucket timer schedules are rounded to the nearest TCP_TIME_WAIT_DELAY
178  * interval to ensure all connections in the pending bucket will be expired, a
179  * per-squeue offset is used when doing TIME_WAIT scheduling.  This offset is
180  * between 0 and the TCP_TIME_WAIT_DELAY and is designed to avoid scheduling
181  * all of the tcp_time_wait_collector threads to run in lock-step.  The offset
182  * is fixed while there are any connections present in the buckets.
183  *
184  * When a tcp_t enters TIME_WAIT state, a timer is started (timeout is
185  * tcps_time_wait_interval).  When the tcp_t is detached (upper layer closes
186  * the end point), it is scheduled to be cleaned up by the squeue-driving
187  * tcp_time_wait_collector (also using tcps_time_wait_interval).  This means
188  * that the TIME_WAIT state can be extended (up to doubled) if the tcp_t
189  * doesn't become detached for a long time.
190  *
191  * The list manipulations (including tcp_time_wait_next/prev)
192  * are protected by the tcp_time_wait_lock. The content of the
193  * detached TIME_WAIT connections is protected by the normal perimeters.
194  *
195  * These connection lists are per squeue and squeues are shared across the
196  * tcp_stack_t instances.  Things in a tcp_time_wait_bucket remain associated
197  * with the tcp_stack_t and conn_netstack.  Any tcp_t connections stored in the
198  * tcp_free_list are disassociated and have NULL tcp_tcps and conn_netstack
199  * pointers.
200  */
201 typedef struct tcp_squeue_priv_s {
202 	kmutex_t	tcp_time_wait_lock;
203 	boolean_t	tcp_time_wait_collector_active;
204 	callout_id_t	tcp_time_wait_tid;
205 	uint64_t	tcp_time_wait_cnt;
206 	int64_t		tcp_time_wait_schedule;
207 	int64_t		tcp_time_wait_offset;
208 	tcp_t		*tcp_time_wait_bucket[TCP_TIME_WAIT_BUCKETS];
209 	tcp_t		*tcp_free_list;
210 	uint_t		tcp_free_list_cnt;
211 } tcp_squeue_priv_t;
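/*
 * A simplified sketch of the bucket indexing described above (the
 * authoritative logic lives in tcp_time_wait.c; "expire" and "tsp" below
 * are illustrative names).  An expiration time expressed in the same time
 * units as TCP_TIME_WAIT_DELAY maps onto the wheel as:
 *
 *	idx = (expire / TCP_TIME_WAIT_DELAY) % TCP_TIME_WAIT_BUCKETS;
 *	tcp->tcp_time_wait_next = tsp->tcp_time_wait_bucket[idx];
 *	tsp->tcp_time_wait_bucket[idx] = tcp;
 *
 * so all connections hashed into a given bucket expire within one
 * TCP_TIME_WAIT_DELAY interval of each other.
 */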
212 
213 /*
214  * Parameters for TCP Initial Send Sequence number (ISS) generation.  When
215  * tcp_strong_iss is set to 1, which is the default, the ISS is calculated
216  * by adding three components: a time component which grows by 1 every 4096
217  * nanoseconds (versus every 4 microseconds suggested by RFC 793, page 27);
218  * a per-connection component which grows by 125000 for every new connection;
219  * and an "extra" component that grows by a random amount centered
220  * approximately on 64000.  This causes the ISS generator to cycle every
221  * 4.89 hours if no TCP connections are made, and faster if connections are
222  * made.
223  *
224  * When tcp_strong_iss is set to 0, ISS is calculated by adding two
225  * components: a time component which grows by 250000 every second; and
226  * a per-connection component which grows by 125000 for every new connection.
227  *
228  * A third method for generating the ISS, used when tcp_strong_iss is set to
229  * 2, is prescribed by Steve Bellovin.  This involves adding time, the 125000 per
230  * connection, and a one-way hash (MD5) of the connection ID <sport, dport,
231  * src, dst>, a "truly" random (per RFC 1750) number, and a console-entered
232  * password.
233  */
234 #define	ISS_INCR	250000
235 #define	ISS_NSEC_SHT	12
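/*
 * A rough sketch of the tcp_strong_iss == 1 composition described above
 * ("iss_incr_extra" and "random_extra" are illustrative stand-ins for the
 * per-stack and random state kept by the real generator):
 *
 *	iss = (uint32_t)(gethrtime() >> ISS_NSEC_SHT) +	time, +1 per 4096 ns
 *	    iss_incr_extra +			grows by ISS_INCR/2 per conn
 *	    random_extra;			centered near 64000
 */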
236 
237 /* Macros for timestamp comparisons */
238 #define	TSTMP_GEQ(a, b)	((int32_t)((a)-(b)) >= 0)
239 #define	TSTMP_LT(a, b)	((int32_t)((a)-(b)) < 0)
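/*
 * The subtraction is performed in 32-bit modular arithmetic, so the
 * comparisons stay correct across timestamp wraparound.  For example, with
 * a = 0x00000005 and b = 0xfffffff0:
 *
 *	(int32_t)(a - b) = 21 >= 0, so TSTMP_GEQ(a, b) holds
 *
 * even though a is numerically smaller than b.
 */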
240 
241 /*
242  * Initialize cwnd according to RFC 3390.  def_max_init_cwnd is
243  * either tcp_slow_start_initial or tcp_slow_start_after_idle
244  * depending on the caller.  If the upper layer has not used the
245  * TCP_INIT_CWND option to change the initial cwnd, tcp_init_cwnd
246  * should be 0 and we use the formula in RFC 3390 to set tcp_cwnd.
247  * If the upper layer has set tcp_init_cwnd, just use it to calculate
248  * tcp_cwnd.
249  *
250  * "An Argument for Increasing TCP's Initial Congestion Window"
251  * ACM SIGCOMM Computer Communications Review, vol. 40 (2010), pp. 27-33
252  *  -- Nandita Dukkipati, Tiziana Refice, Yuchung Cheng,
253  *     Hsiao-keng Jerry Chu, Tom Herbert, Amit Agarwal,
254  *     Arvind Jain, Natalia Sutin
255  *
256  *   "Based on the results from our experiments, we believe the
257  *    initial congestion window should be at least ten segments
258  *    and the same be investigated for standardization by the IETF."
259  *
260  * As such, the def_max_init_cwnd argument with which this macro is
261  * invoked is either the tcps_slow_start_initial or
262  * tcps_slow_start_after_idle which both default to 0 and will respect
263  * RFC 3390 exactly.  If the tunables are explicitly set by the operator,
264  * then the initial congestion window should be set as the operator
265  * demands, within reason. We shall arbitrarily define reason as a
266  * maximum of 16 (same as used by the TCP_INIT_CWND setsockopt).
267  */
268 
269 /* Maximum TCP initial cwin (start/restart). */
270 #define	TCP_MAX_INIT_CWND	16
271 
272 #define	TCP_SET_INIT_CWND(tcp, mss, def_max_init_cwnd)			\
273 {									\
274 	if ((tcp)->tcp_init_cwnd == 0) {				\
275 		if (def_max_init_cwnd == 0) {				\
276 			(tcp)->tcp_cwnd = MIN(4 * (mss),		\
277 			    MAX(2 * (mss), 4380 / (mss) * (mss)));	\
278 		} else {						\
279 			(tcp)->tcp_cwnd = MIN(TCP_MAX_INIT_CWND * (mss),\
280 			    def_max_init_cwnd * (mss));			\
281 		}							\
282 	} else {							\
283 		(tcp)->tcp_cwnd = (tcp)->tcp_init_cwnd * (mss);		\
284 	}								\
285 	tcp->tcp_cwnd_cnt = 0;						\
286 }
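/*
 * For example, with def_max_init_cwnd == 0 (the default) and an mss of
 * 1460 bytes, the RFC 3390 branch above yields:
 *
 *	tcp_cwnd = MIN(4 * 1460, MAX(2 * 1460, 4380 / 1460 * 1460))
 *		 = MIN(5840, MAX(2920, 4380)) = 4380	(3 segments)
 *
 * while an mss of 536 bytes yields MIN(2144, 4288) = 2144 (4 segments),
 * matching the min(4*MSS, max(2*MSS, 4380 bytes)) rule of RFC 3390.
 */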
287 
288 /*
289  * Set ECN capable transport (ECT) code point in IP header.
290  *
291  * Note that there are 2 ECT code points '01' and '10', which are called
292  * ECT(1) and ECT(0) respectively.  Here we follow the original ECT code
293  * point ECT(0) for TCP as described in RFC 2481.
294  */
295 #define	TCP_SET_ECT(tcp, iph) \
296 	if ((tcp)->tcp_connp->conn_ipversion == IPV4_VERSION) { \
297 		/* We need to clear the code point first. */ \
298 		((ipha_t *)(iph))->ipha_type_of_service &= 0xFC; \
299 		((ipha_t *)(iph))->ipha_type_of_service |= IPH_ECN_ECT0; \
300 	} else { \
301 		((ip6_t *)(iph))->ip6_vcf &= htonl(0xFFCFFFFF); \
302 		((ip6_t *)(iph))->ip6_vcf |= htonl(IPH_ECN_ECT0 << 20); \
303 	}
304 
305 /*
306  * TCP options struct returned from tcp_parse_options.
307  */
308 typedef struct tcp_opt_s {
309 	uint32_t	tcp_opt_mss;
310 	uint32_t	tcp_opt_wscale;
311 	uint32_t	tcp_opt_ts_val;
312 	uint32_t	tcp_opt_ts_ecr;
313 	uint8_t		tcp_opt_sig[MD5_DIGEST_LENGTH];
314 	tcp_t		*tcp;
315 } tcp_opt_t;
316 
317 /*
318  * Flags returned from tcp_parse_options.
319  */
320 #define	TCP_OPT_MSS_PRESENT	1
321 #define	TCP_OPT_WSCALE_PRESENT	2
322 #define	TCP_OPT_TSTAMP_PRESENT	4
323 #define	TCP_OPT_SACK_OK_PRESENT	8
324 #define	TCP_OPT_SACK_PRESENT	16
325 #define	TCP_OPT_SIG_PRESENT	32
326 
327 /*
328  * Write-side flow-control is implemented via the per instance STREAMS
329  * write-side Q by explicitly setting QFULL to stop the flow of mblk_t(s)
330  * and clearing QFULL and calling qbackenable() to restart the flow based
331  * on the number of TCP unsent bytes (i.e. those not on the wire waiting
332  * for a remote ACK).
333  *
334  * This is different from a standard STREAMS module, where the framework
335  * would automatically flow-control based on the defined hiwat/lowat values
336  * as mblk_t's are enqueued/dequeued on the STREAMS Q.
337  *
338  * As of FireEngine TCP write-side flow-control needs to take into account
339  * both the unsent tcp_xmit list bytes and any squeue_t enqueued bytes
340  * (i.e. from tcp_wput() -> tcp_output()).
341  *
342  * This is accomplished by adding a new tcp_t field, tcp_squeue_bytes, to
343  * count the number of bytes enqueued by tcp_wput() and the number of bytes
344  * dequeued and processed by tcp_output().
345  *
346  * So, the total number of bytes unsent is (squeue_bytes + unsent) with all
347  * flow-control uses of unsent replaced with the macro TCP_UNSENT_BYTES.
348  */
349 extern void	tcp_clrqfull(tcp_t *);
350 extern void	tcp_setqfull(tcp_t *);
351 
352 #define	TCP_UNSENT_BYTES(tcp) \
353 	((tcp)->tcp_squeue_bytes + (tcp)->tcp_unsent)
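/*
 * A simplified sketch of how the macro drives flow-control ("sndbuf" and
 * "sndlowat" stand in for the connection's configured send buffer and low
 * water mark; the real checks live in the output path):
 *
 *	if (TCP_UNSENT_BYTES(tcp) >= sndbuf)
 *		tcp_setqfull(tcp);
 *	else if (TCP_UNSENT_BYTES(tcp) <= sndlowat)
 *		tcp_clrqfull(tcp);
 */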
354 
355 /*
356  * Linked list struct to store listener connection limit configuration per
357  * IP stack.  The list is stored at tcps_listener_conf in tcp_stack_t.
358  *
359  * tl_port: the listener port of this limit configuration
360  * tl_ratio: the maximum amount of memory consumed by all concurrent TCP
361  *           connections created by a listener does not exceed 1/tl_ratio
362  *           of the total system memory.  Note that this is only an
363  *           approximation.
364  * tl_link: linked list struct
365  */
366 typedef struct tcp_listener_s {
367 	in_port_t	tl_port;
368 	uint32_t	tl_ratio;
369 	list_node_t	tl_link;
370 } tcp_listener_t;
371 
372 /*
373  * If there is a limit set on the number of connections allowed per each
374  * listener, the following struct is used to store that counter.  It keeps
375  * the number of TCP connections created by a listener.  Note that this needs
376  * to be separated from the listener since the listener can go away before
377  * all the connections are gone.
378  *
379  * When the struct is allocated, tlc_cnt is set to 1.  When a new connection
380  * is created by the listener, tlc_cnt is incremented by 1.  When a connection
381  * created by the listener goes away, tlc_cnt is decremented by 1.  When the
382  * listener itself goes away, tlc_cnt is decremented by 1.  The last
383  * connection (or the listener) which decrements tlc_cnt to zero frees the
384  * struct.
385  *
386  * tlc_max is the maximum number of concurrent TCP connections created from a
387  * listener.  It is calculated when the tcp_listen_cnt_t is allocated.
388  *
389  * tlc_report_time stores the time when cmn_err() is called to report that the
390  * max has been exceeded.  The report is issued at most once every
391  * TCP_TLC_REPORT_INTERVAL for a listener.
392  *
393  * tlc_drop stores the number of connection attempts dropped because the
394  * limit has been reached.
395  */
396 typedef struct tcp_listen_cnt_s {
397 	uint32_t	tlc_max;
398 	uint32_t	tlc_cnt;
399 	int64_t		tlc_report_time;
400 	uint32_t	tlc_drop;
401 } tcp_listen_cnt_t;
402 
403 #define	TCP_TLC_REPORT_INTERVAL	(30 * MINUTES)
404 
405 #define	TCP_DECR_LISTEN_CNT(tcp)					\
406 {									\
407 	ASSERT((tcp)->tcp_listen_cnt->tlc_cnt > 0);			\
408 	if (atomic_dec_32_nv(&(tcp)->tcp_listen_cnt->tlc_cnt) == 0) \
409 		kmem_free((tcp)->tcp_listen_cnt, sizeof (tcp_listen_cnt_t)); \
410 	(tcp)->tcp_listen_cnt = NULL;					\
411 }
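/*
 * A sketch of the corresponding admission check performed when a listener
 * with a configured limit accepts a new connection ("listener" and "eager"
 * are illustrative names; the real logic lives in the connection setup path):
 *
 *	tcp_listen_cnt_t *tlc = listener->tcp_listen_cnt;
 *
 *	if (tlc != NULL) {
 *		if (atomic_inc_32_nv(&tlc->tlc_cnt) > tlc->tlc_max) {
 *			atomic_dec_32(&tlc->tlc_cnt);
 *			atomic_inc_32(&tlc->tlc_drop);
 *			return;				drop the SYN
 *		}
 *		eager->tcp_listen_cnt = tlc;
 *	}
 */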
412 
413 /* Increment and decrement the number of connections in tcp_stack_t. */
414 #define	TCPS_CONN_INC(tcps)						\
415 	atomic_inc_64(							\
416 	    (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt)
417 
418 #define	TCPS_CONN_DEC(tcps)						\
419 	atomic_dec_64(							\
420 	    (uint64_t *)&(tcps)->tcps_sc[CPU->cpu_seqid]->tcp_sc_conn_cnt)
421 
422 /*
423  * When the system is under memory pressure and the stack variable
424  * tcps_reclaim is true, we shorten the connection abort timeout interval to
425  * tcp_early_abort seconds.  Defined in tcp.c.
426  */
427 extern uint32_t tcp_early_abort;
428 
429 /*
430  * To reach an eager in Q0 that can be dropped when a new incoming SYN
431  * request arrives and Q0 is full, a new doubly linked list is
432  * introduced.  This list allows an eager to be selected from Q0 in O(1) time.
433  * This is needed to avoid spending too much time walking through the
434  * long list of eagers in Q0 when tcp_drop_q0() is called. Each member of
435  * this new list has to be a member of Q0.
436  * This list is headed by the listener's tcp_t.  When the list is empty,
437  * both pointers - tcp_eager_next_drop_q0 and tcp_eager_prev_drop_q0 -
438  * of the listener's tcp_t point to the listener's tcp_t itself.
439  *
440  * Given an eager in Q0 and a listener, MAKE_DROPPABLE() puts the eager
441  * in the list. MAKE_UNDROPPABLE() takes the eager out of the list.
442  * These macros do not affect the eager's membership in Q0.
443  */
444 #define	MAKE_DROPPABLE(listener, eager)					\
445 	if ((eager)->tcp_eager_next_drop_q0 == NULL) {			\
446 		(listener)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0\
447 		    = (eager);						\
448 		(eager)->tcp_eager_prev_drop_q0 = (listener);		\
449 		(eager)->tcp_eager_next_drop_q0 =			\
450 		    (listener)->tcp_eager_next_drop_q0;			\
451 		(listener)->tcp_eager_next_drop_q0 = (eager);		\
452 	}
453 
454 #define	MAKE_UNDROPPABLE(eager)						\
455 	if ((eager)->tcp_eager_next_drop_q0 != NULL) {			\
456 		(eager)->tcp_eager_next_drop_q0->tcp_eager_prev_drop_q0	\
457 		    = (eager)->tcp_eager_prev_drop_q0;			\
458 		(eager)->tcp_eager_prev_drop_q0->tcp_eager_next_drop_q0	\
459 		    = (eager)->tcp_eager_next_drop_q0;			\
460 		(eager)->tcp_eager_prev_drop_q0 = NULL;			\
461 		(eager)->tcp_eager_next_drop_q0 = NULL;			\
462 	}
463 
464 /*
465  * The format argument to pass to tcp_display().
466  * DISP_PORT_ONLY means that the returned string has only port info.
467  * DISP_ADDR_AND_PORT means that the returned string also contains the
468  * remote and local IP address.
469  */
470 #define	DISP_PORT_ONLY		1
471 #define	DISP_ADDR_AND_PORT	2
472 
473 #define	IP_ADDR_CACHE_SIZE	2048
474 #define	IP_ADDR_CACHE_HASH(faddr)					\
475 	(ntohl(faddr) & (IP_ADDR_CACHE_SIZE -1))
476 
477 /*
478  * TCP reassembly macros.  We hide starting and ending sequence numbers in
479  * b_next and b_prev of messages on the reassembly queue.  The messages are
480  * chained using b_cont.  These macros are used in tcp_reass() so we don't
481  * have to see the ugly casts and assignments.
482  */
483 #define	TCP_REASS_SEQ(mp)		((uint32_t)(uintptr_t)((mp)->b_next))
484 #define	TCP_REASS_SET_SEQ(mp, u)	((mp)->b_next = \
485 					(mblk_t *)(uintptr_t)(u))
486 #define	TCP_REASS_END(mp)		((uint32_t)(uintptr_t)((mp)->b_prev))
487 #define	TCP_REASS_SET_END(mp, u)	((mp)->b_prev = \
488 					(mblk_t *)(uintptr_t)(u))
489 
490 #define	tcps_time_wait_interval		tcps_propinfo_tbl[0].prop_cur_uval
491 #define	tcps_conn_req_max_q		tcps_propinfo_tbl[1].prop_cur_uval
492 #define	tcps_conn_req_max_q0		tcps_propinfo_tbl[2].prop_cur_uval
493 #define	tcps_conn_req_min		tcps_propinfo_tbl[3].prop_cur_uval
494 #define	tcps_conn_grace_period		tcps_propinfo_tbl[4].prop_cur_uval
495 #define	tcps_cwnd_max_			tcps_propinfo_tbl[5].prop_cur_uval
496 #define	tcps_dbg			tcps_propinfo_tbl[6].prop_cur_uval
497 #define	tcps_smallest_nonpriv_port	tcps_propinfo_tbl[7].prop_cur_uval
498 #define	tcps_ip_abort_cinterval		tcps_propinfo_tbl[8].prop_cur_uval
499 #define	tcps_ip_abort_linterval		tcps_propinfo_tbl[9].prop_cur_uval
500 #define	tcps_ip_abort_interval		tcps_propinfo_tbl[10].prop_cur_uval
501 #define	tcps_ip_notify_cinterval	tcps_propinfo_tbl[11].prop_cur_uval
502 #define	tcps_ip_notify_interval		tcps_propinfo_tbl[12].prop_cur_uval
503 #define	tcps_ipv4_ttl			tcps_propinfo_tbl[13].prop_cur_uval
504 #define	tcps_keepalive_interval_high	tcps_propinfo_tbl[14].prop_max_uval
505 #define	tcps_keepalive_interval		tcps_propinfo_tbl[14].prop_cur_uval
506 #define	tcps_keepalive_interval_low	tcps_propinfo_tbl[14].prop_min_uval
507 #define	tcps_maxpsz_multiplier		tcps_propinfo_tbl[15].prop_cur_uval
508 #define	tcps_mss_def_ipv4		tcps_propinfo_tbl[16].prop_cur_uval
509 #define	tcps_mss_max_ipv4		tcps_propinfo_tbl[17].prop_cur_uval
510 #define	tcps_mss_min			tcps_propinfo_tbl[18].prop_cur_uval
511 #define	tcps_naglim_def			tcps_propinfo_tbl[19].prop_cur_uval
512 #define	tcps_rexmit_interval_initial_high	\
513 					tcps_propinfo_tbl[20].prop_max_uval
514 #define	tcps_rexmit_interval_initial	tcps_propinfo_tbl[20].prop_cur_uval
515 #define	tcps_rexmit_interval_initial_low	\
516 					tcps_propinfo_tbl[20].prop_min_uval
517 #define	tcps_rexmit_interval_max_high	tcps_propinfo_tbl[21].prop_max_uval
518 #define	tcps_rexmit_interval_max	tcps_propinfo_tbl[21].prop_cur_uval
519 #define	tcps_rexmit_interval_max_low	tcps_propinfo_tbl[21].prop_min_uval
520 #define	tcps_rexmit_interval_min_high	tcps_propinfo_tbl[22].prop_max_uval
521 #define	tcps_rexmit_interval_min	tcps_propinfo_tbl[22].prop_cur_uval
522 #define	tcps_rexmit_interval_min_low	tcps_propinfo_tbl[22].prop_min_uval
523 #define	tcps_deferred_ack_interval	tcps_propinfo_tbl[23].prop_cur_uval
524 #define	tcps_snd_lowat_fraction		tcps_propinfo_tbl[24].prop_cur_uval
525 #define	tcps_dupack_fast_retransmit	tcps_propinfo_tbl[25].prop_cur_uval
526 #define	tcps_ignore_path_mtu		tcps_propinfo_tbl[26].prop_cur_bval
527 #define	tcps_smallest_anon_port		tcps_propinfo_tbl[27].prop_cur_uval
528 #define	tcps_largest_anon_port		tcps_propinfo_tbl[28].prop_cur_uval
529 #define	tcps_xmit_hiwat			tcps_propinfo_tbl[29].prop_cur_uval
530 #define	tcps_xmit_lowat			tcps_propinfo_tbl[30].prop_cur_uval
531 #define	tcps_recv_hiwat			tcps_propinfo_tbl[31].prop_cur_uval
532 #define	tcps_recv_hiwat_minmss		tcps_propinfo_tbl[32].prop_cur_uval
533 #define	tcps_fin_wait_2_flush_interval_high	\
534 					tcps_propinfo_tbl[33].prop_max_uval
535 #define	tcps_fin_wait_2_flush_interval	tcps_propinfo_tbl[33].prop_cur_uval
536 #define	tcps_fin_wait_2_flush_interval_low	\
537 					tcps_propinfo_tbl[33].prop_min_uval
538 #define	tcps_max_buf			tcps_propinfo_tbl[34].prop_cur_uval
539 #define	tcps_strong_iss			tcps_propinfo_tbl[35].prop_cur_uval
540 #define	tcps_rtt_updates		tcps_propinfo_tbl[36].prop_cur_uval
541 #define	tcps_wscale_always		tcps_propinfo_tbl[37].prop_cur_bval
542 #define	tcps_tstamp_always		tcps_propinfo_tbl[38].prop_cur_bval
543 #define	tcps_tstamp_if_wscale		tcps_propinfo_tbl[39].prop_cur_bval
544 #define	tcps_rexmit_interval_extra	tcps_propinfo_tbl[40].prop_cur_uval
545 #define	tcps_deferred_acks_max		tcps_propinfo_tbl[41].prop_cur_uval
546 #define	tcps_slow_start_after_idle	tcps_propinfo_tbl[42].prop_cur_uval
547 #define	tcps_slow_start_initial		tcps_propinfo_tbl[43].prop_cur_uval
548 #define	tcps_sack_permitted		tcps_propinfo_tbl[44].prop_cur_uval
549 #define	tcps_ipv6_hoplimit		tcps_propinfo_tbl[45].prop_cur_uval
550 #define	tcps_mss_def_ipv6		tcps_propinfo_tbl[46].prop_cur_uval
551 #define	tcps_mss_max_ipv6		tcps_propinfo_tbl[47].prop_cur_uval
552 #define	tcps_rev_src_routes		tcps_propinfo_tbl[48].prop_cur_bval
553 #define	tcps_local_dack_interval	tcps_propinfo_tbl[49].prop_cur_uval
554 #define	tcps_local_dacks_max		tcps_propinfo_tbl[50].prop_cur_uval
555 #define	tcps_ecn_permitted		tcps_propinfo_tbl[51].prop_cur_uval
556 #define	tcps_rst_sent_rate_enabled	tcps_propinfo_tbl[52].prop_cur_bval
557 #define	tcps_rst_sent_rate		tcps_propinfo_tbl[53].prop_cur_uval
558 #define	tcps_push_timer_interval	tcps_propinfo_tbl[54].prop_cur_uval
559 #define	tcps_use_smss_as_mss_opt	tcps_propinfo_tbl[55].prop_cur_bval
560 #define	tcps_keepalive_abort_interval_high \
561 					tcps_propinfo_tbl[56].prop_max_uval
562 #define	tcps_keepalive_abort_interval \
563 					tcps_propinfo_tbl[56].prop_cur_uval
564 #define	tcps_keepalive_abort_interval_low \
565 					tcps_propinfo_tbl[56].prop_min_uval
566 #define	tcps_wroff_xtra			tcps_propinfo_tbl[57].prop_cur_uval
567 #define	tcps_dev_flow_ctl		tcps_propinfo_tbl[58].prop_cur_bval
568 #define	tcps_reass_timeout		tcps_propinfo_tbl[59].prop_cur_uval
569 #define	tcps_iss_incr			tcps_propinfo_tbl[65].prop_cur_uval
570 #define	tcps_abc			tcps_propinfo_tbl[67].prop_cur_bval
571 #define	tcps_abc_l_var			tcps_propinfo_tbl[68].prop_cur_uval
572 
573 
574 /*
575  * As defined in RFC 6298, the RTO is the smoothed RTT estimate (SRTT) plus a
576  * multiple of the RTT deviation estimate (K * RTTVAR):
577  *
578  * RTO = SRTT + max(G, K * RTTVAR)
579  *
580  * K is defined in the RFC as 4, and G is the clock granularity. We constrain
581  * the minimum mean deviation to TCP_SD_MIN when processing new RTTs, so this
582  * becomes:
583  *
584  * RTO = SRTT + 4 * RTTVAR
585  *
586  * In practice, however, we make several additions to it.  As we use a finer
587  * grained clock than BSD and update the RTO for every ACK, we add another
588  * 1/4 of the SRTT to the RTO to accommodate burstiness of 1/4 of the window size:
589  *
590  * RTO = SRTT + (SRTT / 4) + 4 * RTTVAR
591  *
592  * Since tcp_rtt_sa is 8 times the SRTT, and tcp_rtt_sd is 4 times the RTTVAR,
593  * this becomes:
594  *
595  * RTO = (tcp_rtt_sa / 8) + ((tcp_rtt_sa / 8) / 4) + tcp_rtt_sd
596  * RTO = (tcp_rtt_sa / 2^3) + (tcp_rtt_sa / 2^5) + tcp_rtt_sd
597  * RTO = (tcp_rtt_sa >> 3) + (tcp_rtt_sa >> 5) + tcp_rtt_sd
598  *
599  * The "tcp_rexmit_interval_extra" and "tcp_conn_grace_period" tunables are
600  * used to help account for extreme environments where the algorithm fails to
601  * work; by default they should be 0. (The latter tunable is only used for
602  * calculating the initial RTO, and so is optionally passed in as "extra".) We
603  * add them here:
604  *
605  * RTO = (tcp_rtt_sa >> 3) + (tcp_rtt_sa >> 5) + tcp_rtt_sd +
606  *     tcps_rexmit_interval_extra + tcps_conn_grace_period
607  *
608  * We then pin the RTO within our configured boundaries (sections 2.4 and 2.5
609  * of RFC 6298).
610  */
611 static __GNU_INLINE clock_t
612 tcp_calculate_rto(tcp_t *tcp, tcp_stack_t *tcps, uint32_t extra)
613 {
614 	clock_t rto;
615 
616 	rto = NSEC2MSEC((tcp->tcp_rtt_sa >> 3) + (tcp->tcp_rtt_sa >> 5) +
617 	    tcp->tcp_rtt_sd) + tcps->tcps_rexmit_interval_extra + extra;
618 
619 	if (rto < tcp->tcp_rto_min) {
620 		rto = tcp->tcp_rto_min;
621 	} else if (rto > tcp->tcp_rto_max) {
622 		rto = tcp->tcp_rto_max;
623 	}
624 
625 	return (rto);
626 }
627 
628 extern struct qinit tcp_rinitv4, tcp_rinitv6;
629 extern boolean_t do_tcp_fusion;
630 
631 /*
632  * Object to represent database of options to search passed to
633  * {sock,tpi}optcom_req() interface routine to take care of option
634  * management and associated methods.
635  */
636 extern optdb_obj_t	tcp_opt_obj;
637 extern uint_t		tcp_max_optsize;
638 
639 extern int tcp_squeue_flag;
640 
641 extern uint_t tcp_free_list_max_cnt;
642 
643 /*
644  * Functions in tcp.c.
645  */
646 extern void	tcp_acceptor_hash_insert(t_uscalar_t, tcp_t *);
647 extern tcp_t	*tcp_acceptor_hash_lookup(t_uscalar_t, tcp_stack_t *);
648 extern void	tcp_acceptor_hash_remove(tcp_t *);
649 extern mblk_t	*tcp_ack_mp(tcp_t *);
650 extern int	tcp_build_hdrs(tcp_t *);
651 extern void	tcp_cleanup(tcp_t *);
652 extern int	tcp_clean_death(tcp_t *, int);
653 extern void	tcp_clean_death_wrapper(void *, mblk_t *, void *,
654 		    ip_recv_attr_t *);
655 extern void	tcp_close_common(conn_t *, int);
656 extern void	tcp_close_detached(tcp_t *);
657 extern void	tcp_close_mpp(mblk_t **);
658 extern void	tcp_closei_local(tcp_t *);
659 extern sock_lower_handle_t tcp_create(int, int, int, sock_downcalls_t **,
660 		    uint_t *, int *, int, cred_t *);
661 extern conn_t	*tcp_create_common(cred_t *, boolean_t, boolean_t, int *);
662 extern void	tcp_disconnect(tcp_t *, mblk_t *);
663 extern char	*tcp_display(tcp_t *, char *, char);
664 extern int	tcp_do_bind(conn_t *, struct sockaddr *, socklen_t, cred_t *,
665 		    boolean_t);
666 extern int	tcp_do_connect(conn_t *, const struct sockaddr *, socklen_t,
667 		    cred_t *, pid_t);
668 extern int	tcp_do_listen(conn_t *, struct sockaddr *, socklen_t, int,
669 		    cred_t *, boolean_t);
670 extern int	tcp_do_unbind(conn_t *);
671 extern boolean_t	tcp_eager_blowoff(tcp_t *, t_scalar_t);
672 extern void	tcp_eager_cleanup(tcp_t *, boolean_t);
673 extern void	tcp_eager_kill(void *, mblk_t *, void *, ip_recv_attr_t *);
674 extern void	tcp_eager_unlink(tcp_t *);
675 extern void	tcp_init_values(tcp_t *, tcp_t *);
676 extern void	tcp_ipsec_cleanup(tcp_t *);
677 extern int	tcp_maxpsz_set(tcp_t *, boolean_t);
678 extern void	tcp_mss_set(tcp_t *, uint32_t);
679 extern void	tcp_reinput(conn_t *, mblk_t *, ip_recv_attr_t *, ip_stack_t *);
680 extern int	tcp_rsrv(queue_t *);
681 extern uint_t	tcp_rwnd_reopen(tcp_t *);
682 extern int	tcp_rwnd_set(tcp_t *, uint32_t);
683 extern int	tcp_set_destination(tcp_t *);
684 extern void	tcp_set_ws_value(tcp_t *);
685 extern void	tcp_stop_lingering(tcp_t *);
686 extern void	tcp_update_pmtu(tcp_t *, boolean_t);
687 extern mblk_t	*tcp_zcopy_backoff(tcp_t *, mblk_t *, boolean_t);
688 extern boolean_t	tcp_zcopy_check(tcp_t *);
689 extern void	tcp_zcopy_notify(tcp_t *);
690 extern void	tcp_get_proto_props(tcp_t *, struct sock_proto_props *);
691 
692 /*
693  * Bind related functions in tcp_bind.c
694  */
695 extern int	tcp_bind_check(conn_t *, struct sockaddr *, socklen_t,
696 		    cred_t *, boolean_t);
697 extern void	tcp_bind_hash_insert(tf_t *, tcp_t *, int);
698 extern void	tcp_bind_hash_remove(tcp_t *);
699 extern in_port_t	tcp_bindi(tcp_t *, in_port_t, const in6_addr_t *,
700 			    int, boolean_t, boolean_t, boolean_t);
701 extern in_port_t	tcp_update_next_port(in_port_t, const tcp_t *,
702 			    boolean_t);
703 
704 /*
705  * Fusion related functions in tcp_fusion.c.
706  */
707 extern void	tcp_fuse(tcp_t *, uchar_t *, tcpha_t *);
708 extern void	tcp_unfuse(tcp_t *);
709 extern boolean_t tcp_fuse_output(tcp_t *, mblk_t *, uint32_t);
710 extern void	tcp_fuse_output_urg(tcp_t *, mblk_t *);
711 extern boolean_t tcp_fuse_rcv_drain(queue_t *, tcp_t *, mblk_t **);
712 extern size_t	tcp_fuse_set_rcv_hiwat(tcp_t *, size_t);
713 extern int	tcp_fuse_maxpsz(tcp_t *);
714 extern void	tcp_fuse_backenable(tcp_t *);
715 extern void	tcp_iss_key_init(uint8_t *, int, tcp_stack_t *);
716 
717 /*
718  * Output related functions in tcp_output.c.
719  */
720 extern void	tcp_close_output(void *, mblk_t *, void *, ip_recv_attr_t *);
721 extern void	tcp_output(void *, mblk_t *, void *, ip_recv_attr_t *);
722 extern void	tcp_output_urgent(void *, mblk_t *, void *, ip_recv_attr_t *);
723 extern void	tcp_rexmit_after_error(tcp_t *);
724 extern void	tcp_sack_rexmit(tcp_t *, uint_t *);
725 extern void	tcp_send_data(tcp_t *, mblk_t *);
726 extern void	tcp_send_synack(void *, mblk_t *, void *, ip_recv_attr_t *);
727 extern void	tcp_shutdown_output(void *, mblk_t *, void *, ip_recv_attr_t *);
728 extern void	tcp_ss_rexmit(tcp_t *);
729 extern void	tcp_update_xmit_tail(tcp_t *, uint32_t);
730 extern int	tcp_wput(queue_t *, mblk_t *);
731 extern void	tcp_wput_data(tcp_t *, mblk_t *, boolean_t);
732 extern int	tcp_wput_sock(queue_t *, mblk_t *);
733 extern int	tcp_wput_fallback(queue_t *, mblk_t *);
734 extern void	tcp_xmit_ctl(char *, tcp_t *, uint32_t, uint32_t, int);
735 extern void	tcp_xmit_listeners_reset(mblk_t *, ip_recv_attr_t *,
736 		    ip_stack_t *i, conn_t *);
737 extern mblk_t	*tcp_xmit_mp(tcp_t *, mblk_t *, int32_t, int32_t *,
738 		    mblk_t **, uint32_t, boolean_t, uint32_t *, boolean_t);
739 
740 /*
741  * Input related functions in tcp_input.c.
742  */
743 extern void	cc_cong_signal(tcp_t *, uint32_t, uint32_t);
744 extern void	tcp_icmp_input(void *, mblk_t *, void *, ip_recv_attr_t *);
745 extern void	tcp_input_data(void *, mblk_t *, void *, ip_recv_attr_t *);
746 extern void	tcp_input_listener_unbound(void *, mblk_t *, void *,
747 		    ip_recv_attr_t *);
748 extern boolean_t	tcp_paws_check(tcp_t *, const tcp_opt_t *);
749 extern int	tcp_parse_options(tcpha_t *, tcp_opt_t *);
750 extern uint_t	tcp_rcv_drain(tcp_t *);
751 extern void	tcp_rcv_enqueue(tcp_t *, mblk_t *, uint_t, cred_t *);
752 extern boolean_t	tcp_verifyicmp(conn_t *, void *, icmph_t *, icmp6_t *,
753 			    ip_recv_attr_t *);
754 
755 /*
756  * Kernel socket related functions in tcp_socket.c.
757  */
758 extern int	tcp_fallback(sock_lower_handle_t, queue_t *, boolean_t,
759 		    so_proto_quiesced_cb_t, sock_quiesce_arg_t *);
760 extern boolean_t tcp_newconn_notify(tcp_t *, ip_recv_attr_t *);
761 
762 /*
763  * Timer related functions in tcp_timers.c.
764  */
765 extern void	tcp_ack_timer(void *);
766 extern void	tcp_close_linger_timeout(void *);
767 extern void	tcp_keepalive_timer(void *);
768 extern void	tcp_push_timer(void *);
769 extern void	tcp_reass_timer(void *);
770 extern mblk_t	*tcp_timermp_alloc(int);
771 extern void	tcp_timermp_free(tcp_t *);
772 extern timeout_id_t tcp_timeout(conn_t *, void (*)(void *), hrtime_t);
773 extern clock_t	tcp_timeout_cancel(conn_t *, timeout_id_t);
774 extern void	tcp_timer(void *arg);
775 extern void	tcp_timers_stop(tcp_t *);
776 
777 /*
778  * TCP TPI related functions in tcp_tpi.c.
779  */
780 extern void	tcp_addr_req(tcp_t *, mblk_t *);
781 extern void	tcp_capability_req(tcp_t *, mblk_t *);
782 extern boolean_t	tcp_conn_con(tcp_t *, uchar_t *, mblk_t *,
783 			    mblk_t **, ip_recv_attr_t *);
784 extern void	tcp_err_ack(tcp_t *, mblk_t *, int, int);
785 extern void	tcp_err_ack_prim(tcp_t *, mblk_t *, int, int, int);
786 extern void	tcp_info_req(tcp_t *, mblk_t *);
787 extern void	tcp_send_conn_ind(void *, mblk_t *, void *);
788 extern void	tcp_send_pending(void *, mblk_t *, void *, ip_recv_attr_t *);
789 extern int	tcp_tpi_accept(queue_t *, mblk_t *);
790 extern void	tcp_tpi_bind(tcp_t *, mblk_t *);
791 extern int	tcp_tpi_close(queue_t *, int, cred_t *);
792 extern int	tcp_tpi_close_accept(queue_t *, int, cred_t *);
793 extern void	tcp_tpi_connect(tcp_t *, mblk_t *);
794 extern int	tcp_tpi_opt_get(queue_t *, t_scalar_t, t_scalar_t, uchar_t *);
795 extern int	tcp_tpi_opt_set(queue_t *, uint_t, int, int, uint_t, uchar_t *,
796 		    uint_t *, uchar_t *, void *, cred_t *);
797 extern void	tcp_tpi_unbind(tcp_t *, mblk_t *);
798 extern void	tcp_tli_accept(tcp_t *, mblk_t *);
799 extern void	tcp_use_pure_tpi(tcp_t *);
800 extern void	tcp_do_capability_ack(tcp_t *, struct T_capability_ack *,
801 		    t_uscalar_t);
802 
803 /*
804  * TCP option processing related functions in tcp_opt_data.c
805  */
806 extern int	tcp_opt_get(conn_t *, int, int, uchar_t *);
807 extern int	tcp_opt_set(conn_t *, uint_t, int, int, uint_t, uchar_t *,
808 		    uint_t *, uchar_t *, void *, cred_t *);
809 
810 /*
811  * TCP time wait processing related functions in tcp_time_wait.c.
812  */
813 extern void		tcp_time_wait_append(tcp_t *);
814 extern void		tcp_time_wait_collector(void *);
815 extern boolean_t	tcp_time_wait_remove(tcp_t *, tcp_squeue_priv_t *);
816 extern void		tcp_time_wait_processing(tcp_t *, mblk_t *, uint32_t,
817 			    uint32_t, int, tcpha_t *, ip_recv_attr_t *);
818 
819 /*
820  * Misc functions in tcp_misc.c.
821  */
822 extern uint32_t	tcp_find_listener_conf(tcp_stack_t *, in_port_t);
823 extern void	tcp_ioctl_abort_conn(queue_t *, mblk_t *);
824 extern void	tcp_listener_conf_cleanup(tcp_stack_t *);
825 extern void	tcp_stack_cpu_add(tcp_stack_t *, processorid_t);
826 
827 #endif	/* _KERNEL */
828 
829 #ifdef	__cplusplus
830 }
831 #endif
832 
833 #endif	/* _INET_TCP_IMPL_H */
834