/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/* Copyright (c) 1990 Mentat Inc. */

#ifndef	_INET_TCP_H
#define	_INET_TCP_H

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/inttypes.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <sys/socket_proto.h>
#include <sys/multidata.h>
#include <sys/md5.h>
#include <inet/common.h>
#include <inet/ip.h>
#include <inet/ip6.h>
#include <inet/mi.h>
#include <inet/mib2.h>
#include <inet/tcp_stack.h>
#include <inet/tcp_sack.h>
#include <inet/kssl/ksslapi.h>

/*
 * Private (and possibly temporary) ioctl used by configuration code
 * to lock in the "default" stream for detached closes.
 */
#define	TCP_IOC_DEFAULT_Q	(('T' << 8) + 51)

/* TCP states */
#define	TCPS_CLOSED		-6
#define	TCPS_IDLE		-5	/* idle (opened, but not bound) */
#define	TCPS_BOUND		-4	/* bound, ready to connect or accept */
#define	TCPS_LISTEN		-3	/* listening for connection */
#define	TCPS_SYN_SENT		-2	/* active, have sent syn */
#define	TCPS_SYN_RCVD		-1	/* have received syn (and sent ours) */
/* states < TCPS_ESTABLISHED are those where connections are not established */
#define	TCPS_ESTABLISHED	0	/* established */
#define	TCPS_CLOSE_WAIT		1	/* rcvd fin, waiting for close */
/* states > TCPS_CLOSE_WAIT are those where user has closed */
#define	TCPS_FIN_WAIT_1		2	/* have closed and sent fin */
#define	TCPS_CLOSING		3	/* closed, xchd FIN, await FIN ACK */
#define	TCPS_LAST_ACK		4	/* had fin and close; await FIN ACK */
/* states > TCPS_CLOSE_WAIT && < TCPS_FIN_WAIT_2 await ACK of FIN */
#define	TCPS_FIN_WAIT_2		5	/* have closed, fin is acked */
#define	TCPS_TIME_WAIT		6	/* in 2*msl quiet wait after close */

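/*
 * The state values are ordered so that simple comparisons describe a
 * connection's progress.  A minimal sketch (the tcp pointer is assumed to
 * be a valid tcp_t; the surrounding code is illustrative, not from this
 * file):
 *
 *	if (tcp->tcp_state < TCPS_ESTABLISHED)
 *		handle a connection whose handshake is not yet complete;
 *	if (tcp->tcp_state > TCPS_CLOSE_WAIT)
 *		handle a connection the user has already closed;
 */
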
/*
 * Internal flags used in conjunction with the packet header flags.
 * Used in tcp_rput_data to keep track of what needs to be done.
 */
#define	TH_LIMIT_XMIT		0x0400	/* Limited xmit is needed */
#define	TH_XMIT_NEEDED		0x0800	/* Window opened - send queued data */
#define	TH_REXMIT_NEEDED	0x1000	/* Time expired for unacked data */
#define	TH_ACK_NEEDED		0x2000	/* Send an ack now. */
#define	TH_NEED_SACK_REXMIT	0x4000	/* Use SACK info for retransmission */
#define	TH_ACK_TIMER_NEEDED	0x8000	/* Start the delayed ACK timer */
#define	TH_ORDREL_NEEDED	0x10000	/* Generate an ordrel indication */
#define	TH_MARKNEXT_NEEDED	0x20000	/* Data should have MSGMARKNEXT */
#define	TH_SEND_URP_MARK	0x40000	/* Send up tcp_urp_mark_mp */

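/*
 * These internal bits share a word with the on-the-wire TH_* flags (TH_FIN,
 * TH_SYN, TH_ACK, ... from <netinet/tcp.h>), which occupy the low-order
 * byte; the internal bits start at 0x0400 so the two sets never collide.
 * A simplified, hypothetical sketch of the accumulate-then-act pattern in
 * the input path (not a copy of the actual tcp_rput_data code):
 *
 *	uint32_t flags = (unsigned int)tcph->th_flags[0] & 0xFF;
 *	...
 *	flags |= TH_ACK_NEEDED;
 *	...
 *	if (flags & TH_ACK_NEEDED)
 *		send an ACK immediately;
 */
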
/*
 * TCP sequence numbers are 32 bit integers operated
 * on with modular arithmetic.  These macros can be
 * used to compare such integers.
 */
#define	SEQ_LT(a, b)	((int32_t)((a)-(b)) < 0)
#define	SEQ_LEQ(a, b)	((int32_t)((a)-(b)) <= 0)
#define	SEQ_GT(a, b)	((int32_t)((a)-(b)) > 0)
#define	SEQ_GEQ(a, b)	((int32_t)((a)-(b)) >= 0)

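/*
 * The signed-difference trick works across 32-bit wraparound as long as the
 * two sequence numbers are within 2^31 of each other.  A worked example:
 * with a = 0xFFFFFFF0 and b = 0x00000010, (int32_t)(a - b) is -32, so
 * SEQ_LT(a, b) is true even though a > b as an unsigned comparison.
 */
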
/* TCP Protocol header */
typedef	struct tcphdr_s {
	uint8_t		th_lport[2];	/* Source port */
	uint8_t		th_fport[2];	/* Destination port */
	uint8_t		th_seq[4];	/* Sequence number */
	uint8_t		th_ack[4];	/* Acknowledgement number */
	uint8_t		th_offset_and_rsrvd[1]; /* Offset to the packet data */
	uint8_t		th_flags[1];
	uint8_t		th_win[2];	/* Window size */
	uint8_t		th_sum[2];	/* TCP checksum */
	uint8_t		th_urp[2];	/* Urgent pointer */
} tcph_t;

#define	TCP_HDR_LENGTH(tcph) (((tcph)->th_offset_and_rsrvd[0] >>2) &(0xF << 2))
#define	TCP_MAX_COMBINED_HEADER_LENGTH	(60 + 60) /* Maxed out ip + tcp */
#define	TCP_MAX_IP_OPTIONS_LENGTH	(60 - IP_SIMPLE_HDR_LENGTH)
#define	TCP_MAX_HDR_LENGTH		60
#define	TCP_MAX_TCP_OPTIONS_LENGTH	(60 - sizeof (tcph_t))
#define	TCP_MIN_HEADER_LENGTH		20
#define	TCP_MAXWIN			65535
#define	TCP_PORT_LEN			sizeof (in_port_t)
#define	TCP_MAX_WINSHIFT		14
#define	TCP_MAX_LARGEWIN		(TCP_MAXWIN << TCP_MAX_WINSHIFT)
#define	TCP_MAX_LSO_LENGTH	(IP_MAXPACKET - TCP_MAX_COMBINED_HEADER_LENGTH)

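/*
 * TCP_MAX_LARGEWIN is the largest receive window advertisable with RFC 1323
 * window scaling: 65535 << 14 = 1,073,725,440 bytes, just under 1 GB.
 * TCP_MAX_LSO_LENGTH simply leaves room for maximally sized IP and TCP
 * headers (60 + 60 bytes) inside a 65535-byte IP datagram.
 */
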
#define	TCPIP_HDR_LENGTH(mp, n)					\
	(n) = IPH_HDR_LENGTH((mp)->b_rptr),			\
	(n) += TCP_HDR_LENGTH((tcph_t *)&(mp)->b_rptr[(n)])

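/*
 * TCP_HDR_LENGTH() recovers the header length in bytes from the data-offset
 * field: the offset is the upper four bits of th_offset_and_rsrvd[0],
 * counted in 32-bit words, so (byte >> 2) & (0xF << 2) equals
 * (byte >> 4) * 4.  For example, an offset byte of 0xA0 (10 words) yields
 * 40 bytes.  TCPIP_HDR_LENGTH() adds the IP header length from the same
 * mblk to give the combined IP + TCP header length.
 */
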
/* TCP Protocol header (used if the header is known to be 32-bit aligned) */
typedef	struct tcphdra_s {
	in_port_t	tha_lport;	/* Source port */
	in_port_t	tha_fport;	/* Destination port */
	uint32_t	tha_seq;	/* Sequence number */
	uint32_t	tha_ack;	/* Acknowledgement number */
	uint8_t tha_offset_and_reserved; /* Offset to the packet data */
	uint8_t		tha_flags;
	uint16_t	tha_win;	/* Window size */
	uint16_t	tha_sum;	/* TCP checksum */
	uint16_t	tha_urp;	/* Urgent pointer */
} tcpha_t;

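/*
 * A minimal sketch of the difference between the two header views: tcph_t is
 * safe for arbitrarily aligned buffers, while tcpha_t allows direct loads
 * when 32-bit alignment is known.  The pointers below and the use of
 * ntohs/ntohl are illustrative assumptions, not a fixed interface:
 *
 *	tcph_t *tcph = (tcph_t *)rptr;
 *	uint16_t lport = (tcph->th_lport[0] << 8) | tcph->th_lport[1];
 *
 *	tcpha_t *tcpha = (tcpha_t *)aligned_rptr;
 *	uint16_t lport2 = ntohs(tcpha->tha_lport);
 *	uint32_t seq = ntohl(tcpha->tha_seq);
 */
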
struct conn_s;

/*
 * Control structure for each open TCP stream,
 * defined only within the kernel or for a kmem user.
 * NOTE: tcp_reinit_values MUST have a line for each field in this structure!
 */
#if (defined(_KERNEL) || defined(_KMEMUSER))

typedef struct tcp_s {
	struct tcp_s	*tcp_time_wait_next;
				/* Pointer to next T/W block */
	struct tcp_s	*tcp_time_wait_prev;
				/* Pointer to previous T/W block */
	clock_t		tcp_time_wait_expire;

	struct conn_s	*tcp_connp;
	tcp_stack_t	*tcp_tcps;	/* Shortcut via conn_netstack */

	int32_t	tcp_state;
	int32_t	tcp_rcv_ws;		/* My window scale power */
	int32_t	tcp_snd_ws;		/* Sender's window scale power */
	uint32_t tcp_ts_recent;		/* Timestamp of earliest unacked */
					/*  data segment */
	clock_t	tcp_rto;		/* Round trip timeout */
	clock_t	tcp_last_rcv_lbolt;
				/* lbolt on last packet, used for PAWS */

	uint32_t tcp_snxt;		/* Senders next seq num */
	uint32_t tcp_swnd;		/* Senders window (relative to suna) */
	uint32_t tcp_mss;		/* Max segment size */
	uint32_t tcp_iss;		/* Initial send seq num */
	uint32_t tcp_rnxt;		/* Seq we expect to recv next */
	uint32_t tcp_rwnd;

	queue_t	*tcp_rq;		/* Our upstream neighbor (client) */
	queue_t	*tcp_wq;		/* Our downstream neighbor */

	/* Fields arranged in approximate access order along main paths */
	mblk_t	*tcp_xmit_head;		/* Head of rexmit list */
	mblk_t	*tcp_xmit_last;		/* last valid data seen by tcp_wput */
	mblk_t	*tcp_xmit_tail;		/* Last rexmit data sent */
	uint32_t tcp_unsent;		/* # of bytes in hand that are unsent */
	uint32_t tcp_xmit_tail_unsent;	/* # of unsent bytes in xmit_tail */

	uint32_t tcp_suna;		/* Sender unacknowledged */
	uint32_t tcp_rexmit_nxt;	/* Next rexmit seq num */
	uint32_t tcp_rexmit_max;	/* Max retran seq num */
	int32_t	tcp_snd_burst;		/* Send burst factor */
	uint32_t tcp_cwnd;		/* Congestion window */
	int32_t tcp_cwnd_cnt;		/* cwnd cnt in congestion avoidance */

	uint32_t tcp_ibsegs;		/* Inbound segments on this stream */
	uint32_t tcp_obsegs;		/* Outbound segments on this stream */

	uint32_t tcp_naglim;		/* Tunable nagle limit */
	uint32_t	tcp_valid_bits;
#define	TCP_ISS_VALID	0x1	/* Is the tcp_iss seq num active? */
#define	TCP_FSS_VALID	0x2	/* Is the tcp_fss seq num active? */
#define	TCP_URG_VALID	0x4	/* Is the tcp_urg seq num active? */
#define	TCP_OFO_FIN_VALID 0x8	/* Has TCP received an out of order FIN? */

	timeout_id_t	tcp_timer_tid;	/* Control block for timer service */
	uchar_t	tcp_timer_backoff;	/* Backoff shift count. */
	int64_t tcp_last_recv_time;	/* Last time we received a segment. */
	uint32_t tcp_init_cwnd;		/* Initial cwnd (start/restart) */

	/*
	 * The following socket options are set by sockfs outside the squeue,
	 * and we want to separate these bit fields from the other bit fields
	 * set by TCP to avoid grabbing locks. sockfs ensures that only one
	 * thread in sockfs can set a socket option at a time on a conn_t.
	 * However, TCP may read these options concurrently. The linger option
	 * needs atomicity since tcp_lingertime also needs to be in sync.
	 * However, TCP uses it only during close, and by then no socket
	 * option can come down. So we don't need any locks; simply separating
	 * the sockfs-settable bit fields from the other bit fields is
	 * sufficient.
	 */
	uint32_t
		tcp_debug : 1,		/* SO_DEBUG "socket" option. */
		tcp_dontroute : 1,	/* SO_DONTROUTE "socket" option. */
		tcp_broadcast : 1,	/* SO_BROADCAST "socket" option. */
		tcp_useloopback : 1,	/* SO_USELOOPBACK "socket" option. */

		tcp_oobinline : 1,	/* SO_OOBINLINE "socket" option. */
		tcp_dgram_errind : 1,	/* SO_DGRAM_ERRIND option */
		tcp_linger : 1,		/* SO_LINGER turned on */
		tcp_reuseaddr	: 1,	/* SO_REUSEADDR "socket" option. */

		tcp_junk_to_bit_31 : 24;

	/* Following manipulated by TCP under squeue protection */
	uint32_t
		tcp_urp_last_valid : 1,	/* Is tcp_urp_last valid? */
		tcp_hard_binding : 1,	/* If we've started a full bind */
		tcp_hard_bound : 1,	/* If we've done a full bind with IP */
		tcp_fin_acked : 1,	/* Has our FIN been acked? */

		tcp_fin_rcvd : 1,	/* Have we seen a FIN? */
		tcp_fin_sent : 1,	/* Have we sent our FIN yet? */
		tcp_ordrel_done : 1,	/* Have we sent the ord_rel upstream? */
		tcp_detached : 1,	/* If we're detached from a stream */

		tcp_bind_pending : 1,	/* Client is waiting for bind ack */
		tcp_unbind_pending : 1, /* Client sent T_UNBIND_REQ */
		tcp_ka_enabled: 1,	/* Connection KeepAlive Timer needed */
		tcp_zero_win_probe: 1,	/* Zero win probing is in progress */

		tcp_loopback: 1,	/* src and dst are the same machine */
		tcp_localnet: 1,	/* src and dst are on the same subnet */
		tcp_syn_defense: 1,	/* For defense against SYN attack */
#define	tcp_dontdrop	tcp_syn_defense
		tcp_set_timer : 1,

		tcp_active_open: 1,	/* This is an active open */
		tcp_rexmit : 1,		/* TCP is retransmitting */
		tcp_snd_sack_ok : 1,	/* Can use SACK for this connection */
		tcp_empty_flag : 1,	/* Empty flag for future use */

		tcp_recvdstaddr : 1,	/* return T_EXTCONN_IND with dst addr */
		tcp_hwcksum : 1,	/* The NIC is capable of hwcksum */
		tcp_ip_forward_progress : 1,
		tcp_anon_priv_bind : 1,

		tcp_ecn_ok : 1,		/* Can use ECN for this connection */
		tcp_ecn_echo_on : 1,	/* Need to do ECN echo */
		tcp_ecn_cwr_sent : 1,	/* ECN_CWR has been sent */
		tcp_cwr : 1,		/* Cwnd has reduced recently */

		tcp_pad_to_bit31 : 4;
	/* Following manipulated by TCP under squeue protection */
	uint32_t
		tcp_mdt : 1,		/* Lower layer is capable of MDT */
		tcp_snd_ts_ok  : 1,
		tcp_snd_ws_ok  : 1,
		tcp_exclbind	: 1,	/* ``exclusive'' binding */

		tcp_hdr_grown	: 1,
		tcp_in_free_list : 1,
		tcp_snd_zcopy_on : 1,	/* xmit zero-copy enabled */

		tcp_snd_zcopy_aware : 1, /* client is zero-copy aware */
		tcp_xmit_zc_clean : 1,	/* the xmit list is free of zc-mblk */
		tcp_wait_for_eagers : 1, /* Wait for eagers to disappear */
		tcp_accept_error : 1,	/* Error during TLI accept */

		tcp_send_discon_ind : 1, /* TLI accept err, send discon ind */
		tcp_cork : 1,		/* tcp_cork option */
		tcp_tconnind_started : 1, /* conn_ind message is being sent */
		tcp_lso : 1,		/* Lower layer is capable of LSO */
		tcp_refuse : 1,		/* Connection needs refusing */
		tcp_pad_to_bit_31 : 16;

	uint32_t	tcp_if_mtu;	/* Outgoing interface MTU. */

	mblk_t	*tcp_reass_head;	/* Out of order reassembly list head */
	mblk_t	*tcp_reass_tail;	/* Out of order reassembly list tail */

	tcp_sack_info_t	*tcp_sack_info;

#define	tcp_pipe	tcp_sack_info->tcp_pipe
#define	tcp_fack	tcp_sack_info->tcp_fack
#define	tcp_sack_snxt	tcp_sack_info->tcp_sack_snxt
#define	tcp_max_sack_blk	tcp_sack_info->tcp_max_sack_blk
#define	tcp_num_sack_blk	tcp_sack_info->tcp_num_sack_blk
#define	tcp_sack_list		tcp_sack_info->tcp_sack_list
#define	tcp_num_notsack_blk	tcp_sack_info->tcp_num_notsack_blk
#define	tcp_cnt_notsack_list	tcp_sack_info->tcp_cnt_notsack_list
#define	tcp_notsack_list	tcp_sack_info->tcp_notsack_list

	mblk_t	*tcp_rcv_list;		/* Queued until push, urgent data, */
	mblk_t	*tcp_rcv_last_head;	/* optdata, or the count exceeds */
	mblk_t	*tcp_rcv_last_tail;	/* tcp_rcv_push_wait. */
	uint32_t tcp_rcv_cnt;		/* tcp_rcv_list is b_next chain. */

	uint32_t tcp_cwnd_ssthresh;	/* Congestion window */
	uint32_t tcp_cwnd_max;
	uint32_t tcp_csuna;		/* Clear (no rexmits in window) suna */

	clock_t	tcp_rtt_sa;		/* Round trip smoothed average */
	clock_t	tcp_rtt_sd;		/* Round trip smoothed deviation */
	clock_t	tcp_rtt_update;		/* Round trip update(s) */
	clock_t tcp_ms_we_have_waited;	/* Total retrans time */

	uint32_t tcp_swl1;		/* These help us avoid using stale */
	uint32_t tcp_swl2;		/*  packets to update state */

	uint32_t tcp_rack;		/* Seq # we have acked */
	uint32_t tcp_rack_cnt;		/* # of segs we have deferred ack */
	uint32_t tcp_rack_cur_max;	/* # of segs we may defer ack for now */
	uint32_t tcp_rack_abs_max;	/* # of segs we may defer ack ever */
	timeout_id_t	tcp_ack_tid;	/* Delayed ACK timer ID */
	timeout_id_t	tcp_push_tid;	/* Push timer ID */

	uint32_t tcp_max_swnd;		/* Maximum swnd we have seen */

	struct tcp_s *tcp_listener;	/* Our listener */

	size_t	tcp_xmit_hiwater;	/* Send buffer high water mark. */
	size_t	tcp_xmit_lowater;	/* Send buffer low water mark. */
	size_t	tcp_recv_hiwater;	/* Recv high water mark */
	size_t	tcp_recv_lowater;	/* Recv low water mark */

	uint32_t tcp_irs;		/* Initial recv seq num */
	uint32_t tcp_fss;		/* Final/fin send seq num */
	uint32_t tcp_urg;		/* Urgent data seq num */

	clock_t	tcp_first_timer_threshold;  /* When to prod IP */
	clock_t	tcp_second_timer_threshold; /* When to give up completely */
	clock_t	tcp_first_ctimer_threshold; /* 1st threshold while connecting */
	clock_t tcp_second_ctimer_threshold; /* 2nd ... while connecting */

	int	tcp_lingertime;		/* Close linger time (in seconds) */

	uint32_t tcp_urp_last;		/* Last urp for which signal sent */
	mblk_t	*tcp_urp_mp;		/* T_EXDATA_IND for urgent byte */
	mblk_t	*tcp_urp_mark_mp;	/* zero-length marked/unmarked msg */

	int tcp_conn_req_cnt_q0;	/* # of conn reqs in SYN_RCVD */
	int tcp_conn_req_cnt_q;	/* # of conn reqs in ESTABLISHED */
	int tcp_conn_req_max;	/* # of ESTABLISHED conn reqs allowed */
	t_scalar_t tcp_conn_req_seqnum;	/* Incrementing pending conn req ID */
#define	tcp_ip_addr_cache	tcp_reass_tail
					/* Cache ip addresses that */
					/* complete the 3-way handshake */
	kmutex_t  tcp_eager_lock;
	struct tcp_s *tcp_eager_next_q; /* next eager in ESTABLISHED state */
	struct tcp_s *tcp_eager_last_q;	/* last eager in ESTABLISHED state */
	struct tcp_s *tcp_eager_next_q0; /* next eager in SYN_RCVD state */
	struct tcp_s *tcp_eager_prev_q0; /* prev eager in SYN_RCVD state */
					/* all eagers form a circular list */
	boolean_t tcp_conn_def_q0;	/* move from q0 to q deferred */

	union {
	    mblk_t *tcp_eager_conn_ind; /* T_CONN_IND waiting for 3rd ack. */
	    mblk_t *tcp_opts_conn_req; /* T_CONN_REQ w/ options processed */
	} tcp_conn;
	uint32_t tcp_syn_rcvd_timeout;	/* How many SYN_RCVD timeout in q0 */

	/* TCP Keepalive Timer members */
	int32_t	tcp_ka_last_intrvl;	/* Last probe interval */
	timeout_id_t tcp_ka_tid;	/* Keepalive timer ID */
	uint32_t tcp_ka_interval;	/* Keepalive interval */
	uint32_t tcp_ka_abort_thres;	/* Keepalive abort threshold */

	int32_t	tcp_client_errno;	/* How the client screwed up */

	char	*tcp_iphc;		/* Buffer holding tcp/ip hdr template */
	int	tcp_iphc_len;		/* actual allocated buffer size */
	int32_t	tcp_hdr_len;		/* Byte len of combined TCP/IP hdr */
	ipha_t	*tcp_ipha;		/* IPv4 header in the buffer */
	ip6_t	*tcp_ip6h;		/* IPv6 header in the buffer */
	int	tcp_ip_hdr_len;		/* Byte len of our current IPvx hdr */
	tcph_t	*tcp_tcph;		/* tcp header within combined hdr */
	int32_t	tcp_tcp_hdr_len;	/* tcp header len within combined */
	/* Saved peer headers in the case of re-fusion */
	ipha_t	tcp_saved_ipha;
	ip6_t	tcp_saved_ip6h;
	tcph_t	tcp_saved_tcph;

	uint32_t tcp_sum;		/* checksum to compensate for source */
					/* routed packets. Host byte order */
	uint16_t tcp_last_sent_len;	/* Record length for nagle */
	uint16_t tcp_dupack_cnt;	/* # of consecutive duplicate acks */

	kmutex_t	*tcp_acceptor_lockp;	/* Ptr to tf_lock */

	mblk_t		*tcp_ordrel_mp;		/* T_ordrel_ind mblk */
	t_uscalar_t	tcp_acceptor_id;	/* ACCEPTOR_id */

	int		tcp_ipsec_overhead;
	/*
	 * Address family that the app wishes returned addresses to be in.
	 * Currently taken from the address family used in T_BIND_REQ, but
	 * should really come from the family used in the original socket()
	 * call. Value can be AF_INET or AF_INET6.
	 */
	uint_t	tcp_family;
	/*
	 * used for a quick test to determine if any ancillary bits are
	 * set
	 */
	uint_t		tcp_ipv6_recvancillary;		/* Flags */
#define	TCP_IPV6_RECVPKTINFO	0x01	/* IPV6_RECVPKTINFO option  */
#define	TCP_IPV6_RECVHOPLIMIT	0x02	/* IPV6_RECVHOPLIMIT option */
#define	TCP_IPV6_RECVHOPOPTS	0x04	/* IPV6_RECVHOPOPTS option */
#define	TCP_IPV6_RECVDSTOPTS	0x08	/* IPV6_RECVDSTOPTS option */
#define	TCP_IPV6_RECVRTHDR	0x10	/* IPV6_RECVRTHDR option */
#define	TCP_IPV6_RECVRTDSTOPTS	0x20	/* IPV6_RECVRTHDRDSTOPTS option */
#define	TCP_IPV6_RECVTCLASS	0x40	/* IPV6_RECVTCLASS option */
#define	TCP_OLD_IPV6_RECVDSTOPTS 0x80	/* old IPV6_RECVDSTOPTS option */

	uint_t		tcp_recvifindex; /* Last received IPV6_RCVPKTINFO */
	uint_t		tcp_recvhops;	/* Last received IPV6_RECVHOPLIMIT */
	uint_t		tcp_recvtclass;	/* Last received IPV6_RECVTCLASS */
	ip6_hbh_t	*tcp_hopopts;	/* Last received IPV6_RECVHOPOPTS */
	ip6_dest_t	*tcp_dstopts;	/* Last received IPV6_RECVDSTOPTS */
	ip6_dest_t	*tcp_rtdstopts;	/* Last recvd IPV6_RECVRTHDRDSTOPTS */
	ip6_rthdr_t	*tcp_rthdr;	/* Last received IPV6_RECVRTHDR */
	uint_t		tcp_hopoptslen;
	uint_t		tcp_dstoptslen;
	uint_t		tcp_rtdstoptslen;
	uint_t		tcp_rthdrlen;

	mblk_t		*tcp_timercache;
	cred_t		*tcp_cred;	/* Credentials when this was opened */
	pid_t		tcp_cpid;	/* Process id when this was opened */
	uint64_t	tcp_open_time;	/* time when this was opened */

	union {
		struct {
			uchar_t	v4_ttl;	/* Dup of tcp_ipha.iph_ttl */
			uchar_t	v4_tos;	/* Dup of tcp_ipha.iph_type_of_service */
		} v4_hdr_info;
		struct {
			uint_t	v6_vcf;		/* Dup of tcp_ip6h.ip6h_vcf */
			uchar_t	v6_hops;	/* Dup of tcp_ip6h.ip6h_hops */
		} v6_hdr_info;
	} tcp_hdr_info;
#define	tcp_ttl	tcp_hdr_info.v4_hdr_info.v4_ttl
#define	tcp_tos	tcp_hdr_info.v4_hdr_info.v4_tos
#define	tcp_ip6_vcf	tcp_hdr_info.v6_hdr_info.v6_vcf
#define	tcp_ip6_hops	tcp_hdr_info.v6_hdr_info.v6_hops

	ushort_t	tcp_ipversion;
	uint_t		tcp_bound_if;	/* IPV6_BOUND_IF */

#define	tcp_lport	tcp_connp->conn_lport
#define	tcp_fport	tcp_connp->conn_fport
#define	tcp_ports	tcp_connp->conn_ports

#define	tcp_remote	tcp_connp->conn_rem
#define	tcp_ip_src	tcp_connp->conn_src

#define	tcp_remote_v6	tcp_connp->conn_remv6
#define	tcp_ip_src_v6	tcp_connp->conn_srcv6
#define	tcp_bound_source_v6	tcp_connp->conn_bound_source_v6
#define	tcp_bound_source	tcp_connp->conn_bound_source

	kmutex_t	tcp_closelock;
	kcondvar_t	tcp_closecv;
	uint8_t		tcp_closed;
	uint8_t		tcp_closeflags;
	uint8_t		tcp_cleandeathtag;
	mblk_t		tcp_closemp;
	timeout_id_t	tcp_linger_tid;	/* Linger timer ID */

	struct tcp_s *tcp_acceptor_hash; /* Acceptor hash chain */
	struct tcp_s **tcp_ptpahn; /* Pointer to previous accept hash next. */
	struct tcp_s *tcp_bind_hash; /* Bind hash chain */
	struct tcp_s *tcp_bind_hash_port; /* tcp_t's bound to the same lport */
	struct tcp_s **tcp_ptpbhn; /* Pointer to previous bind hash next. */

	boolean_t	tcp_ire_ill_check_done;
	uint_t		tcp_maxpsz;

	/*
	 * used for Multidata Transmit
	 */
	uint_t	tcp_mdt_hdr_head; /* leading header fragment extra space */
	uint_t	tcp_mdt_hdr_tail; /* trailing header fragment extra space */
	int	tcp_mdt_max_pld;  /* maximum payload buffers per Multidata */

	uint32_t	tcp_lso_max; /* maximum LSO payload */

	uint32_t	tcp_ofo_fin_seq; /* Recv out of order FIN seq num */
	uint32_t	tcp_cwr_snd_max;
	uint_t		tcp_drop_opt_ack_cnt; /* # tcp generated optmgmt */
	ip6_pkt_t	tcp_sticky_ipp;			/* Sticky options */
#define	tcp_ipp_fields	tcp_sticky_ipp.ipp_fields	/* valid fields */
#define	tcp_ipp_ifindex	tcp_sticky_ipp.ipp_ifindex	/* pktinfo ifindex */
#define	tcp_ipp_addr	tcp_sticky_ipp.ipp_addr	/* pktinfo src/dst addr */
#define	tcp_ipp_hoplimit	tcp_sticky_ipp.ipp_hoplimit
#define	tcp_ipp_hopoptslen	tcp_sticky_ipp.ipp_hopoptslen
#define	tcp_ipp_rtdstoptslen	tcp_sticky_ipp.ipp_rtdstoptslen
#define	tcp_ipp_rthdrlen	tcp_sticky_ipp.ipp_rthdrlen
#define	tcp_ipp_dstoptslen	tcp_sticky_ipp.ipp_dstoptslen
#define	tcp_ipp_hopopts		tcp_sticky_ipp.ipp_hopopts
#define	tcp_ipp_rtdstopts	tcp_sticky_ipp.ipp_rtdstopts
#define	tcp_ipp_rthdr		tcp_sticky_ipp.ipp_rthdr
#define	tcp_ipp_dstopts		tcp_sticky_ipp.ipp_dstopts
#define	tcp_ipp_nexthop		tcp_sticky_ipp.ipp_nexthop
#define	tcp_ipp_use_min_mtu	tcp_sticky_ipp.ipp_use_min_mtu
	struct tcp_s *tcp_saved_listener;	/* saved value of listener */

	uint32_t	tcp_in_ack_unsent;	/* ACK for unsent data cnt. */

	/*
	 * The following fusion-related fields are protected by squeue.
	 */
	struct tcp_s *tcp_loopback_peer;	/* peer tcp for loopback */
	mblk_t	*tcp_fused_sigurg_mp;		/* M_PCSIG mblk for SIGURG */
	size_t	tcp_fuse_rcv_hiwater;		/* fusion receive queue size */
	uint_t	tcp_fuse_rcv_unread_hiwater;	/* max # of outstanding pkts */
	/*
	 * The following fusion-related fields and bit fields are to be
	 * manipulated with squeue protection or with tcp_non_sq_lock held.
	 * tcp_non_sq_lock is used to protect fields that may be modified or
	 * accessed outside the squeue.
	 */
	kmutex_t tcp_non_sq_lock;
	kcondvar_t tcp_fuse_plugcv;
	uint_t tcp_fuse_rcv_unread_cnt;	/* # of outstanding pkts */
	uint32_t
		tcp_fused : 1,		/* loopback tcp in fusion mode */
		tcp_unfusable : 1,	/* fusion not allowed on endpoint */
		tcp_fused_sigurg : 1,	/* send SIGURG upon draining */
		tcp_direct_sockfs : 1,	/* direct calls to sockfs */

		tcp_fuse_syncstr_stopped : 1, /* synchronous streams stopped */
		tcp_fuse_syncstr_plugged : 1, /* synchronous streams plugged */
		tcp_fuse_to_bit_31 : 26;

	/*
	 * This variable is accessed without any lock protection
	 * and therefore must not be declared as a bit field along
	 * with the rest, which require such protection.
	 */
	boolean_t	tcp_issocket;	/* this is a socket tcp */

	/* protected by the tcp_non_sq_lock lock */
	uint32_t	tcp_squeue_bytes;
	/*
	 * Kernel SSL session information
	 */
	boolean_t		tcp_kssl_pending; /* waiting for 1st SSL rec. */
	boolean_t		tcp_kssl_inhandshake; /* during SSL handshake */
	kssl_ent_t		tcp_kssl_ent;	/* SSL table entry */
	kssl_ctx_t		tcp_kssl_ctx;	/* SSL session */
	uint_t	tcp_label_len;	/* length of cached label */

	/*
	 * tcp_closemp_used is protected by listener's tcp_eager_lock
	 * when used for eagers. When used for a tcp in TIME_WAIT state
	 * or in tcp_close(), it is not protected by any lock as we
	 * do not expect any other thread to use it concurrently.
	 * We do allow re-use of tcp_closemp in tcp_time_wait_collector()
	 * and tcp_close() but not concurrently.
	 */
	boolean_t tcp_closemp_used;

	/*
	 * previous and next eagers in the list of droppable eagers. See
	 * the comments before MAKE_DROPPABLE(). These pointers are
	 * protected by listener's tcp_eager_lock.
	 */
	struct tcp_s	*tcp_eager_prev_drop_q0;
	struct tcp_s	*tcp_eager_next_drop_q0;

	/*
	 * Have we flow controlled xmitter?
	 * This variable can be modified outside the squeue and hence must
	 * not be declared as a bit field along with the rest that are
	 * modified only within the squeue.
	 * protected by the tcp_non_sq_lock lock.
	 */
	boolean_t	tcp_flow_stopped;

	/*
	 * The socket generation number is bumped when an outgoing connection
	 * attempt is made, and it is sent up to the socket when the
	 * connection is successfully established or an error occurs. The
	 * generation number is used to ensure that the socket does not miss
	 * the asynchronous notification.
	 */
	sock_connid_t	tcp_connid;

	/* mblk_t used to enter TCP's squeue from the service routine. */
	mblk_t		*tcp_rsrv_mp;
	/* Mutex for accessing tcp_rsrv_mp */
	kmutex_t	tcp_rsrv_mp_lock;

#ifdef DEBUG
	pc_t			tcmp_stk[15];
#endif
} tcp_t;

#ifdef DEBUG
#define	TCP_DEBUG_GETPCSTACK(buffer, depth)	((void) getpcstack(buffer, \
						    depth))
#else
#define	TCP_DEBUG_GETPCSTACK(buffer, depth)
#endif

/*
 * Track a reference count on the tcps in order to know when
 * the tcps_g_q can be removed. As long as there is any
 * tcp_t, other than the tcps_g_q itself, in the tcp_stack_t we
 * need to keep tcps_g_q around so that a closing connection can
 * switch to using tcps_g_q as part of its close.
 */
#define	TCPS_REFHOLD(tcps) {					\
	atomic_add_32(&(tcps)->tcps_refcnt, 1);			\
	ASSERT((tcps)->tcps_refcnt != 0);			\
	DTRACE_PROBE1(tcps__refhold, tcp_stack_t, tcps);	\
}

/*
 * Decrement the reference count on the tcp_stack_t.
 * On architectures, e.g. sun4u, where atomic_add_32_nv is just
 * a cas, we need to maintain the same memory barrier semantics
 * as mutex_exit, i.e. all the loads and stores should complete
 * before the cas is executed. membar_exit() does that here.
 */
#define	TCPS_REFRELE(tcps) {					\
	ASSERT((tcps)->tcps_refcnt != 0);			\
	membar_exit();						\
	DTRACE_PROBE1(tcps__refrele, tcp_stack_t, tcps);	\
	if (atomic_add_32_nv(&(tcps)->tcps_refcnt, -1) == 0 &&	\
	    (tcps)->tcps_g_q != NULL) {				\
		/* Only tcps_g_q left */			\
		tcp_g_q_inactive(tcps);				\
	}							\
}

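/*
 * A minimal usage sketch for the reference-count macros above (how a caller
 * obtains the tcp_stack_t pointer is assumed here, not shown):
 *
 *	TCPS_REFHOLD(tcps);
 *	... use tcps; the stack cannot be torn down while the hold is held ...
 *	TCPS_REFRELE(tcps);
 */
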
extern void 	tcp_free(tcp_t *tcp);
extern void	tcp_ddi_g_init(void);
extern void	tcp_ddi_g_destroy(void);
extern void	tcp_g_q_inactive(tcp_stack_t *);
extern void	tcp_xmit_listeners_reset(mblk_t *mp, uint_t ip_hdr_len,
    zoneid_t zoneid, tcp_stack_t *, conn_t *connp);
extern void	tcp_conn_request(void *arg, mblk_t *mp, void *arg2);
extern void	tcp_conn_request_unbound(void *arg, mblk_t *mp, void *arg2);
extern void 	tcp_input(void *arg, mblk_t *mp, void *arg2);
extern void	tcp_rput_data(void *arg, mblk_t *mp, void *arg2);
extern void 	*tcp_get_conn(void *arg, tcp_stack_t *);
extern void	tcp_time_wait_collector(void *arg);
extern mblk_t	*tcp_snmp_get(queue_t *, mblk_t *);
extern int	tcp_snmp_set(queue_t *, int, int, uchar_t *, int len);
extern mblk_t	*tcp_xmit_mp(tcp_t *tcp, mblk_t *mp, int32_t max_to_send,
		    int32_t *offset, mblk_t **end_mp, uint32_t seq,
		    boolean_t sendall, uint32_t *seg_len, boolean_t rexmit);
extern void	tcp_xmit_reset(void *arg, mblk_t *mp, void *arg2);

/*
 * The TCP Fanout structure.
 * The hash tables and their linkage (tcp_*_hash_next, tcp_ptp*hn) are
 * protected by the per-bucket tf_lock. Each tcp_t inserted in the list
 * points back at this lock using tcp_*_lockp.
 *
 * The listener and acceptor hash queues are lists of tcp_t.
 */
/* listener hash and acceptor hash queue head */
typedef struct tf_s {
	tcp_t		*tf_tcp;
	kmutex_t	tf_lock;
} tf_t;
#endif	/* (defined(_KERNEL) || defined(_KMEMUSER)) */

/* Contract private interface between TCP and Clustering. */

#define	CL_TCPI_V1	1	/* cl_tcpi_version number */

typedef struct cl_tcp_info_s {
	ushort_t	cl_tcpi_version;	/* cl_tcp_info_t's version no */
	ushort_t	cl_tcpi_ipversion;	/* IP version */
	int32_t		cl_tcpi_state;		/* TCP state */
	in_port_t	cl_tcpi_lport;		/* Local port */
	in_port_t	cl_tcpi_fport;		/* Remote port */
	in6_addr_t	cl_tcpi_laddr_v6;	/* Local IP address */
	in6_addr_t	cl_tcpi_faddr_v6;	/* Remote IP address */
#ifdef _KERNEL
/* Note: V4_PART_OF_V6 is meant to be used only for _KERNEL defined stuff */
#define	cl_tcpi_laddr	V4_PART_OF_V6(cl_tcpi_laddr_v6)
#define	cl_tcpi_faddr	V4_PART_OF_V6(cl_tcpi_faddr_v6)

#endif	/* _KERNEL */
} cl_tcp_info_t;

/*
 * Contracted Consolidation Private ioctl for aborting TCP connections.
 * In order to keep the offsets and size of the structure the same between
 * a 32-bit application and a 64-bit amd64 kernel, we use a #pragma
 * pack(4).
 */
#define	TCP_IOC_ABORT_CONN	(('T' << 8) + 91)

#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack(4)
#endif

typedef struct tcp_ioc_abort_conn_s {
	struct sockaddr_storage ac_local;	/* local addr and port */
	struct sockaddr_storage ac_remote;	/* remote addr and port */
	int32_t ac_start;			/* start state */
	int32_t ac_end;				/* end state  */
	int32_t ac_zoneid;			/* zoneid */
} tcp_ioc_abort_conn_t;

#if _LONG_LONG_ALIGNMENT == 8 && _LONG_LONG_ALIGNMENT_32 == 4
#pragma pack()
#endif

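/*
 * A hedged sketch of how a privileged caller might fill in the abort
 * request; how the ioctl is actually issued (device node, STREAMS framing,
 * privilege checks) is outside this header and is assumed here:
 *
 *	tcp_ioc_abort_conn_t ac;
 *	struct sockaddr_in *l = (struct sockaddr_in *)&ac.ac_local;
 *	struct sockaddr_in *r = (struct sockaddr_in *)&ac.ac_remote;
 *
 *	bzero(&ac, sizeof (ac));
 *	l->sin_family = r->sin_family = AF_INET;
 *	l->sin_port = htons(8080);	(local port of interest)
 *	ac.ac_start = TCPS_SYN_SENT;	(abort connections in this state range)
 *	ac.ac_end = TCPS_TIME_WAIT;
 *	ac.ac_zoneid = GLOBAL_ZONEID;	(or another zoneid, as appropriate)
 */
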
#if (defined(_KERNEL) || defined(_KMEMUSER))
extern void tcp_rput_other(tcp_t *tcp, mblk_t *mp);
#endif

#if (defined(_KERNEL))
#define	TCP_XRE_EVENT_IP_FANOUT_TCP 1

/*
 * This is a private structure used to pass data to an squeue function during
 * tcp's listener reset sending path.
 */
typedef struct tcp_xmit_reset_event {
	int		tcp_xre_event;
	int		tcp_xre_iphdrlen;
	zoneid_t	tcp_xre_zoneid;
	tcp_stack_t	*tcp_xre_tcps;
} tcp_xmit_reset_event_t;
#endif

#ifdef	__cplusplus
}
#endif

#endif	/* _INET_TCP_H */