/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/tihdr.h>
#include <sys/policy.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_stats.h>
#include <inet/kstatcom.h>
#include <inet/snmpcom.h>

static int	tcp_kstat_update(kstat_t *kp, int rw);
static int	tcp_kstat2_update(kstat_t *kp, int rw);
static void	tcp_sum_mib(tcp_stack_t *, mib2_tcp_t *);

static void	tcp_add_mib(mib2_tcp_t *, mib2_tcp_t *);
static void	tcp_add_stats(tcp_stat_counter_t *, tcp_stat_t *);
static void	tcp_clr_stats(tcp_stat_t *);

tcp_g_stat_t	tcp_g_statistics;
kstat_t		*tcp_g_kstat;

/* Translate TCP state to MIB2 TCP state. */
static int
tcp_snmp_state(tcp_t *tcp)
{
	if (tcp == NULL)
		return (0);

	switch (tcp->tcp_state) {
	case TCPS_CLOSED:
	case TCPS_IDLE:	/* RFC1213 doesn't have an analogue for IDLE & BOUND */
	case TCPS_BOUND:
		return (MIB2_TCP_closed);
	case TCPS_LISTEN:
		return (MIB2_TCP_listen);
	case TCPS_SYN_SENT:
		return (MIB2_TCP_synSent);
	case TCPS_SYN_RCVD:
		return (MIB2_TCP_synReceived);
	case TCPS_ESTABLISHED:
		return (MIB2_TCP_established);
	case TCPS_CLOSE_WAIT:
		return (MIB2_TCP_closeWait);
	case TCPS_FIN_WAIT_1:
		return (MIB2_TCP_finWait1);
	case TCPS_CLOSING:
		return (MIB2_TCP_closing);
	case TCPS_LAST_ACK:
		return (MIB2_TCP_lastAck);
	case TCPS_FIN_WAIT_2:
		return (MIB2_TCP_finWait2);
	case TCPS_TIME_WAIT:
		return (MIB2_TCP_timeWait);
	default:
		return (0);
	}
}

/*
 * Return SNMP data in mpdata: the fixed mib2_tcp_t counters plus the
 * IPv4/IPv6 connection tables and their MLP attribute tables.
 */
mblk_t *
tcp_snmp_get(queue_t *q, mblk_t *mpctl)
{
	mblk_t			*mpdata;
	mblk_t			*mp_conn_ctl = NULL;
	mblk_t			*mp_conn_tail;
	mblk_t			*mp_attr_ctl = NULL;
	mblk_t			*mp_attr_tail;
	mblk_t			*mp6_conn_ctl = NULL;
	mblk_t			*mp6_conn_tail;
	mblk_t			*mp6_attr_ctl = NULL;
	mblk_t			*mp6_attr_tail;
	struct opthdr		*optp;
	mib2_tcpConnEntry_t	tce;
	mib2_tcp6ConnEntry_t	tce6;
	mib2_transportMLPEntry_t mlp;
	connf_t			*connfp;
	int			i;
	boolean_t 		ispriv;
	zoneid_t 		zoneid;
	int			v4_conn_idx;
	int			v6_conn_idx;
	conn_t			*connp = Q_TO_CONN(q);
	tcp_stack_t		*tcps;
	ip_stack_t		*ipst;
	mblk_t			*mp2ctl;
	mib2_tcp_t		tcp_mib;

	/*
	 * make a copy of the original message
	 */
	mp2ctl = copymsg(mpctl);

	if (mpctl == NULL ||
	    (mpdata = mpctl->b_cont) == NULL ||
	    (mp_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp_attr_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
		freemsg(mp_conn_ctl);
		freemsg(mp_attr_ctl);
		freemsg(mp6_conn_ctl);
		freemsg(mp6_attr_ctl);
		freemsg(mpctl);
		freemsg(mp2ctl);
		return (NULL);
	}

	ipst = connp->conn_netstack->netstack_ip;
	tcps = connp->conn_netstack->netstack_tcp;

	bzero(&tcp_mib, sizeof (tcp_mib));

	/* build table of connections -- need count in fixed part */
	SET_MIB(tcp_mib.tcpRtoAlgorithm, 4);   /* vanj */
	SET_MIB(tcp_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min);
	SET_MIB(tcp_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max);
	SET_MIB(tcp_mib.tcpMaxConn, -1);
	SET_MIB(tcp_mib.tcpCurrEstab, 0);

	ispriv =
	    secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0;
	zoneid = Q_TO_CONN(q)->conn_zoneid;

	v4_conn_idx = v6_conn_idx = 0;
	mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		ipst = tcps->tcps_netstack->netstack_ip;

		connfp = &ipst->ips_ipcl_globalhash_fanout[i];

		connp = NULL;

		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp_t *tcp;
			boolean_t needattr;

			if (connp->conn_zoneid != zoneid)
				continue;	/* not in this zone */

			tcp = connp->conn_tcp;
			TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
			tcp->tcp_ibsegs = 0;
			TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
			tcp->tcp_obsegs = 0;

			tce6.tcp6ConnState = tce.tcpConnState =
			    tcp_snmp_state(tcp);
			if (tce.tcpConnState == MIB2_TCP_established ||
			    tce.tcpConnState == MIB2_TCP_closeWait)
				BUMP_MIB(&tcp_mib, tcpCurrEstab);

			needattr = B_FALSE;
			bzero(&mlp, sizeof (mlp));
			if (connp->conn_mlp_type != mlptSingle) {
				if (connp->conn_mlp_type == mlptShared ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_SHARED;
				if (connp->conn_mlp_type == mlptPrivate ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_PRIVATE;
				needattr = B_TRUE;
			}
			if (connp->conn_anon_mlp) {
				mlp.tme_flags |= MIB2_TMEF_ANONMLP;
				needattr = B_TRUE;
			}
			switch (connp->conn_mac_mode) {
			case CONN_MAC_DEFAULT:
				break;
			case CONN_MAC_AWARE:
				mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
				needattr = B_TRUE;
				break;
			case CONN_MAC_IMPLICIT:
				mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
				needattr = B_TRUE;
				break;
			}
			if (connp->conn_ixa->ixa_tsl != NULL) {
				ts_label_t *tsl;

				tsl = connp->conn_ixa->ixa_tsl;
				mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
				mlp.tme_doi = label2doi(tsl);
				mlp.tme_label = *label2bslabel(tsl);
				needattr = B_TRUE;
			}

			/* Create a message to report on IPv6 entries */
			if (connp->conn_ipversion == IPV6_VERSION) {
			tce6.tcp6ConnLocalAddress = connp->conn_laddr_v6;
			tce6.tcp6ConnRemAddress = connp->conn_faddr_v6;
			tce6.tcp6ConnLocalPort = ntohs(connp->conn_lport);
			tce6.tcp6ConnRemPort = ntohs(connp->conn_fport);
			if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET) {
				tce6.tcp6ConnIfIndex =
				    connp->conn_ixa->ixa_scopeid;
			} else {
				tce6.tcp6ConnIfIndex = connp->conn_bound_if;
			}
			/* Don't want just anybody seeing these... */
			if (ispriv) {
				tce6.tcp6ConnEntryInfo.ce_snxt =
				    tcp->tcp_snxt;
				tce6.tcp6ConnEntryInfo.ce_suna =
				    tcp->tcp_suna;
				tce6.tcp6ConnEntryInfo.ce_rnxt =
				    tcp->tcp_rnxt;
				tce6.tcp6ConnEntryInfo.ce_rack =
				    tcp->tcp_rack;
			} else {
				/*
				 * Netstat, unfortunately, uses this to
				 * get send/receive queue sizes.  How to fix?
				 * Why not compute the difference only?
				 */
				tce6.tcp6ConnEntryInfo.ce_snxt =
				    tcp->tcp_snxt - tcp->tcp_suna;
				tce6.tcp6ConnEntryInfo.ce_suna = 0;
				tce6.tcp6ConnEntryInfo.ce_rnxt =
				    tcp->tcp_rnxt - tcp->tcp_rack;
				tce6.tcp6ConnEntryInfo.ce_rack = 0;
			}

			tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd;
			tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
			tce6.tcp6ConnEntryInfo.ce_rto =  tcp->tcp_rto;
			tce6.tcp6ConnEntryInfo.ce_mss =  tcp->tcp_mss;
			tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state;

			tce6.tcp6ConnCreationProcess =
			    (connp->conn_cpid < 0) ? MIB2_UNKNOWN_PROCESS :
			    connp->conn_cpid;
			tce6.tcp6ConnCreationTime = connp->conn_open_time;

			(void) snmp_append_data2(mp6_conn_ctl->b_cont,
			    &mp6_conn_tail, (char *)&tce6, sizeof (tce6));

			mlp.tme_connidx = v6_conn_idx++;
			if (needattr)
				(void) snmp_append_data2(mp6_attr_ctl->b_cont,
				    &mp6_attr_tail, (char *)&mlp, sizeof (mlp));
			}
			/*
			 * Create an IPv4 table entry for IPv4 entries and also
			 * for IPv6 entries which are bound to in6addr_any
			 * but don't have IPV6_V6ONLY set.
			 * (i.e. anything an IPv4 peer could connect to)
			 */
			if (connp->conn_ipversion == IPV4_VERSION ||
			    (tcp->tcp_state <= TCPS_LISTEN &&
			    !connp->conn_ipv6_v6only &&
			    IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
				if (connp->conn_ipversion == IPV6_VERSION) {
					tce.tcpConnRemAddress = INADDR_ANY;
					tce.tcpConnLocalAddress = INADDR_ANY;
				} else {
					tce.tcpConnRemAddress =
					    connp->conn_faddr_v4;
					tce.tcpConnLocalAddress =
					    connp->conn_laddr_v4;
				}
				tce.tcpConnLocalPort = ntohs(connp->conn_lport);
				tce.tcpConnRemPort = ntohs(connp->conn_fport);
				/* Don't want just anybody seeing these... */
				if (ispriv) {
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt;
					tce.tcpConnEntryInfo.ce_suna =
					    tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt;
					tce.tcpConnEntryInfo.ce_rack =
					    tcp->tcp_rack;
				} else {
					/*
					 * Netstat, unfortunately, uses this to
					 * get send/receive queue sizes.  How
					 * to fix?
					 * Why not compute the difference only?
					 */
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt - tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_suna = 0;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt - tcp->tcp_rack;
					tce.tcpConnEntryInfo.ce_rack = 0;
				}

				tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd;
				tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
				tce.tcpConnEntryInfo.ce_rto =  tcp->tcp_rto;
				tce.tcpConnEntryInfo.ce_mss =  tcp->tcp_mss;
				tce.tcpConnEntryInfo.ce_state =
				    tcp->tcp_state;

				tce.tcpConnCreationProcess =
				    (connp->conn_cpid < 0) ?
				    MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce.tcpConnCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp_conn_ctl->b_cont,
				    &mp_conn_tail, (char *)&tce, sizeof (tce));

				mlp.tme_connidx = v4_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(
					    mp_attr_ctl->b_cont,
					    &mp_attr_tail, (char *)&mlp,
					    sizeof (mlp));
			}
		}
	}

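	/*
	 * Fold the per CPU MIB counters into the local tcp_mib snapshot
	 * before filling in the fixed-length portion of the reply.
	 */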
	tcp_sum_mib(tcps, &tcp_mib);

	/*
	 * Synchronize the 32- and 64-bit counters.  Note that tcpInSegs and
	 * tcpOutSegs are not updated anywhere in TCP; only the new 64-bit
	 * counters are, so the old counters' values in tcp_sc_mib are
	 * always 0.
	 */
	SYNC32_MIB(&tcp_mib, tcpInSegs, tcpHCInSegs);
	SYNC32_MIB(&tcp_mib, tcpOutSegs, tcpHCOutSegs);

	optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = 0;
	(void) snmp_append_data(mpdata, (char *)&tcp_mib, sizeof (tcp_mib));
	optp->len = msgdsize(mpdata);
	qreply(q, mpctl);

	/* table of connections... */
	optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = MIB2_TCP_CONN;
	optp->len = msgdsize(mp_conn_ctl->b_cont);
	qreply(q, mp_conn_ctl);

	/* table of MLP attributes... */
	optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp_attr_ctl);
	else
		qreply(q, mp_attr_ctl);

	/* table of IPv6 connections... */
	optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = MIB2_TCP6_CONN;
	optp->len = msgdsize(mp6_conn_ctl->b_cont);
	qreply(q, mp6_conn_ctl);

	/* table of IPv6 MLP attributes... */
	optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp6_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp6_attr_ctl);
	else
		qreply(q, mp6_attr_ctl);
	return (mp2ctl);
}

/* Return 0 if invalid set request, 1 otherwise, including non-tcp requests  */
/* ARGSUSED */
int
tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
{
	mib2_tcpConnEntry_t	*tce = (mib2_tcpConnEntry_t *)ptr;

	switch (level) {
	case MIB2_TCP:
		switch (name) {
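		/*
		 * Object 13 in the MIB-II tcp group is tcpConnTable; per
		 * RFC 1213 the only writable field is tcpConnState and the
		 * only value a manager may set is deleteTCB.
		 */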
		case 13:
			if (tce->tcpConnState != MIB2_TCP_deleteTCB)
				return (0);
			/* TODO: delete entry defined by tce */
			return (1);
		default:
			return (0);
		}
	default:
		return (1);
	}
}

/*
 * TCP Kstats implementation
 */
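/*
 * tcp_kstat_init() creates the per-stack named kstat that mirrors the
 * MIB-II tcp group (module "tcp", name "mib2").  The template below only
 * seeds the static fields; everything else is recomputed on demand by
 * tcp_kstat_update().  On a running system these counters can typically
 * be inspected with kstat(1M), e.g. "kstat -m tcp -n mib2".
 */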
void *
tcp_kstat_init(netstackid_t stackid)
{
	kstat_t	*ksp;

	tcp_named_kstat_t template = {
		{ "rtoAlgorithm",	KSTAT_DATA_INT32, 0 },
		{ "rtoMin",		KSTAT_DATA_INT32, 0 },
		{ "rtoMax",		KSTAT_DATA_INT32, 0 },
		{ "maxConn",		KSTAT_DATA_INT32, 0 },
		{ "activeOpens",	KSTAT_DATA_UINT32, 0 },
		{ "passiveOpens",	KSTAT_DATA_UINT32, 0 },
		{ "attemptFails",	KSTAT_DATA_UINT32, 0 },
		{ "estabResets",	KSTAT_DATA_UINT32, 0 },
		{ "currEstab",		KSTAT_DATA_UINT32, 0 },
		{ "inSegs",		KSTAT_DATA_UINT64, 0 },
		{ "outSegs",		KSTAT_DATA_UINT64, 0 },
		{ "retransSegs",	KSTAT_DATA_UINT32, 0 },
		{ "connTableSize",	KSTAT_DATA_INT32, 0 },
		{ "outRsts",		KSTAT_DATA_UINT32, 0 },
		{ "outDataSegs",	KSTAT_DATA_UINT32, 0 },
		{ "outDataBytes",	KSTAT_DATA_UINT32, 0 },
		{ "retransBytes",	KSTAT_DATA_UINT32, 0 },
		{ "outAck",		KSTAT_DATA_UINT32, 0 },
		{ "outAckDelayed",	KSTAT_DATA_UINT32, 0 },
		{ "outUrg",		KSTAT_DATA_UINT32, 0 },
		{ "outWinUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "outWinProbe",	KSTAT_DATA_UINT32, 0 },
		{ "outControl",		KSTAT_DATA_UINT32, 0 },
		{ "outFastRetrans",	KSTAT_DATA_UINT32, 0 },
		{ "inAckSegs",		KSTAT_DATA_UINT32, 0 },
		{ "inAckBytes",		KSTAT_DATA_UINT32, 0 },
		{ "inDupAck",		KSTAT_DATA_UINT32, 0 },
		{ "inAckUnsent",	KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataDupSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataDupBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inWinProbe",		KSTAT_DATA_UINT32, 0 },
		{ "inWinUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "inClosed",		KSTAT_DATA_UINT32, 0 },
		{ "rttUpdate",		KSTAT_DATA_UINT32, 0 },
		{ "rttNoUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "timRetrans",		KSTAT_DATA_UINT32, 0 },
		{ "timRetransDrop",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepalive",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveProbe",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveDrop",	KSTAT_DATA_UINT32, 0 },
		{ "listenDrop",		KSTAT_DATA_UINT32, 0 },
		{ "listenDropQ0",	KSTAT_DATA_UINT32, 0 },
		{ "halfOpenDrop",	KSTAT_DATA_UINT32, 0 },
		{ "outSackRetransSegs",	KSTAT_DATA_UINT32, 0 },
		{ "connTableSize6",	KSTAT_DATA_INT32, 0 }
	};

	ksp = kstat_create_netstack(TCP_MOD_NAME, 0, TCP_MOD_NAME, "mib2",
	    KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid);

	if (ksp == NULL)
		return (NULL);

	template.rtoAlgorithm.value.ui32 = 4;
	template.maxConn.value.i32 = -1;

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_update = tcp_kstat_update;
	ksp->ks_private = (void *)(uintptr_t)stackid;

	kstat_install(ksp);
	return (ksp);
}

void
tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

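/*
 * ks_update callback for the "mib2" kstat created above.  Writes are
 * rejected.  On a read, walk the global connection hash to recompute
 * currEstab, then sum the per CPU MIB counters into a local mib2_tcp_t
 * and copy the result into the named kstat fields.
 */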
static int
tcp_kstat_update(kstat_t *kp, int rw)
{
	tcp_named_kstat_t *tcpkp;
	tcp_t		*tcp;
	connf_t		*connfp;
	conn_t		*connp;
	int 		i;
	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t	*ns;
	tcp_stack_t	*tcps;
	ip_stack_t	*ipst;
	mib2_tcp_t	tcp_mib;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}

	tcpkp = (tcp_named_kstat_t *)kp->ks_data;

	tcpkp->currEstab.value.ui32 = 0;
	tcpkp->rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min;
	tcpkp->rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max;

	ipst = ns->netstack_ip;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
		connp = NULL;
		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp = connp->conn_tcp;
			switch (tcp_snmp_state(tcp)) {
			case MIB2_TCP_established:
			case MIB2_TCP_closeWait:
				tcpkp->currEstab.value.ui32++;
				break;
			}
		}
	}
	bzero(&tcp_mib, sizeof (tcp_mib));
	tcp_sum_mib(tcps, &tcp_mib);

	tcpkp->activeOpens.value.ui32 = tcp_mib.tcpActiveOpens;
	tcpkp->passiveOpens.value.ui32 = tcp_mib.tcpPassiveOpens;
	tcpkp->attemptFails.value.ui32 = tcp_mib.tcpAttemptFails;
	tcpkp->estabResets.value.ui32 = tcp_mib.tcpEstabResets;
	tcpkp->inSegs.value.ui64 = tcp_mib.tcpHCInSegs;
	tcpkp->outSegs.value.ui64 = tcp_mib.tcpHCOutSegs;
	tcpkp->retransSegs.value.ui32 =	tcp_mib.tcpRetransSegs;
	tcpkp->connTableSize.value.i32 = tcp_mib.tcpConnTableSize;
	tcpkp->outRsts.value.ui32 = tcp_mib.tcpOutRsts;
	tcpkp->outDataSegs.value.ui32 = tcp_mib.tcpOutDataSegs;
	tcpkp->outDataBytes.value.ui32 = tcp_mib.tcpOutDataBytes;
	tcpkp->retransBytes.value.ui32 = tcp_mib.tcpRetransBytes;
	tcpkp->outAck.value.ui32 = tcp_mib.tcpOutAck;
	tcpkp->outAckDelayed.value.ui32 = tcp_mib.tcpOutAckDelayed;
	tcpkp->outUrg.value.ui32 = tcp_mib.tcpOutUrg;
	tcpkp->outWinUpdate.value.ui32 = tcp_mib.tcpOutWinUpdate;
	tcpkp->outWinProbe.value.ui32 = tcp_mib.tcpOutWinProbe;
	tcpkp->outControl.value.ui32 = tcp_mib.tcpOutControl;
	tcpkp->outFastRetrans.value.ui32 = tcp_mib.tcpOutFastRetrans;
	tcpkp->inAckSegs.value.ui32 = tcp_mib.tcpInAckSegs;
	tcpkp->inAckBytes.value.ui32 = tcp_mib.tcpInAckBytes;
	tcpkp->inDupAck.value.ui32 = tcp_mib.tcpInDupAck;
	tcpkp->inAckUnsent.value.ui32 = tcp_mib.tcpInAckUnsent;
	tcpkp->inDataInorderSegs.value.ui32 = tcp_mib.tcpInDataInorderSegs;
	tcpkp->inDataInorderBytes.value.ui32 = tcp_mib.tcpInDataInorderBytes;
	tcpkp->inDataUnorderSegs.value.ui32 = tcp_mib.tcpInDataUnorderSegs;
	tcpkp->inDataUnorderBytes.value.ui32 = tcp_mib.tcpInDataUnorderBytes;
	tcpkp->inDataDupSegs.value.ui32 = tcp_mib.tcpInDataDupSegs;
	tcpkp->inDataDupBytes.value.ui32 = tcp_mib.tcpInDataDupBytes;
	tcpkp->inDataPartDupSegs.value.ui32 = tcp_mib.tcpInDataPartDupSegs;
	tcpkp->inDataPartDupBytes.value.ui32 = tcp_mib.tcpInDataPartDupBytes;
	tcpkp->inDataPastWinSegs.value.ui32 = tcp_mib.tcpInDataPastWinSegs;
	tcpkp->inDataPastWinBytes.value.ui32 = tcp_mib.tcpInDataPastWinBytes;
	tcpkp->inWinProbe.value.ui32 = tcp_mib.tcpInWinProbe;
	tcpkp->inWinUpdate.value.ui32 = tcp_mib.tcpInWinUpdate;
	tcpkp->inClosed.value.ui32 = tcp_mib.tcpInClosed;
	tcpkp->rttNoUpdate.value.ui32 = tcp_mib.tcpRttNoUpdate;
	tcpkp->rttUpdate.value.ui32 = tcp_mib.tcpRttUpdate;
	tcpkp->timRetrans.value.ui32 = tcp_mib.tcpTimRetrans;
	tcpkp->timRetransDrop.value.ui32 = tcp_mib.tcpTimRetransDrop;
	tcpkp->timKeepalive.value.ui32 = tcp_mib.tcpTimKeepalive;
	tcpkp->timKeepaliveProbe.value.ui32 = tcp_mib.tcpTimKeepaliveProbe;
	tcpkp->timKeepaliveDrop.value.ui32 = tcp_mib.tcpTimKeepaliveDrop;
	tcpkp->listenDrop.value.ui32 = tcp_mib.tcpListenDrop;
	tcpkp->listenDropQ0.value.ui32 = tcp_mib.tcpListenDropQ0;
	tcpkp->halfOpenDrop.value.ui32 = tcp_mib.tcpHalfOpenDrop;
	tcpkp->outSackRetransSegs.value.ui32 = tcp_mib.tcpOutSackRetransSegs;
	tcpkp->connTableSize6.value.i32 = tcp_mib.tcp6ConnTableSize;

	netstack_rele(ns);
	return (0);
}

/*
 * kstats related to squeues, i.e. not per IP instance.
 */
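/*
 * The kstat is created KSTAT_FLAG_VIRTUAL with ks_data pointing directly
 * at the caller-supplied tcp_g_stat_t (normally tcp_g_statistics above),
 * so no update callback is needed.
 */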
void *
tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp)
{
	kstat_t *ksp;

	tcp_g_stat_t template = {
		{ "tcp_timermp_alloced",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocfail",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocdblfail",	KSTAT_DATA_UINT64 },
		{ "tcp_freelist_cleanup",	KSTAT_DATA_UINT64 },
	};

	ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, tcp_g_statp, sizeof (template));
	ksp->ks_data = (void *)tcp_g_statp;

	kstat_install(ksp);
	return (ksp);
}

void
tcp_g_kstat_fini(kstat_t *ksp)
{
	if (ksp != NULL) {
		kstat_delete(ksp);
	}
}

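/*
 * Per-stack "tcpstat" kstat for the more detailed tcp_stat_t counters.
 * The live counters are kept per CPU (tcp_stat_counter_t); on each read
 * tcp_kstat2_update() clears the named kstat and re-sums them.
 */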
void *
tcp_kstat2_init(netstackid_t stackid)
{
	kstat_t *ksp;

	tcp_stat_t template = {
		{ "tcp_time_wait_syn_success",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_clean_death_nondetached",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_eager_blowoff_q",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_eager_blowoff_q0",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_no_listener",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_listendrop",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_listendropq0",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_wsrv_called",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_flwctl_on",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_timer_fire_early",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timer_fire_miss",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_on",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_off",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_backoff",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_flowctl",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_backenabled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_urg",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_putnext",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_unfusable",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_aborted",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_unqualified",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_busy",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_msgcnt",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_plugged",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_in_ack_unsent_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_sock_fallback",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_enabled",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_disabled",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_times",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_pkt_out",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_listen_cnt_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_listen_mem_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zwin_mem_drop",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_zwin_ack_syn",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_rst_unsent",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_reclaim_cnt",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_reass_timeout",		KSTAT_DATA_UINT64, 0 },
#ifdef TCP_DEBUG_COUNTER
		{ "tcp_time_wait",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_rput_time_wait",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_detach_time_wait",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_calls",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_cached_alloc",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_cancel_reqs",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_canceled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timermp_freed",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_push_timer_cnt",		KSTAT_DATA_UINT64, 0 },
		{ "tcp_ack_timer_cnt",		KSTAT_DATA_UINT64, 0 },
#endif
	};

	ksp = kstat_create_netstack(TCP_MOD_NAME, 0, "tcpstat", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 0,
	    stackid);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_private = (void *)(uintptr_t)stackid;
	ksp->ks_update = tcp_kstat2_update;

	kstat_install(ksp);
	return (ksp);
}

void
tcp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

/*
 * Sum up all per CPU tcp_stat_t kstat counters.
 */
static int
tcp_kstat2_update(kstat_t *kp, int rw)
{
	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t	*ns;
	tcp_stack_t	*tcps;
	tcp_stat_t	*stats;
	int		i;
	int		cnt;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}

	stats = (tcp_stat_t *)kp->ks_data;
	tcp_clr_stats(stats);

	/*
	 * tcps_sc_cnt may change in the middle of the loop.  It is better
	 * to get its value first.
	 */
	cnt = tcps->tcps_sc_cnt;
	for (i = 0; i < cnt; i++)
		tcp_add_stats(&tcps->tcps_sc[i]->tcp_sc_stats, stats);

	netstack_rele(ns);
	return (0);
}

/*
 * To add stats from one mib2_tcp_t to another.  Static fields are not added.
 * The caller should set them up properly.
 */
void
tcp_add_mib(mib2_tcp_t *from, mib2_tcp_t *to)
{
	to->tcpActiveOpens += from->tcpActiveOpens;
	to->tcpPassiveOpens += from->tcpPassiveOpens;
	to->tcpAttemptFails += from->tcpAttemptFails;
	to->tcpEstabResets += from->tcpEstabResets;
	to->tcpInSegs += from->tcpInSegs;
	to->tcpOutSegs += from->tcpOutSegs;
	to->tcpRetransSegs += from->tcpRetransSegs;
	to->tcpOutRsts += from->tcpOutRsts;

	to->tcpOutDataSegs += from->tcpOutDataSegs;
	to->tcpOutDataBytes += from->tcpOutDataBytes;
	to->tcpRetransBytes += from->tcpRetransBytes;
	to->tcpOutAck += from->tcpOutAck;
	to->tcpOutAckDelayed += from->tcpOutAckDelayed;
	to->tcpOutUrg += from->tcpOutUrg;
	to->tcpOutWinUpdate += from->tcpOutWinUpdate;
	to->tcpOutWinProbe += from->tcpOutWinProbe;
	to->tcpOutControl += from->tcpOutControl;
	to->tcpOutFastRetrans += from->tcpOutFastRetrans;

	to->tcpInAckBytes += from->tcpInAckBytes;
	to->tcpInDupAck += from->tcpInDupAck;
	to->tcpInAckUnsent += from->tcpInAckUnsent;
	to->tcpInDataInorderSegs += from->tcpInDataInorderSegs;
	to->tcpInDataInorderBytes += from->tcpInDataInorderBytes;
	to->tcpInDataUnorderSegs += from->tcpInDataUnorderSegs;
	to->tcpInDataUnorderBytes += from->tcpInDataUnorderBytes;
	to->tcpInDataDupSegs += from->tcpInDataDupSegs;
	to->tcpInDataDupBytes += from->tcpInDataDupBytes;
	to->tcpInDataPartDupSegs += from->tcpInDataPartDupSegs;
	to->tcpInDataPartDupBytes += from->tcpInDataPartDupBytes;
	to->tcpInDataPastWinSegs += from->tcpInDataPastWinSegs;
	to->tcpInDataPastWinBytes += from->tcpInDataPastWinBytes;
	to->tcpInWinProbe += from->tcpInWinProbe;
	to->tcpInWinUpdate += from->tcpInWinUpdate;
	to->tcpInClosed += from->tcpInClosed;

	to->tcpRttNoUpdate += from->tcpRttNoUpdate;
	to->tcpRttUpdate += from->tcpRttUpdate;
	to->tcpTimRetrans += from->tcpTimRetrans;
	to->tcpTimRetransDrop += from->tcpTimRetransDrop;
	to->tcpTimKeepalive += from->tcpTimKeepalive;
	to->tcpTimKeepaliveProbe += from->tcpTimKeepaliveProbe;
	to->tcpTimKeepaliveDrop += from->tcpTimKeepaliveDrop;
	to->tcpListenDrop += from->tcpListenDrop;
	to->tcpListenDropQ0 += from->tcpListenDropQ0;
	to->tcpHalfOpenDrop += from->tcpHalfOpenDrop;
	to->tcpOutSackRetransSegs += from->tcpOutSackRetransSegs;
	to->tcpHCInSegs += from->tcpHCInSegs;
	to->tcpHCOutSegs += from->tcpHCOutSegs;
}

/*
 * To sum up all MIB2 stats for a tcp_stack_t from all per CPU stats.  The
 * caller should initialize the target mib2_tcp_t properly as this function
 * just adds up all the per CPU stats.
 */
static void
tcp_sum_mib(tcp_stack_t *tcps, mib2_tcp_t *tcp_mib)
{
	int i;
	int cnt;

	/*
	 * tcps_sc_cnt may change in the middle of the loop.  It is better
	 * to get its value first.
	 */
	cnt = tcps->tcps_sc_cnt;
	for (i = 0; i < cnt; i++)
		tcp_add_mib(&tcps->tcps_sc[i]->tcp_sc_mib, tcp_mib);

	/* Sizes of the fixed-length IPv4 and IPv6 connection table entries */
	SET_MIB(tcp_mib->tcpConnTableSize, sizeof (mib2_tcpConnEntry_t));
	SET_MIB(tcp_mib->tcp6ConnTableSize, sizeof (mib2_tcp6ConnEntry_t));
}

/*
 * To set all tcp_stat_t counters to 0.
 */
static void
tcp_clr_stats(tcp_stat_t *stats)
{
	stats->tcp_time_wait_syn_success.value.ui64 = 0;
	stats->tcp_clean_death_nondetached.value.ui64 = 0;
	stats->tcp_eager_blowoff_q.value.ui64 = 0;
	stats->tcp_eager_blowoff_q0.value.ui64 = 0;
	stats->tcp_no_listener.value.ui64 = 0;
	stats->tcp_listendrop.value.ui64 = 0;
	stats->tcp_listendropq0.value.ui64 = 0;
	stats->tcp_wsrv_called.value.ui64 = 0;
	stats->tcp_flwctl_on.value.ui64 = 0;
	stats->tcp_timer_fire_early.value.ui64 = 0;
	stats->tcp_timer_fire_miss.value.ui64 = 0;
	stats->tcp_zcopy_on.value.ui64 = 0;
	stats->tcp_zcopy_off.value.ui64 = 0;
	stats->tcp_zcopy_backoff.value.ui64 = 0;
	stats->tcp_fusion_flowctl.value.ui64 = 0;
	stats->tcp_fusion_backenabled.value.ui64 = 0;
	stats->tcp_fusion_urg.value.ui64 = 0;
	stats->tcp_fusion_putnext.value.ui64 = 0;
	stats->tcp_fusion_unfusable.value.ui64 = 0;
	stats->tcp_fusion_aborted.value.ui64 = 0;
	stats->tcp_fusion_unqualified.value.ui64 = 0;
	stats->tcp_fusion_rrw_busy.value.ui64 = 0;
	stats->tcp_fusion_rrw_msgcnt.value.ui64 = 0;
	stats->tcp_fusion_rrw_plugged.value.ui64 = 0;
	stats->tcp_in_ack_unsent_drop.value.ui64 = 0;
	stats->tcp_sock_fallback.value.ui64 = 0;
	stats->tcp_lso_enabled.value.ui64 = 0;
	stats->tcp_lso_disabled.value.ui64 = 0;
	stats->tcp_lso_times.value.ui64 = 0;
	stats->tcp_lso_pkt_out.value.ui64 = 0;
	stats->tcp_listen_cnt_drop.value.ui64 = 0;
	stats->tcp_listen_mem_drop.value.ui64 = 0;
	stats->tcp_zwin_mem_drop.value.ui64 = 0;
	stats->tcp_zwin_ack_syn.value.ui64 = 0;
	stats->tcp_rst_unsent.value.ui64 = 0;
	stats->tcp_reclaim_cnt.value.ui64 = 0;
	stats->tcp_reass_timeout.value.ui64 = 0;

#ifdef TCP_DEBUG_COUNTER
	stats->tcp_time_wait.value.ui64 = 0;
	stats->tcp_rput_time_wait.value.ui64 = 0;
	stats->tcp_detach_time_wait.value.ui64 = 0;
	stats->tcp_timeout_calls.value.ui64 = 0;
	stats->tcp_timeout_cached_alloc.value.ui64 = 0;
	stats->tcp_timeout_cancel_reqs.value.ui64 = 0;
	stats->tcp_timeout_canceled.value.ui64 = 0;
	stats->tcp_timermp_freed.value.ui64 = 0;
	stats->tcp_push_timer_cnt.value.ui64 = 0;
	stats->tcp_ack_timer_cnt.value.ui64 = 0;
#endif
}

/*
 * To add counters from the per CPU tcp_stat_counter_t to the stack
 * tcp_stat_t.
 */
static void
tcp_add_stats(tcp_stat_counter_t *from, tcp_stat_t *to)
{
	to->tcp_time_wait_syn_success.value.ui64 +=
	    from->tcp_time_wait_syn_success;
	to->tcp_clean_death_nondetached.value.ui64 +=
	    from->tcp_clean_death_nondetached;
	to->tcp_eager_blowoff_q.value.ui64 +=
	    from->tcp_eager_blowoff_q;
	to->tcp_eager_blowoff_q0.value.ui64 +=
	    from->tcp_eager_blowoff_q0;
	to->tcp_no_listener.value.ui64 +=
	    from->tcp_no_listener;
	to->tcp_listendrop.value.ui64 +=
	    from->tcp_listendrop;
	to->tcp_listendropq0.value.ui64 +=
	    from->tcp_listendropq0;
	to->tcp_wsrv_called.value.ui64 +=
	    from->tcp_wsrv_called;
	to->tcp_flwctl_on.value.ui64 +=
	    from->tcp_flwctl_on;
	to->tcp_timer_fire_early.value.ui64 +=
	    from->tcp_timer_fire_early;
	to->tcp_timer_fire_miss.value.ui64 +=
	    from->tcp_timer_fire_miss;
	to->tcp_zcopy_on.value.ui64 +=
	    from->tcp_zcopy_on;
	to->tcp_zcopy_off.value.ui64 +=
	    from->tcp_zcopy_off;
	to->tcp_zcopy_backoff.value.ui64 +=
	    from->tcp_zcopy_backoff;
	to->tcp_fusion_flowctl.value.ui64 +=
	    from->tcp_fusion_flowctl;
	to->tcp_fusion_backenabled.value.ui64 +=
	    from->tcp_fusion_backenabled;
	to->tcp_fusion_urg.value.ui64 +=
	    from->tcp_fusion_urg;
	to->tcp_fusion_putnext.value.ui64 +=
	    from->tcp_fusion_putnext;
	to->tcp_fusion_unfusable.value.ui64 +=
	    from->tcp_fusion_unfusable;
	to->tcp_fusion_aborted.value.ui64 +=
	    from->tcp_fusion_aborted;
	to->tcp_fusion_unqualified.value.ui64 +=
	    from->tcp_fusion_unqualified;
	to->tcp_fusion_rrw_busy.value.ui64 +=
	    from->tcp_fusion_rrw_busy;
	to->tcp_fusion_rrw_msgcnt.value.ui64 +=
	    from->tcp_fusion_rrw_msgcnt;
	to->tcp_fusion_rrw_plugged.value.ui64 +=
	    from->tcp_fusion_rrw_plugged;
	to->tcp_in_ack_unsent_drop.value.ui64 +=
	    from->tcp_in_ack_unsent_drop;
	to->tcp_sock_fallback.value.ui64 +=
	    from->tcp_sock_fallback;
	to->tcp_lso_enabled.value.ui64 +=
	    from->tcp_lso_enabled;
	to->tcp_lso_disabled.value.ui64 +=
	    from->tcp_lso_disabled;
	to->tcp_lso_times.value.ui64 +=
	    from->tcp_lso_times;
	to->tcp_lso_pkt_out.value.ui64 +=
	    from->tcp_lso_pkt_out;
	to->tcp_listen_cnt_drop.value.ui64 +=
	    from->tcp_listen_cnt_drop;
	to->tcp_listen_mem_drop.value.ui64 +=
	    from->tcp_listen_mem_drop;
	to->tcp_zwin_mem_drop.value.ui64 +=
	    from->tcp_zwin_mem_drop;
	to->tcp_zwin_ack_syn.value.ui64 +=
	    from->tcp_zwin_ack_syn;
	to->tcp_rst_unsent.value.ui64 +=
	    from->tcp_rst_unsent;
	to->tcp_reclaim_cnt.value.ui64 +=
	    from->tcp_reclaim_cnt;
	to->tcp_reass_timeout.value.ui64 +=
	    from->tcp_reass_timeout;

#ifdef TCP_DEBUG_COUNTER
	to->tcp_time_wait.value.ui64 +=
	    from->tcp_time_wait;
	to->tcp_rput_time_wait.value.ui64 +=
	    from->tcp_rput_time_wait;
	to->tcp_detach_time_wait.value.ui64 +=
	    from->tcp_detach_time_wait;
	to->tcp_timeout_calls.value.ui64 +=
	    from->tcp_timeout_calls;
	to->tcp_timeout_cached_alloc.value.ui64 +=
	    from->tcp_timeout_cached_alloc;
	to->tcp_timeout_cancel_reqs.value.ui64 +=
	    from->tcp_timeout_cancel_reqs;
	to->tcp_timeout_canceled.value.ui64 +=
	    from->tcp_timeout_canceled;
	to->tcp_timermp_freed.value.ui64 +=
	    from->tcp_timermp_freed;
	to->tcp_push_timer_cnt.value.ui64 +=
	    from->tcp_push_timer_cnt;
	to->tcp_ack_timer_cnt.value.ui64 +=
	    from->tcp_ack_timer_cnt;
#endif
}
1027