/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, Joyent Inc. All rights reserved.
 */

#include <sys/types.h>
#include <sys/tihdr.h>
#include <sys/policy.h>
#include <sys/tsol/tnet.h>
#include <sys/kstat.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_stats.h>
#include <inet/kstatcom.h>
#include <inet/snmpcom.h>

static int	tcp_kstat_update(kstat_t *, int);
static int	tcp_kstat2_update(kstat_t *, int);
static void	tcp_sum_mib(tcp_stack_t *, mib2_tcp_t *);

static void	tcp_add_mib(mib2_tcp_t *, mib2_tcp_t *);
static void	tcp_add_stats(tcp_stat_counter_t *, tcp_stat_t *);
static void	tcp_clr_stats(tcp_stat_t *);

tcp_g_stat_t	tcp_g_statistics;
kstat_t		*tcp_g_kstat;

/* Translate TCP state to MIB2 TCP state. */
static int
tcp_snmp_state(tcp_t *tcp)
{
	if (tcp == NULL)
		return (0);

	switch (tcp->tcp_state) {
	case TCPS_CLOSED:
	case TCPS_IDLE:	/* RFC1213 doesn't have analogue for IDLE & BOUND */
	case TCPS_BOUND:
		return (MIB2_TCP_closed);
	case TCPS_LISTEN:
		return (MIB2_TCP_listen);
	case TCPS_SYN_SENT:
		return (MIB2_TCP_synSent);
	case TCPS_SYN_RCVD:
		return (MIB2_TCP_synReceived);
	case TCPS_ESTABLISHED:
		return (MIB2_TCP_established);
	case TCPS_CLOSE_WAIT:
		return (MIB2_TCP_closeWait);
	case TCPS_FIN_WAIT_1:
		return (MIB2_TCP_finWait1);
	case TCPS_CLOSING:
		return (MIB2_TCP_closing);
	case TCPS_LAST_ACK:
		return (MIB2_TCP_lastAck);
	case TCPS_FIN_WAIT_2:
		return (MIB2_TCP_finWait2);
	case TCPS_TIME_WAIT:
		return (MIB2_TCP_timeWait);
	default:
		return (0);
	}
}

/*
 * Return SNMP stuff in buffer in mpdata.
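 *
 * tcp_snmp_get() fills in the fixed-length mib2_tcp_t, walks the global
 * connection hash to build the IPv4 and IPv6 connection tables plus the
 * MLP attribute tables, and sends each table upstream with qreply().  A
 * copy of the original control message (mp2ctl) is made up front and is
 * what gets returned to the caller.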
 */
mblk_t *
tcp_snmp_get(queue_t *q, mblk_t *mpctl, boolean_t legacy_req)
{
	mblk_t			*mpdata;
	mblk_t			*mp_conn_ctl = NULL;
	mblk_t			*mp_conn_tail;
	mblk_t			*mp_attr_ctl = NULL;
	mblk_t			*mp_attr_tail;
	mblk_t			*mp6_conn_ctl = NULL;
	mblk_t			*mp6_conn_tail;
	mblk_t			*mp6_attr_ctl = NULL;
	mblk_t			*mp6_attr_tail;
	struct opthdr		*optp;
	mib2_tcpConnEntry_t	tce;
	mib2_tcp6ConnEntry_t	tce6;
	mib2_transportMLPEntry_t mlp;
	connf_t			*connfp;
	int			i;
	boolean_t		ispriv;
	zoneid_t		zoneid;
	int			v4_conn_idx;
	int			v6_conn_idx;
	conn_t			*connp = Q_TO_CONN(q);
	tcp_stack_t		*tcps;
	ip_stack_t		*ipst;
	mblk_t			*mp2ctl;
	mib2_tcp_t		tcp_mib;
	size_t			tcp_mib_size, tce_size, tce6_size;

	/*
	 * make a copy of the original message
	 */
	mp2ctl = copymsg(mpctl);

	if (mpctl == NULL ||
	    (mpdata = mpctl->b_cont) == NULL ||
	    (mp_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp_attr_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
		freemsg(mp_conn_ctl);
		freemsg(mp_attr_ctl);
		freemsg(mp6_conn_ctl);
		freemsg(mp6_attr_ctl);
		freemsg(mpctl);
		freemsg(mp2ctl);
		return (NULL);
	}

	ipst = connp->conn_netstack->netstack_ip;
	tcps = connp->conn_netstack->netstack_tcp;

	if (legacy_req) {
		tcp_mib_size = LEGACY_MIB_SIZE(&tcp_mib, mib2_tcp_t);
		tce_size = LEGACY_MIB_SIZE(&tce, mib2_tcpConnEntry_t);
		tce6_size = LEGACY_MIB_SIZE(&tce6, mib2_tcp6ConnEntry_t);
	} else {
		tcp_mib_size = sizeof (mib2_tcp_t);
		tce_size = sizeof (mib2_tcpConnEntry_t);
		tce6_size = sizeof (mib2_tcp6ConnEntry_t);
	}

	bzero(&tcp_mib, sizeof (tcp_mib));

	/* build table of connections -- need count in fixed part */
	SET_MIB(tcp_mib.tcpRtoAlgorithm, 4);	/* vanj */
	SET_MIB(tcp_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min);
	SET_MIB(tcp_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max);
	SET_MIB(tcp_mib.tcpMaxConn, -1);
	SET_MIB(tcp_mib.tcpCurrEstab, 0);

	ispriv =
	    secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0;
	zoneid = Q_TO_CONN(q)->conn_zoneid;

	v4_conn_idx = v6_conn_idx = 0;
	mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		ipst = tcps->tcps_netstack->netstack_ip;

		connfp = &ipst->ips_ipcl_globalhash_fanout[i];

		connp = NULL;

		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp_t *tcp;
			boolean_t needattr;

			if (connp->conn_zoneid != zoneid)
				continue;	/* not in this zone */

			tcp = connp->conn_tcp;
			TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
			tcp->tcp_ibsegs = 0;
			TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
			tcp->tcp_obsegs = 0;

			tce6.tcp6ConnState = tce.tcpConnState =
			    tcp_snmp_state(tcp);
			if (tce.tcpConnState == MIB2_TCP_established ||
			    tce.tcpConnState == MIB2_TCP_closeWait)
				BUMP_MIB(&tcp_mib, tcpCurrEstab);

			needattr = B_FALSE;
			bzero(&mlp, sizeof (mlp));
			if (connp->conn_mlp_type != mlptSingle) {
				if (connp->conn_mlp_type == mlptShared ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_SHARED;
				if (connp->conn_mlp_type == mlptPrivate ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_PRIVATE;
				needattr = B_TRUE;
			}
			if (connp->conn_anon_mlp) {
				mlp.tme_flags |= MIB2_TMEF_ANONMLP;
				needattr = B_TRUE;
			}
			switch (connp->conn_mac_mode) {
			case CONN_MAC_DEFAULT:
				break;
			case CONN_MAC_AWARE:
				mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
				needattr = B_TRUE;
				break;
			case CONN_MAC_IMPLICIT:
				mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
				needattr = B_TRUE;
				break;
			}
			if (connp->conn_ixa->ixa_tsl != NULL) {
				ts_label_t *tsl;

				tsl = connp->conn_ixa->ixa_tsl;
				mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
				mlp.tme_doi = label2doi(tsl);
				mlp.tme_label = *label2bslabel(tsl);
				needattr = B_TRUE;
			}

			/* Create a message to report on IPv6 entries */
			if (connp->conn_ipversion == IPV6_VERSION) {
				tce6.tcp6ConnLocalAddress = connp->conn_laddr_v6;
				tce6.tcp6ConnRemAddress = connp->conn_faddr_v6;
				tce6.tcp6ConnLocalPort = ntohs(connp->conn_lport);
				tce6.tcp6ConnRemPort = ntohs(connp->conn_fport);
				if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET) {
					tce6.tcp6ConnIfIndex =
					    connp->conn_ixa->ixa_scopeid;
				} else {
					tce6.tcp6ConnIfIndex = connp->conn_bound_if;
				}
				/* Don't want just anybody seeing these... */
				if (ispriv) {
					tce6.tcp6ConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt;
					tce6.tcp6ConnEntryInfo.ce_suna =
					    tcp->tcp_suna;
					tce6.tcp6ConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt;
					tce6.tcp6ConnEntryInfo.ce_rack =
					    tcp->tcp_rack;
				} else {
					/*
					 * Netstat, unfortunately, uses this to
					 * get send/receive queue sizes. How to fix?
					 * Why not compute the difference only?
					 */
					tce6.tcp6ConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt - tcp->tcp_suna;
					tce6.tcp6ConnEntryInfo.ce_suna = 0;
					tce6.tcp6ConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt - tcp->tcp_rack;
					tce6.tcp6ConnEntryInfo.ce_rack = 0;
				}

				tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd;
				tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
				tce6.tcp6ConnEntryInfo.ce_rto = tcp->tcp_rto;
				tce6.tcp6ConnEntryInfo.ce_mss = tcp->tcp_mss;
				tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state;

				tce6.tcp6ConnCreationProcess =
				    (connp->conn_cpid < 0) ? MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce6.tcp6ConnCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp6_conn_ctl->b_cont,
				    &mp6_conn_tail, (char *)&tce6, tce6_size);

				mlp.tme_connidx = v6_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(mp6_attr_ctl->b_cont,
					    &mp6_attr_tail, (char *)&mlp, sizeof (mlp));
			}
			/*
			 * Create an IPv4 table entry for IPv4 entries and also
			 * for IPv6 entries which are bound to in6addr_any
			 * but don't have IPV6_V6ONLY set.
			 * (i.e. anything an IPv4 peer could connect to)
			 */
			if (connp->conn_ipversion == IPV4_VERSION ||
			    (tcp->tcp_state <= TCPS_LISTEN &&
			    !connp->conn_ipv6_v6only &&
			    IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
				if (connp->conn_ipversion == IPV6_VERSION) {
					tce.tcpConnRemAddress = INADDR_ANY;
					tce.tcpConnLocalAddress = INADDR_ANY;
				} else {
					tce.tcpConnRemAddress =
					    connp->conn_faddr_v4;
					tce.tcpConnLocalAddress =
					    connp->conn_laddr_v4;
				}
				tce.tcpConnLocalPort = ntohs(connp->conn_lport);
				tce.tcpConnRemPort = ntohs(connp->conn_fport);
				/* Don't want just anybody seeing these... */
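				/*
				 * As with the IPv6 entries above, raw
				 * sequence numbers are reported only to
				 * callers that passed the
				 * secpolicy_ip_config() check; everyone
				 * else just gets the queue depths computed
				 * in the else branch below.
				 */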
				if (ispriv) {
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt;
					tce.tcpConnEntryInfo.ce_suna =
					    tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt;
					tce.tcpConnEntryInfo.ce_rack =
					    tcp->tcp_rack;
				} else {
					/*
					 * Netstat, unfortunately, uses this to
					 * get send/receive queue sizes. How
					 * to fix?
					 * Why not compute the difference only?
					 */
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt - tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_suna = 0;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt - tcp->tcp_rack;
					tce.tcpConnEntryInfo.ce_rack = 0;
				}

				tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd;
				tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
				tce.tcpConnEntryInfo.ce_rto = tcp->tcp_rto;
				tce.tcpConnEntryInfo.ce_mss = tcp->tcp_mss;
				tce.tcpConnEntryInfo.ce_state =
				    tcp->tcp_state;

				tce.tcpConnCreationProcess =
				    (connp->conn_cpid < 0) ?
				    MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce.tcpConnCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp_conn_ctl->b_cont,
				    &mp_conn_tail, (char *)&tce, tce_size);

				mlp.tme_connidx = v4_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(
					    mp_attr_ctl->b_cont,
					    &mp_attr_tail, (char *)&mlp,
					    sizeof (mlp));
			}
		}
	}

	tcp_sum_mib(tcps, &tcp_mib);

	/* Fixed length structure for IPv4 and IPv6 counters */
	SET_MIB(tcp_mib.tcpConnTableSize, tce_size);
	SET_MIB(tcp_mib.tcp6ConnTableSize, tce6_size);

	/*
	 * Synchronize 32- and 64-bit counters.  Note that tcpInSegs and
	 * tcpOutSegs are not updated anywhere in TCP; only the new 64-bit
	 * counters are used.  Hence the old counters' values in tcp_sc_mib
	 * are always 0.
	 */
	SYNC32_MIB(&tcp_mib, tcpInSegs, tcpHCInSegs);
	SYNC32_MIB(&tcp_mib, tcpOutSegs, tcpHCOutSegs);

	optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = 0;
	(void) snmp_append_data(mpdata, (char *)&tcp_mib, tcp_mib_size);
	optp->len = msgdsize(mpdata);
	qreply(q, mpctl);

	/* table of connections... */
	optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = MIB2_TCP_CONN;
	optp->len = msgdsize(mp_conn_ctl->b_cont);
	qreply(q, mp_conn_ctl);

	/* table of MLP attributes... */
	optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp_attr_ctl);
	else
		qreply(q, mp_attr_ctl);

	/* table of IPv6 connections... */
	optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = MIB2_TCP6_CONN;
	optp->len = msgdsize(mp6_conn_ctl->b_cont);
	qreply(q, mp6_conn_ctl);

	/* table of IPv6 MLP attributes... */
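	/*
	 * Like the IPv4 MLP table above, this experimental table is only
	 * sent upstream if at least one connection actually needed an
	 * attribute entry; otherwise the message is freed.
	 */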
	optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp6_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp6_attr_ctl);
	else
		qreply(q, mp6_attr_ctl);
	return (mp2ctl);
}

/* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */
/* ARGSUSED */
int
tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
{
	mib2_tcpConnEntry_t *tce = (mib2_tcpConnEntry_t *)ptr;

	switch (level) {
	case MIB2_TCP:
		switch (name) {
		case 13:
			if (tce->tcpConnState != MIB2_TCP_deleteTCB)
				return (0);
			/* TODO: delete entry defined by tce */
			return (1);
		default:
			return (0);
		}
	default:
		return (1);
	}
}

/*
 * TCP Kstats implementation
 */
void *
tcp_kstat_init(netstackid_t stackid)
{
	kstat_t	*ksp;

	tcp_named_kstat_t template = {
		{ "rtoAlgorithm",	KSTAT_DATA_INT32, 0 },
		{ "rtoMin",		KSTAT_DATA_INT32, 0 },
		{ "rtoMax",		KSTAT_DATA_INT32, 0 },
		{ "maxConn",		KSTAT_DATA_INT32, 0 },
		{ "activeOpens",	KSTAT_DATA_UINT32, 0 },
		{ "passiveOpens",	KSTAT_DATA_UINT32, 0 },
		{ "attemptFails",	KSTAT_DATA_UINT32, 0 },
		{ "estabResets",	KSTAT_DATA_UINT32, 0 },
		{ "currEstab",		KSTAT_DATA_UINT32, 0 },
		{ "inSegs",		KSTAT_DATA_UINT64, 0 },
		{ "outSegs",		KSTAT_DATA_UINT64, 0 },
		{ "retransSegs",	KSTAT_DATA_UINT32, 0 },
		{ "connTableSize",	KSTAT_DATA_INT32, 0 },
		{ "outRsts",		KSTAT_DATA_UINT32, 0 },
		{ "outDataSegs",	KSTAT_DATA_UINT32, 0 },
		{ "outDataBytes",	KSTAT_DATA_UINT32, 0 },
		{ "retransBytes",	KSTAT_DATA_UINT32, 0 },
		{ "outAck",		KSTAT_DATA_UINT32, 0 },
		{ "outAckDelayed",	KSTAT_DATA_UINT32, 0 },
		{ "outUrg",		KSTAT_DATA_UINT32, 0 },
		{ "outWinUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "outWinProbe",	KSTAT_DATA_UINT32, 0 },
		{ "outControl",		KSTAT_DATA_UINT32, 0 },
		{ "outFastRetrans",	KSTAT_DATA_UINT32, 0 },
		{ "inAckSegs",		KSTAT_DATA_UINT32, 0 },
		{ "inAckBytes",		KSTAT_DATA_UINT32, 0 },
		{ "inDupAck",		KSTAT_DATA_UINT32, 0 },
		{ "inAckUnsent",	KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataDupSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataDupBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inWinProbe",		KSTAT_DATA_UINT32, 0 },
		{ "inWinUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "inClosed",		KSTAT_DATA_UINT32, 0 },
		{ "rttUpdate",		KSTAT_DATA_UINT32, 0 },
		{ "rttNoUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "timRetrans",		KSTAT_DATA_UINT32, 0 },
		{ "timRetransDrop",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepalive",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveProbe",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveDrop",	KSTAT_DATA_UINT32, 0 },
		{ "listenDrop",		KSTAT_DATA_UINT32, 0 },
		{ "listenDropQ0",	KSTAT_DATA_UINT32, 0 },
		{ "halfOpenDrop",	KSTAT_DATA_UINT32, 0 },
		{ "outSackRetransSegs",	KSTAT_DATA_UINT32, 0 },
		{ "connTableSize6",	KSTAT_DATA_INT32, 0 }
	};
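	/*
	 * Publish the MIB-style named kstat for this stack.  The values
	 * are not maintained in place; tcp_kstat_update() recomputes them
	 * from the per-CPU MIB counters (plus a walk of the connection
	 * hash for currEstab) each time the kstat is read.
	 */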
	ksp = kstat_create_netstack(TCP_MOD_NAME, stackid, TCP_MOD_NAME, "mib2",
	    KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid);

	if (ksp == NULL)
		return (NULL);

	template.rtoAlgorithm.value.ui32 = 4;
	template.maxConn.value.i32 = -1;

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_update = tcp_kstat_update;
	ksp->ks_private = (void *)(uintptr_t)stackid;

	/*
	 * If this is an exclusive netstack for a local zone, the global zone
	 * should still be able to read the kstat.
	 */
	if (stackid != GLOBAL_NETSTACKID)
		kstat_zone_add(ksp, GLOBAL_ZONEID);

	kstat_install(ksp);
	return (ksp);
}

void
tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

static int
tcp_kstat_update(kstat_t *kp, int rw)
{
	tcp_named_kstat_t *tcpkp;
	tcp_t		*tcp;
	connf_t		*connfp;
	conn_t		*connp;
	int		i;
	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t	*ns;
	tcp_stack_t	*tcps;
	ip_stack_t	*ipst;
	mib2_tcp_t	tcp_mib;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}

	tcpkp = (tcp_named_kstat_t *)kp->ks_data;

	tcpkp->currEstab.value.ui32 = 0;
	tcpkp->rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min;
	tcpkp->rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max;

	ipst = ns->netstack_ip;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
		connp = NULL;
		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp = connp->conn_tcp;
			switch (tcp_snmp_state(tcp)) {
			case MIB2_TCP_established:
			case MIB2_TCP_closeWait:
				tcpkp->currEstab.value.ui32++;
				break;
			}
		}
	}
	bzero(&tcp_mib, sizeof (tcp_mib));
	tcp_sum_mib(tcps, &tcp_mib);

	/* Fixed length structure for IPv4 and IPv6 counters */
	SET_MIB(tcp_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t));
	SET_MIB(tcp_mib.tcp6ConnTableSize, sizeof (mib2_tcp6ConnEntry_t));

	tcpkp->activeOpens.value.ui32 = tcp_mib.tcpActiveOpens;
	tcpkp->passiveOpens.value.ui32 = tcp_mib.tcpPassiveOpens;
	tcpkp->attemptFails.value.ui32 = tcp_mib.tcpAttemptFails;
	tcpkp->estabResets.value.ui32 = tcp_mib.tcpEstabResets;
	tcpkp->inSegs.value.ui64 = tcp_mib.tcpHCInSegs;
	tcpkp->outSegs.value.ui64 = tcp_mib.tcpHCOutSegs;
	tcpkp->retransSegs.value.ui32 = tcp_mib.tcpRetransSegs;
	tcpkp->connTableSize.value.i32 = tcp_mib.tcpConnTableSize;
	tcpkp->outRsts.value.ui32 = tcp_mib.tcpOutRsts;
	tcpkp->outDataSegs.value.ui32 = tcp_mib.tcpOutDataSegs;
	tcpkp->outDataBytes.value.ui32 = tcp_mib.tcpOutDataBytes;
	tcpkp->retransBytes.value.ui32 = tcp_mib.tcpRetransBytes;
	tcpkp->outAck.value.ui32 = tcp_mib.tcpOutAck;
	tcpkp->outAckDelayed.value.ui32 = tcp_mib.tcpOutAckDelayed;
	tcpkp->outUrg.value.ui32 = tcp_mib.tcpOutUrg;
	tcpkp->outWinUpdate.value.ui32 = tcp_mib.tcpOutWinUpdate;
	tcpkp->outWinProbe.value.ui32 = tcp_mib.tcpOutWinProbe;
	tcpkp->outControl.value.ui32 = tcp_mib.tcpOutControl;
	tcpkp->outFastRetrans.value.ui32 = tcp_mib.tcpOutFastRetrans;
	tcpkp->inAckSegs.value.ui32 = tcp_mib.tcpInAckSegs;
	tcpkp->inAckBytes.value.ui32 = tcp_mib.tcpInAckBytes;
	tcpkp->inDupAck.value.ui32 = tcp_mib.tcpInDupAck;
	tcpkp->inAckUnsent.value.ui32 = tcp_mib.tcpInAckUnsent;
	tcpkp->inDataInorderSegs.value.ui32 = tcp_mib.tcpInDataInorderSegs;
	tcpkp->inDataInorderBytes.value.ui32 = tcp_mib.tcpInDataInorderBytes;
	tcpkp->inDataUnorderSegs.value.ui32 = tcp_mib.tcpInDataUnorderSegs;
	tcpkp->inDataUnorderBytes.value.ui32 = tcp_mib.tcpInDataUnorderBytes;
	tcpkp->inDataDupSegs.value.ui32 = tcp_mib.tcpInDataDupSegs;
	tcpkp->inDataDupBytes.value.ui32 = tcp_mib.tcpInDataDupBytes;
	tcpkp->inDataPartDupSegs.value.ui32 = tcp_mib.tcpInDataPartDupSegs;
	tcpkp->inDataPartDupBytes.value.ui32 = tcp_mib.tcpInDataPartDupBytes;
	tcpkp->inDataPastWinSegs.value.ui32 = tcp_mib.tcpInDataPastWinSegs;
	tcpkp->inDataPastWinBytes.value.ui32 = tcp_mib.tcpInDataPastWinBytes;
	tcpkp->inWinProbe.value.ui32 = tcp_mib.tcpInWinProbe;
	tcpkp->inWinUpdate.value.ui32 = tcp_mib.tcpInWinUpdate;
	tcpkp->inClosed.value.ui32 = tcp_mib.tcpInClosed;
	tcpkp->rttNoUpdate.value.ui32 = tcp_mib.tcpRttNoUpdate;
	tcpkp->rttUpdate.value.ui32 = tcp_mib.tcpRttUpdate;
	tcpkp->timRetrans.value.ui32 = tcp_mib.tcpTimRetrans;
	tcpkp->timRetransDrop.value.ui32 = tcp_mib.tcpTimRetransDrop;
	tcpkp->timKeepalive.value.ui32 = tcp_mib.tcpTimKeepalive;
	tcpkp->timKeepaliveProbe.value.ui32 = tcp_mib.tcpTimKeepaliveProbe;
	tcpkp->timKeepaliveDrop.value.ui32 = tcp_mib.tcpTimKeepaliveDrop;
	tcpkp->listenDrop.value.ui32 = tcp_mib.tcpListenDrop;
	tcpkp->listenDropQ0.value.ui32 = tcp_mib.tcpListenDropQ0;
	tcpkp->halfOpenDrop.value.ui32 = tcp_mib.tcpHalfOpenDrop;
	tcpkp->outSackRetransSegs.value.ui32 = tcp_mib.tcpOutSackRetransSegs;
	tcpkp->connTableSize6.value.i32 = tcp_mib.tcp6ConnTableSize;

	netstack_rele(ns);
	return (0);
}

/*
 * kstats related to squeues i.e. not per IP instance
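 *
 * These counters live in the single, system-wide tcp_g_statistics
 * structure and are therefore created once with kstat_create() rather
 * than per netstack with kstat_create_netstack().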
 */
void *
tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp)
{
	kstat_t	*ksp;

	tcp_g_stat_t template = {
		{ "tcp_timermp_alloced",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocfail",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocdblfail",	KSTAT_DATA_UINT64 },
		{ "tcp_freelist_cleanup",	KSTAT_DATA_UINT64 },
	};

	ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, tcp_g_statp, sizeof (template));
	ksp->ks_data = (void *)tcp_g_statp;

	kstat_install(ksp);
	return (ksp);
}

void
tcp_g_kstat_fini(kstat_t *ksp)
{
	if (ksp != NULL) {
		kstat_delete(ksp);
	}
}

void *
tcp_kstat2_init(netstackid_t stackid)
{
	kstat_t	*ksp;

	tcp_stat_t template = {
		{ "tcp_time_wait_syn_success",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_clean_death_nondetached",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_eager_blowoff_q",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_eager_blowoff_q0",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_no_listener",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_listendrop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_listendropq0",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_wsrv_called",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_flwctl_on",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timer_fire_early",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timer_fire_miss",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_on",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_off",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_backoff",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_flowctl",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_backenabled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_urg",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_putnext",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_unfusable",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_aborted",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_unqualified",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_busy",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_msgcnt",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_plugged",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_in_ack_unsent_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_sock_fallback",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_enabled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_disabled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_times",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_pkt_out",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_listen_cnt_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_listen_mem_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zwin_mem_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zwin_ack_syn",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_rst_unsent",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_reclaim_cnt",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_reass_timeout",	KSTAT_DATA_UINT64, 0 },
#ifdef TCP_DEBUG_COUNTER
		{ "tcp_time_wait",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_rput_time_wait",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_detach_time_wait",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_calls",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_cached_alloc",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_cancel_reqs",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_canceled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timermp_freed",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_push_timer_cnt",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_ack_timer_cnt",	KSTAT_DATA_UINT64, 0 },
#endif
	};
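	/*
	 * Per-netstack "tcpstat" kstat.  The values are not updated in
	 * place; tcp_kstat2_update() clears this snapshot and sums the
	 * per-CPU tcp_stat_counter_t counters into it on every read.
	 */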
	ksp = kstat_create_netstack(TCP_MOD_NAME, stackid, "tcpstat", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 0,
	    stackid);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_private = (void *)(uintptr_t)stackid;
	ksp->ks_update = tcp_kstat2_update;

	/*
	 * If this is an exclusive netstack for a local zone, the global zone
	 * should still be able to read the kstat.
	 */
	if (stackid != GLOBAL_NETSTACKID)
		kstat_zone_add(ksp, GLOBAL_ZONEID);

	kstat_install(ksp);
	return (ksp);
}

void
tcp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

/*
 * Sum up all per CPU tcp_stat_t kstat counters.
 */
static int
tcp_kstat2_update(kstat_t *kp, int rw)
{
	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t	*ns;
	tcp_stack_t	*tcps;
	tcp_stat_t	*stats;
	int		i;
	int		cnt;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}

	stats = (tcp_stat_t *)kp->ks_data;
	tcp_clr_stats(stats);

	/*
	 * tcps_sc_cnt may change in the middle of the loop.  It is better
	 * to get its value first.
	 */
	cnt = tcps->tcps_sc_cnt;
	for (i = 0; i < cnt; i++)
		tcp_add_stats(&tcps->tcps_sc[i]->tcp_sc_stats, stats);

	netstack_rele(ns);
	return (0);
}

/*
 * To add stats from one mib2_tcp_t to another.  Static fields are not added.
 * The caller should set them up properly.
 */
static void
tcp_add_mib(mib2_tcp_t *from, mib2_tcp_t *to)
{
	to->tcpActiveOpens += from->tcpActiveOpens;
	to->tcpPassiveOpens += from->tcpPassiveOpens;
	to->tcpAttemptFails += from->tcpAttemptFails;
	to->tcpEstabResets += from->tcpEstabResets;
	to->tcpInSegs += from->tcpInSegs;
	to->tcpOutSegs += from->tcpOutSegs;
	to->tcpRetransSegs += from->tcpRetransSegs;
	to->tcpOutRsts += from->tcpOutRsts;

	to->tcpOutDataSegs += from->tcpOutDataSegs;
	to->tcpOutDataBytes += from->tcpOutDataBytes;
	to->tcpRetransBytes += from->tcpRetransBytes;
	to->tcpOutAck += from->tcpOutAck;
	to->tcpOutAckDelayed += from->tcpOutAckDelayed;
	to->tcpOutUrg += from->tcpOutUrg;
	to->tcpOutWinUpdate += from->tcpOutWinUpdate;
	to->tcpOutWinProbe += from->tcpOutWinProbe;
	to->tcpOutControl += from->tcpOutControl;
	to->tcpOutFastRetrans += from->tcpOutFastRetrans;

	to->tcpInAckBytes += from->tcpInAckBytes;
	to->tcpInDupAck += from->tcpInDupAck;
	to->tcpInAckUnsent += from->tcpInAckUnsent;
	to->tcpInDataInorderSegs += from->tcpInDataInorderSegs;
	to->tcpInDataInorderBytes += from->tcpInDataInorderBytes;
	to->tcpInDataUnorderSegs += from->tcpInDataUnorderSegs;
	to->tcpInDataUnorderBytes += from->tcpInDataUnorderBytes;
	to->tcpInDataDupSegs += from->tcpInDataDupSegs;
	to->tcpInDataDupBytes += from->tcpInDataDupBytes;
	to->tcpInDataPartDupSegs += from->tcpInDataPartDupSegs;
	to->tcpInDataPartDupBytes += from->tcpInDataPartDupBytes;
	to->tcpInDataPastWinSegs += from->tcpInDataPastWinSegs;
	to->tcpInDataPastWinBytes += from->tcpInDataPastWinBytes;
	to->tcpInWinProbe += from->tcpInWinProbe;
	to->tcpInWinUpdate += from->tcpInWinUpdate;
	to->tcpInClosed += from->tcpInClosed;

	to->tcpRttNoUpdate += from->tcpRttNoUpdate;
	to->tcpRttUpdate += from->tcpRttUpdate;
	to->tcpTimRetrans += from->tcpTimRetrans;
	to->tcpTimRetransDrop += from->tcpTimRetransDrop;
	to->tcpTimKeepalive += from->tcpTimKeepalive;
	to->tcpTimKeepaliveProbe += from->tcpTimKeepaliveProbe;
	to->tcpTimKeepaliveDrop += from->tcpTimKeepaliveDrop;
	to->tcpListenDrop += from->tcpListenDrop;
	to->tcpListenDropQ0 += from->tcpListenDropQ0;
	to->tcpHalfOpenDrop += from->tcpHalfOpenDrop;
	to->tcpOutSackRetransSegs += from->tcpOutSackRetransSegs;
	to->tcpHCInSegs += from->tcpHCInSegs;
	to->tcpHCOutSegs += from->tcpHCOutSegs;
}

/*
 * To sum up all MIB2 stats for a tcp_stack_t from all per CPU stats.  The
 * caller should initialize the target mib2_tcp_t properly as this function
 * just adds up all the per CPU stats.
 */
static void
tcp_sum_mib(tcp_stack_t *tcps, mib2_tcp_t *tcp_mib)
{
	int	i;
	int	cnt;

	/*
	 * tcps_sc_cnt may change in the middle of the loop.  It is better
	 * to get its value first.
	 */
	cnt = tcps->tcps_sc_cnt;
	for (i = 0; i < cnt; i++)
		tcp_add_mib(&tcps->tcps_sc[i]->tcp_sc_mib, tcp_mib);
}

/*
 * To set all tcp_stat_t counters to 0.
 */
static void
tcp_clr_stats(tcp_stat_t *stats)
{
	stats->tcp_time_wait_syn_success.value.ui64 = 0;
	stats->tcp_clean_death_nondetached.value.ui64 = 0;
	stats->tcp_eager_blowoff_q.value.ui64 = 0;
	stats->tcp_eager_blowoff_q0.value.ui64 = 0;
	stats->tcp_no_listener.value.ui64 = 0;
	stats->tcp_listendrop.value.ui64 = 0;
	stats->tcp_listendropq0.value.ui64 = 0;
	stats->tcp_wsrv_called.value.ui64 = 0;
	stats->tcp_flwctl_on.value.ui64 = 0;
	stats->tcp_timer_fire_early.value.ui64 = 0;
	stats->tcp_timer_fire_miss.value.ui64 = 0;
	stats->tcp_zcopy_on.value.ui64 = 0;
	stats->tcp_zcopy_off.value.ui64 = 0;
	stats->tcp_zcopy_backoff.value.ui64 = 0;
	stats->tcp_fusion_flowctl.value.ui64 = 0;
	stats->tcp_fusion_backenabled.value.ui64 = 0;
	stats->tcp_fusion_urg.value.ui64 = 0;
	stats->tcp_fusion_putnext.value.ui64 = 0;
	stats->tcp_fusion_unfusable.value.ui64 = 0;
	stats->tcp_fusion_aborted.value.ui64 = 0;
	stats->tcp_fusion_unqualified.value.ui64 = 0;
	stats->tcp_fusion_rrw_busy.value.ui64 = 0;
	stats->tcp_fusion_rrw_msgcnt.value.ui64 = 0;
	stats->tcp_fusion_rrw_plugged.value.ui64 = 0;
	stats->tcp_in_ack_unsent_drop.value.ui64 = 0;
	stats->tcp_sock_fallback.value.ui64 = 0;
	stats->tcp_lso_enabled.value.ui64 = 0;
	stats->tcp_lso_disabled.value.ui64 = 0;
	stats->tcp_lso_times.value.ui64 = 0;
	stats->tcp_lso_pkt_out.value.ui64 = 0;
	stats->tcp_listen_cnt_drop.value.ui64 = 0;
	stats->tcp_listen_mem_drop.value.ui64 = 0;
	stats->tcp_zwin_mem_drop.value.ui64 = 0;
	stats->tcp_zwin_ack_syn.value.ui64 = 0;
	stats->tcp_rst_unsent.value.ui64 = 0;
	stats->tcp_reclaim_cnt.value.ui64 = 0;
	stats->tcp_reass_timeout.value.ui64 = 0;

#ifdef TCP_DEBUG_COUNTER
	stats->tcp_time_wait.value.ui64 = 0;
	stats->tcp_rput_time_wait.value.ui64 = 0;
	stats->tcp_detach_time_wait.value.ui64 = 0;
	stats->tcp_timeout_calls.value.ui64 = 0;
	stats->tcp_timeout_cached_alloc.value.ui64 = 0;
	stats->tcp_timeout_cancel_reqs.value.ui64 = 0;
	stats->tcp_timeout_canceled.value.ui64 = 0;
	stats->tcp_timermp_freed.value.ui64 = 0;
	stats->tcp_push_timer_cnt.value.ui64 = 0;
	stats->tcp_ack_timer_cnt.value.ui64 = 0;
#endif
}

/*
 * To add counters from the per CPU tcp_stat_counter_t to the stack
 * tcp_stat_t.
 */
static void
tcp_add_stats(tcp_stat_counter_t *from, tcp_stat_t *to)
{
	to->tcp_time_wait_syn_success.value.ui64 +=
	    from->tcp_time_wait_syn_success;
	to->tcp_clean_death_nondetached.value.ui64 +=
	    from->tcp_clean_death_nondetached;
	to->tcp_eager_blowoff_q.value.ui64 +=
	    from->tcp_eager_blowoff_q;
	to->tcp_eager_blowoff_q0.value.ui64 +=
	    from->tcp_eager_blowoff_q0;
	to->tcp_no_listener.value.ui64 +=
	    from->tcp_no_listener;
	to->tcp_listendrop.value.ui64 +=
	    from->tcp_listendrop;
	to->tcp_listendropq0.value.ui64 +=
	    from->tcp_listendropq0;
	to->tcp_wsrv_called.value.ui64 +=
	    from->tcp_wsrv_called;
	to->tcp_flwctl_on.value.ui64 +=
	    from->tcp_flwctl_on;
	to->tcp_timer_fire_early.value.ui64 +=
	    from->tcp_timer_fire_early;
	to->tcp_timer_fire_miss.value.ui64 +=
	    from->tcp_timer_fire_miss;
	to->tcp_zcopy_on.value.ui64 +=
	    from->tcp_zcopy_on;
	to->tcp_zcopy_off.value.ui64 +=
	    from->tcp_zcopy_off;
	to->tcp_zcopy_backoff.value.ui64 +=
	    from->tcp_zcopy_backoff;
	to->tcp_fusion_flowctl.value.ui64 +=
	    from->tcp_fusion_flowctl;
	to->tcp_fusion_backenabled.value.ui64 +=
	    from->tcp_fusion_backenabled;
	to->tcp_fusion_urg.value.ui64 +=
	    from->tcp_fusion_urg;
	to->tcp_fusion_putnext.value.ui64 +=
	    from->tcp_fusion_putnext;
	to->tcp_fusion_unfusable.value.ui64 +=
	    from->tcp_fusion_unfusable;
	to->tcp_fusion_aborted.value.ui64 +=
	    from->tcp_fusion_aborted;
	to->tcp_fusion_unqualified.value.ui64 +=
	    from->tcp_fusion_unqualified;
	to->tcp_fusion_rrw_busy.value.ui64 +=
	    from->tcp_fusion_rrw_busy;
	to->tcp_fusion_rrw_msgcnt.value.ui64 +=
	    from->tcp_fusion_rrw_msgcnt;
	to->tcp_fusion_rrw_plugged.value.ui64 +=
	    from->tcp_fusion_rrw_plugged;
	to->tcp_in_ack_unsent_drop.value.ui64 +=
	    from->tcp_in_ack_unsent_drop;
	to->tcp_sock_fallback.value.ui64 +=
	    from->tcp_sock_fallback;
	to->tcp_lso_enabled.value.ui64 +=
	    from->tcp_lso_enabled;
	to->tcp_lso_disabled.value.ui64 +=
	    from->tcp_lso_disabled;
	to->tcp_lso_times.value.ui64 +=
	    from->tcp_lso_times;
	to->tcp_lso_pkt_out.value.ui64 +=
	    from->tcp_lso_pkt_out;
	to->tcp_listen_cnt_drop.value.ui64 +=
	    from->tcp_listen_cnt_drop;
	to->tcp_listen_mem_drop.value.ui64 +=
	    from->tcp_listen_mem_drop;
	to->tcp_zwin_mem_drop.value.ui64 +=
	    from->tcp_zwin_mem_drop;
	to->tcp_zwin_ack_syn.value.ui64 +=
	    from->tcp_zwin_ack_syn;
	to->tcp_rst_unsent.value.ui64 +=
	    from->tcp_rst_unsent;
	to->tcp_reclaim_cnt.value.ui64 +=
	    from->tcp_reclaim_cnt;
	to->tcp_reass_timeout.value.ui64 +=
	    from->tcp_reass_timeout;

#ifdef TCP_DEBUG_COUNTER
	to->tcp_time_wait.value.ui64 +=
	    from->tcp_time_wait;
	to->tcp_rput_time_wait.value.ui64 +=
	    from->tcp_rput_time_wait;
	to->tcp_detach_time_wait.value.ui64 +=
	    from->tcp_detach_time_wait;
	to->tcp_timeout_calls.value.ui64 +=
	    from->tcp_timeout_calls;
	to->tcp_timeout_cached_alloc.value.ui64 +=
	    from->tcp_timeout_cached_alloc;
	to->tcp_timeout_cancel_reqs.value.ui64 +=
	    from->tcp_timeout_cancel_reqs;
	to->tcp_timeout_canceled.value.ui64 +=
	    from->tcp_timeout_canceled;
	to->tcp_timermp_freed.value.ui64 +=
	    from->tcp_timermp_freed;
	to->tcp_push_timer_cnt.value.ui64 +=
	    from->tcp_push_timer_cnt;
	to->tcp_ack_timer_cnt.value.ui64 +=
	    from->tcp_ack_timer_cnt;
#endif
}