/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/tihdr.h>
#include <sys/policy.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/ip.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/tcp_stats.h>
#include <inet/kstatcom.h>
#include <inet/snmpcom.h>

static int	tcp_kstat_update(kstat_t *kp, int rw);
static int	tcp_kstat2_update(kstat_t *kp, int rw);
static void	tcp_sum_mib(tcp_stack_t *, mib2_tcp_t *);

static void	tcp_cp_mib(mib2_tcp_t *, mib2_tcp_t *);
static void	tcp_cp_stats(tcp_stat_t *, tcp_stat_t *);
static void	tcp_clr_stats(tcp_stat_t *);

tcp_g_stat_t	tcp_g_statistics;
kstat_t		*tcp_g_kstat;

/* Translate TCP state to MIB2 TCP state. */
static int
tcp_snmp_state(tcp_t *tcp)
{
	if (tcp == NULL)
		return (0);

	switch (tcp->tcp_state) {
	case TCPS_CLOSED:
	case TCPS_IDLE:	/* RFC1213 doesn't have analogue for IDLE & BOUND */
	case TCPS_BOUND:
		return (MIB2_TCP_closed);
	case TCPS_LISTEN:
		return (MIB2_TCP_listen);
	case TCPS_SYN_SENT:
		return (MIB2_TCP_synSent);
	case TCPS_SYN_RCVD:
		return (MIB2_TCP_synReceived);
	case TCPS_ESTABLISHED:
		return (MIB2_TCP_established);
	case TCPS_CLOSE_WAIT:
		return (MIB2_TCP_closeWait);
	case TCPS_FIN_WAIT_1:
		return (MIB2_TCP_finWait1);
	case TCPS_CLOSING:
		return (MIB2_TCP_closing);
	case TCPS_LAST_ACK:
		return (MIB2_TCP_lastAck);
	case TCPS_FIN_WAIT_2:
		return (MIB2_TCP_finWait2);
	case TCPS_TIME_WAIT:
		return (MIB2_TCP_timeWait);
	default:
		return (0);
	}
}

/*
 * Return SNMP stuff in buffer in mpdata.
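 *
 * Fills in the fixed MIB2_TCP group, the IPv4 and IPv6 connection tables
 * and the transport MLP attribute tables, sending each table upstream with
 * qreply().  Returns a copy of the original control message (mp2ctl) for
 * further processing by the caller.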
 */
mblk_t *
tcp_snmp_get(queue_t *q, mblk_t *mpctl)
{
	mblk_t			*mpdata;
	mblk_t			*mp_conn_ctl = NULL;
	mblk_t			*mp_conn_tail;
	mblk_t			*mp_attr_ctl = NULL;
	mblk_t			*mp_attr_tail;
	mblk_t			*mp6_conn_ctl = NULL;
	mblk_t			*mp6_conn_tail;
	mblk_t			*mp6_attr_ctl = NULL;
	mblk_t			*mp6_attr_tail;
	struct opthdr		*optp;
	mib2_tcpConnEntry_t	tce;
	mib2_tcp6ConnEntry_t	tce6;
	mib2_transportMLPEntry_t mlp;
	connf_t			*connfp;
	int			i;
	boolean_t		ispriv;
	zoneid_t		zoneid;
	int			v4_conn_idx;
	int			v6_conn_idx;
	conn_t			*connp = Q_TO_CONN(q);
	tcp_stack_t		*tcps;
	ip_stack_t		*ipst;
	mblk_t			*mp2ctl;
	mib2_tcp_t		tcp_mib;

	/*
	 * make a copy of the original message
	 */
	mp2ctl = copymsg(mpctl);

	if (mpctl == NULL ||
	    (mpdata = mpctl->b_cont) == NULL ||
	    (mp_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp_attr_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
		freemsg(mp_conn_ctl);
		freemsg(mp_attr_ctl);
		freemsg(mp6_conn_ctl);
		freemsg(mp6_attr_ctl);
		freemsg(mpctl);
		freemsg(mp2ctl);
		return (NULL);
	}

	ipst = connp->conn_netstack->netstack_ip;
	tcps = connp->conn_netstack->netstack_tcp;

	bzero(&tcp_mib, sizeof (tcp_mib));

	/* build table of connections -- need count in fixed part */
	SET_MIB(tcp_mib.tcpRtoAlgorithm, 4);	/* vanj */
	SET_MIB(tcp_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min);
	SET_MIB(tcp_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max);
	SET_MIB(tcp_mib.tcpMaxConn, -1);
	SET_MIB(tcp_mib.tcpCurrEstab, 0);

	ispriv =
	    secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0;
	zoneid = Q_TO_CONN(q)->conn_zoneid;

	v4_conn_idx = v6_conn_idx = 0;
	mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		ipst = tcps->tcps_netstack->netstack_ip;

		connfp = &ipst->ips_ipcl_globalhash_fanout[i];

		connp = NULL;

		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp_t *tcp;
			boolean_t needattr;

			if (connp->conn_zoneid != zoneid)
				continue;	/* not in this zone */

			tcp = connp->conn_tcp;
			TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
			tcp->tcp_ibsegs = 0;
			TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
			tcp->tcp_obsegs = 0;

			tce6.tcp6ConnState = tce.tcpConnState =
			    tcp_snmp_state(tcp);
			if (tce.tcpConnState == MIB2_TCP_established ||
			    tce.tcpConnState == MIB2_TCP_closeWait)
				TCPS_BUMP_MIB(tcps, tcpCurrEstab);

			needattr = B_FALSE;
			bzero(&mlp, sizeof (mlp));
			if (connp->conn_mlp_type != mlptSingle) {
				if (connp->conn_mlp_type == mlptShared ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_SHARED;
				if (connp->conn_mlp_type == mlptPrivate ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_PRIVATE;
				needattr = B_TRUE;
			}
			if (connp->conn_anon_mlp) {
				mlp.tme_flags |= MIB2_TMEF_ANONMLP;
				needattr = B_TRUE;
			}
			switch (connp->conn_mac_mode) {
			case CONN_MAC_DEFAULT:
				break;
			case CONN_MAC_AWARE:
				mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
				needattr = B_TRUE;
				break;
			case CONN_MAC_IMPLICIT:
				mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
				needattr = B_TRUE;
				break;
			}
			if (connp->conn_ixa->ixa_tsl != NULL) {
				ts_label_t *tsl;

				tsl = connp->conn_ixa->ixa_tsl;
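				/*
				 * Labeled connection: record the label and
				 * its DOI in the MLP attribute entry.
				 */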
				mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
				mlp.tme_doi = label2doi(tsl);
				mlp.tme_label = *label2bslabel(tsl);
				needattr = B_TRUE;
			}

			/* Create a message to report on IPv6 entries */
			if (connp->conn_ipversion == IPV6_VERSION) {
				tce6.tcp6ConnLocalAddress = connp->conn_laddr_v6;
				tce6.tcp6ConnRemAddress = connp->conn_faddr_v6;
				tce6.tcp6ConnLocalPort = ntohs(connp->conn_lport);
				tce6.tcp6ConnRemPort = ntohs(connp->conn_fport);
				if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET) {
					tce6.tcp6ConnIfIndex =
					    connp->conn_ixa->ixa_scopeid;
				} else {
					tce6.tcp6ConnIfIndex = connp->conn_bound_if;
				}
				/* Don't want just anybody seeing these... */
				if (ispriv) {
					tce6.tcp6ConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt;
					tce6.tcp6ConnEntryInfo.ce_suna =
					    tcp->tcp_suna;
					tce6.tcp6ConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt;
					tce6.tcp6ConnEntryInfo.ce_rack =
					    tcp->tcp_rack;
				} else {
					/*
					 * Netstat, unfortunately, uses this to
					 * get send/receive queue sizes.  How to fix?
					 * Why not compute the difference only?
					 */
					tce6.tcp6ConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt - tcp->tcp_suna;
					tce6.tcp6ConnEntryInfo.ce_suna = 0;
					tce6.tcp6ConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt - tcp->tcp_rack;
					tce6.tcp6ConnEntryInfo.ce_rack = 0;
				}

				tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd;
				tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
				tce6.tcp6ConnEntryInfo.ce_rto = tcp->tcp_rto;
				tce6.tcp6ConnEntryInfo.ce_mss = tcp->tcp_mss;
				tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state;

				tce6.tcp6ConnCreationProcess =
				    (connp->conn_cpid < 0) ? MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce6.tcp6ConnCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp6_conn_ctl->b_cont,
				    &mp6_conn_tail, (char *)&tce6, sizeof (tce6));

				mlp.tme_connidx = v6_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(mp6_attr_ctl->b_cont,
					    &mp6_attr_tail, (char *)&mlp, sizeof (mlp));
			}
			/*
			 * Create an IPv4 table entry for IPv4 entries and also
			 * for IPv6 entries which are bound to in6addr_any
			 * but don't have IPV6_V6ONLY set.
			 * (i.e. anything an IPv4 peer could connect to)
			 */
			if (connp->conn_ipversion == IPV4_VERSION ||
			    (tcp->tcp_state <= TCPS_LISTEN &&
			    !connp->conn_ipv6_v6only &&
			    IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
				if (connp->conn_ipversion == IPV6_VERSION) {
					tce.tcpConnRemAddress = INADDR_ANY;
					tce.tcpConnLocalAddress = INADDR_ANY;
				} else {
					tce.tcpConnRemAddress =
					    connp->conn_faddr_v4;
					tce.tcpConnLocalAddress =
					    connp->conn_laddr_v4;
				}
				tce.tcpConnLocalPort = ntohs(connp->conn_lport);
				tce.tcpConnRemPort = ntohs(connp->conn_fport);
				/* Don't want just anybody seeing these... */
				if (ispriv) {
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt;
					tce.tcpConnEntryInfo.ce_suna =
					    tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt;
					tce.tcpConnEntryInfo.ce_rack =
					    tcp->tcp_rack;
				} else {
					/*
					 * Netstat, unfortunately, uses this to
					 * get send/receive queue sizes.  How
					 * to fix?
					 * Why not compute the difference only?
					 */
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt - tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_suna = 0;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt - tcp->tcp_rack;
					tce.tcpConnEntryInfo.ce_rack = 0;
				}

				tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd;
				tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
				tce.tcpConnEntryInfo.ce_rto = tcp->tcp_rto;
				tce.tcpConnEntryInfo.ce_mss = tcp->tcp_mss;
				tce.tcpConnEntryInfo.ce_state =
				    tcp->tcp_state;

				tce.tcpConnCreationProcess =
				    (connp->conn_cpid < 0) ?
				    MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce.tcpConnCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp_conn_ctl->b_cont,
				    &mp_conn_tail, (char *)&tce, sizeof (tce));

				mlp.tme_connidx = v4_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(
					    mp_attr_ctl->b_cont,
					    &mp_attr_tail, (char *)&mlp,
					    sizeof (mlp));
			}
		}
	}

	/* fixed length structure for IPv4 and IPv6 counters */
	SET_MIB(tcp_mib.tcpConnTableSize, sizeof (mib2_tcpConnEntry_t));
	SET_MIB(tcp_mib.tcp6ConnTableSize, sizeof (mib2_tcp6ConnEntry_t));

	/* synchronize 32- and 64-bit counters */
	SYNC32_MIB(&tcp_mib, tcpInSegs, tcpHCInSegs);
	SYNC32_MIB(&tcp_mib, tcpOutSegs, tcpHCOutSegs);

	tcp_sum_mib(tcps, &tcp_mib);

	optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = 0;
	(void) snmp_append_data(mpdata, (char *)&tcp_mib, sizeof (tcp_mib));
	optp->len = msgdsize(mpdata);
	qreply(q, mpctl);

	/* table of connections... */
	optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = MIB2_TCP_CONN;
	optp->len = msgdsize(mp_conn_ctl->b_cont);
	qreply(q, mp_conn_ctl);

	/* table of MLP attributes... */
	optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp_attr_ctl);
	else
		qreply(q, mp_attr_ctl);

	/* table of IPv6 connections... */
	optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = MIB2_TCP6_CONN;
	optp->len = msgdsize(mp6_conn_ctl->b_cont);
	qreply(q, mp6_conn_ctl);

	/* table of IPv6 MLP attributes... */
	optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp6_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp6_attr_ctl);
	else
		qreply(q, mp6_attr_ctl);
	return (mp2ctl);
}

/* Return 0 if invalid set request, 1 otherwise, including non-tcp requests */
/* ARGSUSED */
int
tcp_snmp_set(queue_t *q, int level, int name, uchar_t *ptr, int len)
{
	mib2_tcpConnEntry_t	*tce = (mib2_tcpConnEntry_t *)ptr;

	switch (level) {
	case MIB2_TCP:
		switch (name) {
		case 13:
			if (tce->tcpConnState != MIB2_TCP_deleteTCB)
				return (0);
			/* TODO: delete entry defined by tce */
			return (1);
		default:
			return (0);
		}
	default:
		return (1);
	}
}

/*
 * TCP Kstats implementation
 */
void *
tcp_kstat_init(netstackid_t stackid)
{
	kstat_t	*ksp;

	tcp_named_kstat_t template = {
		{ "rtoAlgorithm",	KSTAT_DATA_INT32, 0 },
		{ "rtoMin",		KSTAT_DATA_INT32, 0 },
		{ "rtoMax",		KSTAT_DATA_INT32, 0 },
		{ "maxConn",		KSTAT_DATA_INT32, 0 },
		{ "activeOpens",	KSTAT_DATA_UINT32, 0 },
		{ "passiveOpens",	KSTAT_DATA_UINT32, 0 },
		{ "attemptFails",	KSTAT_DATA_UINT32, 0 },
		{ "estabResets",	KSTAT_DATA_UINT32, 0 },
		{ "currEstab",		KSTAT_DATA_UINT32, 0 },
		{ "inSegs",		KSTAT_DATA_UINT64, 0 },
		{ "outSegs",		KSTAT_DATA_UINT64, 0 },
		{ "retransSegs",	KSTAT_DATA_UINT32, 0 },
		{ "connTableSize",	KSTAT_DATA_INT32, 0 },
		{ "outRsts",		KSTAT_DATA_UINT32, 0 },
		{ "outDataSegs",	KSTAT_DATA_UINT32, 0 },
		{ "outDataBytes",	KSTAT_DATA_UINT32, 0 },
		{ "retransBytes",	KSTAT_DATA_UINT32, 0 },
		{ "outAck",		KSTAT_DATA_UINT32, 0 },
		{ "outAckDelayed",	KSTAT_DATA_UINT32, 0 },
		{ "outUrg",		KSTAT_DATA_UINT32, 0 },
		{ "outWinUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "outWinProbe",	KSTAT_DATA_UINT32, 0 },
		{ "outControl",		KSTAT_DATA_UINT32, 0 },
		{ "outFastRetrans",	KSTAT_DATA_UINT32, 0 },
		{ "inAckSegs",		KSTAT_DATA_UINT32, 0 },
		{ "inAckBytes",		KSTAT_DATA_UINT32, 0 },
		{ "inDupAck",		KSTAT_DATA_UINT32, 0 },
		{ "inAckUnsent",	KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataInorderBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataUnorderBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataDupSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataDupBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPartDupBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinSegs",	KSTAT_DATA_UINT32, 0 },
		{ "inDataPastWinBytes",	KSTAT_DATA_UINT32, 0 },
		{ "inWinProbe",		KSTAT_DATA_UINT32, 0 },
		{ "inWinUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "inClosed",		KSTAT_DATA_UINT32, 0 },
		{ "rttUpdate",		KSTAT_DATA_UINT32, 0 },
		{ "rttNoUpdate",	KSTAT_DATA_UINT32, 0 },
		{ "timRetrans",		KSTAT_DATA_UINT32, 0 },
		{ "timRetransDrop",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepalive",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveProbe",	KSTAT_DATA_UINT32, 0 },
		{ "timKeepaliveDrop",	KSTAT_DATA_UINT32, 0 },
		{ "listenDrop",		KSTAT_DATA_UINT32, 0 },
		{ "listenDropQ0",	KSTAT_DATA_UINT32, 0 },
		{ "halfOpenDrop",	KSTAT_DATA_UINT32, 0 },
		{ "outSackRetransSegs",	KSTAT_DATA_UINT32, 0 },
		{ "connTableSize6",	KSTAT_DATA_INT32, 0 }
	};

	ksp = kstat_create_netstack(TCP_MOD_NAME, 0, TCP_MOD_NAME, "mib2",
	    KSTAT_TYPE_NAMED, NUM_OF_FIELDS(tcp_named_kstat_t), 0, stackid);

	if (ksp == NULL)
		return (NULL);

	template.rtoAlgorithm.value.ui32 = 4;
	template.maxConn.value.i32 = -1;

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_update = tcp_kstat_update;
	ksp->ks_private = (void *)(uintptr_t)stackid;

	kstat_install(ksp);
	return (ksp);
}

void
tcp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

static int
tcp_kstat_update(kstat_t *kp, int rw)
{
	tcp_named_kstat_t *tcpkp;
	tcp_t		*tcp;
	connf_t		*connfp;
	conn_t		*connp;
	int		i;
	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t	*ns;
	tcp_stack_t	*tcps;
	ip_stack_t	*ipst;
	mib2_tcp_t	tcp_mib;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}

	tcpkp = (tcp_named_kstat_t *)kp->ks_data;

	tcpkp->currEstab.value.ui32 = 0;
	tcpkp->rtoMin.value.ui32 = tcps->tcps_rexmit_interval_min;
	tcpkp->rtoMax.value.ui32 = tcps->tcps_rexmit_interval_max;

	ipst = ns->netstack_ip;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		connfp = &ipst->ips_ipcl_globalhash_fanout[i];
		connp = NULL;
		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp = connp->conn_tcp;
			switch (tcp_snmp_state(tcp)) {
			case MIB2_TCP_established:
			case MIB2_TCP_closeWait:
				tcpkp->currEstab.value.ui32++;
				break;
			}
		}
	}
	bzero(&tcp_mib, sizeof (tcp_mib));
	tcp_sum_mib(tcps, &tcp_mib);

	tcpkp->activeOpens.value.ui32 = tcp_mib.tcpActiveOpens;
	tcpkp->passiveOpens.value.ui32 = tcp_mib.tcpPassiveOpens;
	tcpkp->attemptFails.value.ui32 = tcp_mib.tcpAttemptFails;
	tcpkp->estabResets.value.ui32 = tcp_mib.tcpEstabResets;
	tcpkp->inSegs.value.ui64 = tcp_mib.tcpHCInSegs;
	tcpkp->outSegs.value.ui64 = tcp_mib.tcpHCOutSegs;
	tcpkp->retransSegs.value.ui32 = tcp_mib.tcpRetransSegs;
	tcpkp->connTableSize.value.i32 = tcp_mib.tcpConnTableSize;
	tcpkp->outRsts.value.ui32 = tcp_mib.tcpOutRsts;
	tcpkp->outDataSegs.value.ui32 = tcp_mib.tcpOutDataSegs;
	tcpkp->outDataBytes.value.ui32 = tcp_mib.tcpOutDataBytes;
	tcpkp->retransBytes.value.ui32 = tcp_mib.tcpRetransBytes;
	tcpkp->outAck.value.ui32 = tcp_mib.tcpOutAck;
	tcpkp->outAckDelayed.value.ui32 = tcp_mib.tcpOutAckDelayed;
	tcpkp->outUrg.value.ui32 = tcp_mib.tcpOutUrg;
	tcpkp->outWinUpdate.value.ui32 = tcp_mib.tcpOutWinUpdate;
	tcpkp->outWinProbe.value.ui32 = tcp_mib.tcpOutWinProbe;
	tcpkp->outControl.value.ui32 = tcp_mib.tcpOutControl;
	tcpkp->outFastRetrans.value.ui32 = tcp_mib.tcpOutFastRetrans;
	tcpkp->inAckSegs.value.ui32 = tcp_mib.tcpInAckSegs;
	tcpkp->inAckBytes.value.ui32 = tcp_mib.tcpInAckBytes;
	tcpkp->inDupAck.value.ui32 = tcp_mib.tcpInDupAck;
	tcpkp->inAckUnsent.value.ui32 = tcp_mib.tcpInAckUnsent;
	tcpkp->inDataInorderSegs.value.ui32 = tcp_mib.tcpInDataInorderSegs;
	tcpkp->inDataInorderBytes.value.ui32 = tcp_mib.tcpInDataInorderBytes;
	tcpkp->inDataUnorderSegs.value.ui32 = tcp_mib.tcpInDataUnorderSegs;
	tcpkp->inDataUnorderBytes.value.ui32 = tcp_mib.tcpInDataUnorderBytes;
	tcpkp->inDataDupSegs.value.ui32 = tcp_mib.tcpInDataDupSegs;
	tcpkp->inDataDupBytes.value.ui32 = tcp_mib.tcpInDataDupBytes;
	tcpkp->inDataPartDupSegs.value.ui32 = tcp_mib.tcpInDataPartDupSegs;
	tcpkp->inDataPartDupBytes.value.ui32 = tcp_mib.tcpInDataPartDupBytes;
	tcpkp->inDataPastWinSegs.value.ui32 = tcp_mib.tcpInDataPastWinSegs;
	tcpkp->inDataPastWinBytes.value.ui32 = tcp_mib.tcpInDataPastWinBytes;
	tcpkp->inWinProbe.value.ui32 = tcp_mib.tcpInWinProbe;
	tcpkp->inWinUpdate.value.ui32 = tcp_mib.tcpInWinUpdate;
	tcpkp->inClosed.value.ui32 = tcp_mib.tcpInClosed;
	tcpkp->rttNoUpdate.value.ui32 = tcp_mib.tcpRttNoUpdate;
	tcpkp->rttUpdate.value.ui32 = tcp_mib.tcpRttUpdate;
	tcpkp->timRetrans.value.ui32 = tcp_mib.tcpTimRetrans;
	tcpkp->timRetransDrop.value.ui32 = tcp_mib.tcpTimRetransDrop;
	tcpkp->timKeepalive.value.ui32 = tcp_mib.tcpTimKeepalive;
	tcpkp->timKeepaliveProbe.value.ui32 = tcp_mib.tcpTimKeepaliveProbe;
	tcpkp->timKeepaliveDrop.value.ui32 = tcp_mib.tcpTimKeepaliveDrop;
	tcpkp->listenDrop.value.ui32 = tcp_mib.tcpListenDrop;
	tcpkp->listenDropQ0.value.ui32 = tcp_mib.tcpListenDropQ0;
	tcpkp->halfOpenDrop.value.ui32 = tcp_mib.tcpHalfOpenDrop;
	tcpkp->outSackRetransSegs.value.ui32 = tcp_mib.tcpOutSackRetransSegs;
	tcpkp->connTableSize6.value.i32 = tcp_mib.tcp6ConnTableSize;

	netstack_rele(ns);
	return (0);
}

/*
 * kstats related to squeues i.e. not per IP instance
 */
void *
tcp_g_kstat_init(tcp_g_stat_t *tcp_g_statp)
{
	kstat_t	*ksp;

	tcp_g_stat_t template = {
		{ "tcp_timermp_alloced",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocfail",	KSTAT_DATA_UINT64 },
		{ "tcp_timermp_allocdblfail",	KSTAT_DATA_UINT64 },
		{ "tcp_freelist_cleanup",	KSTAT_DATA_UINT64 },
	};

	ksp = kstat_create(TCP_MOD_NAME, 0, "tcpstat_g", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, tcp_g_statp, sizeof (template));
	ksp->ks_data = (void *)tcp_g_statp;

	kstat_install(ksp);
	return (ksp);
}

void
tcp_g_kstat_fini(kstat_t *ksp)
{
	if (ksp != NULL) {
		kstat_delete(ksp);
	}
}

void *
tcp_kstat2_init(netstackid_t stackid)
{
	kstat_t	*ksp;

	tcp_stat_t template = {
		{ "tcp_time_wait_syn_success",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_clean_death_nondetached",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_eager_blowoff_q",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_eager_blowoff_q0",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_no_listener",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_listendrop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_listendropq0",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_wsrv_called",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_flwctl_on",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timer_fire_early",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timer_fire_miss",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_on",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_off",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zcopy_backoff",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_flowctl",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_backenabled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_urg",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_putnext",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_unfusable",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_aborted",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_unqualified",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_busy",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_msgcnt",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_fusion_rrw_plugged",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_in_ack_unsent_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_sock_fallback",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_enabled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_disabled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_times",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_lso_pkt_out",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_listen_cnt_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_listen_mem_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zwin_mem_drop",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_zwin_ack_syn",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_rst_unsent",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_reclaim_cnt",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_reass_timeout",	KSTAT_DATA_UINT64, 0 },
#ifdef TCP_DEBUG_COUNTER
		{ "tcp_time_wait",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_rput_time_wait",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_detach_time_wait",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_calls",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_cached_alloc",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_cancel_reqs",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timeout_canceled",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_timermp_freed",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_push_timer_cnt",	KSTAT_DATA_UINT64, 0 },
		{ "tcp_ack_timer_cnt",	KSTAT_DATA_UINT64, 0 },
#endif
	};

	ksp = kstat_create_netstack(TCP_MOD_NAME, 0, "tcpstat", "net",
	    KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t), 0,
	    stackid);

	if (ksp == NULL)
		return (NULL);

	bcopy(&template, ksp->ks_data, sizeof (template));
	ksp->ks_private = (void *)(uintptr_t)stackid;
	ksp->ks_update = tcp_kstat2_update;

	kstat_install(ksp);
	return (ksp);
}

void
tcp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
{
	if (ksp != NULL) {
		ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
		kstat_delete_netstack(ksp, stackid);
	}
}

/*
 * Sum up all per CPU tcp_stat_t kstat counters.
 */
static int
tcp_kstat2_update(kstat_t *kp, int rw)
{
	netstackid_t	stackid = (netstackid_t)(uintptr_t)kp->ks_private;
	netstack_t	*ns;
	tcp_stack_t	*tcps;
	tcp_stat_t	*stats;
	int		i;
	int		cnt;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ns = netstack_find_by_stackid(stackid);
	if (ns == NULL)
		return (-1);
	tcps = ns->netstack_tcp;
	if (tcps == NULL) {
		netstack_rele(ns);
		return (-1);
	}

	stats = (tcp_stat_t *)kp->ks_data;
	tcp_clr_stats(stats);

	/*
	 * tcps_sc_cnt may change in the middle of the loop.  It is better
	 * to get its value first.
	 */
	cnt = tcps->tcps_sc_cnt;
	for (i = 0; i < cnt; i++)
		tcp_cp_stats(&tcps->tcps_sc[i]->tcp_sc_stats, stats);

	netstack_rele(ns);
	return (0);
}

/*
 * To copy stats from one mib2_tcp_t to another.  Static fields are not copied.
 * The caller should set them up properly.
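 * ("Static" here means fields such as tcpRtoAlgorithm, tcpRtoMin, tcpRtoMax,
 * tcpMaxConn and the connection table sizes, which tcp_snmp_get() fills in
 * directly rather than accumulating from the per CPU counters.)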
 */
void
tcp_cp_mib(mib2_tcp_t *from, mib2_tcp_t *to)
{
	to->tcpActiveOpens += from->tcpActiveOpens;
	to->tcpPassiveOpens += from->tcpPassiveOpens;
	to->tcpAttemptFails += from->tcpAttemptFails;
	to->tcpEstabResets += from->tcpEstabResets;
	to->tcpInSegs += from->tcpInSegs;
	to->tcpOutSegs += from->tcpOutSegs;
	to->tcpRetransSegs += from->tcpRetransSegs;
	to->tcpOutRsts += from->tcpOutRsts;

	to->tcpOutDataSegs += from->tcpOutDataSegs;
	to->tcpOutDataBytes += from->tcpOutDataBytes;
	to->tcpRetransBytes += from->tcpRetransBytes;
	to->tcpOutAck += from->tcpOutAck;
	to->tcpOutAckDelayed += from->tcpOutAckDelayed;
	to->tcpOutUrg += from->tcpOutUrg;
	to->tcpOutWinUpdate += from->tcpOutWinUpdate;
	to->tcpOutWinProbe += from->tcpOutWinProbe;
	to->tcpOutControl += from->tcpOutControl;
	to->tcpOutFastRetrans += from->tcpOutFastRetrans;

	to->tcpInAckBytes += from->tcpInAckBytes;
	to->tcpInDupAck += from->tcpInDupAck;
	to->tcpInAckUnsent += from->tcpInAckUnsent;
	to->tcpInDataInorderSegs += from->tcpInDataInorderSegs;
	to->tcpInDataInorderBytes += from->tcpInDataInorderBytes;
	to->tcpInDataUnorderSegs += from->tcpInDataUnorderSegs;
	to->tcpInDataUnorderBytes += from->tcpInDataUnorderBytes;
	to->tcpInDataDupSegs += from->tcpInDataDupSegs;
	to->tcpInDataDupBytes += from->tcpInDataDupBytes;
	to->tcpInDataPartDupSegs += from->tcpInDataPartDupSegs;
	to->tcpInDataPartDupBytes += from->tcpInDataPartDupBytes;
	to->tcpInDataPastWinSegs += from->tcpInDataPastWinSegs;
	to->tcpInDataPastWinBytes += from->tcpInDataPastWinBytes;
	to->tcpInWinProbe += from->tcpInWinProbe;
	to->tcpInWinUpdate += from->tcpInWinUpdate;
	to->tcpInClosed += from->tcpInClosed;

	to->tcpRttNoUpdate += from->tcpRttNoUpdate;
	to->tcpRttUpdate += from->tcpRttUpdate;
	to->tcpTimRetrans += from->tcpTimRetrans;
	to->tcpTimRetransDrop += from->tcpTimRetransDrop;
	to->tcpTimKeepalive += from->tcpTimKeepalive;
	to->tcpTimKeepaliveProbe += from->tcpTimKeepaliveProbe;
	to->tcpTimKeepaliveDrop += from->tcpTimKeepaliveDrop;
	to->tcpListenDrop += from->tcpListenDrop;
	to->tcpListenDropQ0 += from->tcpListenDropQ0;
	to->tcpHalfOpenDrop += from->tcpHalfOpenDrop;
	to->tcpOutSackRetransSegs += from->tcpOutSackRetransSegs;
	to->tcpHCInSegs += from->tcpHCInSegs;
	to->tcpHCOutSegs += from->tcpHCOutSegs;
}

/*
 * To sum up all MIB2 stats for a tcp_stack_t from all per CPU stats.  The
 * caller should initialize the target mib2_tcp_t properly as this function
 * just adds up all the per CPU stats.
 */
static void
tcp_sum_mib(tcp_stack_t *tcps, mib2_tcp_t *tcp_mib)
{
	int	i;
	int	cnt;

	/*
	 * tcps_sc_cnt may change in the middle of the loop.  It is better
	 * to get its value first.
	 */
	cnt = tcps->tcps_sc_cnt;
	for (i = 0; i < cnt; i++)
		tcp_cp_mib(&tcps->tcps_sc[i]->tcp_sc_mib, tcp_mib);
}

/*
 * To set all tcp_stat_t counters to 0.
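 * Called from tcp_kstat2_update() to reset the kstat snapshot before the
 * per CPU counters are summed into it.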
 */
void
tcp_clr_stats(tcp_stat_t *stats)
{
	stats->tcp_time_wait_syn_success.value.ui64 = 0;
	stats->tcp_clean_death_nondetached.value.ui64 = 0;
	stats->tcp_eager_blowoff_q.value.ui64 = 0;
	stats->tcp_eager_blowoff_q0.value.ui64 = 0;
	stats->tcp_no_listener.value.ui64 = 0;
	stats->tcp_listendrop.value.ui64 = 0;
	stats->tcp_listendropq0.value.ui64 = 0;
	stats->tcp_wsrv_called.value.ui64 = 0;
	stats->tcp_flwctl_on.value.ui64 = 0;
	stats->tcp_timer_fire_early.value.ui64 = 0;
	stats->tcp_timer_fire_miss.value.ui64 = 0;
	stats->tcp_zcopy_on.value.ui64 = 0;
	stats->tcp_zcopy_off.value.ui64 = 0;
	stats->tcp_zcopy_backoff.value.ui64 = 0;
	stats->tcp_fusion_flowctl.value.ui64 = 0;
	stats->tcp_fusion_backenabled.value.ui64 = 0;
	stats->tcp_fusion_urg.value.ui64 = 0;
	stats->tcp_fusion_putnext.value.ui64 = 0;
	stats->tcp_fusion_unfusable.value.ui64 = 0;
	stats->tcp_fusion_aborted.value.ui64 = 0;
	stats->tcp_fusion_unqualified.value.ui64 = 0;
	stats->tcp_fusion_rrw_busy.value.ui64 = 0;
	stats->tcp_fusion_rrw_msgcnt.value.ui64 = 0;
	stats->tcp_fusion_rrw_plugged.value.ui64 = 0;
	stats->tcp_in_ack_unsent_drop.value.ui64 = 0;
	stats->tcp_sock_fallback.value.ui64 = 0;
	stats->tcp_lso_enabled.value.ui64 = 0;
	stats->tcp_lso_disabled.value.ui64 = 0;
	stats->tcp_lso_times.value.ui64 = 0;
	stats->tcp_lso_pkt_out.value.ui64 = 0;
	stats->tcp_listen_cnt_drop.value.ui64 = 0;
	stats->tcp_listen_mem_drop.value.ui64 = 0;
	stats->tcp_zwin_mem_drop.value.ui64 = 0;
	stats->tcp_zwin_ack_syn.value.ui64 = 0;
	stats->tcp_rst_unsent.value.ui64 = 0;
	stats->tcp_reclaim_cnt.value.ui64 = 0;
	stats->tcp_reass_timeout.value.ui64 = 0;

#ifdef TCP_DEBUG_COUNTER
	stats->tcp_time_wait.value.ui64 = 0;
	stats->tcp_rput_time_wait.value.ui64 = 0;
	stats->tcp_detach_time_wait.value.ui64 = 0;
	stats->tcp_timeout_calls.value.ui64 = 0;
	stats->tcp_timeout_cached_alloc.value.ui64 = 0;
	stats->tcp_timeout_cancel_reqs.value.ui64 = 0;
	stats->tcp_timeout_canceled.value.ui64 = 0;
	stats->tcp_timermp_freed.value.ui64 = 0;
	stats->tcp_push_timer_cnt.value.ui64 = 0;
	stats->tcp_ack_timer_cnt.value.ui64 = 0;
#endif
}

/*
 * To copy counters from one tcp_stat_t to another.
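 * Counters are added to the target rather than overwritten, so repeated
 * calls accumulate the per CPU stats into a single snapshot.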
 */
void
tcp_cp_stats(tcp_stat_t *from, tcp_stat_t *to)
{
	to->tcp_time_wait_syn_success.value.ui64 +=
	    from->tcp_time_wait_syn_success.value.ui64;
	to->tcp_clean_death_nondetached.value.ui64 +=
	    from->tcp_clean_death_nondetached.value.ui64;
	to->tcp_eager_blowoff_q.value.ui64 +=
	    from->tcp_eager_blowoff_q.value.ui64;
	to->tcp_eager_blowoff_q0.value.ui64 +=
	    from->tcp_eager_blowoff_q0.value.ui64;
	to->tcp_no_listener.value.ui64 +=
	    from->tcp_no_listener.value.ui64;
	to->tcp_listendrop.value.ui64 +=
	    from->tcp_listendrop.value.ui64;
	to->tcp_listendropq0.value.ui64 +=
	    from->tcp_listendropq0.value.ui64;
	to->tcp_wsrv_called.value.ui64 +=
	    from->tcp_wsrv_called.value.ui64;
	to->tcp_flwctl_on.value.ui64 +=
	    from->tcp_flwctl_on.value.ui64;
	to->tcp_timer_fire_early.value.ui64 +=
	    from->tcp_timer_fire_early.value.ui64;
	to->tcp_timer_fire_miss.value.ui64 +=
	    from->tcp_timer_fire_miss.value.ui64;
	to->tcp_zcopy_on.value.ui64 +=
	    from->tcp_zcopy_on.value.ui64;
	to->tcp_zcopy_off.value.ui64 +=
	    from->tcp_zcopy_off.value.ui64;
	to->tcp_zcopy_backoff.value.ui64 +=
	    from->tcp_zcopy_backoff.value.ui64;
	to->tcp_fusion_flowctl.value.ui64 +=
	    from->tcp_fusion_flowctl.value.ui64;
	to->tcp_fusion_backenabled.value.ui64 +=
	    from->tcp_fusion_backenabled.value.ui64;
	to->tcp_fusion_urg.value.ui64 +=
	    from->tcp_fusion_urg.value.ui64;
	to->tcp_fusion_putnext.value.ui64 +=
	    from->tcp_fusion_putnext.value.ui64;
	to->tcp_fusion_unfusable.value.ui64 +=
	    from->tcp_fusion_unfusable.value.ui64;
	to->tcp_fusion_aborted.value.ui64 +=
	    from->tcp_fusion_aborted.value.ui64;
	to->tcp_fusion_unqualified.value.ui64 +=
	    from->tcp_fusion_unqualified.value.ui64;
	to->tcp_fusion_rrw_busy.value.ui64 +=
	    from->tcp_fusion_rrw_busy.value.ui64;
	to->tcp_fusion_rrw_msgcnt.value.ui64 +=
	    from->tcp_fusion_rrw_msgcnt.value.ui64;
	to->tcp_fusion_rrw_plugged.value.ui64 +=
	    from->tcp_fusion_rrw_plugged.value.ui64;
	to->tcp_in_ack_unsent_drop.value.ui64 +=
	    from->tcp_in_ack_unsent_drop.value.ui64;
	to->tcp_sock_fallback.value.ui64 +=
	    from->tcp_sock_fallback.value.ui64;
	to->tcp_lso_enabled.value.ui64 +=
	    from->tcp_lso_enabled.value.ui64;
	to->tcp_lso_disabled.value.ui64 +=
	    from->tcp_lso_disabled.value.ui64;
	to->tcp_lso_times.value.ui64 +=
	    from->tcp_lso_times.value.ui64;
	to->tcp_lso_pkt_out.value.ui64 +=
	    from->tcp_lso_pkt_out.value.ui64;
	to->tcp_listen_cnt_drop.value.ui64 +=
	    from->tcp_listen_cnt_drop.value.ui64;
	to->tcp_listen_mem_drop.value.ui64 +=
	    from->tcp_listen_mem_drop.value.ui64;
	to->tcp_zwin_mem_drop.value.ui64 +=
	    from->tcp_zwin_mem_drop.value.ui64;
	to->tcp_zwin_ack_syn.value.ui64 +=
	    from->tcp_zwin_ack_syn.value.ui64;
	to->tcp_rst_unsent.value.ui64 +=
	    from->tcp_rst_unsent.value.ui64;
	to->tcp_reclaim_cnt.value.ui64 +=
	    from->tcp_reclaim_cnt.value.ui64;
	to->tcp_reass_timeout.value.ui64 +=
	    from->tcp_reass_timeout.value.ui64;

#ifdef TCP_DEBUG_COUNTER
	to->tcp_time_wait.value.ui64 +=
	    from->tcp_time_wait.value.ui64;
	to->tcp_rput_time_wait.value.ui64 +=
	    from->tcp_rput_time_wait.value.ui64;
	to->tcp_detach_time_wait.value.ui64 +=
	    from->tcp_detach_time_wait.value.ui64;
	to->tcp_timeout_calls.value.ui64 +=
	    from->tcp_timeout_calls.value.ui64;
	to->tcp_timeout_cached_alloc.value.ui64 +=
	    from->tcp_timeout_cached_alloc.value.ui64;
	to->tcp_timeout_cancel_reqs.value.ui64 +=
	    from->tcp_timeout_cancel_reqs.value.ui64;
	to->tcp_timeout_canceled.value.ui64 +=
	    from->tcp_timeout_canceled.value.ui64;
	to->tcp_timermp_freed.value.ui64 +=
	    from->tcp_timermp_freed.value.ui64;
	to->tcp_push_timer_cnt.value.ui64 +=
	    from->tcp_push_timer_cnt.value.ui64;
	to->tcp_ack_timer_cnt.value.ui64 +=
	    from->tcp_ack_timer_cnt.value.ui64;
#endif
}
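
/*
 * Observability note: the kstats created above can be read from userland
 * with kstat(1M).  Assuming TCP_MOD_NAME resolves to "tcp", for example:
 *
 *	kstat -m tcp -n tcp		(MIB2 counters from tcp_kstat_init())
 *	kstat -m tcp -n tcpstat		(per-stack counters, tcp_kstat2_init())
 *	kstat -m tcp -n tcpstat_g	(global counters, tcp_g_kstat_init())
 *
 * Reading the first two triggers the ks_update callbacks tcp_kstat_update()
 * and tcp_kstat2_update(), which sum the per CPU counters on demand.
 */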