/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/tihdr.h>
#include <sys/policy.h>
#include <sys/tsol/tnet.h>

#include <inet/common.h>
#include <inet/kstatcom.h>
#include <inet/snmpcom.h>
#include <inet/mib2.h>
#include <inet/optcom.h>
#include <inet/udp_impl.h>

static int udp_kstat_update(kstat_t *, int);
static int udp_kstat2_update(kstat_t *, int);
static void udp_sum_mib(udp_stack_t *, mib2_udp_t *);
static void udp_clr_stats(udp_stat_t *);
static void udp_add_stats(udp_stat_counter_t *, udp_stat_t *);
static void udp_add_mib(mib2_udp_t *, mib2_udp_t *);

/*
 * Return SNMP information in the buffer in mpdata.  We do not hold any
 * locks, so we report information that may be changing beneath us.
 */
mblk_t *
udp_snmp_get(queue_t *q, mblk_t *mpctl)
{
    mblk_t *mpdata;
    mblk_t *mp_conn_ctl;
    mblk_t *mp_attr_ctl;
    mblk_t *mp6_conn_ctl;
    mblk_t *mp6_attr_ctl;
    mblk_t *mp_conn_tail;
    mblk_t *mp_attr_tail;
    mblk_t *mp6_conn_tail;
    mblk_t *mp6_attr_tail;
    struct opthdr *optp;
    mib2_udpEntry_t ude;
    mib2_udp6Entry_t ude6;
    mib2_transportMLPEntry_t mlp;
    int state;
    zoneid_t zoneid;
    int i;
    connf_t *connfp;
    conn_t *connp = Q_TO_CONN(q);
    int v4_conn_idx;
    int v6_conn_idx;
    boolean_t needattr;
    udp_t *udp;
    ip_stack_t *ipst = connp->conn_netstack->netstack_ip;
    udp_stack_t *us = connp->conn_netstack->netstack_udp;
    mblk_t *mp2ctl;
    mib2_udp_t udp_mib;

    /*
     * Make a copy of the original message.
     */
    mp2ctl = copymsg(mpctl);

    mp_conn_ctl = mp_attr_ctl = mp6_conn_ctl = NULL;
    if (mpctl == NULL ||
        (mpdata = mpctl->b_cont) == NULL ||
        (mp_conn_ctl = copymsg(mpctl)) == NULL ||
        (mp_attr_ctl = copymsg(mpctl)) == NULL ||
        (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
        (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
        freemsg(mp_conn_ctl);
        freemsg(mp_attr_ctl);
        freemsg(mp6_conn_ctl);
        freemsg(mpctl);
        freemsg(mp2ctl);
        return (0);
    }

    zoneid = connp->conn_zoneid;

    bzero(&udp_mib, sizeof (udp_mib));
    /* fixed length structure for IPv4 and IPv6 counters */
    SET_MIB(udp_mib.udpEntrySize, sizeof (mib2_udpEntry_t));
    SET_MIB(udp_mib.udp6EntrySize, sizeof (mib2_udp6Entry_t));

    udp_sum_mib(us, &udp_mib);

    /*
     * Synchronize the 32- and 64-bit counters.  Note that udpInDatagrams
     * and udpOutDatagrams are not updated anywhere in UDP; only the new
     * 64-bit counters are used, so the old counters' values in us_sc_mib
     * are always 0.
     */
    SYNC32_MIB(&udp_mib, udpInDatagrams, udpHCInDatagrams);
    SYNC32_MIB(&udp_mib, udpOutDatagrams, udpHCOutDatagrams);
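
    /*
     * The reply in mpctl contains a T_optmgmt_ack followed by a struct
     * opthdr identifying the MIB2_UDP group; the fixed-size mib2_udp_t
     * itself is appended to the continuation mblk (mpdata) below.
     */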
    optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
    optp->level = MIB2_UDP;
    optp->name = 0;
    (void) snmp_append_data(mpdata, (char *)&udp_mib, sizeof (udp_mib));
    optp->len = msgdsize(mpdata);
    qreply(q, mpctl);

    mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;
    v4_conn_idx = v6_conn_idx = 0;

    for (i = 0; i < CONN_G_HASH_SIZE; i++) {
        connfp = &ipst->ips_ipcl_globalhash_fanout[i];
        connp = NULL;

        while ((connp = ipcl_get_next_conn(connfp, connp,
            IPCL_UDPCONN))) {
            udp = connp->conn_udp;
            if (zoneid != connp->conn_zoneid)
                continue;

            /*
             * Note that the port numbers are sent in
             * host byte order.
             */

            if (udp->udp_state == TS_UNBND)
                state = MIB2_UDP_unbound;
            else if (udp->udp_state == TS_IDLE)
                state = MIB2_UDP_idle;
            else if (udp->udp_state == TS_DATA_XFER)
                state = MIB2_UDP_connected;
            else
                state = MIB2_UDP_unknown;

            needattr = B_FALSE;
            bzero(&mlp, sizeof (mlp));
            if (connp->conn_mlp_type != mlptSingle) {
                if (connp->conn_mlp_type == mlptShared ||
                    connp->conn_mlp_type == mlptBoth)
                    mlp.tme_flags |= MIB2_TMEF_SHARED;
                if (connp->conn_mlp_type == mlptPrivate ||
                    connp->conn_mlp_type == mlptBoth)
                    mlp.tme_flags |= MIB2_TMEF_PRIVATE;
                needattr = B_TRUE;
            }
            if (connp->conn_anon_mlp) {
                mlp.tme_flags |= MIB2_TMEF_ANONMLP;
                needattr = B_TRUE;
            }
            switch (connp->conn_mac_mode) {
            case CONN_MAC_DEFAULT:
                break;
            case CONN_MAC_AWARE:
                mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
                needattr = B_TRUE;
                break;
            case CONN_MAC_IMPLICIT:
                mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
                needattr = B_TRUE;
                break;
            }
            mutex_enter(&connp->conn_lock);
            if (udp->udp_state == TS_DATA_XFER &&
                connp->conn_ixa->ixa_tsl != NULL) {
                ts_label_t *tsl;

                tsl = connp->conn_ixa->ixa_tsl;
                mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
                mlp.tme_doi = label2doi(tsl);
                mlp.tme_label = *label2bslabel(tsl);
                needattr = B_TRUE;
            }
            mutex_exit(&connp->conn_lock);

            /*
             * Create an IPv4 table entry for IPv4 endpoints and also
             * for any IPv6 endpoints which are bound to in6addr_any
             * (i.e. anything an IPv4 peer could connect or send to).
             */
            if (connp->conn_ipversion == IPV4_VERSION ||
                (udp->udp_state <= TS_IDLE &&
                IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
                ude.udpEntryInfo.ue_state = state;
                /*
                 * If in6addr_any, this will set it to
                 * INADDR_ANY.
                 */
                ude.udpLocalAddress = connp->conn_laddr_v4;
                ude.udpLocalPort = ntohs(connp->conn_lport);
                if (udp->udp_state == TS_DATA_XFER) {
                    /*
                     * Can potentially get here for a
                     * v6 socket if another process
                     * (say, ping) has just done a
                     * sendto(), changing the state
                     * from the TS_IDLE above to
                     * TS_DATA_XFER by the time we hit
                     * this part of the code.
                     */
                    ude.udpEntryInfo.ue_RemoteAddress =
                        connp->conn_faddr_v4;
                    ude.udpEntryInfo.ue_RemotePort =
                        ntohs(connp->conn_fport);
                } else {
                    ude.udpEntryInfo.ue_RemoteAddress = 0;
                    ude.udpEntryInfo.ue_RemotePort = 0;
                }

                /*
                 * We make the assumption that all udp_t
                 * structs will be created within an address
                 * region no larger than 32 bits.
                 */
                ude.udpInstance = (uint32_t)(uintptr_t)udp;
                ude.udpCreationProcess =
                    (connp->conn_cpid < 0) ?
                    MIB2_UNKNOWN_PROCESS :
                    connp->conn_cpid;
                ude.udpCreationTime = connp->conn_open_time;

                (void) snmp_append_data2(mp_conn_ctl->b_cont,
                    &mp_conn_tail, (char *)&ude, sizeof (ude));
                mlp.tme_connidx = v4_conn_idx++;
                if (needattr)
                    (void) snmp_append_data2(
                        mp_attr_ctl->b_cont, &mp_attr_tail,
                        (char *)&mlp, sizeof (mlp));
            }
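            /*
             * Create an IPv6 table entry for every IPv6 endpoint,
             * including any that were also reported in the IPv4
             * table above because they are bound to in6addr_any.
             */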
            if (connp->conn_ipversion == IPV6_VERSION) {
                ude6.udp6EntryInfo.ue_state = state;
                ude6.udp6LocalAddress = connp->conn_laddr_v6;
                ude6.udp6LocalPort = ntohs(connp->conn_lport);
                mutex_enter(&connp->conn_lock);
                if (connp->conn_ixa->ixa_flags &
                    IXAF_SCOPEID_SET) {
                    ude6.udp6IfIndex =
                        connp->conn_ixa->ixa_scopeid;
                } else {
                    ude6.udp6IfIndex = connp->conn_bound_if;
                }
                mutex_exit(&connp->conn_lock);
                if (udp->udp_state == TS_DATA_XFER) {
                    ude6.udp6EntryInfo.ue_RemoteAddress =
                        connp->conn_faddr_v6;
                    ude6.udp6EntryInfo.ue_RemotePort =
                        ntohs(connp->conn_fport);
                } else {
                    ude6.udp6EntryInfo.ue_RemoteAddress =
                        sin6_null.sin6_addr;
                    ude6.udp6EntryInfo.ue_RemotePort = 0;
                }
                /*
                 * We make the assumption that all udp_t
                 * structs will be created within an address
                 * region no larger than 32 bits.
                 */
                ude6.udp6Instance = (uint32_t)(uintptr_t)udp;
                ude6.udp6CreationProcess =
                    (connp->conn_cpid < 0) ?
                    MIB2_UNKNOWN_PROCESS :
                    connp->conn_cpid;
                ude6.udp6CreationTime = connp->conn_open_time;

                (void) snmp_append_data2(mp6_conn_ctl->b_cont,
                    &mp6_conn_tail, (char *)&ude6,
                    sizeof (ude6));
                mlp.tme_connidx = v6_conn_idx++;
                if (needattr)
                    (void) snmp_append_data2(
                        mp6_attr_ctl->b_cont,
                        &mp6_attr_tail, (char *)&mlp,
                        sizeof (mlp));
            }
        }
    }

    /* IPv4 UDP endpoints */
    optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
        sizeof (struct T_optmgmt_ack)];
    optp->level = MIB2_UDP;
    optp->name = MIB2_UDP_ENTRY;
    optp->len = msgdsize(mp_conn_ctl->b_cont);
    qreply(q, mp_conn_ctl);

    /* table of MLP attributes... */
    optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
        sizeof (struct T_optmgmt_ack)];
    optp->level = MIB2_UDP;
    optp->name = EXPER_XPORT_MLP;
    optp->len = msgdsize(mp_attr_ctl->b_cont);
    if (optp->len == 0)
        freemsg(mp_attr_ctl);
    else
        qreply(q, mp_attr_ctl);

    /* IPv6 UDP endpoints */
    optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
        sizeof (struct T_optmgmt_ack)];
    optp->level = MIB2_UDP6;
    optp->name = MIB2_UDP6_ENTRY;
    optp->len = msgdsize(mp6_conn_ctl->b_cont);
    qreply(q, mp6_conn_ctl);

    /* table of MLP attributes... */
    optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
        sizeof (struct T_optmgmt_ack)];
    optp->level = MIB2_UDP6;
    optp->name = EXPER_XPORT_MLP;
    optp->len = msgdsize(mp6_attr_ctl->b_cont);
    if (optp->len == 0)
        freemsg(mp6_attr_ctl);
    else
        qreply(q, mp6_attr_ctl);

    return (mp2ctl);
}

/*
 * Return 0 if invalid set request, 1 otherwise, including non-udp requests.
 * NOTE: Per MIB-II, UDP has no writable data.
 * TODO: If this ever actually tries to set anything, it needs to do the
 * appropriate locking.
 */
/* ARGSUSED */
int
udp_snmp_set(queue_t *q, t_scalar_t level, t_scalar_t name,
    uchar_t *ptr, int len)
{
    switch (level) {
    case MIB2_UDP:
        return (0);
    default:
        return (1);
    }
}

/*
 * Tear down the per-netstack MIB2 kstat created by udp_kstat_init(), if
 * it exists.
 */
void
udp_kstat_fini(netstackid_t stackid, kstat_t *ksp)
{
    if (ksp != NULL) {
        ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
        kstat_delete_netstack(ksp, stackid);
    }
}

/*
 * Add the stats from one mib2_udp_t to another.  Static fields are not
 * added; the caller should set them up properly.
 */
static void
udp_add_mib(mib2_udp_t *from, mib2_udp_t *to)
{
    to->udpHCInDatagrams += from->udpHCInDatagrams;
    to->udpInErrors += from->udpInErrors;
    to->udpHCOutDatagrams += from->udpHCOutDatagrams;
    to->udpOutErrors += from->udpOutErrors;
}
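
/*
 * Create the per-netstack "udpstat" kstat for the extended UDP counters
 * kept in udp_stat_t.  On systems with the usual kstat(1M) utility these
 * counters can typically be inspected with "kstat -m udp -n udpstat"
 * (assuming UDP_MOD_NAME expands to "udp").
 */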
void *
udp_kstat2_init(netstackid_t stackid)
{
    kstat_t *ksp;

    udp_stat_t template = {
        { "udp_sock_fallback", KSTAT_DATA_UINT64 },
        { "udp_out_opt", KSTAT_DATA_UINT64 },
        { "udp_out_err_notconn", KSTAT_DATA_UINT64 },
        { "udp_out_err_output", KSTAT_DATA_UINT64 },
        { "udp_out_err_tudr", KSTAT_DATA_UINT64 },
#ifdef DEBUG
        { "udp_data_conn", KSTAT_DATA_UINT64 },
        { "udp_data_notconn", KSTAT_DATA_UINT64 },
        { "udp_out_lastdst", KSTAT_DATA_UINT64 },
        { "udp_out_diffdst", KSTAT_DATA_UINT64 },
        { "udp_out_ipv6", KSTAT_DATA_UINT64 },
        { "udp_out_mapped", KSTAT_DATA_UINT64 },
        { "udp_out_ipv4", KSTAT_DATA_UINT64 },
#endif
    };

    ksp = kstat_create_netstack(UDP_MOD_NAME, 0, "udpstat", "net",
        KSTAT_TYPE_NAMED, sizeof (template) / sizeof (kstat_named_t),
        0, stackid);

    if (ksp == NULL)
        return (NULL);

    bcopy(&template, ksp->ks_data, sizeof (template));
    ksp->ks_update = udp_kstat2_update;
    ksp->ks_private = (void *)(uintptr_t)stackid;

    kstat_install(ksp);
    return (ksp);
}

/*
 * Tear down the per-netstack "udpstat" kstat, if it exists.
 */
void
udp_kstat2_fini(netstackid_t stackid, kstat_t *ksp)
{
    if (ksp != NULL) {
        ASSERT(stackid == (netstackid_t)(uintptr_t)ksp->ks_private);
        kstat_delete_netstack(ksp, stackid);
    }
}

/*
 * Copy counters from the per-CPU udp_stat_counter_t to the stack
 * udp_stat_t.
 */
static void
udp_add_stats(udp_stat_counter_t *from, udp_stat_t *to)
{
    to->udp_sock_fallback.value.ui64 += from->udp_sock_fallback;
    to->udp_out_opt.value.ui64 += from->udp_out_opt;
    to->udp_out_err_notconn.value.ui64 += from->udp_out_err_notconn;
    to->udp_out_err_output.value.ui64 += from->udp_out_err_output;
    to->udp_out_err_tudr.value.ui64 += from->udp_out_err_tudr;
#ifdef DEBUG
    to->udp_data_conn.value.ui64 += from->udp_data_conn;
    to->udp_data_notconn.value.ui64 += from->udp_data_notconn;
    to->udp_out_lastdst.value.ui64 += from->udp_out_lastdst;
    to->udp_out_diffdst.value.ui64 += from->udp_out_diffdst;
    to->udp_out_ipv6.value.ui64 += from->udp_out_ipv6;
    to->udp_out_mapped.value.ui64 += from->udp_out_mapped;
    to->udp_out_ipv4.value.ui64 += from->udp_out_ipv4;
#endif
}

/*
 * Set all udp_stat_t counters to 0.
 */
static void
udp_clr_stats(udp_stat_t *stats)
{
    stats->udp_sock_fallback.value.ui64 = 0;
    stats->udp_out_opt.value.ui64 = 0;
    stats->udp_out_err_notconn.value.ui64 = 0;
    stats->udp_out_err_output.value.ui64 = 0;
    stats->udp_out_err_tudr.value.ui64 = 0;
#ifdef DEBUG
    stats->udp_data_conn.value.ui64 = 0;
    stats->udp_data_notconn.value.ui64 = 0;
    stats->udp_out_lastdst.value.ui64 = 0;
    stats->udp_out_diffdst.value.ui64 = 0;
    stats->udp_out_ipv6.value.ui64 = 0;
    stats->udp_out_mapped.value.ui64 = 0;
    stats->udp_out_ipv4.value.ui64 = 0;
#endif
}

/*
 * kstat update callback for the "udpstat" kstat: clear the snapshot and
 * sum the per-CPU counters into it.
 */
int
udp_kstat2_update(kstat_t *kp, int rw)
{
    udp_stat_t *stats;
    netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
    netstack_t *ns;
    udp_stack_t *us;
    int i;
    int cnt;

    if (rw == KSTAT_WRITE)
        return (EACCES);

    ns = netstack_find_by_stackid(stackid);
    if (ns == NULL)
        return (-1);
    us = ns->netstack_udp;
    if (us == NULL) {
        netstack_rele(ns);
        return (-1);
    }
    stats = (udp_stat_t *)kp->ks_data;
    udp_clr_stats(stats);

    cnt = us->us_sc_cnt;
    for (i = 0; i < cnt; i++)
        udp_add_stats(&us->us_sc[i]->udp_sc_stats, stats);

    netstack_rele(ns);
    return (0);
}

/*
 * Create the per-netstack MIB2 kstat (module UDP_MOD_NAME, class "mib2")
 * that exports the standard UDP MIB counters.
 */
void *
udp_kstat_init(netstackid_t stackid)
{
    kstat_t *ksp;

    udp_named_kstat_t template = {
        { "inDatagrams", KSTAT_DATA_UINT64, 0 },
        { "inErrors", KSTAT_DATA_UINT32, 0 },
        { "outDatagrams", KSTAT_DATA_UINT64, 0 },
        { "entrySize", KSTAT_DATA_INT32, 0 },
        { "entry6Size", KSTAT_DATA_INT32, 0 },
        { "outErrors", KSTAT_DATA_UINT32, 0 },
    };

    ksp = kstat_create_netstack(UDP_MOD_NAME, 0, UDP_MOD_NAME, "mib2",
        KSTAT_TYPE_NAMED, NUM_OF_FIELDS(udp_named_kstat_t), 0, stackid);

    if (ksp == NULL)
        return (NULL);

    template.entrySize.value.ui32 = sizeof (mib2_udpEntry_t);
    template.entry6Size.value.ui32 = sizeof (mib2_udp6Entry_t);

    bcopy(&template, ksp->ks_data, sizeof (template));
    ksp->ks_update = udp_kstat_update;
    ksp->ks_private = (void *)(uintptr_t)stackid;

    kstat_install(ksp);
    return (ksp);
}

/*
 * Sum up all MIB2 stats for a udp_stack_t from all per-CPU stats.  The
 * caller should initialize the target mib2_udp_t properly, as this
 * function just adds up all the per-CPU stats.
 */
static void
udp_sum_mib(udp_stack_t *us, mib2_udp_t *udp_mib)
{
    int i;
    int cnt;

    cnt = us->us_sc_cnt;
    for (i = 0; i < cnt; i++)
        udp_add_mib(&us->us_sc[i]->udp_sc_mib, udp_mib);
}

/*
 * kstat update callback for the MIB2 kstat: sum the per-CPU MIB counters
 * and copy them into the named kstat fields.
 */
static int
udp_kstat_update(kstat_t *kp, int rw)
{
    udp_named_kstat_t *udpkp;
    netstackid_t stackid = (netstackid_t)(uintptr_t)kp->ks_private;
    netstack_t *ns;
    udp_stack_t *us;
    mib2_udp_t udp_mib;

    if (rw == KSTAT_WRITE)
        return (EACCES);

    ns = netstack_find_by_stackid(stackid);
    if (ns == NULL)
        return (-1);
    us = ns->netstack_udp;
    if (us == NULL) {
        netstack_rele(ns);
        return (-1);
    }
    udpkp = (udp_named_kstat_t *)kp->ks_data;

    bzero(&udp_mib, sizeof (udp_mib));
    udp_sum_mib(us, &udp_mib);

    udpkp->inDatagrams.value.ui64 = udp_mib.udpHCInDatagrams;
    udpkp->inErrors.value.ui32 = udp_mib.udpInErrors;
    udpkp->outDatagrams.value.ui64 = udp_mib.udpHCOutDatagrams;
    udpkp->outErrors.value.ui32 = udp_mib.udpOutErrors;
    netstack_rele(ns);
    return (0);
}