/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright (c) 2018, Joyent, Inc.
 */

/*
 * FMA event subscription interfaces - subscribe to FMA protocol
 * from outside the fault manager.
 */

#include <sys/types.h>
#include <atomic.h>
#include <libsysevent.h>
#include <libuutil.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <fm/libtopo.h>

#include <fm/libfmevent.h>

#include "fmev_impl.h"

static topo_hdl_t *g_topohdl;

typedef struct {
        struct fmev_hdl_cmn sh_cmn;
        evchan_t *sh_binding;
        uu_avl_pool_t *sh_pool;
        uu_avl_t *sh_avl;
        uint32_t sh_subcnt;
        uint32_t sh_flags;
        sysevent_subattr_t *sh_attr;
        pthread_mutex_t sh_lock;
        pthread_mutex_t sh_srlz_lock;
} fmev_shdl_impl_t;

#define HDL2IHDL(hdl)   ((fmev_shdl_impl_t *)(hdl))
#define IHDL2HDL(ihdl)  ((fmev_shdl_t)(ihdl))

#define _FMEV_SHMAGIC   0x5368446c      /* ShDl */
#define FMEV_SHDL_VALID(ihdl)   ((ihdl)->sh_cmn.hc_magic == _FMEV_SHMAGIC)

#define SHDL_FL_SERIALIZE       0x1

#define FMEV_API_ENTER(hdl, v) \
        fmev_api_enter(&HDL2IHDL(hdl)->sh_cmn, LIBFMEVENT_VERSION_##v)

/*
 * For each subscription on a handle we add a node to an avl tree
 * to track subscriptions.
 */
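
/*
 * Illustrative sketch of typical consumer usage of these interfaces; it
 * is not part of the library.  The "list.suspect" class and the callback
 * shown are examples only - see <fm/libfmevent.h> for the exact
 * fmev_cbfunc_t prototype and the LIBFMEVENT_VERSION_* macros.
 *
 *      static void
 *      mycb(fmev_t ev, const char *class, nvlist_t *nvl, void *arg)
 *      {
 *              // consume the event; the library releases its hold on
 *              // ev once this callback returns
 *      }
 *
 *      fmev_shdl_t hdl;
 *
 *      if ((hdl = fmev_shdl_init(LIBFMEVENT_VERSION_2,
 *          NULL, NULL, NULL)) == NULL)
 *              return (1);     // fmev_errno holds the failure code
 *
 *      if (fmev_shdl_subscribe(hdl, "list.suspect", mycb, NULL) !=
 *          FMEV_SUCCESS)
 *              ...
 *
 *      (void) fmev_shdl_unsubscribe(hdl, "list.suspect");
 *      (void) fmev_shdl_fini(hdl);
 */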

#define FMEV_SID_SZ     (16 + 1)        /* Matches MAX_SUBID_LEN */

struct fmev_subinfo {
        uu_avl_node_t si_node;
        fmev_shdl_impl_t *si_ihdl;
        char si_pat[FMEV_MAX_CLASS];
        char si_sid[FMEV_SID_SZ];
        fmev_cbfunc_t *si_cb;
        void *si_cbarg;
};

struct fmev_hdl_cmn *
fmev_shdl_cmn(fmev_shdl_t hdl)
{
        return (&HDL2IHDL(hdl)->sh_cmn);
}

static int
shdlctl_start(fmev_shdl_impl_t *ihdl)
{
        (void) pthread_mutex_lock(&ihdl->sh_lock);

        if (ihdl->sh_subcnt == 0) {
                return (1);     /* lock still held */
        } else {
                (void) pthread_mutex_unlock(&ihdl->sh_lock);
                return (0);
        }
}

static void
shdlctl_end(fmev_shdl_impl_t *ihdl)
{
        (void) pthread_mutex_unlock(&ihdl->sh_lock);
}

fmev_err_t
fmev_shdlctl_serialize(fmev_shdl_t hdl)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

        if (!FMEV_API_ENTER(hdl, 1))
                return (fmev_errno);

        if (!shdlctl_start(ihdl))
                return (fmev_seterr(FMEVERR_BUSY));

        if (!(ihdl->sh_flags & SHDL_FL_SERIALIZE)) {
                (void) pthread_mutex_init(&ihdl->sh_srlz_lock, NULL);
                ihdl->sh_flags |= SHDL_FL_SERIALIZE;
        }

        shdlctl_end(ihdl);
        return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_thrattr(fmev_shdl_t hdl, pthread_attr_t *attr)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

        if (!FMEV_API_ENTER(hdl, 1))
                return (fmev_errno);

        if (!shdlctl_start(ihdl))
                return (fmev_seterr(FMEVERR_BUSY));

        sysevent_subattr_thrattr(ihdl->sh_attr, attr);

        shdlctl_end(ihdl);
        return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_sigmask(fmev_shdl_t hdl, sigset_t *set)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

        if (!FMEV_API_ENTER(hdl, 1))
                return (fmev_errno);

        if (!shdlctl_start(ihdl))
                return (fmev_seterr(FMEVERR_BUSY));

        sysevent_subattr_sigmask(ihdl->sh_attr, set);

        shdlctl_end(ihdl);
        return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_thrsetup(fmev_shdl_t hdl, door_xcreate_thrsetup_func_t *func,
    void *cookie)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

        if (!FMEV_API_ENTER(hdl, 1))
                return (fmev_errno);

        if (!shdlctl_start(ihdl))
                return (fmev_seterr(FMEVERR_BUSY));

        sysevent_subattr_thrsetup(ihdl->sh_attr, func, cookie);

        shdlctl_end(ihdl);
        return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_thrcreate(fmev_shdl_t hdl, door_xcreate_server_func_t *func,
    void *cookie)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

        if (!FMEV_API_ENTER(hdl, 1))
                return (fmev_errno);

        if (!shdlctl_start(ihdl))
                return (fmev_seterr(FMEVERR_BUSY));

        sysevent_subattr_thrcreate(ihdl->sh_attr, func, cookie);

        shdlctl_end(ihdl);
        return (fmev_seterr(FMEV_SUCCESS));
}
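
/*
 * The fmev_shdlctl_* controls above only succeed while a handle has no
 * subscriptions in place (shdlctl_start otherwise fails and the call
 * returns FMEVERR_BUSY), so a consumer applies them between
 * fmev_shdl_init and the first fmev_shdl_subscribe.  Hypothetical
 * sketch, reusing the mycb callback named earlier:
 *
 *      hdl = fmev_shdl_init(LIBFMEVENT_VERSION_2, NULL, NULL, NULL);
 *
 *      // request serialized delivery of all callbacks on this handle
 *      if (fmev_shdlctl_serialize(hdl) != FMEV_SUCCESS)
 *              ...
 *
 *      (void) fmev_shdl_subscribe(hdl, "ireport.*", mycb, NULL);
 *
 *      // any fmev_shdlctl_* call made now returns FMEVERR_BUSY
 */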

/*
 * Our door service function.  We return 0 regardless so that the kernel
 * neither keeps retrying (EAGAIN) nor bleats to cmn_err.
 */

uint64_t fmev_proxy_cb_enomem;

static int
fmev_proxy_cb(sysevent_t *sep, void *arg)
{
        struct fmev_subinfo *sip = arg;
        fmev_shdl_impl_t *ihdl = sip->si_ihdl;
        nvlist_t *nvl;
        char *class;
        fmev_t ev;

        if ((ev = fmev_sysev2fmev(IHDL2HDL(ihdl), sep, &class, &nvl)) == NULL) {
                fmev_proxy_cb_enomem++;
                return (0);
        }

        if (ihdl->sh_flags & SHDL_FL_SERIALIZE)
                (void) pthread_mutex_lock(&ihdl->sh_srlz_lock);

        sip->si_cb(ev, class, nvl, sip->si_cbarg);

        if (ihdl->sh_flags & SHDL_FL_SERIALIZE)
                (void) pthread_mutex_unlock(&ihdl->sh_srlz_lock);

        fmev_rele(ev);  /* release hold obtained in fmev_sysev2fmev */

        return (0);
}

static volatile uint32_t fmev_subid;

fmev_err_t
fmev_shdl_subscribe(fmev_shdl_t hdl, const char *pat, fmev_cbfunc_t func,
    void *funcarg)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
        struct fmev_subinfo *sip;
        uu_avl_index_t idx;
        uint64_t nsid;
        int serr;

        if (!FMEV_API_ENTER(hdl, 1))
                return (fmev_errno);

        if (pat == NULL || func == NULL)
                return (fmev_seterr(FMEVERR_API));

        /*
         * Empty class patterns are illegal, as is the sysevent magic for
         * all classes.  Also validate class length.
         */
        if (*pat == '\0' || strncmp(pat, EC_ALL, sizeof (EC_ALL)) == 0 ||
            strncmp(pat, EC_SUB_ALL, sizeof (EC_SUB_ALL)) == 0 ||
            strnlen(pat, FMEV_MAX_CLASS) == FMEV_MAX_CLASS)
                return (fmev_seterr(FMEVERR_BADCLASS));

        if ((sip = fmev_shdl_zalloc(hdl, sizeof (*sip))) == NULL)
                return (fmev_seterr(FMEVERR_ALLOC));

        (void) strncpy(sip->si_pat, pat, sizeof (sip->si_pat));

        uu_avl_node_init(sip, &sip->si_node, ihdl->sh_pool);

        (void) pthread_mutex_lock(&ihdl->sh_lock);

        if (uu_avl_find(ihdl->sh_avl, sip, NULL, &idx) != NULL) {
                (void) pthread_mutex_unlock(&ihdl->sh_lock);
                fmev_shdl_free(hdl, sip, sizeof (*sip));
                return (fmev_seterr(FMEVERR_DUPLICATE));
        }

        /*
         * Generate a subscriber id for GPEC that is unique to this
         * subscription.  There is no provision for persistent
         * subscribers.  The subscriber id must be unique within
         * this zone.
         */
        nsid = (uint64_t)getpid() << 32 | atomic_inc_32_nv(&fmev_subid);
        (void) snprintf(sip->si_sid, sizeof (sip->si_sid), "%llx", nsid);

        sip->si_ihdl = ihdl;
        sip->si_cb = func;
        sip->si_cbarg = funcarg;

        if ((serr = sysevent_evc_xsubscribe(ihdl->sh_binding, sip->si_sid,
            sip->si_pat, fmev_proxy_cb, sip, 0, ihdl->sh_attr)) != 0) {
                fmev_err_t err;

                (void) pthread_mutex_unlock(&ihdl->sh_lock);
                fmev_shdl_free(hdl, sip, sizeof (*sip));

                switch (serr) {
                case ENOMEM:
                        err = FMEVERR_MAX_SUBSCRIBERS;
                        break;

                default:
                        err = FMEVERR_INTERNAL;
                        break;
                }

                return (fmev_seterr(err));
        }

        uu_avl_insert(ihdl->sh_avl, sip, idx);
        ihdl->sh_subcnt++;

        (void) pthread_mutex_unlock(&ihdl->sh_lock);

        return (fmev_seterr(FMEV_SUCCESS));
}

static int
fmev_subinfo_fini(fmev_shdl_impl_t *ihdl, struct fmev_subinfo *sip,
    boolean_t doavl)
{
        int err;

        ASSERT(sip->si_ihdl == ihdl);

        err = sysevent_evc_unsubscribe(ihdl->sh_binding, sip->si_sid);

        if (err == 0) {
                if (doavl) {
                        uu_avl_remove(ihdl->sh_avl, sip);
                        uu_avl_node_fini(sip, &sip->si_node, ihdl->sh_pool);
                }
                fmev_shdl_free(IHDL2HDL(ihdl), sip, sizeof (*sip));
                ihdl->sh_subcnt--;
        }

        return (err);
}

fmev_err_t
fmev_shdl_unsubscribe(fmev_shdl_t hdl, const char *pat)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
        fmev_err_t rv = FMEVERR_NOMATCH;
        struct fmev_subinfo *sip;
        struct fmev_subinfo si;
        int err;

        if (!FMEV_API_ENTER(hdl, 1))
                return (fmev_errno);

        if (pat == NULL)
                return (fmev_seterr(FMEVERR_API));

        if (*pat == '\0' || strncmp(pat, EVCH_ALLSUB, sizeof (EC_ALL)) == 0 ||
            strnlen(pat, FMEV_MAX_CLASS) == FMEV_MAX_CLASS)
                return (fmev_seterr(FMEVERR_BADCLASS));

        (void) strncpy(si.si_pat, pat, sizeof (si.si_pat));

        (void) pthread_mutex_lock(&ihdl->sh_lock);

        if ((sip = uu_avl_find(ihdl->sh_avl, &si, NULL, NULL)) != NULL) {
                if ((err = fmev_subinfo_fini(ihdl, sip, B_TRUE)) == 0) {
                        rv = FMEV_SUCCESS;
                } else {
                        /*
                         * Return an API error if the unsubscribe was
                         * attempted from within a door callback invocation;
                         * other errors should not happen.
                         */
                        rv = (err == EDEADLK) ?
                            FMEVERR_API : FMEVERR_INTERNAL;
                }
        }

        (void) pthread_mutex_unlock(&ihdl->sh_lock);

        return (fmev_seterr(rv));
}
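
/*
 * Illustrative sketch of the error semantics implemented above; hdl,
 * mycb and the class strings are placeholders:
 *
 *      // a second subscription to an identical pattern is refused
 *      (void) fmev_shdl_subscribe(hdl, "list.suspect", mycb, NULL);
 *      err = fmev_shdl_subscribe(hdl, "list.suspect", mycb, NULL);
 *      // err == FMEVERR_DUPLICATE
 *
 *      // empty patterns (and the sysevent match-all class) are refused
 *      err = fmev_shdl_subscribe(hdl, "", mycb, NULL);
 *      // err == FMEVERR_BADCLASS
 *
 *      // unsubscribing a pattern that was never subscribed
 *      err = fmev_shdl_unsubscribe(hdl, "fault.io");
 *      // err == FMEVERR_NOMATCH
 *
 * Calling fmev_shdl_unsubscribe from within a delivery callback fails
 * with FMEVERR_API, since the underlying unsubscribe returns EDEADLK
 * in door callback context.
 */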

void *
fmev_shdl_alloc(fmev_shdl_t hdl, size_t sz)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

        if (!FMEV_API_ENTER(hdl, 1))
                return (NULL);

        return (ihdl->sh_cmn.hc_alloc(sz));
}

void *
fmev_shdl_zalloc(fmev_shdl_t hdl, size_t sz)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

        if (!FMEV_API_ENTER(hdl, 1))
                return (NULL);

        return (ihdl->sh_cmn.hc_zalloc(sz));
}

void
fmev_shdl_free(fmev_shdl_t hdl, void *buf, size_t sz)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

        if (!FMEV_API_ENTER(hdl, 1))
                return;

        ihdl->sh_cmn.hc_free(buf, sz);
}

char *
fmev_shdl_strdup(fmev_shdl_t hdl, char *src)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
        size_t srclen;
        char *dst;

        if (!FMEV_API_ENTER(hdl, 2))
                return (NULL);

        srclen = strlen(src);

        if ((dst = ihdl->sh_cmn.hc_alloc(srclen + 1)) == NULL) {
                (void) fmev_seterr(FMEVERR_ALLOC);
                return (NULL);
        }

        (void) strncpy(dst, src, srclen);
        dst[srclen] = '\0';
        return (dst);
}

void
fmev_shdl_strfree(fmev_shdl_t hdl, char *buf)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

        (void) FMEV_API_ENTER(hdl, 2);

        ihdl->sh_cmn.hc_free(buf, strlen(buf) + 1);
}

int
fmev_shdl_valid(fmev_shdl_t hdl)
{
        return (FMEV_SHDL_VALID(HDL2IHDL(hdl)));
}

/*ARGSUSED*/
static int
fmev_keycmp(const void *l, const void *r, void *arg)
{
        struct fmev_subinfo *left = (struct fmev_subinfo *)l;
        struct fmev_subinfo *right = (struct fmev_subinfo *)r;

        return (strncmp(left->si_pat, right->si_pat, FMEV_MAX_CLASS));
}
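
/*
 * fmev_shdl_init below accepts an optional allocator triple: either all
 * of hdlalloc/hdlzalloc/hdlfree are supplied or none are, otherwise the
 * call fails with FMEVERR_API.  Hypothetical sketch of caller-supplied
 * wrappers built on libc:
 *
 *      static void *
 *      my_alloc(size_t sz)
 *      {
 *              return (malloc(sz));
 *      }
 *
 *      static void *
 *      my_zalloc(size_t sz)
 *      {
 *              return (calloc(1, sz));
 *      }
 *
 *      // the size argument allows size-aware allocators; libc free
 *      // simply ignores it
 *      static void
 *      my_free(void *buf, size_t sz)
 *      {
 *              free(buf);
 *      }
 *
 *      hdl = fmev_shdl_init(LIBFMEVENT_VERSION_2,
 *          my_alloc, my_zalloc, my_free);
 */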

fmev_shdl_t
fmev_shdl_init(uint32_t caller_version, void *(*hdlalloc)(size_t),
    void *(*hdlzalloc)(size_t), void (*hdlfree)(void *, size_t))
{
        fmev_shdl_impl_t *ihdl;
        struct fmev_hdl_cmn hc;
        const char *chan_name;
        int err;

        hc.hc_magic = _FMEV_SHMAGIC;
        hc.hc_api_vers = caller_version;
        hc.hc_alloc = hdlalloc ? hdlalloc : dflt_alloc;
        hc.hc_zalloc = hdlzalloc ? hdlzalloc : dflt_zalloc;
        hc.hc_free = hdlfree ? hdlfree : dflt_free;

        if (!fmev_api_init(&hc))
                return (NULL);  /* error type set */

        if (!((hdlalloc == NULL && hdlzalloc == NULL && hdlfree == NULL) ||
            (hdlalloc != NULL && hdlzalloc != NULL && hdlfree != NULL))) {
                (void) fmev_seterr(FMEVERR_API);
                return (NULL);
        }

        if (hdlzalloc == NULL)
                ihdl = dflt_zalloc(sizeof (*ihdl));
        else
                ihdl = hdlzalloc(sizeof (*ihdl));

        if (ihdl == NULL) {
                (void) fmev_seterr(FMEVERR_ALLOC);
                return (NULL);
        }

        ihdl->sh_cmn = hc;

        if ((ihdl->sh_attr = sysevent_subattr_alloc()) == NULL) {
                err = FMEVERR_ALLOC;
                goto error;
        }

        (void) pthread_mutex_init(&ihdl->sh_lock, NULL);

        /*
         * For simulation purposes we allow an environment variable
         * to provide a different channel name.
         */
        if ((chan_name = getenv("FMD_SNOOP_CHANNEL")) == NULL)
                chan_name = FMD_SNOOP_CHANNEL;

        /*
         * Try to bind to the event channel.  If it's not already present,
         * attempt to create the channel so that we can start up before
         * the event producer (who will also apply choices such as
         * channel depth when they bind to the channel).
         */
        if (sysevent_evc_bind(chan_name, &ihdl->sh_binding,
            EVCH_CREAT | EVCH_HOLD_PEND_INDEF) != 0) {
                switch (errno) {
                case EINVAL:
                default:
                        err = FMEVERR_INTERNAL;
                        break;
                case ENOMEM:
                        err = FMEVERR_ALLOC;
                        break;
                case EPERM:
                        err = FMEVERR_NOPRIV;
                        break;
                }
                goto error;
        }

        if ((ihdl->sh_pool = uu_avl_pool_create("subinfo_pool",
            sizeof (struct fmev_subinfo),
            offsetof(struct fmev_subinfo, si_node), fmev_keycmp,
            UU_AVL_POOL_DEBUG)) == NULL) {
                err = FMEVERR_INTERNAL;
                goto error;
        }

        if ((ihdl->sh_avl = uu_avl_create(ihdl->sh_pool, NULL,
            UU_DEFAULT)) == NULL) {
                err = FMEVERR_INTERNAL;
                goto error;
        }

        return (IHDL2HDL(ihdl));

error:
        (void) fmev_shdl_fini(IHDL2HDL(ihdl));
        (void) fmev_seterr(err);
        return (NULL);
}

fmev_err_t
fmev_shdl_getauthority(fmev_shdl_t hdl, nvlist_t **nvlp)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
        nvlist_t *propnvl;
        fmev_err_t rc;

        if (!FMEV_API_ENTER(hdl, 2))
                return (fmev_errno);

        (void) pthread_mutex_lock(&ihdl->sh_lock);

        if (sysevent_evc_getpropnvl(ihdl->sh_binding, &propnvl) != 0) {
                *nvlp = NULL;
                (void) pthread_mutex_unlock(&ihdl->sh_lock);
                return (fmev_seterr(FMEVERR_UNKNOWN));
        }

        if (propnvl == NULL) {
                rc = FMEVERR_BUSY;      /* Other end has not bound */
        } else {
                nvlist_t *auth;

                if (nvlist_lookup_nvlist(propnvl, "fmdauth", &auth) == 0) {
                        rc = (nvlist_dup(auth, nvlp, 0) == 0) ? FMEV_SUCCESS :
                            FMEVERR_ALLOC;
                } else {
                        rc = FMEVERR_INTERNAL;
                }
                nvlist_free(propnvl);
        }

        (void) pthread_mutex_unlock(&ihdl->sh_lock);

        if (rc != FMEV_SUCCESS) {
                *nvlp = NULL;
                (void) fmev_seterr(rc);
        }

        return (rc);
}

char *
fmev_shdl_nvl2str(fmev_shdl_t hdl, nvlist_t *nvl)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
        char *fmri, *fmricp;
        fmev_err_t err;
        int topoerr;

        if (!FMEV_API_ENTER(hdl, 2))
                return (NULL);

        if (g_topohdl == NULL) {
                (void) pthread_mutex_lock(&ihdl->sh_lock);
                if (g_topohdl == NULL)
                        g_topohdl = topo_open(TOPO_VERSION, NULL, &topoerr);
                (void) pthread_mutex_unlock(&ihdl->sh_lock);

                if (g_topohdl == NULL) {
                        (void) fmev_seterr(FMEVERR_INTERNAL);
                        return (NULL);
                }
        }

        if (topo_fmri_nvl2str(g_topohdl, nvl, &fmri, &topoerr) == 0) {
                fmricp = fmev_shdl_strdup(hdl, fmri);
                topo_hdl_strfree(g_topohdl, fmri);
                return (fmricp);        /* fmev_errno set if strdup failed */
        }

        switch (topoerr) {
        case ETOPO_FMRI_NOMEM:
                err = FMEVERR_ALLOC;
                break;

        case ETOPO_FMRI_MALFORM:
        case ETOPO_METHOD_NOTSUP:
        case ETOPO_METHOD_INVAL:
        default:
                err = FMEVERR_INVALIDARG;
                break;
        }

        (void) fmev_seterr(err);
        return (NULL);
}
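
/*
 * Illustrative use of fmev_shdl_nvl2str from a delivery callback; the
 * "resource" member looked up here is only an example of an FMRI-valued
 * nvlist in an event payload.  The returned string is allocated through
 * the handle and is released with fmev_shdl_strfree.
 *
 *      static void
 *      mycb(fmev_t ev, const char *class, nvlist_t *nvl, void *arg)
 *      {
 *              fmev_shdl_t hdl = arg;  // handle passed as callback arg
 *              nvlist_t *rsrc;
 *              char *fmri;
 *
 *              if (nvlist_lookup_nvlist(nvl, "resource", &rsrc) == 0 &&
 *                  (fmri = fmev_shdl_nvl2str(hdl, rsrc)) != NULL) {
 *                      // use fmri ...
 *                      fmev_shdl_strfree(hdl, fmri);
 *              }
 *      }
 */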

fmev_err_t
fmev_shdl_fini(fmev_shdl_t hdl)
{
        fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

        if (!FMEV_API_ENTER(hdl, 1))
                return (fmev_errno);

        (void) pthread_mutex_lock(&ihdl->sh_lock);

        /*
         * Verify that we are not in callback context - return an API
         * error if we are.
         */
        if (sysevent_evc_unsubscribe(ihdl->sh_binding, "invalidsid") ==
            EDEADLK) {
                (void) pthread_mutex_unlock(&ihdl->sh_lock);
                return (fmev_seterr(FMEVERR_API));
        }

        if (ihdl->sh_avl) {
                void *cookie = NULL;
                struct fmev_subinfo *sip;

                while ((sip = uu_avl_teardown(ihdl->sh_avl, &cookie)) != NULL)
                        (void) fmev_subinfo_fini(ihdl, sip, B_FALSE);

                uu_avl_destroy(ihdl->sh_avl);
                ihdl->sh_avl = NULL;
        }

        ASSERT(ihdl->sh_subcnt == 0);

        if (ihdl->sh_binding) {
                (void) sysevent_evc_unbind(ihdl->sh_binding);
                ihdl->sh_binding = NULL;
        }

        if (ihdl->sh_pool) {
                uu_avl_pool_destroy(ihdl->sh_pool);
                ihdl->sh_pool = NULL;
        }

        if (ihdl->sh_attr) {
                sysevent_subattr_free(ihdl->sh_attr);
                ihdl->sh_attr = NULL;
        }

        ihdl->sh_cmn.hc_magic = 0;

        if (g_topohdl) {
                topo_close(g_topohdl);
                g_topohdl = NULL;
        }

        (void) pthread_mutex_unlock(&ihdl->sh_lock);
        (void) pthread_mutex_destroy(&ihdl->sh_lock);

        fmev_shdl_free(hdl, hdl, sizeof (*ihdl));

        fmev_api_freetsd();

        return (fmev_seterr(FMEV_SUCCESS));
}
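
/*
 * Illustrative use of fmev_shdl_getauthority above: the caller receives
 * a duplicate of the fault manager's authority nvlist and owns it.  The
 * "product-id" member looked up below is an example only.
 *
 *      nvlist_t *auth;
 *      char *prod;
 *
 *      if (fmev_shdl_getauthority(hdl, &auth) == FMEV_SUCCESS) {
 *              if (nvlist_lookup_string(auth, "product-id", &prod) == 0)
 *                      (void) printf("product-id: %s\n", prod);
 *              nvlist_free(auth);
 *      }
 */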