/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * FMA event subscription interfaces - subscribe to FMA protocol
 * events from outside the fault manager.
 */

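/*
 * Minimal consumer sketch (illustrative only; not part of this library).
 * It assumes LIBFMEVENT_VERSION_LATEST from <fm/libfmevent.h> and uses
 * hypothetical names (mycb, hdl); error handling is abbreviated.
 *
 *	#include <fm/libfmevent.h>
 *	#include <libnvpair.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static void
 *	mycb(fmev_t ev, const char *class, nvlist_t *nvl, void *arg)
 *	{
 *		(void) printf("received %s\n", class);
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		fmev_shdl_t hdl;
 *
 *		if ((hdl = fmev_shdl_init(LIBFMEVENT_VERSION_LATEST,
 *		    NULL, NULL, NULL)) == NULL)
 *			return (1);
 *
 *		if (fmev_shdl_subscribe(hdl, "list.suspect", mycb,
 *		    NULL) != FMEV_SUCCESS) {
 *			(void) fmev_shdl_fini(hdl);
 *			return (1);
 *		}
 *
 *		(void) pause();
 *
 *		(void) fmev_shdl_unsubscribe(hdl, "list.suspect");
 *		(void) fmev_shdl_fini(hdl);
 *		return (0);
 *	}
 */
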
#include <sys/types.h>
#include <atomic.h>
#include <libsysevent.h>
#include <libuutil.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <fm/libtopo.h>

#include <fm/libfmevent.h>

#include "fmev_impl.h"

static topo_hdl_t *g_topohdl;

typedef struct {
	struct fmev_hdl_cmn sh_cmn;
	evchan_t *sh_binding;
	uu_avl_pool_t *sh_pool;
	uu_avl_t *sh_avl;
	uint32_t sh_subcnt;
	uint32_t sh_flags;
	sysevent_subattr_t *sh_attr;
	pthread_mutex_t sh_lock;
	pthread_mutex_t sh_srlz_lock;
} fmev_shdl_impl_t;

#define	HDL2IHDL(hdl)	((fmev_shdl_impl_t *)(hdl))
#define	IHDL2HDL(ihdl)	((fmev_shdl_t)(ihdl))

#define	_FMEV_SHMAGIC	0x5368446c	/* ShDl */
#define	FMEV_SHDL_VALID(ihdl)	((ihdl)->sh_cmn.hc_magic == _FMEV_SHMAGIC)

#define	SHDL_FL_SERIALIZE	0x1

#define	FMEV_API_ENTER(hdl, v) \
	fmev_api_enter(&HDL2IHDL(hdl)->sh_cmn, LIBFMEVENT_VERSION_##v)

/*
 * For each subscription on a handle we add a node to an avl tree
 * to track subscriptions.
 */

#define	FMEV_SID_SZ	(16 + 1)	/* Matches MAX_SUBID_LEN */

struct fmev_subinfo {
	uu_avl_node_t si_node;
	fmev_shdl_impl_t *si_ihdl;
	char si_pat[FMEV_MAX_CLASS];
	char si_sid[FMEV_SID_SZ];
	fmev_cbfunc_t *si_cb;
	void *si_cbarg;
};
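
/*
 * Note: the avl tree is keyed by si_pat (see fmev_keycmp below), so a
 * handle holds at most one subscription per exact class pattern string;
 * a second subscription using the same pattern fails with
 * FMEVERR_DUPLICATE.
 */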

struct fmev_hdl_cmn *
fmev_shdl_cmn(fmev_shdl_t hdl)
{
	return (&HDL2IHDL(hdl)->sh_cmn);
}

static int
shdlctl_start(fmev_shdl_impl_t *ihdl)
{
	(void) pthread_mutex_lock(&ihdl->sh_lock);

	if (ihdl->sh_subcnt == 0) {
		return (1);	/* lock still held */
	} else {
		(void) pthread_mutex_unlock(&ihdl->sh_lock);
		return (0);
	}
}

static void
shdlctl_end(fmev_shdl_impl_t *ihdl)
{
	(void) pthread_mutex_unlock(&ihdl->sh_lock);
}

fmev_err_t
fmev_shdlctl_serialize(fmev_shdl_t hdl)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!FMEV_API_ENTER(hdl, 1))
		return (fmev_errno);

	if (!shdlctl_start(ihdl))
		return (fmev_seterr(FMEVERR_BUSY));

	if (!(ihdl->sh_flags & SHDL_FL_SERIALIZE)) {
		(void) pthread_mutex_init(&ihdl->sh_srlz_lock, NULL);
		ihdl->sh_flags |= SHDL_FL_SERIALIZE;
	}

	shdlctl_end(ihdl);
	return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_thrattr(fmev_shdl_t hdl, pthread_attr_t *attr)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!FMEV_API_ENTER(hdl, 1))
		return (fmev_errno);

	if (!shdlctl_start(ihdl))
		return (fmev_seterr(FMEVERR_BUSY));

	sysevent_subattr_thrattr(ihdl->sh_attr, attr);

	shdlctl_end(ihdl);
	return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_sigmask(fmev_shdl_t hdl, sigset_t *set)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!FMEV_API_ENTER(hdl, 1))
		return (fmev_errno);

	if (!shdlctl_start(ihdl))
		return (fmev_seterr(FMEVERR_BUSY));

	sysevent_subattr_sigmask(ihdl->sh_attr, set);

	shdlctl_end(ihdl);
	return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_thrsetup(fmev_shdl_t hdl, door_xcreate_thrsetup_func_t *func,
    void *cookie)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!FMEV_API_ENTER(hdl, 1))
		return (fmev_errno);

	if (!shdlctl_start(ihdl))
		return (fmev_seterr(FMEVERR_BUSY));

	sysevent_subattr_thrsetup(ihdl->sh_attr, func, cookie);

	shdlctl_end(ihdl);
	return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_thrcreate(fmev_shdl_t hdl, door_xcreate_server_func_t *func,
    void *cookie)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!FMEV_API_ENTER(hdl, 1))
		return (fmev_errno);

	if (!shdlctl_start(ihdl))
		return (fmev_seterr(FMEVERR_BUSY));

	sysevent_subattr_thrcreate(ihdl->sh_attr, func, cookie);

	shdlctl_end(ihdl);
	return (fmev_seterr(FMEV_SUCCESS));
}
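
/*
 * The fmev_shdlctl_* tunables above only succeed while the handle has no
 * active subscriptions (shdlctl_start otherwise fails and the caller sees
 * FMEVERR_BUSY), so they must be applied between fmev_shdl_init and the
 * first fmev_shdl_subscribe.  An illustrative ordering sketch (hypothetical
 * caller code, not part of this library):
 *
 *	hdl = fmev_shdl_init(LIBFMEVENT_VERSION_LATEST, NULL, NULL, NULL);
 *	(void) fmev_shdlctl_serialize(hdl);
 *	(void) fmev_shdl_subscribe(hdl, "list.suspect", mycb, NULL);
 */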

/*
 * Our door service function.  We return 0 regardless so that the kernel
 * does not keep retrying (EAGAIN) or bleat to cmn_err.
 */

uint64_t fmev_proxy_cb_inval;
uint64_t fmev_proxy_cb_enomem;

int
fmev_proxy_cb(sysevent_t *sep, void *arg)
{
	struct fmev_subinfo *sip = arg;
	fmev_shdl_impl_t *ihdl;
	nvlist_t *nvl;
	char *class;
	fmev_t ev;

	if (sip == NULL || sip->si_cb == NULL) {
		fmev_proxy_cb_inval++;
		return (0);
	}

	ihdl = sip->si_ihdl;	/* only dereference sip after the check above */

	if ((ev = fmev_sysev2fmev(IHDL2HDL(ihdl), sep, &class, &nvl)) == NULL) {
		fmev_proxy_cb_enomem++;
		return (0);
	}

	if (ihdl->sh_flags & SHDL_FL_SERIALIZE)
		(void) pthread_mutex_lock(&ihdl->sh_srlz_lock);

	sip->si_cb(ev, class, nvl, sip->si_cbarg);

	if (ihdl->sh_flags & SHDL_FL_SERIALIZE)
		(void) pthread_mutex_unlock(&ihdl->sh_srlz_lock);

	fmev_rele(ev);	/* release hold obtained in fmev_sysev2fmev */

	return (0);
}

static volatile uint32_t fmev_subid;

fmev_err_t
fmev_shdl_subscribe(fmev_shdl_t hdl, const char *pat, fmev_cbfunc_t func,
    void *funcarg)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
	struct fmev_subinfo *sip;
	uu_avl_index_t idx;
	uint64_t nsid;
	int serr;

	if (!FMEV_API_ENTER(hdl, 1))
		return (fmev_errno);

	if (pat == NULL || func == NULL)
		return (fmev_seterr(FMEVERR_API));

	/*
	 * Empty class patterns are illegal, as is the sysevent magic for
	 * all classes.  Also validate class length.
	 */
	if (*pat == '\0' || strncmp(pat, EC_ALL, sizeof (EC_ALL)) == 0 ||
	    strncmp(pat, EC_SUB_ALL, sizeof (EC_SUB_ALL)) == 0 ||
	    strnlen(pat, FMEV_MAX_CLASS) == FMEV_MAX_CLASS)
		return (fmev_seterr(FMEVERR_BADCLASS));

	if ((sip = fmev_shdl_zalloc(hdl, sizeof (*sip))) == NULL)
		return (fmev_seterr(FMEVERR_ALLOC));

	(void) strncpy(sip->si_pat, pat, sizeof (sip->si_pat));

	uu_avl_node_init(sip, &sip->si_node, ihdl->sh_pool);

	(void) pthread_mutex_lock(&ihdl->sh_lock);

	if (uu_avl_find(ihdl->sh_avl, sip, NULL, &idx) != NULL) {
		(void) pthread_mutex_unlock(&ihdl->sh_lock);
		fmev_shdl_free(hdl, sip, sizeof (*sip));
		return (fmev_seterr(FMEVERR_DUPLICATE));
	}

	/*
	 * Generate a subscriber id for GPEC that is unique to this
	 * subscription.  There is no provision for persistent
	 * subscribers.  The subscriber id must be unique within
	 * this zone.
	 */
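	/*
	 * Illustrative example (not from the original source): with pid
	 * 1234 (0x4d2) and a counter value of 5, nsid below is
	 * 0x4d200000005 and si_sid becomes the string "4d200000005",
	 * which always fits in FMEV_SID_SZ since a 64-bit value is at
	 * most 16 hex digits.
	 */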
	nsid = (uint64_t)getpid() << 32 | atomic_inc_32_nv(&fmev_subid);
	(void) snprintf(sip->si_sid, sizeof (sip->si_sid), "%llx", nsid);

	sip->si_ihdl = ihdl;
	sip->si_cb = func;
	sip->si_cbarg = funcarg;

	if ((serr = sysevent_evc_xsubscribe(ihdl->sh_binding, sip->si_sid,
	    sip->si_pat, fmev_proxy_cb, sip, 0, ihdl->sh_attr)) != 0) {
		fmev_err_t err;

		(void) pthread_mutex_unlock(&ihdl->sh_lock);
		fmev_shdl_free(hdl, sip, sizeof (*sip));

		switch (serr) {
		case ENOMEM:
			err = FMEVERR_MAX_SUBSCRIBERS;
			break;

		default:
			err = FMEVERR_INTERNAL;
			break;
		}

		return (fmev_seterr(err));
	}

	uu_avl_insert(ihdl->sh_avl, sip, idx);
	ihdl->sh_subcnt++;

	(void) pthread_mutex_unlock(&ihdl->sh_lock);

	return (fmev_seterr(FMEV_SUCCESS));
}

static int
fmev_subinfo_fini(fmev_shdl_impl_t *ihdl, struct fmev_subinfo *sip,
    boolean_t doavl)
{
	int err;

	ASSERT(sip->si_ihdl == ihdl);

	err = sysevent_evc_unsubscribe(ihdl->sh_binding, sip->si_sid);

	if (err == 0) {
		if (doavl) {
			uu_avl_remove(ihdl->sh_avl, sip);
			uu_avl_node_fini(sip, &sip->si_node, ihdl->sh_pool);
		}
		fmev_shdl_free(IHDL2HDL(ihdl), sip, sizeof (*sip));
		ihdl->sh_subcnt--;
	}

	return (err);
}

fmev_err_t
fmev_shdl_unsubscribe(fmev_shdl_t hdl, const char *pat)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
	fmev_err_t rv = FMEVERR_NOMATCH;
	struct fmev_subinfo *sip;
	struct fmev_subinfo si;
	int err;

	if (!FMEV_API_ENTER(hdl, 1))
		return (fmev_errno);

	if (pat == NULL)
		return (fmev_seterr(FMEVERR_API));

	if (*pat == '\0' || strncmp(pat, EVCH_ALLSUB, sizeof (EC_ALL)) == 0 ||
	    strnlen(pat, FMEV_MAX_CLASS) == FMEV_MAX_CLASS)
		return (fmev_seterr(FMEVERR_BADCLASS));

	(void) strncpy(si.si_pat, pat, sizeof (si.si_pat));

	(void) pthread_mutex_lock(&ihdl->sh_lock);

	if ((sip = uu_avl_find(ihdl->sh_avl, &si, NULL, NULL)) != NULL) {
		if ((err = fmev_subinfo_fini(ihdl, sip, B_TRUE)) == 0) {
			rv = FMEV_SUCCESS;
		} else {
			/*
			 * Return an API error if the unsubscribe was
			 * attempted from within a door callback invocation;
			 * other errors should not happen.
			 */
			rv = (err == EDEADLK) ? FMEVERR_API : FMEVERR_INTERNAL;
		}
	}

	(void) pthread_mutex_unlock(&ihdl->sh_lock);

	return (fmev_seterr(rv));
}

void *
fmev_shdl_alloc(fmev_shdl_t hdl, size_t sz)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!FMEV_API_ENTER(hdl, 1))
		return (NULL);

	return (ihdl->sh_cmn.hc_alloc(sz));
}

void *
fmev_shdl_zalloc(fmev_shdl_t hdl, size_t sz)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!FMEV_API_ENTER(hdl, 1))
		return (NULL);

	return (ihdl->sh_cmn.hc_zalloc(sz));
}

void
fmev_shdl_free(fmev_shdl_t hdl, void *buf, size_t sz)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!FMEV_API_ENTER(hdl, 1))
		return;

	ihdl->sh_cmn.hc_free(buf, sz);
}

char *
fmev_shdl_strdup(fmev_shdl_t hdl, char *src)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
	size_t srclen;
	char *dst;

	if (!FMEV_API_ENTER(hdl, 2))
		return (NULL);

	srclen = strlen(src);

	if ((dst = ihdl->sh_cmn.hc_alloc(srclen + 1)) == NULL) {
		(void) fmev_seterr(FMEVERR_ALLOC);
		return (NULL);
	}

	(void) strncpy(dst, src, srclen);
	dst[srclen] = '\0';
	return (dst);
}

void
fmev_shdl_strfree(fmev_shdl_t hdl, char *buf)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	(void) FMEV_API_ENTER(hdl, 2);

	ihdl->sh_cmn.hc_free(buf, strlen(buf) + 1);
}

int
fmev_shdl_valid(fmev_shdl_t hdl)
{
	return (FMEV_SHDL_VALID(HDL2IHDL(hdl)));
}

/*ARGSUSED*/
static int
fmev_keycmp(const void *l, const void *r, void *arg)
{
	struct fmev_subinfo *left = (struct fmev_subinfo *)l;
	struct fmev_subinfo *right = (struct fmev_subinfo *)r;

	return (strncmp(left->si_pat, right->si_pat, FMEV_MAX_CLASS));
}

fmev_shdl_t
fmev_shdl_init(uint32_t caller_version, void *(*hdlalloc)(size_t),
    void *(*hdlzalloc)(size_t), void (*hdlfree)(void *, size_t))
{
	fmev_shdl_impl_t *ihdl;
	struct fmev_hdl_cmn hc;
	const char *chan_name;
	int err;

	hc.hc_magic = _FMEV_SHMAGIC;
	hc.hc_api_vers = caller_version;
	hc.hc_alloc = hdlalloc ? hdlalloc : dflt_alloc;
	hc.hc_zalloc = hdlzalloc ? hdlzalloc : dflt_zalloc;
	hc.hc_free = hdlfree ? hdlfree : dflt_free;

	if (!fmev_api_init(&hc))
		return (NULL);	/* error type set */

	if (!((hdlalloc == NULL && hdlzalloc == NULL && hdlfree == NULL) ||
	    (hdlalloc != NULL && hdlzalloc != NULL && hdlfree != NULL))) {
		(void) fmev_seterr(FMEVERR_API);
		return (NULL);
	}

	if (hdlzalloc == NULL)
		ihdl = dflt_zalloc(sizeof (*ihdl));
	else
		ihdl = hdlzalloc(sizeof (*ihdl));

	if (ihdl == NULL) {
		(void) fmev_seterr(FMEVERR_ALLOC);
		return (NULL);
	}

	ihdl->sh_cmn = hc;

	if ((ihdl->sh_attr = sysevent_subattr_alloc()) == NULL) {
		err = FMEVERR_ALLOC;
		goto error;
	}

	(void) pthread_mutex_init(&ihdl->sh_lock, NULL);

	/*
	 * For simulation purposes we allow an environment variable
	 * to provide a different channel name.
	 */
	if ((chan_name = getenv("FMD_SNOOP_CHANNEL")) == NULL)
		chan_name = FMD_SNOOP_CHANNEL;

	/*
	 * Try to bind to the event channel.  If it's not already present,
	 * attempt to create the channel so that we can start up before
	 * the event producer (who will also apply choices such as
	 * channel depth when they bind to the channel).
	 */
	if (sysevent_evc_bind(chan_name, &ihdl->sh_binding,
	    EVCH_CREAT | EVCH_HOLD_PEND_INDEF) != 0) {
		switch (errno) {
		case EINVAL:
		default:
			err = FMEVERR_INTERNAL;
			break;
		case ENOMEM:
			err = FMEVERR_ALLOC;
			break;
		case EPERM:
			err = FMEVERR_NOPRIV;
			break;
		}
		goto error;
	}

	if ((ihdl->sh_pool = uu_avl_pool_create("subinfo_pool",
	    sizeof (struct fmev_subinfo),
	    offsetof(struct fmev_subinfo, si_node), fmev_keycmp,
	    UU_AVL_POOL_DEBUG)) == NULL) {
		err = FMEVERR_INTERNAL;
		goto error;
	}

	if ((ihdl->sh_avl = uu_avl_create(ihdl->sh_pool, NULL,
	    UU_DEFAULT)) == NULL) {
		err = FMEVERR_INTERNAL;
		goto error;
	}

	return (IHDL2HDL(ihdl));

error:
	(void) fmev_shdl_fini(IHDL2HDL(ihdl));
	(void) fmev_seterr(err);
	return (NULL);
}

fmev_err_t
fmev_shdl_getauthority(fmev_shdl_t hdl, nvlist_t **nvlp)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
	nvlist_t *propnvl;
	fmev_err_t rc;

	if (!FMEV_API_ENTER(hdl, 2))
		return (fmev_errno);

	(void) pthread_mutex_lock(&ihdl->sh_lock);

	if (sysevent_evc_getpropnvl(ihdl->sh_binding, &propnvl) != 0) {
		*nvlp = NULL;
		(void) pthread_mutex_unlock(&ihdl->sh_lock);
		return (fmev_seterr(FMEVERR_UNKNOWN));
	}

	if (propnvl == NULL) {
		rc = FMEVERR_BUSY;	/* Other end has not bound */
	} else {
		nvlist_t *auth;

		if (nvlist_lookup_nvlist(propnvl, "fmdauth", &auth) == 0) {
			rc = (nvlist_dup(auth, nvlp, 0) == 0) ? FMEV_SUCCESS :
			    FMEVERR_ALLOC;
		} else {
			rc = FMEVERR_INTERNAL;
		}
		nvlist_free(propnvl);
	}

	(void) pthread_mutex_unlock(&ihdl->sh_lock);

	if (rc != FMEV_SUCCESS) {
		*nvlp = NULL;
		(void) fmev_seterr(rc);
	}

	return (rc);
}

char *
fmev_shdl_nvl2str(fmev_shdl_t hdl, nvlist_t *nvl)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
	char *fmri, *fmricp;
	fmev_err_t err;
	int topoerr;

	if (!FMEV_API_ENTER(hdl, 2))
		return (NULL);

	if (g_topohdl == NULL) {
		(void) pthread_mutex_lock(&ihdl->sh_lock);
		if (g_topohdl == NULL)
			g_topohdl = topo_open(TOPO_VERSION, NULL, &topoerr);
		(void) pthread_mutex_unlock(&ihdl->sh_lock);

		if (g_topohdl == NULL) {
			(void) fmev_seterr(FMEVERR_INTERNAL);
			return (NULL);
		}
	}

	if (topo_fmri_nvl2str(g_topohdl, nvl, &fmri, &topoerr) == 0) {
		fmricp = fmev_shdl_strdup(hdl, fmri);
		topo_hdl_strfree(g_topohdl, fmri);
		return (fmricp);	/* fmev_errno set if strdup failed */
	}

	switch (topoerr) {
	case ETOPO_FMRI_NOMEM:
		err = FMEVERR_ALLOC;
		break;

	case ETOPO_FMRI_MALFORM:
	case ETOPO_METHOD_NOTSUP:
	case ETOPO_METHOD_INVAL:
	default:
		err = FMEVERR_INVALIDARG;
		break;
	}

	(void) fmev_seterr(err);
	return (NULL);
}

fmev_err_t
fmev_shdl_fini(fmev_shdl_t hdl)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!FMEV_API_ENTER(hdl, 1))
		return (fmev_errno);

	(void) pthread_mutex_lock(&ihdl->sh_lock);

	/*
	 * Verify that we are not in callback context - return an API
	 * error if we are.
	 */
	if (sysevent_evc_unsubscribe(ihdl->sh_binding, "invalidsid") ==
	    EDEADLK) {
		(void) pthread_mutex_unlock(&ihdl->sh_lock);
		return (fmev_seterr(FMEVERR_API));
	}

	if (ihdl->sh_avl) {
		void *cookie = NULL;
		struct fmev_subinfo *sip;

		while ((sip = uu_avl_teardown(ihdl->sh_avl, &cookie)) != NULL)
			(void) fmev_subinfo_fini(ihdl, sip, B_FALSE);

		uu_avl_destroy(ihdl->sh_avl);
		ihdl->sh_avl = NULL;
	}

	ASSERT(ihdl->sh_subcnt == 0);

	if (ihdl->sh_binding) {
		(void) sysevent_evc_unbind(ihdl->sh_binding);
		ihdl->sh_binding = NULL;
	}

	if (ihdl->sh_pool) {
		uu_avl_pool_destroy(ihdl->sh_pool);
		ihdl->sh_pool = NULL;
	}

	if (ihdl->sh_attr) {
		sysevent_subattr_free(ihdl->sh_attr);
		ihdl->sh_attr = NULL;
	}

	ihdl->sh_cmn.hc_magic = 0;

	if (g_topohdl) {
		topo_close(g_topohdl);
		g_topohdl = NULL;
	}

	(void) pthread_mutex_unlock(&ihdl->sh_lock);
	(void) pthread_mutex_destroy(&ihdl->sh_lock);

	fmev_shdl_free(hdl, hdl, sizeof (*ihdl));

	fmev_api_freetsd();

	return (fmev_seterr(FMEV_SUCCESS));
}