/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * FMA event subscription interfaces - subscribe to FMA protocol events
 * from outside the fault manager.
 */
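
/*
 * A typical consumer of these interfaces (a minimal sketch only; the
 * callback name, class string and error handling below are illustrative
 * and not part of this library):
 *
 *	static void
 *	my_cb(fmev_t ev, const char *class, nvlist_t *nvl, void *arg)
 *	{
 *		(void) printf("event class %s\n", class);
 *	}
 *
 *	fmev_shdl_t hdl;
 *
 *	hdl = fmev_shdl_init(LIBFMEVENT_VERSION_1, NULL, NULL, NULL);
 *	if (hdl == NULL)
 *		... inspect fmev_errno and bail ...
 *
 *	if (fmev_shdl_subscribe(hdl, "list.suspect", my_cb,
 *	    NULL) != FMEV_SUCCESS)
 *		... handle the error ...
 *
 *	... callbacks arrive on door server threads while subscribed ...
 *
 *	(void) fmev_shdl_fini(hdl);
 */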

#include <sys/types.h>
#include <atomic.h>
#include <libsysevent.h>
#include <libuutil.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <umem.h>
#include <unistd.h>

#include <fm/libfmevent.h>

#include "fmev_impl.h"
#include "fmev_channels.h"

typedef struct {
	struct fmev_hdl_cmn sh_cmn;	/* magic, api version, allocators */
	evchan_t *sh_binding;		/* GPEC channel binding */
	uu_avl_pool_t *sh_pool;		/* pool for subscription nodes */
	uu_avl_t *sh_avl;		/* subscriptions, keyed by pattern */
	uint32_t sh_subcnt;		/* number of active subscriptions */
	uint32_t sh_flags;		/* SHDL_FL_* below */
	sysevent_subattr_t *sh_attr;	/* subscription attributes */
	pthread_mutex_t sh_lock;	/* protects handle state */
	pthread_mutex_t sh_srlz_lock;	/* optional callback serialization */
} fmev_shdl_impl_t;

#define	HDL2IHDL(hdl)	((fmev_shdl_impl_t *)(hdl))
#define	IHDL2HDL(ihdl)	((fmev_shdl_t)(ihdl))

#define	_FMEV_SHMAGIC	0x5368446c	/* ShDl */
#define	FMEV_SHDL_VALID(ihdl)	((ihdl)->sh_cmn.hc_magic == _FMEV_SHMAGIC)

#define	SHDL_FL_SERIALIZE	0x1

#define	API_ENTERV1(hdl) \
	fmev_api_enter(&HDL2IHDL(hdl)->sh_cmn, LIBFMEVENT_VERSION_1)

/*
 * Each subscription on a handle is tracked by a node in a per-handle
 * avl tree, keyed by the subscribed class pattern.
 */

#define	FMEV_SID_SZ	(16 + 1)	/* Matches MAX_SUBID_LEN */

struct fmev_subinfo {
	uu_avl_node_t si_node;		/* node in the per-handle avl tree */
	fmev_shdl_impl_t *si_ihdl;	/* handle owning this subscription */
	char si_pat[FMEV_MAX_CLASS];	/* class pattern subscribed to */
	char si_sid[FMEV_SID_SZ];	/* unique GPEC subscriber id */
	fmev_cbfunc_t *si_cb;		/* consumer callback ... */
	void *si_cbarg;			/* ... and its argument */
};

struct fmev_hdl_cmn *
fmev_shdl_cmn(fmev_shdl_t hdl)
{
	return (&HDL2IHDL(hdl)->sh_cmn);
}

/*
 * The fmev_shdlctl_* handle controls below may only be applied while a
 * handle has no subscriptions in place.  shdlctl_start enforces this,
 * returning nonzero with the handle lock held on success.
 */
static int
shdlctl_start(fmev_shdl_impl_t *ihdl)
{
	(void) pthread_mutex_lock(&ihdl->sh_lock);

	if (ihdl->sh_subcnt == 0) {
		return (1);	/* lock still held */
	} else {
		(void) pthread_mutex_unlock(&ihdl->sh_lock);
		return (0);
	}
}

static void
shdlctl_end(fmev_shdl_impl_t *ihdl)
{
	(void) pthread_mutex_unlock(&ihdl->sh_lock);
}

fmev_err_t
fmev_shdlctl_serialize(fmev_shdl_t hdl)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!API_ENTERV1(hdl))
		return (fmev_errno);

	if (!shdlctl_start(ihdl))
		return (fmev_seterr(FMEVERR_BUSY));

	if (!(ihdl->sh_flags & SHDL_FL_SERIALIZE)) {
		(void) pthread_mutex_init(&ihdl->sh_srlz_lock, NULL);
		ihdl->sh_flags |= SHDL_FL_SERIALIZE;
	}

	shdlctl_end(ihdl);
	return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_thrattr(fmev_shdl_t hdl, pthread_attr_t *attr)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!API_ENTERV1(hdl))
		return (fmev_errno);

	if (!shdlctl_start(ihdl))
		return (fmev_seterr(FMEVERR_BUSY));

	sysevent_subattr_thrattr(ihdl->sh_attr, attr);

	shdlctl_end(ihdl);
	return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_sigmask(fmev_shdl_t hdl, sigset_t *set)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!API_ENTERV1(hdl))
		return (fmev_errno);

	if (!shdlctl_start(ihdl))
		return (fmev_seterr(FMEVERR_BUSY));

	sysevent_subattr_sigmask(ihdl->sh_attr, set);

	shdlctl_end(ihdl);
	return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_thrsetup(fmev_shdl_t hdl, door_xcreate_thrsetup_func_t *func,
    void *cookie)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!API_ENTERV1(hdl))
		return (fmev_errno);

	if (!shdlctl_start(ihdl))
		return (fmev_seterr(FMEVERR_BUSY));

	sysevent_subattr_thrsetup(ihdl->sh_attr, func, cookie);

	shdlctl_end(ihdl);
	return (fmev_seterr(FMEV_SUCCESS));
}

fmev_err_t
fmev_shdlctl_thrcreate(fmev_shdl_t hdl, door_xcreate_server_func_t *func,
    void *cookie)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	if (!API_ENTERV1(hdl))
		return (fmev_errno);

	if (!shdlctl_start(ihdl))
		return (fmev_seterr(FMEVERR_BUSY));

	sysevent_subattr_thrcreate(ihdl->sh_attr, func, cookie);

	shdlctl_end(ihdl);
	return (fmev_seterr(FMEV_SUCCESS));
}
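
/*
 * The controls above may be combined as follows (a sketch; "hdl" and
 * "my_cb" are assumed from the caller's context, and all of these calls
 * must precede the first subscription or they fail with FMEVERR_BUSY):
 *
 *	pthread_attr_t attr;
 *	sigset_t set;
 *
 *	(void) pthread_attr_init(&attr);
 *	(void) sigfillset(&set);
 *
 *	if (fmev_shdlctl_thrattr(hdl, &attr) != FMEV_SUCCESS ||
 *	    fmev_shdlctl_sigmask(hdl, &set) != FMEV_SUCCESS ||
 *	    fmev_shdlctl_serialize(hdl) != FMEV_SUCCESS)
 *		... handle the error ...
 *
 *	(void) fmev_shdl_subscribe(hdl, "list.suspect", my_cb, NULL);
 *
 * fmev_shdlctl_serialize arranges for callbacks to be delivered one at
 * a time rather than concurrently on multiple door server threads.
 */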

/*
 * Our door service function.  We always return 0 so that the kernel
 * neither keeps retrying the delivery (EAGAIN) nor complains to cmn_err.
 */

uint64_t fmev_proxy_cb_inval;	/* proxy called with invalid arguments */
uint64_t fmev_proxy_cb_enomem;	/* failed to convert sysevent to fmev_t */

int
fmev_proxy_cb(sysevent_t *sep, void *arg)
{
	struct fmev_subinfo *sip = arg;
	fmev_shdl_impl_t *ihdl;
	nvlist_t *nvl;
	char *class;
	fmev_t ev;

	if (sip == NULL || sip->si_cb == NULL) {
		fmev_proxy_cb_inval++;
		return (0);
	}

	ihdl = sip->si_ihdl;

	if ((ev = fmev_sysev2fmev(IHDL2HDL(ihdl), sep, &class, &nvl)) == NULL) {
		fmev_proxy_cb_enomem++;
		return (0);
	}

	if (ihdl->sh_flags & SHDL_FL_SERIALIZE)
		(void) pthread_mutex_lock(&ihdl->sh_srlz_lock);

	sip->si_cb(ev, class, nvl, sip->si_cbarg);

	if (ihdl->sh_flags & SHDL_FL_SERIALIZE)
		(void) pthread_mutex_unlock(&ihdl->sh_srlz_lock);

	fmev_rele(ev);	/* release hold obtained in fmev_sysev2fmev */

	return (0);
}
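
/*
 * A consumer callback dispatched by the proxy above might look like the
 * following (a sketch; the "uuid" attribute is just an example member):
 *
 *	static void
 *	my_cb(fmev_t ev, const char *class, nvlist_t *nvl, void *arg)
 *	{
 *		char *uuid;
 *
 *		if (nvlist_lookup_string(nvl, "uuid", &uuid) == 0)
 *			(void) printf("%s: uuid %s\n", class, uuid);
 *	}
 *
 * Because the proxy releases its hold on the event once the callback
 * returns, the ev and nvl arguments are only valid for the duration of
 * the callback unless the consumer takes its own hold with fmev_hold().
 */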

static volatile uint32_t fmev_subid;

fmev_err_t
fmev_shdl_subscribe(fmev_shdl_t hdl, const char *pat, fmev_cbfunc_t func,
    void *funcarg)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
	struct fmev_subinfo *sip;
	uu_avl_index_t idx;
	uint64_t nsid;
	int serr;

	if (!API_ENTERV1(hdl))
		return (fmev_errno);

	if (pat == NULL || func == NULL)
		return (fmev_seterr(FMEVERR_API));

	/*
	 * Empty class patterns are illegal, as is the sysevent magic for
	 * all classes.  Also validate class length.
	 */
	if (*pat == '\0' || strncmp(pat, EC_ALL, sizeof (EC_ALL)) == 0 ||
	    strncmp(pat, EC_SUB_ALL, sizeof (EC_SUB_ALL)) == 0 ||
	    strnlen(pat, FMEV_MAX_CLASS) == FMEV_MAX_CLASS)
		return (fmev_seterr(FMEVERR_BADCLASS));

	if ((sip = fmev_shdl_zalloc(hdl, sizeof (*sip))) == NULL)
		return (fmev_seterr(FMEVERR_ALLOC));

	(void) strncpy(sip->si_pat, pat, sizeof (sip->si_pat));

	uu_avl_node_init(sip, &sip->si_node, ihdl->sh_pool);

	(void) pthread_mutex_lock(&ihdl->sh_lock);

	if (uu_avl_find(ihdl->sh_avl, sip, NULL, &idx) != NULL) {
		(void) pthread_mutex_unlock(&ihdl->sh_lock);
		fmev_shdl_free(hdl, sip, sizeof (*sip));
		return (fmev_seterr(FMEVERR_DUPLICATE));
	}

	/*
	 * Generate a subscriber id for GPEC that is unique to this
	 * subscription.  There is no provision for persistent
	 * subscribers.  The subscriber id must be unique within
	 * this zone.
	 */
	nsid = (uint64_t)getpid() << 32 | atomic_inc_32_nv(&fmev_subid);
	(void) snprintf(sip->si_sid, sizeof (sip->si_sid), "%llx", nsid);

	sip->si_ihdl = ihdl;
	sip->si_cb = func;
	sip->si_cbarg = funcarg;

	if ((serr = sysevent_evc_xsubscribe(ihdl->sh_binding, sip->si_sid,
	    sip->si_pat, fmev_proxy_cb, sip, 0, ihdl->sh_attr)) != 0) {
		fmev_err_t err;

		(void) pthread_mutex_unlock(&ihdl->sh_lock);
		fmev_shdl_free(hdl, sip, sizeof (*sip));

		switch (serr) {
		case ENOMEM:
			err = FMEVERR_MAX_SUBSCRIBERS;
			break;

		default:
			err = FMEVERR_INTERNAL;
			break;
		}

		return (fmev_seterr(err));
	}

	uu_avl_insert(ihdl->sh_avl, sip, idx);
	ihdl->sh_subcnt++;

	(void) pthread_mutex_unlock(&ihdl->sh_lock);

	return (fmev_seterr(FMEV_SUCCESS));
}
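
/*
 * Illustrative call (a sketch; the class string, callback and cookie are
 * example names):
 *
 *	fmev_err_t err;
 *
 *	err = fmev_shdl_subscribe(hdl, "list.suspect", my_cb, my_cookie);
 *	if (err != FMEV_SUCCESS)
 *		(void) fprintf(stderr, "subscribe failed: %s\n",
 *		    fmev_strerror(err));
 *
 * A second subscription with an identical pattern string on the same
 * handle fails with FMEVERR_DUPLICATE.
 */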

static int
fmev_subinfo_fini(fmev_shdl_impl_t *ihdl, struct fmev_subinfo *sip,
    boolean_t doavl)
{
	int err;

	ASSERT(sip->si_ihdl == ihdl);

	err = sysevent_evc_unsubscribe(ihdl->sh_binding, sip->si_sid);

	if (err == 0) {
		if (doavl) {
			uu_avl_remove(ihdl->sh_avl, sip);
			uu_avl_node_fini(sip, &sip->si_node, ihdl->sh_pool);
		}
		fmev_shdl_free(IHDL2HDL(ihdl), sip, sizeof (*sip));
		ihdl->sh_subcnt--;
	}

	return (err);
}

fmev_err_t
fmev_shdl_unsubscribe(fmev_shdl_t hdl, const char *pat)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);
	fmev_err_t rv = FMEVERR_NOMATCH;
	struct fmev_subinfo *sip;
	struct fmev_subinfo si;
	int err;

	if (!API_ENTERV1(hdl))
		return (fmev_errno);

	if (pat == NULL)
		return (fmev_seterr(FMEVERR_API));

	if (*pat == '\0' || strncmp(pat, EVCH_ALLSUB, sizeof (EC_ALL)) == 0 ||
	    strnlen(pat, FMEV_MAX_CLASS) == FMEV_MAX_CLASS)
		return (fmev_seterr(FMEVERR_BADCLASS));

	(void) strncpy(si.si_pat, pat, sizeof (si.si_pat));

	(void) pthread_mutex_lock(&ihdl->sh_lock);

	if ((sip = uu_avl_find(ihdl->sh_avl, &si, NULL, NULL)) != NULL) {
		if ((err = fmev_subinfo_fini(ihdl, sip, B_TRUE)) == 0) {
			rv = FMEV_SUCCESS;
		} else {
			/*
			 * Return an API error if the unsubscribe was
			 * attempted from within a door callback invocation;
			 * other errors should not happen.
			 */
			rv = (err == EDEADLK) ? FMEVERR_API : FMEVERR_INTERNAL;
		}
	}

	(void) pthread_mutex_unlock(&ihdl->sh_lock);

	return (fmev_seterr(rv));
}
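
/*
 * The avl lookup above matches on the exact pattern string used at
 * subscription time, so an unsubscribe must quote the same pattern
 * (a sketch):
 *
 *	(void) fmev_shdl_subscribe(hdl, "list.suspect", my_cb, NULL);
 *	...
 *	(void) fmev_shdl_unsubscribe(hdl, "list.suspect");
 *
 * Any subscriptions still in place are also torn down by fmev_shdl_fini.
 */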

/*
 * Default allocators, used when the caller does not supply its own.
 */

static void *
dflt_alloc(size_t sz)
{
	return (umem_alloc(sz, UMEM_DEFAULT));
}

static void *
dflt_zalloc(size_t sz)
{
	return (umem_zalloc(sz, UMEM_DEFAULT));
}

static void
dflt_free(void *buf, size_t sz)
{
	umem_free(buf, sz);
}
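
/*
 * A caller may substitute its own allocator trio when creating a handle,
 * for example (a sketch using libc allocation; fmev_shdl_init below
 * requires that all three functions be supplied together or not at all):
 *
 *	static void *
 *	my_alloc(size_t sz)
 *	{
 *		return (malloc(sz));
 *	}
 *
 *	static void *
 *	my_zalloc(size_t sz)
 *	{
 *		return (calloc(1, sz));
 *	}
 *
 *	static void
 *	my_free(void *buf, size_t sz)
 *	{
 *		free(buf);
 *	}
 *
 *	hdl = fmev_shdl_init(LIBFMEVENT_VERSION_1, my_alloc, my_zalloc,
 *	    my_free);
 */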

void *
fmev_shdl_alloc(fmev_shdl_t hdl, size_t sz)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	(void) API_ENTERV1(hdl);

	return (ihdl->sh_cmn.hc_alloc(sz));
}

void *
fmev_shdl_zalloc(fmev_shdl_t hdl, size_t sz)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	(void) API_ENTERV1(hdl);

	return (ihdl->sh_cmn.hc_zalloc(sz));
}

void
fmev_shdl_free(fmev_shdl_t hdl, void *buf, size_t sz)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	(void) API_ENTERV1(hdl);

	ihdl->sh_cmn.hc_free(buf, sz);
}

int
fmev_shdl_valid(fmev_shdl_t hdl)
{
	return (FMEV_SHDL_VALID(HDL2IHDL(hdl)));
}

/*ARGSUSED*/
static int
fmev_keycmp(const void *l, const void *r, void *arg)
{
	struct fmev_subinfo *left = (struct fmev_subinfo *)l;
	struct fmev_subinfo *right = (struct fmev_subinfo *)r;

	return (strncmp(left->si_pat, right->si_pat, FMEV_MAX_CLASS));
}

fmev_shdl_t
fmev_shdl_init(uint32_t caller_version, void *(*hdlalloc)(size_t),
    void *(*hdlzalloc)(size_t), void (*hdlfree)(void *, size_t))
{
	fmev_shdl_impl_t *ihdl;
	struct fmev_hdl_cmn hc;
	const char *chan_name;
	int err;

	hc.hc_magic = _FMEV_SHMAGIC;
	hc.hc_api_vers = caller_version;
	hc.hc_alloc = hdlalloc ? hdlalloc : dflt_alloc;
	hc.hc_zalloc = hdlzalloc ? hdlzalloc : dflt_zalloc;
	hc.hc_free = hdlfree ? hdlfree : dflt_free;

	if (!fmev_api_init(&hc))
		return (NULL);	/* error type set */

	if (!((hdlalloc == NULL && hdlzalloc == NULL && hdlfree == NULL) ||
	    (hdlalloc != NULL && hdlzalloc != NULL && hdlfree != NULL))) {
		(void) fmev_seterr(FMEVERR_API);
		return (NULL);
	}

	if (hdlzalloc == NULL)
		ihdl = dflt_zalloc(sizeof (*ihdl));
	else
		ihdl = hdlzalloc(sizeof (*ihdl));

	if (ihdl == NULL) {
		(void) fmev_seterr(FMEVERR_ALLOC);
		return (NULL);
	}

	ihdl->sh_cmn = hc;

	if ((ihdl->sh_attr = sysevent_subattr_alloc()) == NULL) {
		err = FMEVERR_ALLOC;
		goto error;
	}

	(void) pthread_mutex_init(&ihdl->sh_lock, NULL);

	/*
	 * For simulation purposes we allow an environment variable
	 * to provide a different channel name.
	 */
	if ((chan_name = getenv("FMD_SNOOP_CHANNEL")) == NULL)
		chan_name = FMD_SNOOP_CHANNEL;

	/*
	 * Try to bind to the event channel.  If it's not already present,
	 * create the channel so that we can start up before the event
	 * producer (which will also apply choices such as channel depth
	 * when it binds to the channel).
	 */
	if (sysevent_evc_bind(chan_name, &ihdl->sh_binding,
	    EVCH_CREAT | EVCH_HOLD_PEND_INDEF) != 0) {
		switch (errno) {
		case EINVAL:
		default:
			err = FMEVERR_INTERNAL;
			break;
		case ENOMEM:
			err = FMEVERR_ALLOC;
			break;
		case EPERM:
			err = FMEVERR_NOPRIV;
			break;
		}
		goto error;
	}

	if ((ihdl->sh_pool = uu_avl_pool_create("subinfo_pool",
	    sizeof (struct fmev_subinfo),
	    offsetof(struct fmev_subinfo, si_node), fmev_keycmp,
	    UU_AVL_POOL_DEBUG)) == NULL) {
		err = FMEVERR_INTERNAL;
		goto error;
	}

	if ((ihdl->sh_avl = uu_avl_create(ihdl->sh_pool, NULL,
	    UU_DEFAULT)) == NULL) {
		err = FMEVERR_INTERNAL;
		goto error;
	}

	return (IHDL2HDL(ihdl));

error:
	(void) fmev_shdl_fini(IHDL2HDL(ihdl));
	(void) fmev_seterr(err);
	return (NULL);
}
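
/*
 * On failure fmev_shdl_init returns NULL with the error class recorded
 * for retrieval via fmev_errno, so a caller might check (a sketch):
 *
 *	if ((hdl = fmev_shdl_init(LIBFMEVENT_VERSION_1,
 *	    NULL, NULL, NULL)) == NULL) {
 *		(void) fprintf(stderr, "fmev_shdl_init: %s\n",
 *		    fmev_strerror(fmev_errno));
 *		return (1);
 *	}
 */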

fmev_err_t
fmev_shdl_fini(fmev_shdl_t hdl)
{
	fmev_shdl_impl_t *ihdl = HDL2IHDL(hdl);

	(void) API_ENTERV1(hdl);

	(void) pthread_mutex_lock(&ihdl->sh_lock);

	/*
	 * Verify that we are not in callback context - return an API
	 * error if we are.
	 */
	if (sysevent_evc_unsubscribe(ihdl->sh_binding, "invalidsid") ==
	    EDEADLK) {
		(void) pthread_mutex_unlock(&ihdl->sh_lock);
		return (fmev_seterr(FMEVERR_API));
	}

	if (ihdl->sh_avl) {
		void *cookie = NULL;
		struct fmev_subinfo *sip;

		while ((sip = uu_avl_teardown(ihdl->sh_avl, &cookie)) != NULL)
			(void) fmev_subinfo_fini(ihdl, sip, B_FALSE);

		uu_avl_destroy(ihdl->sh_avl);
		ihdl->sh_avl = NULL;
	}

	ASSERT(ihdl->sh_subcnt == 0);

	if (ihdl->sh_binding) {
		(void) sysevent_evc_unbind(ihdl->sh_binding);
		ihdl->sh_binding = NULL;
	}

	if (ihdl->sh_pool) {
		uu_avl_pool_destroy(ihdl->sh_pool);
		ihdl->sh_pool = NULL;
	}

	if (ihdl->sh_attr) {
		sysevent_subattr_free(ihdl->sh_attr);
		ihdl->sh_attr = NULL;
	}

	ihdl->sh_cmn.hc_magic = 0;

	(void) pthread_mutex_unlock(&ihdl->sh_lock);
	(void) pthread_mutex_destroy(&ihdl->sh_lock);

	fmev_shdl_free(hdl, hdl, sizeof (*ihdl));

	fmev_api_freetsd();

	return (fmev_seterr(FMEV_SUCCESS));
}