xref: /illumos-gate/usr/src/cmd/fm/fmd/common/fmd_rpc_adm.c (revision 6e375c8351497b82ffa4f33cbf61d712999b4605)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <strings.h>
#include <limits.h>
#include <unistd.h>
#include <stdlib.h>
#include <alloca.h>

#include <fmd_rpc_adm.h>
#include <fmd_rpc.h>
#include <fmd_module.h>
#include <fmd_ustat.h>
#include <fmd_error.h>
#include <fmd_asru.h>
#include <fmd_ckpt.h>
#include <fmd_case.h>
#include <fmd_fmri.h>
#include <fmd_idspace.h>
#include <fmd_xprt.h>

#include <fmd.h>

bool_t
fmd_adm_modinfo_1_svc(struct fmd_rpc_modlist *rvp, struct svc_req *req)
{
	struct fmd_rpc_modinfo *rmi;
	fmd_module_t *mp;

	rvp->rml_list = NULL;
	rvp->rml_err = 0;
	rvp->rml_len = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rml_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	(void) pthread_mutex_lock(&fmd.d_mod_lock);

	for (mp = fmd_list_next(&fmd.d_mod_list);
	    mp != NULL; mp = fmd_list_next(mp)) {

		if ((rmi = malloc(sizeof (struct fmd_rpc_modinfo))) == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}

		fmd_module_lock(mp);

		/*
		 * If mod_info is NULL, the module is in the middle of loading:
		 * do not report its presence to observability tools yet.
		 */
		if (mp->mod_info == NULL) {
			fmd_module_unlock(mp);
			free(rmi);
			continue;
		}

		rmi->rmi_name = strdup(mp->mod_name);
		rmi->rmi_desc = strdup(mp->mod_info->fmdi_desc);
		rmi->rmi_vers = strdup(mp->mod_info->fmdi_vers);
		rmi->rmi_faulty = mp->mod_error != 0;
		rmi->rmi_next = rvp->rml_list;

		fmd_module_unlock(mp);
		rvp->rml_list = rmi;
		rvp->rml_len++;

		if (rmi->rmi_desc == NULL || rmi->rmi_vers == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	(void) pthread_mutex_unlock(&fmd.d_mod_lock);
	return (TRUE);
}
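
/*
 * Illustrative sketch (not part of the original source): the reply built
 * above is a simple singly-linked list constructed in reverse order, so a
 * consumer of the decoded fmd_rpc_modlist could walk it as shown below.
 * The helper name and output format are hypothetical, and the sketch would
 * additionally require <stdio.h>.
 */
#if 0
static void
fmd_adm_modinfo_print(const struct fmd_rpc_modlist *rml)
{
	const struct fmd_rpc_modinfo *rmi;

	for (rmi = rml->rml_list; rmi != NULL; rmi = rmi->rmi_next) {
		(void) printf("%s %s %s%s\n", rmi->rmi_name, rmi->rmi_vers,
		    rmi->rmi_desc, rmi->rmi_faulty ? " (faulty)" : "");
	}
}
#endif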

bool_t
fmd_adm_modcstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_ustat_snap_t snap;
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	if (fmd_modstat_snapshot(mp, &snap) == 0) {
		rms->rms_buf.rms_buf_val = snap.uss_buf;
		rms->rms_buf.rms_buf_len = snap.uss_len;
	} else if (errno == EFMD_HDL_ABORT) {
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	} else
		rms->rms_err = FMD_ADM_ERR_NOMEM;

	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_moddstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_modstat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_modstat_t) / sizeof (fmd_stat_t);

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		fmd_module_rele(mp);
		return (TRUE);
	}

	/*
	 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING stats
	 * are present in mp->mod_stats.  We don't use any for the daemon-
	 * maintained stats and provide this function in order to reduce the
	 * overhead of the fmstat(1M) default view, where these minimal stats
	 * must be retrieved for all of the active modules.
	 */
	(void) pthread_mutex_lock(&mp->mod_stats_lock);

	if (mp->mod_stats != NULL) {
		mp->mod_stats->ms_snaptime.fmds_value.ui64 = gethrtime();
		bcopy(mp->mod_stats, rms->rms_buf.rms_buf_val,
		    sizeof (fmd_modstat_t));
	} else {
		free(rms->rms_buf.rms_buf_val);
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	}

	(void) pthread_mutex_unlock(&mp->mod_stats_lock);
	fmd_module_rele(mp);
	return (TRUE);
}
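
/*
 * Illustrative sketch (not part of the original source): the note above only
 * holds because none of these stats are FMD_TYPE_STRING.  If string-valued
 * stats ever had to be returned by a flat copy like the one above, each
 * string would also need to be duplicated, much as fmd_adm_xprtstat_1_svc()
 * does later in this file.  The helper below is hypothetical.
 */
#if 0
static int
fmd_adm_stats_deep_copy(const fmd_stat_t *src, fmd_stat_t *dst, uint_t n)
{
	uint_t i;

	bcopy(src, dst, sizeof (fmd_stat_t) * n);

	for (i = 0; i < n; i++) {
		if (src[i].fmds_type == FMD_TYPE_STRING &&
		    src[i].fmds_value.str != NULL &&
		    (dst[i].fmds_value.str =
		    strdup(src[i].fmds_value.str)) == NULL)
			return (-1); /* caller would map this to ERR_NOMEM */
	}

	return (0);
}
#endif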

bool_t
fmd_adm_modgstat_1_svc(struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	const size_t size = sizeof (fmd_statistics_t);

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
	} else if ((rms->rms_buf.rms_buf_val = malloc(size)) != NULL) {
		/*
		 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING
		 * stats are present in fmd.d_stats (see definition in fmd.c).
		 */
		(void) pthread_mutex_lock(&fmd.d_stats_lock);
		bcopy(fmd.d_stats, rms->rms_buf.rms_buf_val, size);
		(void) pthread_mutex_unlock(&fmd.d_stats_lock);
		rms->rms_buf.rms_buf_len = size / sizeof (fmd_stat_t);
		rms->rms_err = 0;
	} else {
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_NOMEM;
	}

	return (TRUE);
}

bool_t
fmd_adm_modload_1_svc(char *path, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	const char *p;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/*
	 * Before we endure the expense of constructing a module and attempting
	 * to load it, do a quick check to see if the pathname is valid.
	 */
	if (access(path, F_OK) != 0) {
		*rvp = FMD_ADM_ERR_MODNOENT;
		return (TRUE);
	}

	if ((p = strrchr(path, '.')) != NULL && strcmp(p, ".so") == 0)
		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_rtld_ops);
	else
		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_proc_ops);

	if (mp == NULL) {
		switch (errno) {
		case EFMD_MOD_LOADED:
			err = FMD_ADM_ERR_MODEXIST;
			break;
		case EFMD_MOD_INIT:
			err = FMD_ADM_ERR_MODINIT;
			break;
		default:
			err = FMD_ADM_ERR_MODLOAD;
			break;
		}
	}

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modunload_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modreset_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (err == 0)
		fmd_ckpt_delete(mp); /* erase any saved checkpoints */

	if (err == 0 && fmd_modhash_load(fmd.d_mod_hash,
	    mp->mod_path, mp->mod_ops) == NULL) {
		if (errno == EFMD_MOD_INIT)
			err = FMD_ADM_ERR_MODINIT;
		else
			err = FMD_ADM_ERR_MODLOAD;
	}

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modgc_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else {
		fmd_module_gc(mp);
		fmd_module_rele(mp);
	}

	*rvp = err;
	return (TRUE);
}

/*
 * Unlike our other RPC callbacks, fmd_adm_rsrclist_1 can return large amounts
 * of data that may exceed the underlying RPC transport buffer size if the
 * resource cache is heavily populated and/or all resources are requested.
 * To minimize the likelihood of running out of RPC buffer space and having to
 * fail the client request, fmd_adm_rsrclist_1 returns a snapshot of the
 * relevant FMRI strings only: the client can use fmd_adm_rsrcinfo_1 on an
 * individual FMRI if more information is needed.  To further reduce the XDR
 * overhead, the string list is represented as XDR-opaque data where the
 * entire list is returned as a string table (e.g. "fmriA\0fmriB\0...").
 */
static void
fmd_adm_rsrclist_asru(fmd_asru_t *ap, void *arg)
{
	struct fmd_rpc_rsrclist *rrl = arg;
	size_t name_len, buf_len;
	void *p;

	/*
	 * Skip the ASRU if this fault is marked as invisible.  If rrl_all is
	 * false, take a quick look at asru_flags with no lock held to see if
	 * the ASRU is not faulty: if so, we don't report it by default and
	 * can simply skip it.  This keeps overhead low in the common case,
	 * as the call to fmd_asru_getstate() can be expensive depending on
	 * the scheme.
	 */
	if (ap->asru_flags & FMD_ASRU_INVISIBLE)
		return;
	if (rrl->rrl_all == B_FALSE && !(ap->asru_flags & FMD_ASRU_FAULTY))
		return;

	if (rrl->rrl_err != 0 || fmd_asru_getstate(ap) == 0)
		return; /* error has occurred or resource is in 'ok' state */

	/*
	 * Lock the ASRU and reallocate rrl_buf[] to be large enough to hold
	 * another string, doubling it as needed.  Then copy the new string
	 * on to the end, and increment rrl_len to indicate the used space.
	 */
	(void) pthread_mutex_lock(&ap->asru_lock);
	name_len = strlen(ap->asru_name) + 1;

	while (rrl->rrl_len + name_len > rrl->rrl_buf.rrl_buf_len) {
		if (rrl->rrl_buf.rrl_buf_len != 0)
			buf_len = rrl->rrl_buf.rrl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rrl->rrl_buf.rrl_buf_val, buf_len)) != NULL) {
			bzero((char *)p + rrl->rrl_buf.rrl_buf_len,
			    buf_len - rrl->rrl_buf.rrl_buf_len);
			rrl->rrl_buf.rrl_buf_val = p;
			rrl->rrl_buf.rrl_buf_len = buf_len;
		} else {
			rrl->rrl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rrl->rrl_err == 0) {
		bcopy(ap->asru_name, (char *)rrl->rrl_buf.rrl_buf_val +
		    rrl->rrl_len, name_len);
		rrl->rrl_len += name_len;
		rrl->rrl_cnt++;
	}

	(void) pthread_mutex_unlock(&ap->asru_lock);
}

bool_t
fmd_adm_rsrclist_1_svc(bool_t all,
    struct fmd_rpc_rsrclist *rvp, struct svc_req *req)
{
	rvp->rrl_buf.rrl_buf_len = 0;
	rvp->rrl_buf.rrl_buf_val = NULL;
	rvp->rrl_len = 0;
	rvp->rrl_cnt = 0;
	rvp->rrl_err = 0;
	rvp->rrl_all = all;

	if (fmd_rpc_deny(req))
		rvp->rrl_err = FMD_ADM_ERR_PERM;
	else
		fmd_asru_hash_apply(fmd.d_asrus, fmd_adm_rsrclist_asru, rvp);

	return (TRUE);
}
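
/*
 * Illustrative sketch (not part of the original source): the string table
 * built by fmd_adm_rsrclist_asru() is just NUL-terminated FMRIs laid end to
 * end in rrl_buf, so a consumer of the decoded fmd_rpc_rsrclist could walk
 * it as shown below.  The helper name and callback are hypothetical.
 */
#if 0
static void
fmd_adm_rsrclist_walk(const struct fmd_rpc_rsrclist *rrl,
    void (*func)(const char *fmri, void *arg), void *arg)
{
	const char *p = (const char *)rrl->rrl_buf.rrl_buf_val;
	const char *end = p + rrl->rrl_len;
	uint32_t n;

	for (n = 0; n < rrl->rrl_cnt && p < end; n++) {
		func(p, arg);
		p += strlen(p) + 1; /* advance past the terminating NUL */
	}
}
#endif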

bool_t
fmd_adm_rsrcinfo_1_svc(char *fmri,
    struct fmd_rpc_rsrcinfo *rvp, struct svc_req *req)
{
	fmd_asru_t *ap;
	fmd_case_impl_t *cip;
	int state;

	bzero(rvp, sizeof (struct fmd_rpc_rsrcinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rri_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((ap = fmd_asru_hash_lookup_name(fmd.d_asrus, fmri)) == NULL) {
		rvp->rri_err = FMD_ADM_ERR_RSRCSRCH;
		return (TRUE);
	}

	state = fmd_asru_getstate(ap);
	(void) pthread_mutex_lock(&ap->asru_lock);
	cip = (fmd_case_impl_t *)ap->asru_case;

	rvp->rri_fmri = strdup(ap->asru_name);
	rvp->rri_uuid = strdup(ap->asru_uuid);
	rvp->rri_case = cip ? strdup(cip->ci_uuid) : NULL;
	rvp->rri_faulty = (state & FMD_ASRU_FAULTY) != 0;
	rvp->rri_unusable = (state & FMD_ASRU_UNUSABLE) != 0;
	rvp->rri_invisible = (ap->asru_flags & FMD_ASRU_INVISIBLE) != 0;

	(void) pthread_mutex_unlock(&ap->asru_lock);
	fmd_asru_hash_release(fmd.d_asrus, ap);

	if (rvp->rri_fmri == NULL || rvp->rri_uuid == NULL)
		rvp->rri_err = FMD_ADM_ERR_NOMEM;

	return (TRUE);
}

bool_t
fmd_adm_rsrcflush_1_svc(char *name, int *rvp, struct svc_req *req)
{
	return (fmd_adm_rsrcrepaired_1_svc(name, rvp, req));
}

bool_t
fmd_adm_rsrcrepaired_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else {
		fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
		    fmd_asru_repaired, &err);
		fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
		    fmd_asru_repaired, &err);
		fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
		    fmd_asru_repaired, &err);
		fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
		    fmd_asru_repaired, &err);
	}
	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_rsrcreplaced_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else {
		fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
		    fmd_asru_replaced, &err);
		fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
		    fmd_asru_replaced, &err);
		fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
		    fmd_asru_replaced, &err);
		fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
		    fmd_asru_replaced, &err);
	}
	*rvp = err;
	return (TRUE);
}

typedef struct {
	int *errp;
	char *uuid;
} fmd_adm_ra_t;

void
fmd_asru_ra_cb(fmd_asru_link_t *alp, void *arg)
{
	fmd_adm_ra_t *farap = (fmd_adm_ra_t *)arg;

	if (strcmp(farap->uuid, "") == 0 ||
	    strcmp(farap->uuid, alp->al_case_uuid) == 0)
		fmd_asru_acquit(alp, farap->errp);
}

bool_t
fmd_adm_rsrcacquit_1_svc(char *name, char *uuid, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;
	fmd_adm_ra_t fara;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else {
		fara.errp = &err;
		fara.uuid = uuid;
		fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
		    fmd_asru_ra_cb, &fara);
		fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
		    fmd_asru_ra_cb, &fara);
		fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
		    fmd_asru_ra_cb, &fara);
		fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
		    fmd_asru_ra_cb, &fara);
	}
	*rvp = err;
	return (TRUE);
}

static void
fmd_adm_serdinfo_eng(fmd_serd_eng_t *sgp, void *arg)
{
	struct fmd_rpc_serdlist *rsl = arg;
	struct fmd_rpc_serdinfo *rsi = malloc(sizeof (struct fmd_rpc_serdinfo));

	uint64_t old, now = fmd_time_gethrtime();
	const fmd_serd_elem_t *oep;

	if (rsi == NULL || (rsi->rsi_name = strdup(sgp->sg_name)) == NULL) {
		rsl->rsl_err = FMD_ADM_ERR_NOMEM;
		free(rsi);
		return;
	}

	if ((oep = fmd_list_next(&sgp->sg_list)) != NULL)
		old = fmd_event_hrtime(oep->se_event);
	else
		old = now;

	rsi->rsi_delta = now >= old ? now - old : (UINT64_MAX - old) + now + 1;
	rsi->rsi_count = sgp->sg_count;
	rsi->rsi_fired = fmd_serd_eng_fired(sgp) != 0;
	rsi->rsi_n = sgp->sg_n;
	rsi->rsi_t = sgp->sg_t;
	rsi->rsi_next = rsl->rsl_list;

	rsl->rsl_list = rsi;
	rsl->rsl_len++;
}

bool_t
fmd_adm_serdinfo_1_svc(char *name,
    struct fmd_rpc_serdlist *rvp, struct svc_req *req)
{
	fmd_module_t *mp;

	rvp->rsl_list = NULL;
	rvp->rsl_err = 0;
	rvp->rsl_len = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rsl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rvp->rsl_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);
	fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdinfo_eng, rvp);
	fmd_module_unlock(mp);

	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_serdreset_1_svc(char *mname, char *sname, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	fmd_serd_eng_t *sgp;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
		*rvp = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
		if (fmd_serd_eng_fired(sgp)) {
			err = FMD_ADM_ERR_SERDFIRED;
		} else {
			fmd_serd_eng_reset(sgp);
			fmd_module_setdirty(mp);
		}
	} else
		err = FMD_ADM_ERR_SERDSRCH;

	fmd_module_unlock(mp);
	fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_logrotate_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_log_t **lpp, *old, *new;
	int try = 1, trylimit = 1;

	hrtime_t nsec = 0;
	timespec_t tv;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if (strcmp(name, "errlog") == 0)
		lpp = &fmd.d_errlog;
	else if (strcmp(name, "fltlog") == 0)
		lpp = &fmd.d_fltlog;
	else {
		*rvp = FMD_ADM_ERR_ROTSRCH;
		return (TRUE);
	}

	(void) fmd_conf_getprop(fmd.d_conf, "log.tryrotate", &trylimit);
	(void) fmd_conf_getprop(fmd.d_conf, "log.waitrotate", &nsec);

	tv.tv_sec = nsec / NANOSEC;
	tv.tv_nsec = nsec % NANOSEC;

	/*
	 * To rotate a log file, grab d_log_lock as writer to make sure no
	 * one else can discover the current log pointer.  Then try to rotate
	 * the log.  If we're successful, release the old log pointer.
	 */
	do {
		if (try > 1)
			(void) nanosleep(&tv, NULL); /* wait for checkpoints */

		(void) pthread_rwlock_wrlock(&fmd.d_log_lock);
		old = *lpp;

		if ((new = fmd_log_rotate(old)) != NULL) {
			fmd_log_rele(old);
			*lpp = new;
		}

		(void) pthread_rwlock_unlock(&fmd.d_log_lock);

	} while (new == NULL && errno == EFMD_LOG_ROTBUSY && try++ < trylimit);

	if (new != NULL)
		*rvp = 0;
	else if (errno == EFMD_LOG_ROTBUSY)
		*rvp = FMD_ADM_ERR_ROTBUSY;
	else
		*rvp = FMD_ADM_ERR_ROTFAIL;

	return (TRUE);
}

bool_t
fmd_adm_caserepair_1_svc(char *uuid, int *rvp, struct svc_req *req)
{
	fmd_case_t *cp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
		err = FMD_ADM_ERR_CASESRCH;
	else if (fmd_case_repair(cp) != 0) {
		err = errno == EFMD_CASE_OWNER ?
		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
	}

	if (cp != NULL)
		fmd_case_rele(cp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_caseacquit_1_svc(char *uuid, int *rvp, struct svc_req *req)
{
	fmd_case_t *cp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
		err = FMD_ADM_ERR_CASESRCH;
	else if (fmd_case_acquit(cp) != 0) {
		err = errno == EFMD_CASE_OWNER ?
		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
	}

	if (cp != NULL)
		fmd_case_rele(cp);

	*rvp = err;
	return (TRUE);
}

void
fmd_adm_caselist_case(fmd_case_t *cp, void *arg)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	struct fmd_rpc_caselist *rcl = arg;
	size_t uuid_len, buf_len;
	void *p;

	if (rcl->rcl_err != 0)
		return;

	/*
	 * Skip cases that are invisible or have not yet been solved.
	 */
	if (cip->ci_flags & FMD_CF_INVISIBLE)
		return;

	if (!(cip->ci_flags & FMD_CF_SOLVED))
		return;

	/*
	 * Lock the case and reallocate rcl_buf[] to be large enough to hold
	 * another string, doubling it as needed.  Then copy the new string
	 * on to the end, and increment rcl_len to indicate the used space.
	 */
	(void) pthread_mutex_lock(&cip->ci_lock);

	uuid_len = cip->ci_uuidlen + 1;

	while (rcl->rcl_len + uuid_len > rcl->rcl_buf.rcl_buf_len) {
		if (rcl->rcl_buf.rcl_buf_len != 0)
			buf_len = rcl->rcl_buf.rcl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rcl->rcl_buf.rcl_buf_val, buf_len)) != NULL) {
			bzero((char *)p + rcl->rcl_buf.rcl_buf_len,
			    buf_len - rcl->rcl_buf.rcl_buf_len);
			rcl->rcl_buf.rcl_buf_val = p;
			rcl->rcl_buf.rcl_buf_len = buf_len;
		} else {
			rcl->rcl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rcl->rcl_err == 0) {
		bcopy(cip->ci_uuid, (char *)rcl->rcl_buf.rcl_buf_val +
		    rcl->rcl_len, uuid_len);
		rcl->rcl_len += uuid_len;
		rcl->rcl_cnt++;
	}

	(void) pthread_mutex_unlock(&cip->ci_lock);
}

bool_t
fmd_adm_caselist_1_svc(struct fmd_rpc_caselist *rvp, struct svc_req *req)
{
	rvp->rcl_buf.rcl_buf_len = 0;
	rvp->rcl_buf.rcl_buf_val = NULL;
	rvp->rcl_len = 0;
	rvp->rcl_cnt = 0;
	rvp->rcl_err = 0;

	if (fmd_rpc_deny(req))
		rvp->rcl_err = FMD_ADM_ERR_PERM;
	else
		fmd_case_hash_apply(fmd.d_cases, fmd_adm_caselist_case, rvp);

	return (TRUE);
}

bool_t
fmd_adm_caseinfo_1_svc(char *uuid, struct fmd_rpc_caseinfo *rvp,
    struct svc_req *req)
{
	fmd_case_t *cp;
	nvlist_t *nvl;
	int err = 0;

	bzero(rvp, sizeof (struct fmd_rpc_caseinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rci_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL) {
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	if (!(((fmd_case_impl_t *)cp)->ci_flags & FMD_CF_SOLVED)) {
		fmd_case_rele(cp);
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	nvl = fmd_case_mkevent(cp, FM_LIST_SUSPECT_CLASS);

	err = nvlist_pack(nvl, &rvp->rci_evbuf.rci_evbuf_val,
	    &rvp->rci_evbuf.rci_evbuf_len, NV_ENCODE_XDR, 0);

	nvlist_free(nvl);

	if (err != 0)
		rvp->rci_err = FMD_ADM_ERR_NOMEM;

	fmd_case_rele(cp);

	return (TRUE);
}
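
/*
 * Illustrative sketch (not part of the original source): the reply carries
 * the list.suspect event as an nvlist packed with NV_ENCODE_XDR, so a
 * consumer of the decoded fmd_rpc_caseinfo would typically reverse the
 * nvlist_pack() above with nvlist_unpack().  The helper name below is
 * hypothetical.
 */
#if 0
static nvlist_t *
fmd_adm_caseinfo_event(const struct fmd_rpc_caseinfo *rci)
{
	nvlist_t *nvl = NULL;

	if (nvlist_unpack(rci->rci_evbuf.rci_evbuf_val,
	    rci->rci_evbuf.rci_evbuf_len, &nvl, 0) != 0)
		return (NULL);

	return (nvl); /* caller frees with nvlist_free() */
}
#endif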

/*ARGSUSED*/
static void
fmd_adm_xprtlist_one(fmd_idspace_t *ids, id_t id, void *arg)
{
	struct fmd_rpc_xprtlist *rvp = arg;

	if (rvp->rxl_len < rvp->rxl_buf.rxl_buf_len)
		rvp->rxl_buf.rxl_buf_val[rvp->rxl_len++] = id;
}

bool_t
fmd_adm_xprtlist_1_svc(struct fmd_rpc_xprtlist *rvp, struct svc_req *req)
{
	if (fmd_rpc_deny(req)) {
		rvp->rxl_buf.rxl_buf_len = 0;
		rvp->rxl_buf.rxl_buf_val = NULL;
		rvp->rxl_len = 0;
		rvp->rxl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/*
	 * Since we're taking a snapshot of the transports, and these could
	 * change after we return our result, there's no need to hold any kind
	 * of lock between retrieving ids_count and taking the snapshot.  We'll
	 * just capture up to a maximum of whatever ids_count value we sampled.
	 */
	rvp->rxl_buf.rxl_buf_len = fmd.d_xprt_ids->ids_count;
	rvp->rxl_buf.rxl_buf_val = malloc(sizeof (int32_t) *
	    rvp->rxl_buf.rxl_buf_len);
	rvp->rxl_len = 0;
	rvp->rxl_err = 0;

	if (rvp->rxl_buf.rxl_buf_val == NULL) {
		rvp->rxl_err = FMD_ADM_ERR_NOMEM;
		return (TRUE);
	}

	fmd_idspace_apply(fmd.d_xprt_ids, fmd_adm_xprtlist_one, rvp);
	return (TRUE);
}

bool_t
fmd_adm_xprtstat_1_svc(int32_t id,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_xprt_impl_t *xip;
	fmd_stat_t *sp, *ep, *cp;

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_xprt_stat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_xprt_stat_t) /
	    sizeof (fmd_stat_t);
	rms->rms_err = 0;

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		return (TRUE);
	}

	if ((xip = fmd_idspace_hold(fmd.d_xprt_ids, id)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_XPRTSRCH;
		return (TRUE);
	}

	/*
	 * Grab the stats lock and bcopy the entire transport stats array in
	 * one shot.  Then go back through and duplicate any string values.
	 */
	(void) pthread_mutex_lock(&xip->xi_stats_lock);

	sp = (fmd_stat_t *)xip->xi_stats;
	ep = sp + rms->rms_buf.rms_buf_len;
	cp = rms->rms_buf.rms_buf_val;

	bcopy(sp, cp, sizeof (fmd_xprt_stat_t));

	for (; sp < ep; sp++, cp++) {
		if (sp->fmds_type == FMD_TYPE_STRING &&
		    sp->fmds_value.str != NULL)
			cp->fmds_value.str = strdup(sp->fmds_value.str);
	}

	(void) pthread_mutex_unlock(&xip->xi_stats_lock);
	fmd_idspace_rele(fmd.d_xprt_ids, id);

	return (TRUE);
}

int
fmd_adm_1_freeresult(SVCXPRT *xprt, xdrproc_t proc, caddr_t data)
{
	xdr_free(proc, data);
	svc_done(xprt);
	return (TRUE);
}

/*
 * Custom XDR routine for our API structure fmd_stat_t.  This function must
 * match the definition of fmd_stat_t in <fmd_api.h> and must also match
 * the corresponding routine in usr/src/lib/fm/libfmd_adm/common/fmd_adm.c.
 */
bool_t
xdr_fmd_stat(XDR *xp, fmd_stat_t *sp)
{
	bool_t rv = TRUE;

	rv &= xdr_opaque(xp, sp->fmds_name, sizeof (sp->fmds_name));
	rv &= xdr_u_int(xp, &sp->fmds_type);
	rv &= xdr_opaque(xp, sp->fmds_desc, sizeof (sp->fmds_desc));

	switch (sp->fmds_type) {
	case FMD_TYPE_BOOL:
		rv &= xdr_int(xp, &sp->fmds_value.bool);
		break;
	case FMD_TYPE_INT32:
		rv &= xdr_int32_t(xp, &sp->fmds_value.i32);
		break;
	case FMD_TYPE_UINT32:
		rv &= xdr_uint32_t(xp, &sp->fmds_value.ui32);
		break;
	case FMD_TYPE_INT64:
		rv &= xdr_int64_t(xp, &sp->fmds_value.i64);
		break;
	case FMD_TYPE_UINT64:
	case FMD_TYPE_TIME:
	case FMD_TYPE_SIZE:
		rv &= xdr_uint64_t(xp, &sp->fmds_value.ui64);
		break;
	case FMD_TYPE_STRING:
		rv &= xdr_string(xp, &sp->fmds_value.str, ~0);
		break;
	}

	return (rv);
}
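
/*
 * Illustrative sketch (not part of the original source): xdr_fmd_stat() can
 * be exercised directly through a memory XDR stream, which is how the RPC
 * layer ultimately drives it.  The helper name and buffer handling below are
 * hypothetical.
 */
#if 0
static bool_t
fmd_stat_encode(fmd_stat_t *sp, char *buf, u_int len)
{
	XDR x;
	bool_t rv;

	xdrmem_create(&x, buf, len, XDR_ENCODE);
	rv = xdr_fmd_stat(&x, sp);	/* serialize one stat into buf */
	xdr_destroy(&x);

	return (rv);
}
#endif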