xref: /illumos-gate/usr/src/cmd/fm/fmd/common/fmd_rpc_adm.c (revision 1e6f4912c04ba197d638cc6eb5b35eeae672df40)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #include <strings.h>
28 #include <limits.h>
29 #include <unistd.h>
30 #include <stdlib.h>
31 #include <alloca.h>
32 
33 #include <fmd_rpc_adm.h>
34 #include <fmd_rpc.h>
35 #include <fmd_module.h>
36 #include <fmd_ustat.h>
37 #include <fmd_error.h>
38 #include <fmd_asru.h>
39 #include <fmd_ckpt.h>
40 #include <fmd_case.h>
41 #include <fmd_fmri.h>
42 #include <fmd_idspace.h>
43 #include <fmd_xprt.h>
44 
45 #include <fmd.h>
46 
47 bool_t
48 fmd_adm_modinfo_1_svc(struct fmd_rpc_modlist *rvp, struct svc_req *req)
49 {
50 	struct fmd_rpc_modinfo *rmi;
51 	fmd_module_t *mp;
52 
53 	rvp->rml_list = NULL;
54 	rvp->rml_err = 0;
55 	rvp->rml_len = 0;
56 
57 	if (fmd_rpc_deny(req)) {
58 		rvp->rml_err = FMD_ADM_ERR_PERM;
59 		return (TRUE);
60 	}
61 
62 	(void) pthread_mutex_lock(&fmd.d_mod_lock);
63 
64 	for (mp = fmd_list_next(&fmd.d_mod_list);
65 	    mp != NULL; mp = fmd_list_next(mp)) {
66 
67 		if ((rmi = malloc(sizeof (struct fmd_rpc_modinfo))) == NULL) {
68 			rvp->rml_err = FMD_ADM_ERR_NOMEM;
69 			break;
70 		}
71 
72 		fmd_module_lock(mp);
73 
74 		/*
75 		 * If mod_info is NULL, the module is in the middle of loading:
76 		 * do not report its presence to observability tools yet.
77 		 */
78 		if (mp->mod_info == NULL) {
79 			fmd_module_unlock(mp);
80 			free(rmi);
81 			continue;
82 		}
83 
84 		rmi->rmi_name = strdup(mp->mod_name);
85 		rmi->rmi_desc = strdup(mp->mod_info->fmdi_desc);
86 		rmi->rmi_vers = strdup(mp->mod_info->fmdi_vers);
87 		rmi->rmi_faulty = mp->mod_error != 0;
88 		rmi->rmi_next = rvp->rml_list;
89 
90 		fmd_module_unlock(mp);
91 		rvp->rml_list = rmi;
92 		rvp->rml_len++;
93 
94 		if (rmi->rmi_desc == NULL || rmi->rmi_vers == NULL) {
95 			rvp->rml_err = FMD_ADM_ERR_NOMEM;
96 			break;
97 		}
98 	}
99 
100 	(void) pthread_mutex_unlock(&fmd.d_mod_lock);
101 	return (TRUE);
102 }
103 
104 bool_t
105 fmd_adm_modcstat_1_svc(char *name,
106     struct fmd_rpc_modstat *rms, struct svc_req *req)
107 {
108 	fmd_ustat_snap_t snap;
109 	fmd_module_t *mp;
110 
111 	rms->rms_buf.rms_buf_val = NULL;
112 	rms->rms_buf.rms_buf_len = 0;
113 	rms->rms_err = 0;
114 
115 	if (fmd_rpc_deny(req)) {
116 		rms->rms_err = FMD_ADM_ERR_PERM;
117 		return (TRUE);
118 	}
119 
120 	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
121 		rms->rms_err = FMD_ADM_ERR_MODSRCH;
122 		return (TRUE);
123 	}
124 
125 	if (fmd_modstat_snapshot(mp, &snap) == 0) {
126 		rms->rms_buf.rms_buf_val = snap.uss_buf;
127 		rms->rms_buf.rms_buf_len = snap.uss_len;
128 	} else if (errno == EFMD_HDL_ABORT) {
129 		rms->rms_err = FMD_ADM_ERR_MODFAIL;
130 	} else
131 		rms->rms_err = FMD_ADM_ERR_NOMEM;
132 
133 	fmd_module_rele(mp);
134 	return (TRUE);
135 }
136 
/*
 * Return a copy of the named module's daemon-maintained stats (the fixed
 * fmd_modstat_t array).  The result buffer is freed later by the RPC layer
 * via fmd_adm_1_freeresult().
 */
bool_t
fmd_adm_moddstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	/* Allocate the reply buffer before taking mod_stats_lock. */
	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_modstat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_modstat_t) / sizeof (fmd_stat_t);

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		fmd_module_rele(mp);
		return (TRUE);
	}

	/*
	 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING stats
	 * are present in mp->mod_stats.  We don't use any for the daemon-
	 * maintained stats and provide this function in order to reduce the
	 * overhead of the fmstat(1M) default view, where these minimal stats
	 * must be retrieved for all of the active modules.
	 */
	(void) pthread_mutex_lock(&mp->mod_stats_lock);

	if (mp->mod_stats != NULL) {
		/* Refresh the snapshot timestamp before copying out. */
		mp->mod_stats->ms_snaptime.fmds_value.ui64 = gethrtime();
		bcopy(mp->mod_stats, rms->rms_buf.rms_buf_val,
		    sizeof (fmd_modstat_t));
	} else {
		/* Module has no stats (e.g. it failed): undo the alloc. */
		free(rms->rms_buf.rms_buf_val);
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	}

	(void) pthread_mutex_unlock(&mp->mod_stats_lock);
	fmd_module_rele(mp);
	return (TRUE);
}
191 
192 bool_t
193 fmd_adm_modgstat_1_svc(struct fmd_rpc_modstat *rms, struct svc_req *req)
194 {
195 	const size_t size = sizeof (fmd_statistics_t);
196 
197 	if (fmd_rpc_deny(req)) {
198 		rms->rms_buf.rms_buf_val = NULL;
199 		rms->rms_buf.rms_buf_len = 0;
200 		rms->rms_err = FMD_ADM_ERR_PERM;
201 	} else if ((rms->rms_buf.rms_buf_val = malloc(size)) != NULL) {
202 		/*
203 		 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING
204 		 * stats are present in fmd.d_stats (see definition in fmd.c).
205 		 */
206 		(void) pthread_mutex_lock(&fmd.d_stats_lock);
207 		bcopy(fmd.d_stats, rms->rms_buf.rms_buf_val, size);
208 		(void) pthread_mutex_unlock(&fmd.d_stats_lock);
209 		rms->rms_buf.rms_buf_len = size / sizeof (fmd_stat_t);
210 		rms->rms_err = 0;
211 	} else {
212 		rms->rms_buf.rms_buf_len = 0;
213 		rms->rms_err = FMD_ADM_ERR_NOMEM;
214 	}
215 
216 	return (TRUE);
217 }
218 
219 bool_t
220 fmd_adm_modload_1_svc(char *path, int *rvp, struct svc_req *req)
221 {
222 	fmd_module_t *mp;
223 	const char *p;
224 	int err = 0;
225 
226 	if (fmd_rpc_deny(req)) {
227 		*rvp = FMD_ADM_ERR_PERM;
228 		return (TRUE);
229 	}
230 
231 	/*
232 	 * Before we endure the expense of constructing a module and attempting
233 	 * to load it, do a quick check to see if the pathname is valid.
234 	 */
235 	if (access(path, F_OK) != 0) {
236 		*rvp = FMD_ADM_ERR_MODNOENT;
237 		return (TRUE);
238 	}
239 
240 	if ((p = strrchr(path, '.')) != NULL && strcmp(p, ".so") == 0)
241 		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_rtld_ops);
242 	else
243 		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_proc_ops);
244 
245 	if (mp == NULL) {
246 		switch (errno) {
247 		case EFMD_MOD_LOADED:
248 			err = FMD_ADM_ERR_MODEXIST;
249 			break;
250 		case EFMD_MOD_INIT:
251 			err = FMD_ADM_ERR_MODINIT;
252 			break;
253 		default:
254 			err = FMD_ADM_ERR_MODLOAD;
255 			break;
256 		}
257 	}
258 
259 	*rvp = err;
260 	return (TRUE);
261 }
262 
263 bool_t
264 fmd_adm_modunload_1_svc(char *name, int *rvp, struct svc_req *req)
265 {
266 	fmd_module_t *mp = NULL;
267 	int err = 0;
268 
269 	if (fmd_rpc_deny(req))
270 		err = FMD_ADM_ERR_PERM;
271 	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
272 		err = FMD_ADM_ERR_MODSRCH;
273 	else if (mp == fmd.d_self)
274 		err = FMD_ADM_ERR_MODBUSY;
275 	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
276 		err = FMD_ADM_ERR_MODSRCH;
277 
278 	if (mp != NULL)
279 		fmd_module_rele(mp);
280 
281 	*rvp = err;
282 	return (TRUE);
283 }
284 
/*
 * Reset the named module: unload it, erase its saved checkpoints, and then
 * reload it from the same path with the same ops vector.  The module hold
 * from the lookup keeps mp (and its mod_path/mod_ops) valid across the
 * unload/reload sequence.
 */
bool_t
fmd_adm_modreset_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	/* err == 0 implies the lookup succeeded, so mp is non-NULL here */
	if (err == 0)
		fmd_ckpt_delete(mp); /* erase any saved checkpoints */

	if (err == 0 && fmd_modhash_load(fmd.d_mod_hash,
	    mp->mod_path, mp->mod_ops) == NULL) {
		if (errno == EFMD_MOD_INIT)
			err = FMD_ADM_ERR_MODINIT;
		else
			err = FMD_ADM_ERR_MODLOAD;
	}

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}
317 
318 bool_t
319 fmd_adm_modgc_1_svc(char *name, int *rvp, struct svc_req *req)
320 {
321 	fmd_module_t *mp;
322 	int err = 0;
323 
324 	if (fmd_rpc_deny(req))
325 		err = FMD_ADM_ERR_PERM;
326 	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
327 		err = FMD_ADM_ERR_MODSRCH;
328 	else {
329 		fmd_module_gc(mp);
330 		fmd_module_rele(mp);
331 	}
332 
333 	*rvp = err;
334 	return (TRUE);
335 }
336 
337 /*
338  * Unlike our other RPC callbacks, fmd_adm_rsrclist_1 can return large amounts
339  * of data that may exceed the underlying RPC transport buffer size if the
340  * resource cache is heavily populated and/or all resources are requested.
341  * To minimize the likelihood of running out of RPC buffer space and having to
342  * fail the client request, fmd_adm_rsrclist_1 returns a snapshot of the
343  * relevant FMRI strings only: the client can use fmd_adm_rsrcinfo_1 on an
344  * individual FMRI if more information is needed.  To further reduce the XDR
345  * overhead, the string list is represented as XDR-opaque data where the
346  * entire list is returned as a string table (e.g. "fmriA\0fmriB\0...").
347  */
/*
 * Per-ASRU callback for fmd_adm_rsrclist_1_svc(): append this ASRU's FMRI
 * string (including its NUL) to the packed string table in rrl_buf,
 * growing the buffer as needed.  Once rrl_err is set, subsequent calls
 * become no-ops so the first error is preserved.
 */
static void
fmd_adm_rsrclist_asru(fmd_asru_t *ap, void *arg)
{
	struct fmd_rpc_rsrclist *rrl = arg;
	size_t name_len, buf_len;
	void *p;

	/*
	 * Skip the ASRU if this fault is marked as invisible.
	 * If rrl_all is false, we take a quick look at asru_flags with no lock
	 * held to see if the ASRU is not faulty.  If so,
	 * we don't want to report it by default and can just skip this ASRU.
	 * This helps keep overhead low in the common case, as the call to
	 * fmd_asru_getstate() can be expensive depending on the scheme.
	 */

	if (ap->asru_flags & FMD_ASRU_INVISIBLE)
		return;
	if (rrl->rrl_all == B_FALSE && !(ap->asru_flags & FMD_ASRU_FAULTY))
		return;

	if (rrl->rrl_err != 0 || fmd_asru_getstate(ap) == 0)
		return; /* error has occurred or resource is in 'ok' state */

	/*
	 * Lock the ASRU and reallocate rrl_buf[] to be large enough to hold
	 * another string, doubling it as needed.  Then copy the new string
	 * on to the end, and increment rrl_len to indicate the used space.
	 */
	(void) pthread_mutex_lock(&ap->asru_lock);
	name_len = strlen(ap->asru_name) + 1;

	while (rrl->rrl_len + name_len > rrl->rrl_buf.rrl_buf_len) {
		if (rrl->rrl_buf.rrl_buf_len != 0)
			buf_len = rrl->rrl_buf.rrl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rrl->rrl_buf.rrl_buf_val, buf_len)) != NULL) {
			/* zero the newly-added tail of the buffer */
			bzero((char *)p + rrl->rrl_buf.rrl_buf_len,
			    buf_len - rrl->rrl_buf.rrl_buf_len);
			rrl->rrl_buf.rrl_buf_val = p;
			rrl->rrl_buf.rrl_buf_len = buf_len;
		} else {
			/* old buffer remains valid after a failed realloc */
			rrl->rrl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rrl->rrl_err == 0) {
		/* name_len includes the NUL, so entries stay separated */
		bcopy(ap->asru_name, (char *)rrl->rrl_buf.rrl_buf_val +
		    rrl->rrl_len, name_len);
		rrl->rrl_len += name_len;
		rrl->rrl_cnt++;
	}

	(void) pthread_mutex_unlock(&ap->asru_lock);
}
406 
407 bool_t
408 fmd_adm_rsrclist_1_svc(bool_t all,
409     struct fmd_rpc_rsrclist *rvp, struct svc_req *req)
410 {
411 	rvp->rrl_buf.rrl_buf_len = 0;
412 	rvp->rrl_buf.rrl_buf_val = NULL;
413 	rvp->rrl_len = 0;
414 	rvp->rrl_cnt = 0;
415 	rvp->rrl_err = 0;
416 	rvp->rrl_all = all;
417 
418 	if (fmd_rpc_deny(req))
419 		rvp->rrl_err = FMD_ADM_ERR_PERM;
420 	else
421 		fmd_asru_hash_apply(fmd.d_asrus, fmd_adm_rsrclist_asru, rvp);
422 
423 	return (TRUE);
424 }
425 
/*
 * Return detailed information for a single resource identified by FMRI:
 * its name, UUID, owning case UUID (if any), and state flags.  The strdup
 * results are checked after the lock is dropped; a NULL rri_case is
 * legitimate when no case is attached, so a failed dup of ci_uuid is
 * indistinguishable from "no case" here.
 */
bool_t
fmd_adm_rsrcinfo_1_svc(char *fmri,
    struct fmd_rpc_rsrcinfo *rvp, struct svc_req *req)
{
	fmd_asru_t *ap;
	fmd_case_impl_t *cip;
	int state;

	bzero(rvp, sizeof (struct fmd_rpc_rsrcinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rri_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((ap = fmd_asru_hash_lookup_name(fmd.d_asrus, fmri)) == NULL) {
		rvp->rri_err = FMD_ADM_ERR_RSRCSRCH;
		return (TRUE);
	}

	/* getstate may be expensive; call it before taking asru_lock */
	state = fmd_asru_getstate(ap);
	(void) pthread_mutex_lock(&ap->asru_lock);
	cip = (fmd_case_impl_t *)ap->asru_case;

	rvp->rri_fmri = strdup(ap->asru_name);
	rvp->rri_uuid = strdup(ap->asru_uuid);
	rvp->rri_case = cip ? strdup(cip->ci_uuid) : NULL;
	rvp->rri_faulty = (state & FMD_ASRU_FAULTY) != 0;
	rvp->rri_unusable = (state & FMD_ASRU_UNUSABLE) != 0;
	rvp->rri_invisible = (ap->asru_flags & FMD_ASRU_INVISIBLE) != 0;

	(void) pthread_mutex_unlock(&ap->asru_lock);
	fmd_asru_hash_release(fmd.d_asrus, ap);

	if (rvp->rri_fmri == NULL || rvp->rri_uuid == NULL)
		rvp->rri_err = FMD_ADM_ERR_NOMEM;

	return (TRUE);
}
465 
466 static void
467 fmd_adm_do_repair(char *name, struct svc_req *req, int *errp, uint8_t reason,
468     char *uuid)
469 {
470 	if (fmd_rpc_deny(req))
471 		*errp = FMD_ADM_ERR_PERM;
472 	else {
473 		fmd_asru_rep_arg_t fara;
474 		int err = FARA_ERR_RSRCNOTF;
475 
476 		fara.fara_reason = reason;
477 		fara.fara_rval = &err;
478 		fara.fara_uuid = uuid;
479 		fara.fara_bywhat = FARA_BY_ASRU;
480 		fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
481 		    fmd_asru_repaired, &fara);
482 		fara.fara_bywhat = FARA_BY_LABEL;
483 		fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
484 		    fmd_asru_repaired, &fara);
485 		fara.fara_bywhat = FARA_BY_FRU;
486 		fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
487 		    fmd_asru_repaired, &fara);
488 		fara.fara_bywhat = FARA_BY_RSRC;
489 		fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
490 		    fmd_asru_repaired, &fara);
491 		if (err == FARA_ERR_RSRCNOTR)
492 			*errp = FMD_ADM_ERR_RSRCNOTR;
493 		else if (err == FARA_OK)
494 			*errp = 0;
495 	}
496 }
497 
498 bool_t
499 fmd_adm_rsrcflush_1_svc(char *name, int *rvp, struct svc_req *req)
500 {
501 	int err = FMD_ADM_ERR_RSRCNOTF;
502 
503 	/*
504 	 * If anyone does an fmadm flush command, discard any resolved
505 	 * cases that were being retained for historic diagnosis.
506 	 */
507 	if (fmd_rpc_deny(req))
508 		err = FMD_ADM_ERR_PERM;
509 	else {
510 		fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
511 		    fmd_asru_flush, &err);
512 		fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
513 		    fmd_asru_flush, &err);
514 		fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
515 		    fmd_asru_flush, &err);
516 		fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
517 		    fmd_asru_flush, &err);
518 	}
519 	*rvp = err;
520 	return (TRUE);
521 }
522 
523 bool_t
524 fmd_adm_rsrcrepaired_1_svc(char *name, int *rvp, struct svc_req *req)
525 {
526 	int err = FMD_ADM_ERR_RSRCNOTF;
527 
528 	fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPAIRED, NULL);
529 	*rvp = err;
530 	return (TRUE);
531 }
532 
533 bool_t
534 fmd_adm_rsrcreplaced_1_svc(char *name, int *rvp, struct svc_req *req)
535 {
536 	int err = FMD_ADM_ERR_RSRCNOTF;
537 
538 	fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPLACED, NULL);
539 	*rvp = err;
540 	return (TRUE);
541 }
542 
543 bool_t
544 fmd_adm_rsrcacquit_1_svc(char *name, char *uuid, int *rvp, struct svc_req *req)
545 {
546 	int err = FMD_ADM_ERR_RSRCNOTF;
547 
548 	fmd_adm_do_repair(name, req, &err, FMD_ASRU_ACQUITTED, uuid);
549 	*rvp = err;
550 	return (TRUE);
551 }
552 
553 static void
554 fmd_adm_serdlist_measure(fmd_serd_eng_t *sgp, void *arg)
555 {
556 	struct fmd_rpc_serdlist *rsl = arg;
557 
558 	rsl->rsl_len += strlen(sgp->sg_name) + 1;
559 	rsl->rsl_cnt++;
560 }
561 
/*
 * Second-pass SERD hash callback: copy each engine name into the packed
 * string table at the current offset.  Only strlen() bytes are copied; the
 * NUL separator between entries comes from the caller having bzero'd the
 * whole buffer before this pass.
 */
static void
fmd_adm_serdlist_record(fmd_serd_eng_t *sgp, void *arg)
{
	struct fmd_rpc_serdlist *rsl = arg;

	bcopy(sgp->sg_name, rsl->rsl_buf.rsl_buf_val + rsl->rsl_len,
	    strlen(sgp->sg_name));
	rsl->rsl_len += strlen(sgp->sg_name) + 1;
}
571 
/*
 * Return the list of SERD engine names for the named module as a packed
 * string table ("nameA\0nameB\0...").  Two passes are made over the SERD
 * hash under the module lock: one to measure the required buffer size and
 * one to populate it.
 */
bool_t
fmd_adm_serdlist_1_svc(char *name, struct fmd_rpc_serdlist *rvp,
    struct svc_req *req)
{
	fmd_module_t *mp;
	void *p;

	rvp->rsl_buf.rsl_buf_len = 0;
	rvp->rsl_buf.rsl_buf_val = NULL;
	rvp->rsl_len = 0;
	rvp->rsl_cnt = 0;
	rvp->rsl_err = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rsl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rvp->rsl_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);
	/* In the first pass, collect the overall length of the buffer. */
	fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdlist_measure, rvp);
	if (rvp->rsl_len == 0) {
		/* No engines: return an empty (successful) list. */
		fmd_module_unlock(mp);
		fmd_module_rele(mp);
		return (TRUE);
	}
	p = malloc(rvp->rsl_len);
	if (p) {
		rvp->rsl_buf.rsl_buf_val = p;
		rvp->rsl_buf.rsl_buf_len = rvp->rsl_len;
		/* bzero provides the NUL separators written by _record */
		bzero(rvp->rsl_buf.rsl_buf_val, rvp->rsl_buf.rsl_buf_len);
		rvp->rsl_len = 0;
		/* In the second pass, populate the buffer with data. */
		fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdlist_record,
		    rvp);
	} else {
		rvp->rsl_err = FMD_ADM_ERR_NOMEM;
	}
	fmd_module_unlock(mp);

	fmd_module_rele(mp);
	return (TRUE);
}
620 
/*
 * Fill in an fmd_rpc_serdinfo from the given SERD engine: name, age of the
 * oldest recorded event, count, fired state, and the N/T parameters.
 */
static void
fmd_adm_serdinfo_record(fmd_serd_eng_t *sgp, struct fmd_rpc_serdinfo *rsi)
{
	uint64_t old, now = fmd_time_gethrtime();
	const fmd_serd_elem_t *oep;

	if ((rsi->rsi_name = strdup(sgp->sg_name)) == NULL) {
		rsi->rsi_err = FMD_ADM_ERR_NOMEM;
		return;
	}

	/* Age is measured from the oldest event, or zero if none recorded. */
	if ((oep = fmd_list_next(&sgp->sg_list)) != NULL)
		old = fmd_event_hrtime(oep->se_event);
	else
		old = now;

	/* Handle hrtime wraparound: compute the modular delta if now < old. */
	rsi->rsi_delta = now >= old ? now - old : (UINT64_MAX - old) + now + 1;
	rsi->rsi_count = sgp->sg_count;
	rsi->rsi_fired = fmd_serd_eng_fired(sgp) != 0;
	rsi->rsi_n = sgp->sg_n;
	rsi->rsi_t = sgp->sg_t;
}
643 
644 bool_t
645 fmd_adm_serdinfo_1_svc(char *mname, char *sname, struct fmd_rpc_serdinfo *rvp,
646     struct svc_req *req)
647 {
648 	fmd_module_t *mp;
649 	fmd_serd_eng_t *sgp;
650 
651 	bzero(rvp, sizeof (struct fmd_rpc_serdinfo));
652 
653 	if (fmd_rpc_deny(req)) {
654 		rvp->rsi_err = FMD_ADM_ERR_PERM;
655 		return (TRUE);
656 	}
657 
658 	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
659 		rvp->rsi_err = FMD_ADM_ERR_MODSRCH;
660 		return (TRUE);
661 	}
662 
663 	fmd_module_lock(mp);
664 
665 	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
666 		fmd_adm_serdinfo_record(sgp, rvp);
667 	} else
668 		rvp->rsi_err = FMD_ADM_ERR_SERDSRCH;
669 
670 	fmd_module_unlock(mp);
671 	fmd_module_rele(mp);
672 
673 	return (TRUE);
674 }
675 
/*
 * Stub for a superseded entry point: always fails the RPC call.
 * NOTE(review): presumably retained only so the program's procedure
 * numbering stays stable for old clients — confirm against the .x file.
 */
/*ARGSUSED*/
bool_t
fmd_adm_serdinfo_old_1_svc(char *name, struct fmd_rpc_serdlist *rvp,
    struct svc_req *req)
{
	return (FALSE);
}
683 
684 bool_t
685 fmd_adm_serdreset_1_svc(char *mname, char *sname, int *rvp, struct svc_req *req)
686 {
687 	fmd_module_t *mp;
688 	fmd_serd_eng_t *sgp;
689 	int err = 0;
690 
691 	if (fmd_rpc_deny(req)) {
692 		*rvp = FMD_ADM_ERR_PERM;
693 		return (TRUE);
694 	}
695 
696 	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
697 		*rvp = FMD_ADM_ERR_MODSRCH;
698 		return (TRUE);
699 	}
700 
701 	fmd_module_lock(mp);
702 
703 	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
704 		if (fmd_serd_eng_fired(sgp)) {
705 			err = FMD_ADM_ERR_SERDFIRED;
706 		} else {
707 			fmd_serd_eng_reset(sgp);
708 			fmd_module_setdirty(mp);
709 		}
710 	} else
711 		err = FMD_ADM_ERR_SERDSRCH;
712 
713 	fmd_module_unlock(mp);
714 	fmd_module_rele(mp);
715 
716 	*rvp = err;
717 	return (TRUE);
718 }
719 
/*
 * Rotate the error or fault log.  Rotation can fail transiently with
 * EFMD_LOG_ROTBUSY (e.g. while checkpoints pin the log), so we retry up to
 * "log.tryrotate" times, sleeping "log.waitrotate" nanoseconds between
 * attempts.
 */
bool_t
fmd_adm_logrotate_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_log_t **lpp, *old, *new;
	int try = 1, trylimit = 1;

	hrtime_t nsec = 0;
	timespec_t tv;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/* only the two well-known daemon logs may be rotated */
	if (strcmp(name, "errlog") == 0)
		lpp = &fmd.d_errlog;
	else if (strcmp(name, "fltlog") == 0)
		lpp = &fmd.d_fltlog;
	else {
		*rvp = FMD_ADM_ERR_ROTSRCH;
		return (TRUE);
	}

	(void) fmd_conf_getprop(fmd.d_conf, "log.tryrotate", &trylimit);
	(void) fmd_conf_getprop(fmd.d_conf, "log.waitrotate", &nsec);

	tv.tv_sec = nsec / NANOSEC;
	tv.tv_nsec = nsec % NANOSEC;

	/*
	 * To rotate a log file, grab d_log_lock as writer to make sure no
	 * one else can discover the current log pointer.  Then try to rotate
	 * the log.  If we're successful, release the old log pointer.
	 */
	do {
		if (try > 1)
			(void) nanosleep(&tv, NULL); /* wait for checkpoints */

		(void) pthread_rwlock_wrlock(&fmd.d_log_lock);
		old = *lpp;

		if ((new = fmd_log_rotate(old)) != NULL) {
			fmd_log_rele(old);
			*lpp = new;
		}

		(void) pthread_rwlock_unlock(&fmd.d_log_lock);

	} while (new == NULL && errno == EFMD_LOG_ROTBUSY && try++ < trylimit);

	/* errno below is the one left by the final fmd_log_rotate() attempt */
	if (new != NULL)
		*rvp = 0;
	else if (errno == EFMD_LOG_ROTBUSY)
		*rvp = FMD_ADM_ERR_ROTBUSY;
	else
		*rvp = FMD_ADM_ERR_ROTFAIL;

	return (TRUE);
}
779 
780 bool_t
781 fmd_adm_caserepair_1_svc(char *uuid, int *rvp, struct svc_req *req)
782 {
783 	fmd_case_t *cp = NULL;
784 	int err = 0;
785 
786 	if (fmd_rpc_deny(req))
787 		err = FMD_ADM_ERR_PERM;
788 	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
789 		err = FMD_ADM_ERR_CASESRCH;
790 	else if (fmd_case_repair(cp) != 0) {
791 		err = errno == EFMD_CASE_OWNER ?
792 		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
793 	}
794 
795 	if (cp != NULL)
796 		fmd_case_rele(cp);
797 
798 	*rvp = err;
799 	return (TRUE);
800 }
801 
802 bool_t
803 fmd_adm_caseacquit_1_svc(char *uuid, int *rvp, struct svc_req *req)
804 {
805 	fmd_case_t *cp = NULL;
806 	int err = 0;
807 
808 	if (fmd_rpc_deny(req))
809 		err = FMD_ADM_ERR_PERM;
810 	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
811 		err = FMD_ADM_ERR_CASESRCH;
812 	else if (fmd_case_acquit(cp) != 0) {
813 		err = errno == EFMD_CASE_OWNER ?
814 		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
815 	}
816 
817 	if (cp != NULL)
818 		fmd_case_rele(cp);
819 
820 	*rvp = err;
821 	return (TRUE);
822 }
823 
/*
 * Per-case callback for fmd_adm_caselist_1_svc(): append this case's UUID
 * (including its NUL) to the packed string table in rcl_buf, growing the
 * buffer as needed.  Only solved, visible cases are reported; once rcl_err
 * is set, subsequent calls become no-ops so the first error is preserved.
 */
void
fmd_adm_caselist_case(fmd_case_t *cp, void *arg)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	struct fmd_rpc_caselist *rcl = arg;
	size_t uuid_len, buf_len;
	void *p;

	if (rcl->rcl_err != 0)
		return;

	/*
	 * skip invisible cases
	 */
	if (cip->ci_flags & FMD_CF_INVISIBLE)
		return;

	/*
	 * Lock the case and reallocate rcl_buf[] to be large enough to hold
	 * another string, doubling it as needed.  Then copy the new string
	 * on to the end, and increment rcl_len to indicate the used space.
	 */
	if (!(cip->ci_flags & FMD_CF_SOLVED))
		return;

	(void) pthread_mutex_lock(&cip->ci_lock);

	uuid_len = cip->ci_uuidlen + 1;

	while (rcl->rcl_len + uuid_len > rcl->rcl_buf.rcl_buf_len) {
		if (rcl->rcl_buf.rcl_buf_len != 0)
			buf_len = rcl->rcl_buf.rcl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rcl->rcl_buf.rcl_buf_val, buf_len)) != NULL) {
			/* zero the newly-added tail of the buffer */
			bzero((char *)p + rcl->rcl_buf.rcl_buf_len,
			    buf_len - rcl->rcl_buf.rcl_buf_len);
			rcl->rcl_buf.rcl_buf_val = p;
			rcl->rcl_buf.rcl_buf_len = buf_len;
		} else {
			/* old buffer remains valid after a failed realloc */
			rcl->rcl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rcl->rcl_err == 0) {
		/* uuid_len includes the NUL, so entries stay separated */
		bcopy(cip->ci_uuid, (char *)rcl->rcl_buf.rcl_buf_val +
		    rcl->rcl_len, uuid_len);
		rcl->rcl_len += uuid_len;
		rcl->rcl_cnt++;
	}

	(void) pthread_mutex_unlock(&cip->ci_lock);
}
879 
880 bool_t
881 fmd_adm_caselist_1_svc(struct fmd_rpc_caselist *rvp, struct svc_req *req)
882 {
883 	rvp->rcl_buf.rcl_buf_len = 0;
884 	rvp->rcl_buf.rcl_buf_val = NULL;
885 	rvp->rcl_len = 0;
886 	rvp->rcl_cnt = 0;
887 	rvp->rcl_err = 0;
888 
889 	if (fmd_rpc_deny(req))
890 		rvp->rcl_err = FMD_ADM_ERR_PERM;
891 	else
892 		fmd_case_hash_apply(fmd.d_cases, fmd_adm_caselist_case, rvp);
893 
894 	return (TRUE);
895 }
896 
/*
 * Return the list.suspect event for the solved case identified by uuid,
 * packed as an XDR-encoded nvlist in the reply buffer.  Unsolved cases are
 * reported as not found.
 */
bool_t
fmd_adm_caseinfo_1_svc(char *uuid, struct fmd_rpc_caseinfo *rvp,
    struct svc_req *req)
{
	fmd_case_t *cp;
	nvlist_t *nvl;
	int err = 0;

	bzero(rvp, sizeof (struct fmd_rpc_caseinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rci_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL) {
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	/* only solved cases have a suspect list to report */
	if (!(((fmd_case_impl_t *)cp)->ci_flags & FMD_CF_SOLVED)) {
		fmd_case_rele(cp);
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	nvl = fmd_case_mkevent(cp, FM_LIST_SUSPECT_CLASS);

	/* nvlist_pack allocates the reply buffer and sets its length */
	err = nvlist_pack(nvl, &rvp->rci_evbuf.rci_evbuf_val,
	    &rvp->rci_evbuf.rci_evbuf_len, NV_ENCODE_XDR, 0);

	nvlist_free(nvl);

	if (err != 0)
		rvp->rci_err = FMD_ADM_ERR_NOMEM;

	fmd_case_rele(cp);

	return (TRUE);
}
937 
938 /*ARGSUSED*/
939 static void
940 fmd_adm_xprtlist_one(fmd_idspace_t *ids, id_t id, void *arg)
941 {
942 	struct fmd_rpc_xprtlist *rvp = arg;
943 
944 	if (rvp->rxl_len < rvp->rxl_buf.rxl_buf_len)
945 		rvp->rxl_buf.rxl_buf_val[rvp->rxl_len++] = id;
946 }
947 
948 bool_t
949 fmd_adm_xprtlist_1_svc(struct fmd_rpc_xprtlist *rvp, struct svc_req *req)
950 {
951 	if (fmd_rpc_deny(req)) {
952 		rvp->rxl_buf.rxl_buf_len = 0;
953 		rvp->rxl_buf.rxl_buf_val = NULL;
954 		rvp->rxl_len = 0;
955 		rvp->rxl_err = FMD_ADM_ERR_PERM;
956 		return (TRUE);
957 	}
958 
959 	/*
960 	 * Since we're taking a snapshot of the transports, and these could
961 	 * change after we return our result, there's no need to hold any kind
962 	 * of lock between retrieving ids_count and taking the snapshot.  We'll
963 	 * just capture up to a maximum of whatever ids_count value we sampled.
964 	 */
965 	rvp->rxl_buf.rxl_buf_len = fmd.d_xprt_ids->ids_count;
966 	rvp->rxl_buf.rxl_buf_val = malloc(sizeof (int32_t) *
967 	    rvp->rxl_buf.rxl_buf_len);
968 	rvp->rxl_len = 0;
969 	rvp->rxl_err = 0;
970 
971 	if (rvp->rxl_buf.rxl_buf_val == NULL) {
972 		rvp->rxl_err = FMD_ADM_ERR_NOMEM;
973 		return (TRUE);
974 	}
975 
976 	fmd_idspace_apply(fmd.d_xprt_ids, fmd_adm_xprtlist_one, rvp);
977 	return (TRUE);
978 }
979 
/*
 * Return a copy of the statistics for the transport identified by id.  The
 * fixed-size stats array is bcopy'd under xi_stats_lock and any string
 * values are then strdup'd so the copy does not alias the live stats.
 */
bool_t
fmd_adm_xprtstat_1_svc(int32_t id,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_xprt_impl_t *xip;
	fmd_stat_t *sp, *ep, *cp;

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_xprt_stat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_xprt_stat_t) /
	    sizeof (fmd_stat_t);
	rms->rms_err = 0;

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		return (TRUE);
	}

	if ((xip = fmd_idspace_hold(fmd.d_xprt_ids, id)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_XPRTSRCH;
		return (TRUE);
	}

	/*
	 * Grab the stats lock and bcopy the entire transport stats array in
	 * one shot. Then go back through and duplicate any string values.
	 */
	(void) pthread_mutex_lock(&xip->xi_stats_lock);

	sp = (fmd_stat_t *)xip->xi_stats;
	ep = sp + rms->rms_buf.rms_buf_len;
	cp = rms->rms_buf.rms_buf_val;

	bcopy(sp, cp, sizeof (fmd_xprt_stat_t));

	/*
	 * NOTE(review): a failed strdup() here leaves a NULL str in the
	 * copy with no NOMEM indication — confirm the XDR path tolerates
	 * a NULL FMD_TYPE_STRING value.
	 */
	for (; sp < ep; sp++, cp++) {
		if (sp->fmds_type == FMD_TYPE_STRING &&
		    sp->fmds_value.str != NULL)
			cp->fmds_value.str = strdup(sp->fmds_value.str);
	}

	(void) pthread_mutex_unlock(&xip->xi_stats_lock);
	fmd_idspace_rele(fmd.d_xprt_ids, id);

	return (TRUE);
}
1033 
/*
 * Free the result of an FMD_ADM RPC call after the reply has been sent,
 * then mark the request done on the transport via svc_done().
 */
int
fmd_adm_1_freeresult(SVCXPRT *xprt, xdrproc_t proc, caddr_t data)
{
	xdr_free(proc, data);
	svc_done(xprt);
	return (TRUE);
}
1041 
1042 /*
1043  * Custom XDR routine for our API structure fmd_stat_t.  This function must
1044  * match the definition of fmd_stat_t in <fmd_api.h> and must also match
1045  * the corresponding routine in usr/src/lib/fm/libfmd_adm/common/fmd_adm.c.
1046  */
1047 bool_t
1048 xdr_fmd_stat(XDR *xp, fmd_stat_t *sp)
1049 {
1050 	bool_t rv = TRUE;
1051 
1052 	rv &= xdr_opaque(xp, sp->fmds_name, sizeof (sp->fmds_name));
1053 	rv &= xdr_u_int(xp, &sp->fmds_type);
1054 	rv &= xdr_opaque(xp, sp->fmds_desc, sizeof (sp->fmds_desc));
1055 
1056 	switch (sp->fmds_type) {
1057 	case FMD_TYPE_BOOL:
1058 		rv &= xdr_int(xp, &sp->fmds_value.bool);
1059 		break;
1060 	case FMD_TYPE_INT32:
1061 		rv &= xdr_int32_t(xp, &sp->fmds_value.i32);
1062 		break;
1063 	case FMD_TYPE_UINT32:
1064 		rv &= xdr_uint32_t(xp, &sp->fmds_value.ui32);
1065 		break;
1066 	case FMD_TYPE_INT64:
1067 		rv &= xdr_int64_t(xp, &sp->fmds_value.i64);
1068 		break;
1069 	case FMD_TYPE_UINT64:
1070 	case FMD_TYPE_TIME:
1071 	case FMD_TYPE_SIZE:
1072 		rv &= xdr_uint64_t(xp, &sp->fmds_value.ui64);
1073 		break;
1074 	case FMD_TYPE_STRING:
1075 		rv &= xdr_string(xp, &sp->fmds_value.str, ~0);
1076 		break;
1077 	}
1078 
1079 	return (rv);
1080 }
1081