/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <strings.h>
#include <limits.h>
#include <unistd.h>
#include <stdlib.h>
#include <alloca.h>

#include <fmd_rpc_adm.h>
#include <fmd_rpc.h>
#include <fmd_module.h>
#include <fmd_ustat.h>
#include <fmd_error.h>
#include <fmd_asru.h>
#include <fmd_ckpt.h>
#include <fmd_case.h>
#include <fmd_fmri.h>
#include <fmd_idspace.h>
#include <fmd_xprt.h>

#include <fmd.h>

bool_t
fmd_adm_modinfo_1_svc(struct fmd_rpc_modlist *rvp, struct svc_req *req)
{
	struct fmd_rpc_modinfo *rmi;
	fmd_module_t *mp;

	rvp->rml_list = NULL;
	rvp->rml_err = 0;
	rvp->rml_len = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rml_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

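	/*
	 * Walk the list of loaded modules under d_mod_lock, taking each
	 * module's lock just long enough to copy its name, description,
	 * version, and error state into a new list element for the reply.
	 */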
	(void) pthread_mutex_lock(&fmd.d_mod_lock);

	for (mp = fmd_list_next(&fmd.d_mod_list);
	    mp != NULL; mp = fmd_list_next(mp)) {

		if ((rmi = malloc(sizeof (struct fmd_rpc_modinfo))) == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}

		fmd_module_lock(mp);

		/*
		 * If mod_info is NULL, the module is in the middle of loading:
		 * do not report its presence to observability tools yet.
		 */
		if (mp->mod_info == NULL) {
			fmd_module_unlock(mp);
			free(rmi);
			continue;
		}

		rmi->rmi_name = strdup(mp->mod_name);
		rmi->rmi_desc = strdup(mp->mod_info->fmdi_desc);
		rmi->rmi_vers = strdup(mp->mod_info->fmdi_vers);
		rmi->rmi_faulty = mp->mod_error != 0;
		rmi->rmi_next = rvp->rml_list;

		fmd_module_unlock(mp);
		rvp->rml_list = rmi;
		rvp->rml_len++;

		if (rmi->rmi_desc == NULL || rmi->rmi_vers == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	(void) pthread_mutex_unlock(&fmd.d_mod_lock);
	return (TRUE);
}

bool_t
fmd_adm_modcstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_ustat_snap_t snap;
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

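	/*
	 * Take a snapshot of the module's custom statistics.  On success the
	 * snapshot buffer is handed back as the RPC result and is expected
	 * to be released later by fmd_adm_1_freeresult() via xdr_free().
	 */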
	if (fmd_modstat_snapshot(mp, &snap) == 0) {
		rms->rms_buf.rms_buf_val = snap.uss_buf;
		rms->rms_buf.rms_buf_len = snap.uss_len;
	} else if (errno == EFMD_HDL_ABORT) {
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	} else
		rms->rms_err = FMD_ADM_ERR_NOMEM;

	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_moddstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_modstat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_modstat_t) / sizeof (fmd_stat_t);

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		fmd_module_rele(mp);
		return (TRUE);
	}

	/*
	 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING stats
	 * are present in mp->mod_stats. We don't use any for the daemon-
	 * maintained stats and provide this function in order to reduce the
	 * overhead of the fmstat(1M) default view, where these minimal stats
	 * must be retrieved for all of the active modules.
	 */
	(void) pthread_mutex_lock(&mp->mod_stats_lock);

	if (mp->mod_stats != NULL) {
		mp->mod_stats->ms_snaptime.fmds_value.ui64 = gethrtime();
		bcopy(mp->mod_stats, rms->rms_buf.rms_buf_val,
		    sizeof (fmd_modstat_t));
	} else {
		free(rms->rms_buf.rms_buf_val);
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	}

	(void) pthread_mutex_unlock(&mp->mod_stats_lock);
	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_modgstat_1_svc(struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	const size_t size = sizeof (fmd_statistics_t);

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
	} else if ((rms->rms_buf.rms_buf_val = malloc(size)) != NULL) {
		/*
		 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING
		 * stats are present in fmd.d_stats (see definition in fmd.c).
		 */
		(void) pthread_mutex_lock(&fmd.d_stats_lock);
		bcopy(fmd.d_stats, rms->rms_buf.rms_buf_val, size);
		(void) pthread_mutex_unlock(&fmd.d_stats_lock);
		rms->rms_buf.rms_buf_len = size / sizeof (fmd_stat_t);
		rms->rms_err = 0;
	} else {
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_NOMEM;
	}

	return (TRUE);
}

bool_t
fmd_adm_modload_1_svc(char *path, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	const char *p;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/*
	 * Before we endure the expense of constructing a module and attempting
	 * to load it, do a quick check to see if the pathname is valid.
	 */
	if (access(path, F_OK) != 0) {
		*rvp = FMD_ADM_ERR_MODNOENT;
		return (TRUE);
	}

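	/*
	 * Paths ending in ".so" are loaded with the run-time linker ops
	 * vector (fmd_rtld_ops); any other path falls back to fmd_proc_ops.
	 */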
	if ((p = strrchr(path, '.')) != NULL && strcmp(p, ".so") == 0)
		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_rtld_ops);
	else
		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_proc_ops);

	if (mp == NULL) {
		switch (errno) {
		case EFMD_MOD_LOADED:
			err = FMD_ADM_ERR_MODEXIST;
			break;
		case EFMD_MOD_INIT:
			err = FMD_ADM_ERR_MODINIT;
			break;
		default:
			err = FMD_ADM_ERR_MODLOAD;
			break;
		}
	}

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modunload_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

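/*
 * Resetting a module unloads it, deletes any checkpoint state it has saved,
 * and then reloads it using the same path and ops vector with which it was
 * originally loaded.
 */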
bool_t
fmd_adm_modreset_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (err == 0)
		fmd_ckpt_delete(mp); /* erase any saved checkpoints */

	if (err == 0 && fmd_modhash_load(fmd.d_mod_hash,
	    mp->mod_path, mp->mod_ops) == NULL) {
		if (errno == EFMD_MOD_INIT)
			err = FMD_ADM_ERR_MODINIT;
		else
			err = FMD_ADM_ERR_MODLOAD;
	}

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modgc_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else {
		fmd_module_gc(mp);
		fmd_module_rele(mp);
	}

	*rvp = err;
	return (TRUE);
}

/*
 * Unlike our other RPC callbacks, fmd_adm_rsrclist_1 can return large amounts
 * of data that may exceed the underlying RPC transport buffer size if the
 * resource cache is heavily populated and/or all resources are requested.
 * To minimize the likelihood of running out of RPC buffer space and having to
 * fail the client request, fmd_adm_rsrclist_1 returns a snapshot of the
 * relevant FMRI strings only: the client can use fmd_adm_rsrcinfo_1 on an
 * individual FMRI if more information is needed. To further reduce the XDR
 * overhead, the string list is represented as XDR-opaque data where the
 * entire list is returned as a string table (e.g. "fmriA\0fmriB\0...").
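 *
 * For illustration only (not part of the daemon), a client holding the reply
 * could walk the packed string table like this, where process_fmri() is a
 * hypothetical stand-in for whatever the consumer does with each name:
 *
 *	const char *p = rvp->rrl_buf.rrl_buf_val;
 *	const char *end = p + rvp->rrl_len;
 *
 *	while (p < end) {
 *		process_fmri(p);
 *		p += strlen(p) + 1;
 *	}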
 */
static void
fmd_adm_rsrclist_asru(fmd_asru_t *ap, void *arg)
{
	struct fmd_rpc_rsrclist *rrl = arg;
	size_t name_len, buf_len;
	void *p;

	/*
	 * Skip the ASRU if this fault is marked as invisible.  If rrl_all is
	 * false, take a quick look at asru_flags with no lock held: if the
	 * ASRU is not faulty, it is not reported by default and can simply
	 * be skipped.  This helps keep overhead low in the common case, as
	 * the call to fmd_asru_getstate() can be expensive depending on the
	 * scheme.
	 */

	if (ap->asru_flags & FMD_ASRU_INVISIBLE)
		return;
	if (rrl->rrl_all == B_FALSE && !(ap->asru_flags & FMD_ASRU_FAULTY))
		return;

	if (rrl->rrl_err != 0 || fmd_asru_getstate(ap) == 0)
		return; /* error has occurred or resource is in 'ok' state */

	/*
	 * Lock the ASRU and reallocate rrl_buf[] to be large enough to hold
	 * another string, doubling it as needed. Then copy the new string
	 * on to the end, and increment rrl_len to indicate the used space.
	 */
	(void) pthread_mutex_lock(&ap->asru_lock);
	name_len = strlen(ap->asru_name) + 1;

	while (rrl->rrl_len + name_len > rrl->rrl_buf.rrl_buf_len) {
		if (rrl->rrl_buf.rrl_buf_len != 0)
			buf_len = rrl->rrl_buf.rrl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rrl->rrl_buf.rrl_buf_val, buf_len)) != NULL) {
			bzero((char *)p + rrl->rrl_buf.rrl_buf_len,
			    buf_len - rrl->rrl_buf.rrl_buf_len);
			rrl->rrl_buf.rrl_buf_val = p;
			rrl->rrl_buf.rrl_buf_len = buf_len;
		} else {
			rrl->rrl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rrl->rrl_err == 0) {
		bcopy(ap->asru_name, (char *)rrl->rrl_buf.rrl_buf_val +
		    rrl->rrl_len, name_len);
		rrl->rrl_len += name_len;
		rrl->rrl_cnt++;
	}

	(void) pthread_mutex_unlock(&ap->asru_lock);
}

bool_t
fmd_adm_rsrclist_1_svc(bool_t all,
    struct fmd_rpc_rsrclist *rvp, struct svc_req *req)
{
	rvp->rrl_buf.rrl_buf_len = 0;
	rvp->rrl_buf.rrl_buf_val = NULL;
	rvp->rrl_len = 0;
	rvp->rrl_cnt = 0;
	rvp->rrl_err = 0;
	rvp->rrl_all = all;

	if (fmd_rpc_deny(req))
		rvp->rrl_err = FMD_ADM_ERR_PERM;
	else
		fmd_asru_hash_apply(fmd.d_asrus, fmd_adm_rsrclist_asru, rvp);

	return (TRUE);
}

bool_t
fmd_adm_rsrcinfo_1_svc(char *fmri,
    struct fmd_rpc_rsrcinfo *rvp, struct svc_req *req)
{
	fmd_asru_t *ap;
	fmd_case_impl_t *cip;
	int state;

	bzero(rvp, sizeof (struct fmd_rpc_rsrcinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rri_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((ap = fmd_asru_hash_lookup_name(fmd.d_asrus, fmri)) == NULL) {
		rvp->rri_err = FMD_ADM_ERR_RSRCSRCH;
		return (TRUE);
	}

	state = fmd_asru_getstate(ap);
	(void) pthread_mutex_lock(&ap->asru_lock);
	cip = (fmd_case_impl_t *)ap->asru_case;

	rvp->rri_fmri = strdup(ap->asru_name);
	rvp->rri_uuid = strdup(ap->asru_uuid);
	rvp->rri_case = cip ? strdup(cip->ci_uuid) : NULL;
	rvp->rri_faulty = (state & FMD_ASRU_FAULTY) != 0;
	rvp->rri_unusable = (state & FMD_ASRU_UNUSABLE) != 0;
	rvp->rri_invisible = (ap->asru_flags & FMD_ASRU_INVISIBLE) != 0;

	(void) pthread_mutex_unlock(&ap->asru_lock);
	fmd_asru_hash_release(fmd.d_asrus, ap);

	if (rvp->rri_fmri == NULL || rvp->rri_uuid == NULL)
		rvp->rri_err = FMD_ADM_ERR_NOMEM;

	return (TRUE);
}

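/*
 * Common helper for the repair, replace, and acquit entry points below: the
 * given name is tried in turn as an ASRU, a label, a FRU, and a resource
 * FMRI, and fmd_asru_repaired() is applied to every cache entry that matches.
 */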
static void
fmd_adm_do_repair(char *name, struct svc_req *req, int *errp, uint8_t reason,
    char *uuid)
{
	if (fmd_rpc_deny(req))
		*errp = FMD_ADM_ERR_PERM;
	else {
		fmd_asru_rep_arg_t fara;
		int err = FARA_ERR_RSRCNOTF;

		fara.fara_reason = reason;
		fara.fara_rval = &err;
		fara.fara_uuid = uuid;
		fara.fara_bywhat = FARA_BY_ASRU;
		fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		fara.fara_bywhat = FARA_BY_LABEL;
		fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		fara.fara_bywhat = FARA_BY_FRU;
		fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		fara.fara_bywhat = FARA_BY_RSRC;
		fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		if (err == FARA_ERR_RSRCNOTR)
			*errp = FMD_ADM_ERR_RSRCNOTR;
		else if (err == FARA_OK)
			*errp = 0;
	}
}

bool_t
fmd_adm_rsrcflush_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	/*
	 * If anyone does an fmadm flush command, discard any resolved
	 * cases that were being retained for historic diagnosis.
	 */
	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else {
		fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
		    fmd_asru_flush, &err);
		fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
		    fmd_asru_flush, &err);
		fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
		    fmd_asru_flush, &err);
		fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
		    fmd_asru_flush, &err);
	}
	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_rsrcrepaired_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPAIRED, NULL);
	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_rsrcreplaced_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPLACED, NULL);
	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_rsrcacquit_1_svc(char *name, char *uuid, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	fmd_adm_do_repair(name, req, &err, FMD_ASRU_ACQUITTED, uuid);
	*rvp = err;
	return (TRUE);
}

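/*
 * The SERD engine list is packed in two passes: the first pass just measures
 * the total space needed for all of the engine names, and the second pass
 * copies each name into the allocated buffer as a nul-terminated string.
 */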
static void
fmd_adm_serdlist_measure(fmd_serd_eng_t *sgp, void *arg)
{
	struct fmd_rpc_serdlist *rsl = arg;

	rsl->rsl_len += strlen(sgp->sg_name) + 1;
	rsl->rsl_cnt++;
}

static void
fmd_adm_serdlist_record(fmd_serd_eng_t *sgp, void *arg)
{
	struct fmd_rpc_serdlist *rsl = arg;

	bcopy(sgp->sg_name, rsl->rsl_buf.rsl_buf_val + rsl->rsl_len,
	    strlen(sgp->sg_name));
	rsl->rsl_len += strlen(sgp->sg_name) + 1;
}

bool_t
fmd_adm_serdlist_1_svc(char *name, struct fmd_rpc_serdlist *rvp,
    struct svc_req *req)
{
	fmd_module_t *mp;
	void *p;

	rvp->rsl_buf.rsl_buf_len = 0;
	rvp->rsl_buf.rsl_buf_val = NULL;
	rvp->rsl_len = 0;
	rvp->rsl_cnt = 0;
	rvp->rsl_err = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rsl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rvp->rsl_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);
	/* In the first pass, collect the overall length of the buffer. */
	fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdlist_measure, rvp);
	if (rvp->rsl_len == 0) {
		fmd_module_unlock(mp);
		fmd_module_rele(mp);
		return (TRUE);
	}
	p = malloc(rvp->rsl_len);
	if (p) {
		rvp->rsl_buf.rsl_buf_val = p;
		rvp->rsl_buf.rsl_buf_len = rvp->rsl_len;
		bzero(rvp->rsl_buf.rsl_buf_val, rvp->rsl_buf.rsl_buf_len);
		rvp->rsl_len = 0;
		/* In the second pass, populate the buffer with data. */
		fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdlist_record,
		    rvp);
	} else {
		rvp->rsl_err = FMD_ADM_ERR_NOMEM;
	}
	fmd_module_unlock(mp);

	fmd_module_rele(mp);
	return (TRUE);
}

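/*
 * Fill in a single SERD engine record.  The delta reported to the client is
 * the time elapsed since the oldest event currently held by the engine (zero
 * if the engine is empty), computed so that hrtime wraparound still yields
 * the correct unsigned difference.
 */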
static void
fmd_adm_serdinfo_record(fmd_serd_eng_t *sgp, struct fmd_rpc_serdinfo *rsi)
{
	uint64_t old, now = fmd_time_gethrtime();
	const fmd_serd_elem_t *oep;

	if ((rsi->rsi_name = strdup(sgp->sg_name)) == NULL) {
		rsi->rsi_err = FMD_ADM_ERR_NOMEM;
		return;
	}

	if ((oep = fmd_list_next(&sgp->sg_list)) != NULL)
		old = fmd_event_hrtime(oep->se_event);
	else
		old = now;

	rsi->rsi_delta = now >= old ? now - old : (UINT64_MAX - old) + now + 1;
	rsi->rsi_count = sgp->sg_count;
	rsi->rsi_fired = fmd_serd_eng_fired(sgp) != 0;
	rsi->rsi_n = sgp->sg_n;
	rsi->rsi_t = sgp->sg_t;
}

bool_t
fmd_adm_serdinfo_1_svc(char *mname, char *sname, struct fmd_rpc_serdinfo *rvp,
    struct svc_req *req)
{
	fmd_module_t *mp;
	fmd_serd_eng_t *sgp;

	bzero(rvp, sizeof (struct fmd_rpc_serdinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rsi_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
		rvp->rsi_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
		fmd_adm_serdinfo_record(sgp, rvp);
	} else
		rvp->rsi_err = FMD_ADM_ERR_SERDSRCH;

	fmd_module_unlock(mp);
	fmd_module_rele(mp);

	return (TRUE);
}

/*ARGSUSED*/
bool_t
fmd_adm_serdinfo_old_1_svc(char *name, struct fmd_rpc_serdlist *rvp,
    struct svc_req *req)
{
	return (FALSE);
}

bool_t
fmd_adm_serdreset_1_svc(char *mname, char *sname, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	fmd_serd_eng_t *sgp;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
		*rvp = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
		if (fmd_serd_eng_fired(sgp)) {
			err = FMD_ADM_ERR_SERDFIRED;
		} else {
			fmd_serd_eng_reset(sgp);
			fmd_module_setdirty(mp);
		}
	} else
		err = FMD_ADM_ERR_SERDSRCH;

	fmd_module_unlock(mp);
	fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_logrotate_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_log_t **lpp, *old, *new;
	int try = 1, trylimit = 1;
	pthread_rwlock_t *lockp;

	hrtime_t nsec = 0;
	timespec_t tv;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if (strcmp(name, "errlog") == 0) {
		lpp = &fmd.d_errlog;
		lockp = &fmd.d_log_lock;
	} else if (strcmp(name, "fltlog") == 0) {
		lpp = &fmd.d_fltlog;
		lockp = &fmd.d_log_lock;
	} else if (strcmp(name, "infolog") == 0) {
		lpp = &fmd.d_ilog;
		lockp = &fmd.d_ilog_lock;
	} else if (strcmp(name, "infolog_hival") == 0) {
		lpp = &fmd.d_hvilog;
		lockp = &fmd.d_hvilog_lock;
	} else {
		*rvp = FMD_ADM_ERR_ROTSRCH;
		return (TRUE);
	}

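	/*
	 * The number of rotation attempts and the delay between attempts are
	 * taken from the log.tryrotate and log.waitrotate configuration
	 * properties, respectively.
	 */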
	(void) fmd_conf_getprop(fmd.d_conf, "log.tryrotate", &trylimit);
	(void) fmd_conf_getprop(fmd.d_conf, "log.waitrotate", &nsec);

	tv.tv_sec = nsec / NANOSEC;
	tv.tv_nsec = nsec % NANOSEC;

	/*
	 * To rotate a log file, grab d_log_lock as writer to make sure no
	 * one else can discover the current log pointer. Then try to rotate
	 * the log. If we're successful, release the old log pointer.
	 */
	do {
		if (try > 1)
			(void) nanosleep(&tv, NULL); /* wait for checkpoints */

		(void) pthread_rwlock_wrlock(lockp);
		old = *lpp;

		if ((new = fmd_log_rotate(old)) != NULL) {
			fmd_log_rele(old);
			*lpp = new;
		}

		(void) pthread_rwlock_unlock(lockp);

	} while (new == NULL && errno == EFMD_LOG_ROTBUSY && try++ < trylimit);

	if (new != NULL)
		*rvp = 0;
	else if (errno == EFMD_LOG_ROTBUSY)
		*rvp = FMD_ADM_ERR_ROTBUSY;
	else
		*rvp = FMD_ADM_ERR_ROTFAIL;

	return (TRUE);
}

bool_t
fmd_adm_caserepair_1_svc(char *uuid, int *rvp, struct svc_req *req)
{
	fmd_case_t *cp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
		err = FMD_ADM_ERR_CASESRCH;
	else if (fmd_case_repair(cp) != 0) {
		err = errno == EFMD_CASE_OWNER ?
		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
	}

	if (cp != NULL)
		fmd_case_rele(cp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_caseacquit_1_svc(char *uuid, int *rvp, struct svc_req *req)
{
	fmd_case_t *cp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
		err = FMD_ADM_ERR_CASESRCH;
	else if (fmd_case_acquit(cp) != 0) {
		err = errno == EFMD_CASE_OWNER ?
		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
	}

	if (cp != NULL)
		fmd_case_rele(cp);

	*rvp = err;
	return (TRUE);
}

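/*
 * Callback applied to every case in the case hash: pack the UUID of each
 * solved, visible case into the reply buffer using the same doubling string
 * table scheme as fmd_adm_rsrclist_asru() above.
 */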
void
fmd_adm_caselist_case(fmd_case_t *cp, void *arg)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	struct fmd_rpc_caselist *rcl = arg;
	size_t uuid_len, buf_len;
	void *p;

	if (rcl->rcl_err != 0)
		return;

	/*
	 * skip invisible cases
	 */
	if (cip->ci_flags & FMD_CF_INVISIBLE)
		return;

	/*
	 * Lock the case and reallocate rcl_buf[] to be large enough to hold
	 * another string, doubling it as needed. Then copy the new string
	 * on to the end, and increment rcl_len to indicate the used space.
	 */
	if (!(cip->ci_flags & FMD_CF_SOLVED))
		return;

	(void) pthread_mutex_lock(&cip->ci_lock);

	uuid_len = cip->ci_uuidlen + 1;

	while (rcl->rcl_len + uuid_len > rcl->rcl_buf.rcl_buf_len) {
		if (rcl->rcl_buf.rcl_buf_len != 0)
			buf_len = rcl->rcl_buf.rcl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rcl->rcl_buf.rcl_buf_val, buf_len)) != NULL) {
			bzero((char *)p + rcl->rcl_buf.rcl_buf_len,
			    buf_len - rcl->rcl_buf.rcl_buf_len);
			rcl->rcl_buf.rcl_buf_val = p;
			rcl->rcl_buf.rcl_buf_len = buf_len;
		} else {
			rcl->rcl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rcl->rcl_err == 0) {
		bcopy(cip->ci_uuid, (char *)rcl->rcl_buf.rcl_buf_val +
		    rcl->rcl_len, uuid_len);
		rcl->rcl_len += uuid_len;
		rcl->rcl_cnt++;
	}

	(void) pthread_mutex_unlock(&cip->ci_lock);
}

bool_t
fmd_adm_caselist_1_svc(struct fmd_rpc_caselist *rvp, struct svc_req *req)
{
	rvp->rcl_buf.rcl_buf_len = 0;
	rvp->rcl_buf.rcl_buf_val = NULL;
	rvp->rcl_len = 0;
	rvp->rcl_cnt = 0;
	rvp->rcl_err = 0;

	if (fmd_rpc_deny(req))
		rvp->rcl_err = FMD_ADM_ERR_PERM;
	else
		fmd_case_hash_apply(fmd.d_cases, fmd_adm_caselist_case, rvp);

	return (TRUE);
}

bool_t
fmd_adm_caseinfo_1_svc(char *uuid, struct fmd_rpc_caseinfo *rvp,
    struct svc_req *req)
{
	fmd_case_t *cp;
	nvlist_t *nvl;
	int err = 0;

	bzero(rvp, sizeof (struct fmd_rpc_caseinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rci_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL) {
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	if (!(((fmd_case_impl_t *)cp)->ci_flags & FMD_CF_SOLVED)) {
		fmd_case_rele(cp);
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	nvl = fmd_case_mkevent(cp, FM_LIST_SUSPECT_CLASS);

	err = nvlist_pack(nvl, &rvp->rci_evbuf.rci_evbuf_val,
	    &rvp->rci_evbuf.rci_evbuf_len, NV_ENCODE_XDR, 0);

	nvlist_free(nvl);

	if (err != 0)
		rvp->rci_err = FMD_ADM_ERR_NOMEM;

	fmd_case_rele(cp);

	return (TRUE);
}

/*ARGSUSED*/
static void
fmd_adm_xprtlist_one(fmd_idspace_t *ids, id_t id, void *arg)
{
	struct fmd_rpc_xprtlist *rvp = arg;

	if (rvp->rxl_len < rvp->rxl_buf.rxl_buf_len)
		rvp->rxl_buf.rxl_buf_val[rvp->rxl_len++] = id;
}

bool_t
fmd_adm_xprtlist_1_svc(struct fmd_rpc_xprtlist *rvp, struct svc_req *req)
{
	if (fmd_rpc_deny(req)) {
		rvp->rxl_buf.rxl_buf_len = 0;
		rvp->rxl_buf.rxl_buf_val = NULL;
		rvp->rxl_len = 0;
		rvp->rxl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/*
	 * Since we're taking a snapshot of the transports, and these could
	 * change after we return our result, there's no need to hold any kind
	 * of lock between retrieving ids_count and taking the snapshot. We'll
	 * just capture up to a maximum of whatever ids_count value we sampled.
	 */
	rvp->rxl_buf.rxl_buf_len = fmd.d_xprt_ids->ids_count;
	rvp->rxl_buf.rxl_buf_val = malloc(sizeof (int32_t) *
	    rvp->rxl_buf.rxl_buf_len);
	rvp->rxl_len = 0;
	rvp->rxl_err = 0;

	if (rvp->rxl_buf.rxl_buf_val == NULL) {
		rvp->rxl_err = FMD_ADM_ERR_NOMEM;
		return (TRUE);
	}

	fmd_idspace_apply(fmd.d_xprt_ids, fmd_adm_xprtlist_one, rvp);
	return (TRUE);
}

bool_t
fmd_adm_xprtstat_1_svc(int32_t id,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_xprt_impl_t *xip;
	fmd_stat_t *sp, *ep, *cp;

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_xprt_stat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_xprt_stat_t) /
	    sizeof (fmd_stat_t);
	rms->rms_err = 0;

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		return (TRUE);
	}

	if ((xip = fmd_idspace_hold(fmd.d_xprt_ids, id)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_XPRTSRCH;
		return (TRUE);
	}

	/*
	 * Grab the stats lock and bcopy the entire transport stats array in
	 * one shot. Then go back through and duplicate any string values.
	 */
	(void) pthread_mutex_lock(&xip->xi_stats_lock);

	sp = (fmd_stat_t *)xip->xi_stats;
	ep = sp + rms->rms_buf.rms_buf_len;
	cp = rms->rms_buf.rms_buf_val;

	bcopy(sp, cp, sizeof (fmd_xprt_stat_t));

	for (; sp < ep; sp++, cp++) {
		if (sp->fmds_type == FMD_TYPE_STRING &&
		    sp->fmds_value.str != NULL)
			cp->fmds_value.str = strdup(sp->fmds_value.str);
	}

	(void) pthread_mutex_unlock(&xip->xi_stats_lock);
	fmd_idspace_rele(fmd.d_xprt_ids, id);

	return (TRUE);
}

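/*
 * Free the results of any of the RPC calls above once the reply has been
 * sent: xdr_free() releases the XDR-allocated result data and svc_done()
 * lets the RPC layer release the resources used to service the request.
 */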
int
fmd_adm_1_freeresult(SVCXPRT *xprt, xdrproc_t proc, caddr_t data)
{
	xdr_free(proc, data);
	svc_done(xprt);
	return (TRUE);
}

/*
 * Custom XDR routine for our API structure fmd_stat_t. This function must
 * match the definition of fmd_stat_t in <fmd_api.h> and must also match
 * the corresponding routine in usr/src/lib/fm/libfmd_adm/common/fmd_adm.c.
 */
bool_t
xdr_fmd_stat(XDR *xp, fmd_stat_t *sp)
{
	bool_t rv = TRUE;

	rv &= xdr_opaque(xp, sp->fmds_name, sizeof (sp->fmds_name));
	rv &= xdr_u_int(xp, &sp->fmds_type);
	rv &= xdr_opaque(xp, sp->fmds_desc, sizeof (sp->fmds_desc));

	switch (sp->fmds_type) {
	case FMD_TYPE_BOOL:
		rv &= xdr_int(xp, &sp->fmds_value.bool);
		break;
	case FMD_TYPE_INT32:
		rv &= xdr_int32_t(xp, &sp->fmds_value.i32);
		break;
	case FMD_TYPE_UINT32:
		rv &= xdr_uint32_t(xp, &sp->fmds_value.ui32);
		break;
	case FMD_TYPE_INT64:
		rv &= xdr_int64_t(xp, &sp->fmds_value.i64);
		break;
	case FMD_TYPE_UINT64:
	case FMD_TYPE_TIME:
	case FMD_TYPE_SIZE:
		rv &= xdr_uint64_t(xp, &sp->fmds_value.ui64);
		break;
	case FMD_TYPE_STRING:
		rv &= xdr_string(xp, &sp->fmds_value.str, ~0);
		break;
	}

	return (rv);
}