/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <strings.h>
#include <limits.h>
#include <unistd.h>
#include <stdlib.h>
#include <alloca.h>

#include <fmd_rpc_adm.h>
#include <fmd_rpc.h>
#include <fmd_module.h>
#include <fmd_ustat.h>
#include <fmd_error.h>
#include <fmd_asru.h>
#include <fmd_ckpt.h>
#include <fmd_case.h>
#include <fmd_fmri.h>
#include <fmd_idspace.h>
#include <fmd_xprt.h>

#include <fmd.h>

bool_t
fmd_adm_modinfo_1_svc(struct fmd_rpc_modlist *rvp, struct svc_req *req)
{
	struct fmd_rpc_modinfo *rmi;
	fmd_module_t *mp;

	rvp->rml_list = NULL;
	rvp->rml_err = 0;
	rvp->rml_len = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rml_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	(void) pthread_mutex_lock(&fmd.d_mod_lock);

	for (mp = fmd_list_next(&fmd.d_mod_list);
	    mp != NULL; mp = fmd_list_next(mp)) {

		if ((rmi = malloc(sizeof (struct fmd_rpc_modinfo))) == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}

		fmd_module_lock(mp);

		/*
		 * If mod_info is NULL, the module is in the middle of
		 * loading: do not report its presence to observability
		 * tools yet.
		 */
		if (mp->mod_info == NULL) {
			fmd_module_unlock(mp);
			free(rmi);
			continue;
		}

		rmi->rmi_name = strdup(mp->mod_name);
		rmi->rmi_desc = strdup(mp->mod_info->fmdi_desc);
		rmi->rmi_vers = strdup(mp->mod_info->fmdi_vers);
		rmi->rmi_faulty = mp->mod_error != 0;
		rmi->rmi_next = rvp->rml_list;

		fmd_module_unlock(mp);
		rvp->rml_list = rmi;
		rvp->rml_len++;

		if (rmi->rmi_desc == NULL || rmi->rmi_vers == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	(void) pthread_mutex_unlock(&fmd.d_mod_lock);
	return (TRUE);
}

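/*
 * Return a snapshot of the statistics maintained by the named module itself,
 * as opposed to the daemon-maintained statistics for that module, which are
 * returned by fmd_adm_moddstat_1_svc() below.
 */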
bool_t
fmd_adm_modcstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_ustat_snap_t snap;
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	if (fmd_modstat_snapshot(mp, &snap) == 0) {
		rms->rms_buf.rms_buf_val = snap.uss_buf;
		rms->rms_buf.rms_buf_len = snap.uss_len;
	} else if (errno == EFMD_HDL_ABORT) {
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	} else
		rms->rms_err = FMD_ADM_ERR_NOMEM;

	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_moddstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_modstat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_modstat_t) / sizeof (fmd_stat_t);

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		fmd_module_rele(mp);
		return (TRUE);
	}

	/*
	 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING stats
	 * are present in mp->mod_stats.  We don't use any for the daemon-
	 * maintained stats and provide this function in order to reduce the
	 * overhead of the fmstat(1M) default view, where these minimal stats
	 * must be retrieved for all of the active modules.
	 */
	(void) pthread_mutex_lock(&mp->mod_stats_lock);

	if (mp->mod_stats != NULL) {
		mp->mod_stats->ms_snaptime.fmds_value.ui64 = gethrtime();
		bcopy(mp->mod_stats, rms->rms_buf.rms_buf_val,
		    sizeof (fmd_modstat_t));
	} else {
		free(rms->rms_buf.rms_buf_val);
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	}

	(void) pthread_mutex_unlock(&mp->mod_stats_lock);
	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_modgstat_1_svc(struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	const size_t size = sizeof (fmd_statistics_t);

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
	} else if ((rms->rms_buf.rms_buf_val = malloc(size)) != NULL) {
		/*
		 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING
		 * stats are present in fmd.d_stats (see definition in fmd.c).
		 */
		(void) pthread_mutex_lock(&fmd.d_stats_lock);
		bcopy(fmd.d_stats, rms->rms_buf.rms_buf_val, size);
		(void) pthread_mutex_unlock(&fmd.d_stats_lock);
		rms->rms_buf.rms_buf_len = size / sizeof (fmd_stat_t);
		rms->rms_err = 0;
	} else {
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_NOMEM;
	}

	return (TRUE);
}

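/*
 * Load the module at the specified path.  A pathname ending in ".so" is
 * loaded using the rtld module ops; any other pathname is loaded using the
 * proc module ops.
 */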
bool_t
fmd_adm_modload_1_svc(char *path, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	const char *p;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/*
	 * Before we endure the expense of constructing a module and attempting
	 * to load it, do a quick check to see if the pathname is valid.
	 */
	if (access(path, F_OK) != 0) {
		*rvp = FMD_ADM_ERR_MODNOENT;
		return (TRUE);
	}

	if ((p = strrchr(path, '.')) != NULL && strcmp(p, ".so") == 0)
		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_rtld_ops);
	else
		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_proc_ops);

	if (mp == NULL) {
		switch (errno) {
		case EFMD_MOD_LOADED:
			err = FMD_ADM_ERR_MODEXIST;
			break;
		case EFMD_MOD_INIT:
			err = FMD_ADM_ERR_MODINIT;
			break;
		default:
			err = FMD_ADM_ERR_MODLOAD;
			break;
		}
	}

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modunload_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modreset_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (err == 0)
		fmd_ckpt_delete(mp); /* erase any saved checkpoints */

	if (err == 0 && fmd_modhash_load(fmd.d_mod_hash,
	    mp->mod_path, mp->mod_ops) == NULL) {
		if (errno == EFMD_MOD_INIT)
			err = FMD_ADM_ERR_MODINIT;
		else
			err = FMD_ADM_ERR_MODLOAD;
	}

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modgc_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else {
		fmd_module_gc(mp);
		fmd_module_rele(mp);
	}

	*rvp = err;
	return (TRUE);
}

/*
 * Unlike our other RPC callbacks, fmd_adm_rsrclist_1 can return large amounts
 * of data that may exceed the underlying RPC transport buffer size if the
 * resource cache is heavily populated and/or all resources are requested.
 * To minimize the likelihood of running out of RPC buffer space and having
 * to fail the client request, fmd_adm_rsrclist_1 returns a snapshot of the
 * relevant FMRI strings only: the client can use fmd_adm_rsrcinfo_1 on an
 * individual FMRI if more information is needed.  To further reduce the XDR
 * overhead, the string list is represented as XDR-opaque data where the
 * entire list is returned as a string table (e.g. "fmriA\0fmriB\0...").
 */
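/*
 * For example, a client that receives rrl_buf along with the used length
 * rrl_len could walk the packed string table with a sketch like this (the
 * names here are illustrative, not part of the protocol):
 *
 *	const char *s = rrl_buf;
 *	while (s < rrl_buf + rrl_len) {
 *		process_fmri(s);		(one NUL-terminated FMRI)
 *		s += strlen(s) + 1;
 *	}
 */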
static void
fmd_adm_rsrclist_asru(fmd_asru_t *ap, void *arg)
{
	struct fmd_rpc_rsrclist *rrl = arg;
	size_t name_len, buf_len;
	void *p;

	/*
	 * Skip the ASRU if this fault is marked as invisible.  If rrl_all is
	 * false, we take a quick look at asru_flags with no lock held to see
	 * if the ASRU is not faulty.  If so, we don't want to report it by
	 * default and can just skip this ASRU.  This helps keep overhead low
	 * in the common case, as the call to fmd_asru_getstate() can be
	 * expensive depending on the scheme.
	 */
	if (ap->asru_flags & FMD_ASRU_INVISIBLE)
		return;
	if (rrl->rrl_all == B_FALSE && !(ap->asru_flags & FMD_ASRU_FAULTY))
		return;

	if (rrl->rrl_err != 0 || fmd_asru_getstate(ap) == 0)
		return; /* error has occurred or resource is in 'ok' state */

	/*
	 * Lock the ASRU and reallocate rrl_buf[] to be large enough to hold
	 * another string, doubling it as needed.  Then copy the new string
	 * on to the end, and increment rrl_len to indicate the used space.
	 */
	(void) pthread_mutex_lock(&ap->asru_lock);
	name_len = strlen(ap->asru_name) + 1;

	while (rrl->rrl_len + name_len > rrl->rrl_buf.rrl_buf_len) {
		if (rrl->rrl_buf.rrl_buf_len != 0)
			buf_len = rrl->rrl_buf.rrl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rrl->rrl_buf.rrl_buf_val, buf_len)) != NULL) {
			bzero((char *)p + rrl->rrl_buf.rrl_buf_len,
			    buf_len - rrl->rrl_buf.rrl_buf_len);
			rrl->rrl_buf.rrl_buf_val = p;
			rrl->rrl_buf.rrl_buf_len = buf_len;
		} else {
			rrl->rrl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rrl->rrl_err == 0) {
		bcopy(ap->asru_name, (char *)rrl->rrl_buf.rrl_buf_val +
		    rrl->rrl_len, name_len);
		rrl->rrl_len += name_len;
		rrl->rrl_cnt++;
	}

	(void) pthread_mutex_unlock(&ap->asru_lock);
}

bool_t
fmd_adm_rsrclist_1_svc(bool_t all,
    struct fmd_rpc_rsrclist *rvp, struct svc_req *req)
{
	rvp->rrl_buf.rrl_buf_len = 0;
	rvp->rrl_buf.rrl_buf_val = NULL;
	rvp->rrl_len = 0;
	rvp->rrl_cnt = 0;
	rvp->rrl_err = 0;
	rvp->rrl_all = all;

	if (fmd_rpc_deny(req))
		rvp->rrl_err = FMD_ADM_ERR_PERM;
	else
		fmd_asru_hash_apply(fmd.d_asrus, fmd_adm_rsrclist_asru, rvp);

	return (TRUE);
}

bool_t
fmd_adm_rsrcinfo_1_svc(char *fmri,
    struct fmd_rpc_rsrcinfo *rvp, struct svc_req *req)
{
	fmd_asru_t *ap;
	fmd_case_impl_t *cip;
	int state;

	bzero(rvp, sizeof (struct fmd_rpc_rsrcinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rri_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((ap = fmd_asru_hash_lookup_name(fmd.d_asrus, fmri)) == NULL) {
		rvp->rri_err = FMD_ADM_ERR_RSRCSRCH;
		return (TRUE);
	}

	state = fmd_asru_getstate(ap);
	(void) pthread_mutex_lock(&ap->asru_lock);
	cip = (fmd_case_impl_t *)ap->asru_case;

	rvp->rri_fmri = strdup(ap->asru_name);
	rvp->rri_uuid = strdup(ap->asru_uuid);
	rvp->rri_case = cip ? strdup(cip->ci_uuid) : NULL;
	rvp->rri_faulty = (state & FMD_ASRU_FAULTY) != 0;
	rvp->rri_unusable = (state & FMD_ASRU_UNUSABLE) != 0;
	rvp->rri_invisible = (ap->asru_flags & FMD_ASRU_INVISIBLE) != 0;

	(void) pthread_mutex_unlock(&ap->asru_lock);
	fmd_asru_hash_release(fmd.d_asrus, ap);

	if (rvp->rri_fmri == NULL || rvp->rri_uuid == NULL)
		rvp->rri_err = FMD_ADM_ERR_NOMEM;

	return (TRUE);
}

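/*
 * Common implementation for the repair, replace, and acquit entry points
 * below: apply fmd_asru_repaired() to every resource cache entry whose ASRU,
 * label, FRU, or resource FMRI matches the specified name, passing along the
 * reason code and (for acquittal) the optional case uuid.
 */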
static void
fmd_adm_do_repair(char *name, struct svc_req *req, int *errp, uint8_t reason,
    char *uuid)
{
	if (fmd_rpc_deny(req))
		*errp = FMD_ADM_ERR_PERM;
	else {
		fmd_asru_rep_arg_t fara;
		int err = FARA_ERR_RSRCNOTF;

		fara.fara_reason = reason;
		fara.fara_rval = &err;
		fara.fara_uuid = uuid;
		fara.fara_bywhat = FARA_BY_ASRU;
		fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		fara.fara_bywhat = FARA_BY_LABEL;
		fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		fara.fara_bywhat = FARA_BY_FRU;
		fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		fara.fara_bywhat = FARA_BY_RSRC;
		fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		if (err == FARA_ERR_RSRCNOTR)
			*errp = FMD_ADM_ERR_RSRCNOTR;
		else if (err == FARA_OK)
			*errp = 0;
	}
}

bool_t
fmd_adm_rsrcflush_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	/*
	 * If anyone does an fmadm flush command, discard any resolved
	 * cases that were being retained for historic diagnosis.
	 */
	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else {
		fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
		    fmd_asru_flush, &err);
		fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
		    fmd_asru_flush, &err);
		fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
		    fmd_asru_flush, &err);
		fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
		    fmd_asru_flush, &err);
	}
	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_rsrcrepaired_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPAIRED, NULL);
	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_rsrcreplaced_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPLACED, NULL);
	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_rsrcacquit_1_svc(char *name, char *uuid, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	fmd_adm_do_repair(name, req, &err, FMD_ASRU_ACQUITTED, uuid);
	*rvp = err;
	return (TRUE);
}

static void
fmd_adm_serdinfo_eng(fmd_serd_eng_t *sgp, void *arg)
{
	struct fmd_rpc_serdlist *rsl = arg;
	struct fmd_rpc_serdinfo *rsi = malloc(sizeof (struct fmd_rpc_serdinfo));

	uint64_t old, now = fmd_time_gethrtime();
	const fmd_serd_elem_t *oep;

	if (rsi == NULL || (rsi->rsi_name = strdup(sgp->sg_name)) == NULL) {
		rsl->rsl_err = FMD_ADM_ERR_NOMEM;
		free(rsi);
		return;
	}

	if ((oep = fmd_list_next(&sgp->sg_list)) != NULL)
		old = fmd_event_hrtime(oep->se_event);
	else
		old = now;

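	/*
	 * Report how long ago the engine's oldest event was recorded,
	 * allowing for the possibility that the unsigned hrtime difference
	 * has wrapped around.
	 */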
	rsi->rsi_delta = now >= old ? now - old : (UINT64_MAX - old) + now + 1;
	rsi->rsi_count = sgp->sg_count;
	rsi->rsi_fired = fmd_serd_eng_fired(sgp) != 0;
	rsi->rsi_n = sgp->sg_n;
	rsi->rsi_t = sgp->sg_t;
	rsi->rsi_next = rsl->rsl_list;

	rsl->rsl_list = rsi;
	rsl->rsl_len++;
}

bool_t
fmd_adm_serdinfo_1_svc(char *name,
    struct fmd_rpc_serdlist *rvp, struct svc_req *req)
{
	fmd_module_t *mp;

	rvp->rsl_list = NULL;
	rvp->rsl_err = 0;
	rvp->rsl_len = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rsl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rvp->rsl_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);
	fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdinfo_eng, rvp);
	fmd_module_unlock(mp);

	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_serdreset_1_svc(char *mname, char *sname, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	fmd_serd_eng_t *sgp;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
		*rvp = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
		if (fmd_serd_eng_fired(sgp)) {
			err = FMD_ADM_ERR_SERDFIRED;
		} else {
			fmd_serd_eng_reset(sgp);
			fmd_module_setdirty(mp);
		}
	} else
		err = FMD_ADM_ERR_SERDSRCH;

	fmd_module_unlock(mp);
	fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_logrotate_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_log_t **lpp, *old, *new;
	int try = 1, trylimit = 1;

	hrtime_t nsec = 0;
	timespec_t tv;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if (strcmp(name, "errlog") == 0)
		lpp = &fmd.d_errlog;
	else if (strcmp(name, "fltlog") == 0)
		lpp = &fmd.d_fltlog;
	else {
		*rvp = FMD_ADM_ERR_ROTSRCH;
		return (TRUE);
	}

	(void) fmd_conf_getprop(fmd.d_conf, "log.tryrotate", &trylimit);
	(void) fmd_conf_getprop(fmd.d_conf, "log.waitrotate", &nsec);

	tv.tv_sec = nsec / NANOSEC;
	tv.tv_nsec = nsec % NANOSEC;

	/*
	 * To rotate a log file, grab d_log_lock as writer to make sure no
	 * one else can discover the current log pointer.  Then try to rotate
	 * the log.  If we're successful, release the old log pointer.
	 */
	do {
		if (try > 1)
			(void) nanosleep(&tv, NULL); /* wait for checkpoints */

		(void) pthread_rwlock_wrlock(&fmd.d_log_lock);
		old = *lpp;

		if ((new = fmd_log_rotate(old)) != NULL) {
			fmd_log_rele(old);
			*lpp = new;
		}

		(void) pthread_rwlock_unlock(&fmd.d_log_lock);

	} while (new == NULL && errno == EFMD_LOG_ROTBUSY && try++ < trylimit);

	if (new != NULL)
		*rvp = 0;
	else if (errno == EFMD_LOG_ROTBUSY)
		*rvp = FMD_ADM_ERR_ROTBUSY;
	else
		*rvp = FMD_ADM_ERR_ROTFAIL;

	return (TRUE);
}

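/*
 * Look up the case with the specified uuid and mark it repaired.  A failure
 * with errno EFMD_CASE_OWNER is reported to the client as
 * FMD_ADM_ERR_CASEXPRT; any other failure is reported as
 * FMD_ADM_ERR_CASEOPEN.  fmd_adm_caseacquit_1_svc() below is identical
 * except that the case is acquitted instead.
 */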
bool_t
fmd_adm_caserepair_1_svc(char *uuid, int *rvp, struct svc_req *req)
{
	fmd_case_t *cp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
		err = FMD_ADM_ERR_CASESRCH;
	else if (fmd_case_repair(cp) != 0) {
		err = errno == EFMD_CASE_OWNER ?
		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
	}

	if (cp != NULL)
		fmd_case_rele(cp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_caseacquit_1_svc(char *uuid, int *rvp, struct svc_req *req)
{
	fmd_case_t *cp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
		err = FMD_ADM_ERR_CASESRCH;
	else if (fmd_case_acquit(cp) != 0) {
		err = errno == EFMD_CASE_OWNER ?
		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
	}

	if (cp != NULL)
		fmd_case_rele(cp);

	*rvp = err;
	return (TRUE);
}

void
fmd_adm_caselist_case(fmd_case_t *cp, void *arg)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	struct fmd_rpc_caselist *rcl = arg;
	size_t uuid_len, buf_len;
	void *p;

	if (rcl->rcl_err != 0)
		return;

	/*
	 * skip invisible cases
	 */
	if (cip->ci_flags & FMD_CF_INVISIBLE)
		return;

	/*
	 * Lock the case and reallocate rcl_buf[] to be large enough to hold
	 * another string, doubling it as needed.  Then copy the new string
	 * on to the end, and increment rcl_len to indicate the used space.
	 */
	if (!(cip->ci_flags & FMD_CF_SOLVED))
		return;

	(void) pthread_mutex_lock(&cip->ci_lock);

	uuid_len = cip->ci_uuidlen + 1;

	while (rcl->rcl_len + uuid_len > rcl->rcl_buf.rcl_buf_len) {
		if (rcl->rcl_buf.rcl_buf_len != 0)
			buf_len = rcl->rcl_buf.rcl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rcl->rcl_buf.rcl_buf_val, buf_len)) != NULL) {
			bzero((char *)p + rcl->rcl_buf.rcl_buf_len,
			    buf_len - rcl->rcl_buf.rcl_buf_len);
			rcl->rcl_buf.rcl_buf_val = p;
			rcl->rcl_buf.rcl_buf_len = buf_len;
		} else {
			rcl->rcl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rcl->rcl_err == 0) {
		bcopy(cip->ci_uuid, (char *)rcl->rcl_buf.rcl_buf_val +
		    rcl->rcl_len, uuid_len);
		rcl->rcl_len += uuid_len;
		rcl->rcl_cnt++;
	}

	(void) pthread_mutex_unlock(&cip->ci_lock);
}

bool_t
fmd_adm_caselist_1_svc(struct fmd_rpc_caselist *rvp, struct svc_req *req)
{
	rvp->rcl_buf.rcl_buf_len = 0;
	rvp->rcl_buf.rcl_buf_val = NULL;
	rvp->rcl_len = 0;
	rvp->rcl_cnt = 0;
	rvp->rcl_err = 0;

	if (fmd_rpc_deny(req))
		rvp->rcl_err = FMD_ADM_ERR_PERM;
	else
		fmd_case_hash_apply(fmd.d_cases, fmd_adm_caselist_case, rvp);

	return (TRUE);
}

bool_t
fmd_adm_caseinfo_1_svc(char *uuid, struct fmd_rpc_caseinfo *rvp,
    struct svc_req *req)
{
	fmd_case_t *cp;
	nvlist_t *nvl;
	int err = 0;

	bzero(rvp, sizeof (struct fmd_rpc_caseinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rci_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL) {
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	if (!(((fmd_case_impl_t *)cp)->ci_flags & FMD_CF_SOLVED)) {
		fmd_case_rele(cp);
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	nvl = fmd_case_mkevent(cp, FM_LIST_SUSPECT_CLASS);

	err = nvlist_pack(nvl, &rvp->rci_evbuf.rci_evbuf_val,
	    &rvp->rci_evbuf.rci_evbuf_len, NV_ENCODE_XDR, 0);

	nvlist_free(nvl);

	if (err != 0)
		rvp->rci_err = FMD_ADM_ERR_NOMEM;

	fmd_case_rele(cp);

	return (TRUE);
}

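/*
 * Callback for fmd_idspace_apply(): append each transport id to the snapshot
 * buffer, up to the capacity allocated by fmd_adm_xprtlist_1_svc() below.
 */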
/*ARGSUSED*/
static void
fmd_adm_xprtlist_one(fmd_idspace_t *ids, id_t id, void *arg)
{
	struct fmd_rpc_xprtlist *rvp = arg;

	if (rvp->rxl_len < rvp->rxl_buf.rxl_buf_len)
		rvp->rxl_buf.rxl_buf_val[rvp->rxl_len++] = id;
}

bool_t
fmd_adm_xprtlist_1_svc(struct fmd_rpc_xprtlist *rvp, struct svc_req *req)
{
	if (fmd_rpc_deny(req)) {
		rvp->rxl_buf.rxl_buf_len = 0;
		rvp->rxl_buf.rxl_buf_val = NULL;
		rvp->rxl_len = 0;
		rvp->rxl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/*
	 * Since we're taking a snapshot of the transports, and these could
	 * change after we return our result, there's no need to hold any kind
	 * of lock between retrieving ids_count and taking the snapshot.  We'll
	 * just capture up to a maximum of whatever ids_count value we sampled.
	 */
	rvp->rxl_buf.rxl_buf_len = fmd.d_xprt_ids->ids_count;
	rvp->rxl_buf.rxl_buf_val = malloc(sizeof (int32_t) *
	    rvp->rxl_buf.rxl_buf_len);
	rvp->rxl_len = 0;
	rvp->rxl_err = 0;

	if (rvp->rxl_buf.rxl_buf_val == NULL) {
		rvp->rxl_err = FMD_ADM_ERR_NOMEM;
		return (TRUE);
	}

	fmd_idspace_apply(fmd.d_xprt_ids, fmd_adm_xprtlist_one, rvp);
	return (TRUE);
}

bool_t
fmd_adm_xprtstat_1_svc(int32_t id,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_xprt_impl_t *xip;
	fmd_stat_t *sp, *ep, *cp;

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_xprt_stat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_xprt_stat_t) /
	    sizeof (fmd_stat_t);
	rms->rms_err = 0;

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		return (TRUE);
	}

	if ((xip = fmd_idspace_hold(fmd.d_xprt_ids, id)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_XPRTSRCH;
		return (TRUE);
	}

	/*
	 * Grab the stats lock and bcopy the entire transport stats array in
	 * one shot.  Then go back through and duplicate any string values.
	 */
	(void) pthread_mutex_lock(&xip->xi_stats_lock);

	sp = (fmd_stat_t *)xip->xi_stats;
	ep = sp + rms->rms_buf.rms_buf_len;
	cp = rms->rms_buf.rms_buf_val;

	bcopy(sp, cp, sizeof (fmd_xprt_stat_t));

	for (; sp < ep; sp++, cp++) {
		if (sp->fmds_type == FMD_TYPE_STRING &&
		    sp->fmds_value.str != NULL)
			cp->fmds_value.str = strdup(sp->fmds_value.str);
	}

	(void) pthread_mutex_unlock(&xip->xi_stats_lock);
	fmd_idspace_rele(fmd.d_xprt_ids, id);

	return (TRUE);
}

int
fmd_adm_1_freeresult(SVCXPRT *xprt, xdrproc_t proc, caddr_t data)
{
	xdr_free(proc, data);
	svc_done(xprt);
	return (TRUE);
}

/*
 * Custom XDR routine for our API structure fmd_stat_t.  This function must
 * match the definition of fmd_stat_t in <fmd_api.h> and must also match the
 * corresponding routine in usr/src/lib/fm/libfmd_adm/common/fmd_adm.c.
 */
bool_t
xdr_fmd_stat(XDR *xp, fmd_stat_t *sp)
{
	bool_t rv = TRUE;

	rv &= xdr_opaque(xp, sp->fmds_name, sizeof (sp->fmds_name));
	rv &= xdr_u_int(xp, &sp->fmds_type);
	rv &= xdr_opaque(xp, sp->fmds_desc, sizeof (sp->fmds_desc));

	switch (sp->fmds_type) {
	case FMD_TYPE_BOOL:
		rv &= xdr_int(xp, &sp->fmds_value.bool);
		break;
	case FMD_TYPE_INT32:
		rv &= xdr_int32_t(xp, &sp->fmds_value.i32);
		break;
	case FMD_TYPE_UINT32:
		rv &= xdr_uint32_t(xp, &sp->fmds_value.ui32);
		break;
	case FMD_TYPE_INT64:
		rv &= xdr_int64_t(xp, &sp->fmds_value.i64);
		break;
	case FMD_TYPE_UINT64:
	case FMD_TYPE_TIME:
	case FMD_TYPE_SIZE:
		rv &= xdr_uint64_t(xp, &sp->fmds_value.ui64);
		break;
	case FMD_TYPE_STRING:
		rv &= xdr_string(xp, &sp->fmds_value.str, ~0);
		break;
	}

	return (rv);
}