/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <strings.h>
#include <limits.h>
#include <unistd.h>
#include <stdlib.h>
#include <alloca.h>

#include <fmd_rpc_adm.h>
#include <fmd_rpc.h>
#include <fmd_module.h>
#include <fmd_ustat.h>
#include <fmd_error.h>
#include <fmd_asru.h>
#include <fmd_ckpt.h>
#include <fmd_case.h>
#include <fmd_fmri.h>
#include <fmd_idspace.h>
#include <fmd_xprt.h>

#include <fmd.h>

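/*
 * Return the list of loaded modules.  We walk fmd.d_mod_list and, for each
 * module that has finished loading, report its name, description, version,
 * and whether it has experienced an error, building up a linked list of
 * fmd_rpc_modinfo structures for the client.
 */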
bool_t
fmd_adm_modinfo_1_svc(struct fmd_rpc_modlist *rvp, struct svc_req *req)
{
	struct fmd_rpc_modinfo *rmi;
	fmd_module_t *mp;

	rvp->rml_list = NULL;
	rvp->rml_err = 0;
	rvp->rml_len = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rml_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	(void) pthread_mutex_lock(&fmd.d_mod_lock);

	for (mp = fmd_list_next(&fmd.d_mod_list);
	    mp != NULL; mp = fmd_list_next(mp)) {

		if ((rmi = malloc(sizeof (struct fmd_rpc_modinfo))) == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}

		fmd_module_lock(mp);

		/*
		 * If mod_info is NULL, the module is in the middle of
		 * loading: do not report its presence to observability
		 * tools yet.
		 */
		if (mp->mod_info == NULL) {
			fmd_module_unlock(mp);
			free(rmi);
			continue;
		}

		rmi->rmi_name = strdup(mp->mod_name);
		rmi->rmi_desc = strdup(mp->mod_info->fmdi_desc);
		rmi->rmi_vers = strdup(mp->mod_info->fmdi_vers);
		rmi->rmi_faulty = mp->mod_error != 0;
		rmi->rmi_next = rvp->rml_list;

		fmd_module_unlock(mp);
		rvp->rml_list = rmi;
		rvp->rml_len++;

		if (rmi->rmi_desc == NULL || rmi->rmi_vers == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	(void) pthread_mutex_unlock(&fmd.d_mod_lock);
	return (TRUE);
}

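/*
 * Return a statistics snapshot for the named module.  The buffer produced
 * by fmd_modstat_snapshot() is handed back to the client as an array of
 * fmd_stat_t structures.
 */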
bool_t
fmd_adm_modcstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_ustat_snap_t snap;
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	if (fmd_modstat_snapshot(mp, &snap) == 0) {
		rms->rms_buf.rms_buf_val = snap.uss_buf;
		rms->rms_buf.rms_buf_len = snap.uss_len;
	} else if (errno == EFMD_HDL_ABORT) {
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	} else
		rms->rms_err = FMD_ADM_ERR_NOMEM;

	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_moddstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_modstat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_modstat_t) / sizeof (fmd_stat_t);

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		fmd_module_rele(mp);
		return (TRUE);
	}

	/*
	 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING stats
	 * are present in mp->mod_stats.  We don't use any for the daemon-
	 * maintained stats and provide this function in order to reduce the
	 * overhead of the fmstat(1M) default view, where these minimal stats
	 * must be retrieved for all of the active modules.
	 */
	(void) pthread_mutex_lock(&mp->mod_stats_lock);

	if (mp->mod_stats != NULL) {
		mp->mod_stats->ms_snaptime.fmds_value.ui64 = gethrtime();
		bcopy(mp->mod_stats, rms->rms_buf.rms_buf_val,
		    sizeof (fmd_modstat_t));
	} else {
		free(rms->rms_buf.rms_buf_val);
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	}

	(void) pthread_mutex_unlock(&mp->mod_stats_lock);
	fmd_module_rele(mp);
	return (TRUE);
}

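/*
 * Return a snapshot of the global statistics maintained by the daemon itself
 * (fmd.d_stats), as opposed to the per-module statistics returned above.
 */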
bool_t
fmd_adm_modgstat_1_svc(struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	const size_t size = sizeof (fmd_statistics_t);

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
	} else if ((rms->rms_buf.rms_buf_val = malloc(size)) != NULL) {
		/*
		 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING
		 * stats are present in fmd.d_stats (see definition in fmd.c).
		 */
		(void) pthread_mutex_lock(&fmd.d_stats_lock);
		bcopy(fmd.d_stats, rms->rms_buf.rms_buf_val, size);
		(void) pthread_mutex_unlock(&fmd.d_stats_lock);
		rms->rms_buf.rms_buf_len = size / sizeof (fmd_stat_t);
		rms->rms_err = 0;
	} else {
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_NOMEM;
	}

	return (TRUE);
}

bool_t
fmd_adm_modload_1_svc(char *path, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	const char *p;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/*
	 * Before we endure the expense of constructing a module and
	 * attempting to load it, do a quick check to see if the pathname
	 * is valid.
	 */
	if (access(path, F_OK) != 0) {
		*rvp = FMD_ADM_ERR_MODNOENT;
		return (TRUE);
	}

	if ((p = strrchr(path, '.')) != NULL && strcmp(p, ".so") == 0)
		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_rtld_ops);
	else
		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_proc_ops);

	if (mp == NULL) {
		switch (errno) {
		case EFMD_MOD_LOADED:
			err = FMD_ADM_ERR_MODEXIST;
			break;
		case EFMD_MOD_INIT:
			err = FMD_ADM_ERR_MODINIT;
			break;
		default:
			err = FMD_ADM_ERR_MODLOAD;
			break;
		}
	}

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modunload_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modreset_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (err == 0)
		fmd_ckpt_delete(mp); /* erase any saved checkpoints */

	if (err == 0 && fmd_modhash_load(fmd.d_mod_hash,
	    mp->mod_path, mp->mod_ops) == NULL) {
		if (errno == EFMD_MOD_INIT)
			err = FMD_ADM_ERR_MODINIT;
		else
			err = FMD_ADM_ERR_MODLOAD;
	}

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modgc_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else {
		fmd_module_gc(mp);
		fmd_module_rele(mp);
	}

	*rvp = err;
	return (TRUE);
}

/*
 * Unlike our other RPC callbacks, fmd_adm_rsrclist_1 can return large amounts
 * of data that may exceed the underlying RPC transport buffer size if the
 * resource cache is heavily populated and/or all resources are requested.
 * To minimize the likelihood of running out of RPC buffer space and having to
 * fail the client request, fmd_adm_rsrclist_1 returns a snapshot of the
 * relevant FMRI strings only: the client can use fmd_adm_rsrcinfo_1 on an
 * individual FMRI if more information is needed.  To further reduce the XDR
 * overhead, the string list is represented as XDR-opaque data where the
 * entire list is returned as a string table (e.g. "fmriA\0fmriB\0...").
 */
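/*
 * A consumer of fmd_adm_rsrclist_1 can recover the individual FMRI strings
 * from the packed reply buffer with a walk along these lines:
 *
 *	const char *s;
 *	size_t off;
 *
 *	for (off = 0; off < rvp->rrl_len; off += strlen(s) + 1) {
 *		s = (const char *)rvp->rrl_buf.rrl_buf_val + off;
 *		(use the FMRI string s)
 *	}
 */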
static void
fmd_adm_rsrclist_asru(fmd_asru_t *ap, void *arg)
{
	struct fmd_rpc_rsrclist *rrl = arg;
	size_t name_len, buf_len;
	void *p;

	/*
	 * Skip the ASRU if this fault is marked as invisible.
	 * If rrl_all is false, we take a quick look at asru_flags with no
	 * lock held to see if the ASRU is not faulty.  If so, we don't want
	 * to report it by default and can just skip this ASRU.  This helps
	 * keep overhead low in the common case, as the call to
	 * fmd_asru_getstate() can be expensive depending on the scheme.
	 */

	if (ap->asru_flags & FMD_ASRU_INVISIBLE)
		return;
	if (rrl->rrl_all == B_FALSE && !(ap->asru_flags & FMD_ASRU_FAULTY))
		return;

	if (rrl->rrl_err != 0 || fmd_asru_getstate(ap) == 0)
		return; /* error has occurred or resource is in 'ok' state */

	/*
	 * Lock the ASRU and reallocate rrl_buf[] to be large enough to hold
	 * another string, doubling it as needed.  Then copy the new string
	 * on to the end, and increment rrl_len to indicate the used space.
	 */
	(void) pthread_mutex_lock(&ap->asru_lock);
	name_len = strlen(ap->asru_name) + 1;

	while (rrl->rrl_len + name_len > rrl->rrl_buf.rrl_buf_len) {
		if (rrl->rrl_buf.rrl_buf_len != 0)
			buf_len = rrl->rrl_buf.rrl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rrl->rrl_buf.rrl_buf_val, buf_len)) != NULL) {
			bzero((char *)p + rrl->rrl_buf.rrl_buf_len,
			    buf_len - rrl->rrl_buf.rrl_buf_len);
			rrl->rrl_buf.rrl_buf_val = p;
			rrl->rrl_buf.rrl_buf_len = buf_len;
		} else {
			rrl->rrl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rrl->rrl_err == 0) {
		bcopy(ap->asru_name, (char *)rrl->rrl_buf.rrl_buf_val +
		    rrl->rrl_len, name_len);
		rrl->rrl_len += name_len;
		rrl->rrl_cnt++;
	}

	(void) pthread_mutex_unlock(&ap->asru_lock);
}

bool_t
fmd_adm_rsrclist_1_svc(bool_t all,
    struct fmd_rpc_rsrclist *rvp, struct svc_req *req)
{
	rvp->rrl_buf.rrl_buf_len = 0;
	rvp->rrl_buf.rrl_buf_val = NULL;
	rvp->rrl_len = 0;
	rvp->rrl_cnt = 0;
	rvp->rrl_err = 0;
	rvp->rrl_all = all;

	if (fmd_rpc_deny(req))
		rvp->rrl_err = FMD_ADM_ERR_PERM;
	else
		fmd_asru_hash_apply(fmd.d_asrus, fmd_adm_rsrclist_asru, rvp);

	return (TRUE);
}

bool_t
fmd_adm_rsrcinfo_1_svc(char *fmri,
    struct fmd_rpc_rsrcinfo *rvp, struct svc_req *req)
{
	fmd_asru_t *ap;
	fmd_case_impl_t *cip;
	int state;

	bzero(rvp, sizeof (struct fmd_rpc_rsrcinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rri_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((ap = fmd_asru_hash_lookup_name(fmd.d_asrus, fmri)) == NULL) {
		rvp->rri_err = FMD_ADM_ERR_RSRCSRCH;
		return (TRUE);
	}

	state = fmd_asru_getstate(ap);
	(void) pthread_mutex_lock(&ap->asru_lock);
	cip = (fmd_case_impl_t *)ap->asru_case;

	rvp->rri_fmri = strdup(ap->asru_name);
	rvp->rri_uuid = strdup(ap->asru_uuid);
	rvp->rri_case = cip ? strdup(cip->ci_uuid) : NULL;
	rvp->rri_faulty = (state & FMD_ASRU_FAULTY) != 0;
	rvp->rri_unusable = (state & FMD_ASRU_UNUSABLE) != 0;
	rvp->rri_invisible = (ap->asru_flags & FMD_ASRU_INVISIBLE) != 0;

	(void) pthread_mutex_unlock(&ap->asru_lock);
	fmd_asru_hash_release(fmd.d_asrus, ap);

	if (rvp->rri_fmri == NULL || rvp->rri_uuid == NULL)
		rvp->rri_err = FMD_ADM_ERR_NOMEM;

	return (TRUE);
}

bool_t
fmd_adm_rsrcflush_1_svc(char *name, int *rvp, struct svc_req *req)
{
	if (fmd_rpc_deny(req))
		*rvp = FMD_ADM_ERR_PERM;
	else if (fmd_asru_hash_delete_name(fmd.d_asrus, name) != 0)
		*rvp = FMD_ADM_ERR_RSRCSRCH;
	else
		*rvp = 0;

	return (TRUE);
}

static void
fmd_adm_repair_containee(fmd_asru_t *ee, void *er)
{
	if ((ee->asru_flags & FMD_ASRU_FAULTY) &&
	    fmd_fmri_contains(er, ee->asru_fmri) > 0)
		(void) fmd_asru_clrflags(ee, FMD_ASRU_FAULTY, NULL, NULL);
}

bool_t
fmd_adm_rsrcrepair_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_asru_t *ap = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((ap = fmd_asru_hash_lookup_name(fmd.d_asrus, name)) == NULL)
		err = FMD_ADM_ERR_RSRCSRCH;
	else if (!fmd_asru_clrflags(ap, FMD_ASRU_FAULTY, NULL, NULL))
		err = FMD_ADM_ERR_RSRCNOTF;
	else {
		/*
		 * We've located the requested ASRU, and have repaired it.  Now
		 * traverse the ASRU cache, looking for any faulty entries that
		 * are contained by this one.  If we find any, repair them too.
		 */
		fmd_asru_hash_apply(fmd.d_asrus,
		    fmd_adm_repair_containee, ap->asru_fmri);
	}

	if (ap != NULL)
		fmd_asru_hash_release(fmd.d_asrus, ap);

	*rvp = err;
	return (TRUE);
}

static void
fmd_adm_serdinfo_eng(fmd_serd_eng_t *sgp, void *arg)
{
	struct fmd_rpc_serdlist *rsl = arg;
	struct fmd_rpc_serdinfo *rsi = malloc(sizeof (struct fmd_rpc_serdinfo));

	uint64_t old, now = fmd_time_gethrtime();
	const fmd_serd_elem_t *oep;

	if (rsi == NULL || (rsi->rsi_name = strdup(sgp->sg_name)) == NULL) {
		rsl->rsl_err = FMD_ADM_ERR_NOMEM;
		free(rsi);
		return;
	}

	if ((oep = fmd_list_next(&sgp->sg_list)) != NULL)
		old = fmd_event_hrtime(oep->se_event);
	else
		old = now;

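	/*
	 * Compute the time elapsed since the oldest event held by the SERD
	 * engine (zero if the engine holds no events).  If the event
	 * timestamp is somehow ahead of 'now', wrap the unsigned difference
	 * around UINT64_MAX rather than underflowing.
	 */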
	rsi->rsi_delta = now >= old ? now - old : (UINT64_MAX - old) + now + 1;
	rsi->rsi_count = sgp->sg_count;
	rsi->rsi_fired = fmd_serd_eng_fired(sgp) != 0;
	rsi->rsi_n = sgp->sg_n;
	rsi->rsi_t = sgp->sg_t;
	rsi->rsi_next = rsl->rsl_list;

	rsl->rsl_list = rsi;
	rsl->rsl_len++;
}

bool_t
fmd_adm_serdinfo_1_svc(char *name,
    struct fmd_rpc_serdlist *rvp, struct svc_req *req)
{
	fmd_module_t *mp;

	rvp->rsl_list = NULL;
	rvp->rsl_err = 0;
	rvp->rsl_len = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rsl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rvp->rsl_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);
	fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdinfo_eng, rvp);
	fmd_module_unlock(mp);

	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_serdreset_1_svc(char *mname, char *sname, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	fmd_serd_eng_t *sgp;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
		*rvp = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
		if (fmd_serd_eng_fired(sgp)) {
			err = FMD_ADM_ERR_SERDFIRED;
		} else {
			fmd_serd_eng_reset(sgp);
			fmd_module_setdirty(mp);
		}
	} else
		err = FMD_ADM_ERR_SERDSRCH;

	fmd_module_unlock(mp);
	fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_logrotate_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_log_t **lpp, *old, *new;
	int try = 1, trylimit = 1;

	hrtime_t nsec = 0;
	timespec_t tv;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if (strcmp(name, "errlog") == 0)
		lpp = &fmd.d_errlog;
	else if (strcmp(name, "fltlog") == 0)
		lpp = &fmd.d_fltlog;
	else {
		*rvp = FMD_ADM_ERR_ROTSRCH;
		return (TRUE);
	}

	(void) fmd_conf_getprop(fmd.d_conf, "log.tryrotate", &trylimit);
	(void) fmd_conf_getprop(fmd.d_conf, "log.waitrotate", &nsec);

	tv.tv_sec = nsec / NANOSEC;
	tv.tv_nsec = nsec % NANOSEC;

	/*
	 * To rotate a log file, grab d_log_lock as writer to make sure no
	 * one else can discover the current log pointer.  Then try to rotate
	 * the log.  If we're successful, release the old log pointer.
	 */
	do {
		if (try > 1)
			(void) nanosleep(&tv, NULL); /* wait for checkpoints */

		(void) pthread_rwlock_wrlock(&fmd.d_log_lock);
		old = *lpp;

		if ((new = fmd_log_rotate(old)) != NULL) {
			fmd_log_rele(old);
			*lpp = new;
		}

		(void) pthread_rwlock_unlock(&fmd.d_log_lock);

	} while (new == NULL && errno == EFMD_LOG_ROTBUSY && try++ < trylimit);

	if (new != NULL)
		*rvp = 0;
	else if (errno == EFMD_LOG_ROTBUSY)
		*rvp = FMD_ADM_ERR_ROTBUSY;
	else
		*rvp = FMD_ADM_ERR_ROTFAIL;

	return (TRUE);
}

bool_t
fmd_adm_caserepair_1_svc(char *uuid, int *rvp, struct svc_req *req)
{
	fmd_case_t *cp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
		err = FMD_ADM_ERR_CASESRCH;
	else if (fmd_case_repair(cp) != 0) {
		err = errno == EFMD_CASE_OWNER ?
		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
	}

	if (cp != NULL)
		fmd_case_rele(cp);

	*rvp = err;
	return (TRUE);
}

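/*
 * fmd_case_hash_apply() callback: append the UUID of each solved, visible
 * case to the caselist reply.  As with the resource list above, the UUIDs
 * are packed into a single string table to keep the XDR overhead down.
 */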
void
fmd_adm_caselist_case(fmd_case_t *cp, void *arg)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	struct fmd_rpc_caselist *rcl = arg;
	size_t uuid_len, buf_len;
	void *p;

	if (rcl->rcl_err != 0)
		return;

	/*
	 * skip invisible cases
	 */
	if (cip->ci_flags & FMD_CF_INVISIBLE)
		return;

	if (!(cip->ci_flags & FMD_CF_SOLVED))
		return;

	/*
	 * Lock the case and reallocate rcl_buf[] to be large enough to hold
	 * another string, doubling it as needed.  Then copy the new string
	 * on to the end, and increment rcl_len to indicate the used space.
	 */
	(void) pthread_mutex_lock(&cip->ci_lock);

	uuid_len = cip->ci_uuidlen + 1;

	while (rcl->rcl_len + uuid_len > rcl->rcl_buf.rcl_buf_len) {
		if (rcl->rcl_buf.rcl_buf_len != 0)
			buf_len = rcl->rcl_buf.rcl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rcl->rcl_buf.rcl_buf_val, buf_len)) != NULL) {
			bzero((char *)p + rcl->rcl_buf.rcl_buf_len,
			    buf_len - rcl->rcl_buf.rcl_buf_len);
			rcl->rcl_buf.rcl_buf_val = p;
			rcl->rcl_buf.rcl_buf_len = buf_len;
		} else {
			rcl->rcl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rcl->rcl_err == 0) {
		bcopy(cip->ci_uuid, (char *)rcl->rcl_buf.rcl_buf_val +
		    rcl->rcl_len, uuid_len);
		rcl->rcl_len += uuid_len;
		rcl->rcl_cnt++;
	}

	(void) pthread_mutex_unlock(&cip->ci_lock);
}

bool_t
fmd_adm_caselist_1_svc(struct fmd_rpc_caselist *rvp, struct svc_req *req)
{
	rvp->rcl_buf.rcl_buf_len = 0;
	rvp->rcl_buf.rcl_buf_val = NULL;
	rvp->rcl_len = 0;
	rvp->rcl_cnt = 0;
	rvp->rcl_err = 0;

	if (fmd_rpc_deny(req))
		rvp->rcl_err = FMD_ADM_ERR_PERM;
	else
		fmd_case_hash_apply(fmd.d_cases, fmd_adm_caselist_case, rvp);

	return (TRUE);
}

bool_t
fmd_adm_caseinfo_1_svc(char *uuid, struct fmd_rpc_caseinfo *rvp,
    struct svc_req *req)
{
	fmd_case_t *cp;
	nvlist_t *nvl;
	int err = 0;

	bzero(rvp, sizeof (struct fmd_rpc_caseinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rci_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL) {
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	if (!(((fmd_case_impl_t *)cp)->ci_flags & FMD_CF_SOLVED)) {
		fmd_case_rele(cp);
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	nvl = fmd_case_mkevent(cp, FM_LIST_SUSPECT_CLASS);

	err = nvlist_pack(nvl, &rvp->rci_evbuf.rci_evbuf_val,
	    &rvp->rci_evbuf.rci_evbuf_len, NV_ENCODE_XDR, 0);

	nvlist_free(nvl);

	if (err != 0)
		rvp->rci_err = FMD_ADM_ERR_NOMEM;

	fmd_case_rele(cp);

	return (TRUE);
}

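/*
 * fmd_idspace_apply() callback: record each transport id in the reply array,
 * up to the buffer length allocated by fmd_adm_xprtlist_1_svc() below.
 */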
/*ARGSUSED*/
static void
fmd_adm_xprtlist_one(fmd_idspace_t *ids, id_t id, void *arg)
{
	struct fmd_rpc_xprtlist *rvp = arg;

	if (rvp->rxl_len < rvp->rxl_buf.rxl_buf_len)
		rvp->rxl_buf.rxl_buf_val[rvp->rxl_len++] = id;
}

bool_t
fmd_adm_xprtlist_1_svc(struct fmd_rpc_xprtlist *rvp, struct svc_req *req)
{
	if (fmd_rpc_deny(req)) {
		rvp->rxl_buf.rxl_buf_len = 0;
		rvp->rxl_buf.rxl_buf_val = NULL;
		rvp->rxl_len = 0;
		rvp->rxl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/*
	 * Since we're taking a snapshot of the transports, and these could
	 * change after we return our result, there's no need to hold any kind
	 * of lock between retrieving ids_count and taking the snapshot.  We'll
	 * just capture up to a maximum of whatever ids_count value we sampled.
	 */
	rvp->rxl_buf.rxl_buf_len = fmd.d_xprt_ids->ids_count;
	rvp->rxl_buf.rxl_buf_val = malloc(sizeof (int32_t) *
	    rvp->rxl_buf.rxl_buf_len);
	rvp->rxl_len = 0;
	rvp->rxl_err = 0;

	if (rvp->rxl_buf.rxl_buf_val == NULL) {
		rvp->rxl_err = FMD_ADM_ERR_NOMEM;
		return (TRUE);
	}

	fmd_idspace_apply(fmd.d_xprt_ids, fmd_adm_xprtlist_one, rvp);
	return (TRUE);
}

bool_t
fmd_adm_xprtstat_1_svc(int32_t id,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_xprt_impl_t *xip;
	fmd_stat_t *sp, *ep, *cp;

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_xprt_stat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_xprt_stat_t) /
	    sizeof (fmd_stat_t);
	rms->rms_err = 0;

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		return (TRUE);
	}

	if ((xip = fmd_idspace_hold(fmd.d_xprt_ids, id)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_XPRTSRCH;
		return (TRUE);
	}

	/*
	 * Grab the stats lock and bcopy the entire transport stats array in
	 * one shot.  Then go back through and duplicate any string values.
	 */
	(void) pthread_mutex_lock(&xip->xi_stats_lock);

	sp = (fmd_stat_t *)xip->xi_stats;
	ep = sp + rms->rms_buf.rms_buf_len;
	cp = rms->rms_buf.rms_buf_val;

	bcopy(sp, cp, sizeof (fmd_xprt_stat_t));

	for (; sp < ep; sp++, cp++) {
		if (sp->fmds_type == FMD_TYPE_STRING &&
		    sp->fmds_value.str != NULL)
			cp->fmds_value.str = strdup(sp->fmds_value.str);
	}

	(void) pthread_mutex_unlock(&xip->xi_stats_lock);
	fmd_idspace_rele(fmd.d_xprt_ids, id);

	return (TRUE);
}

int
fmd_adm_1_freeresult(SVCXPRT *xprt, xdrproc_t proc, caddr_t data)
{
	xdr_free(proc, data);
	svc_done(xprt);
	return (TRUE);
}

/*
 * Custom XDR routine for our API structure fmd_stat_t.  This function must
 * match the definition of fmd_stat_t in <fmd_api.h> and must also match
 * the corresponding routine in usr/src/lib/fm/libfmd_adm/common/fmd_adm.c.
 */
bool_t
xdr_fmd_stat(XDR *xp, fmd_stat_t *sp)
{
	bool_t rv = TRUE;

	rv &= xdr_opaque(xp, sp->fmds_name, sizeof (sp->fmds_name));
	rv &= xdr_u_int(xp, &sp->fmds_type);
	rv &= xdr_opaque(xp, sp->fmds_desc, sizeof (sp->fmds_desc));

	switch (sp->fmds_type) {
	case FMD_TYPE_BOOL:
		rv &= xdr_int(xp, &sp->fmds_value.bool);
		break;
	case FMD_TYPE_INT32:
		rv &= xdr_int32_t(xp, &sp->fmds_value.i32);
		break;
	case FMD_TYPE_UINT32:
		rv &= xdr_uint32_t(xp, &sp->fmds_value.ui32);
		break;
	case FMD_TYPE_INT64:
		rv &= xdr_int64_t(xp, &sp->fmds_value.i64);
		break;
	case FMD_TYPE_UINT64:
	case FMD_TYPE_TIME:
	case FMD_TYPE_SIZE:
		rv &= xdr_uint64_t(xp, &sp->fmds_value.ui64);
		break;
	case FMD_TYPE_STRING:
		rv &= xdr_string(xp, &sp->fmds_value.str, ~0);
		break;
	}

	return (rv);
}