/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <strings.h>
#include <limits.h>
#include <unistd.h>
#include <stdlib.h>
#include <alloca.h>

#include <fmd_rpc_adm.h>
#include <fmd_rpc.h>
#include <fmd_module.h>
#include <fmd_ustat.h>
#include <fmd_error.h>
#include <fmd_asru.h>
#include <fmd_ckpt.h>
#include <fmd_case.h>
#include <fmd_fmri.h>
#include <fmd_idspace.h>
#include <fmd_xprt.h>

#include <fmd.h>

bool_t
fmd_adm_modinfo_1_svc(struct fmd_rpc_modlist *rvp, struct svc_req *req)
{
	struct fmd_rpc_modinfo *rmi;
	fmd_module_t *mp;

	rvp->rml_list = NULL;
	rvp->rml_err = 0;
	rvp->rml_len = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rml_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	(void) pthread_mutex_lock(&fmd.d_mod_lock);

	for (mp = fmd_list_next(&fmd.d_mod_list);
	    mp != NULL; mp = fmd_list_next(mp)) {

		if ((rmi = malloc(sizeof (struct fmd_rpc_modinfo))) == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}

		fmd_module_lock(mp);

		/*
		 * If mod_info is NULL, the module is in the middle of loading:
		 * do not report its presence to observability tools yet.
		 */
		if (mp->mod_info == NULL) {
			fmd_module_unlock(mp);
			free(rmi);
			continue;
		}

		rmi->rmi_name = strdup(mp->mod_name);
		rmi->rmi_desc = strdup(mp->mod_info->fmdi_desc);
		rmi->rmi_vers = strdup(mp->mod_info->fmdi_vers);
		rmi->rmi_faulty = mp->mod_error != 0;
		rmi->rmi_next = rvp->rml_list;

		fmd_module_unlock(mp);
		rvp->rml_list = rmi;
		rvp->rml_len++;

		if (rmi->rmi_desc == NULL || rmi->rmi_vers == NULL) {
			rvp->rml_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	(void) pthread_mutex_unlock(&fmd.d_mod_lock);
	return (TRUE);
}

bool_t
fmd_adm_modcstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_ustat_snap_t snap;
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	if (fmd_modstat_snapshot(mp, &snap) == 0) {
		rms->rms_buf.rms_buf_val = snap.uss_buf;
		rms->rms_buf.rms_buf_len = snap.uss_len;
	} else if (errno == EFMD_HDL_ABORT) {
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	} else
		rms->rms_err = FMD_ADM_ERR_NOMEM;

	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_moddstat_1_svc(char *name,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_module_t *mp;

	rms->rms_buf.rms_buf_val = NULL;
	rms->rms_buf.rms_buf_len = 0;
	rms->rms_err = 0;

	if (fmd_rpc_deny(req)) {
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_modstat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_modstat_t) / sizeof (fmd_stat_t);

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		fmd_module_rele(mp);
		return (TRUE);
	}

	/*
	 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING stats
	 * are present in mp->mod_stats.  We don't use any for the daemon-
	 * maintained stats and provide this function in order to reduce the
	 * overhead of the fmstat(1M) default view, where these minimal stats
	 * must be retrieved for all of the active modules.
	 */
	(void) pthread_mutex_lock(&mp->mod_stats_lock);

	if (mp->mod_stats != NULL) {
		mp->mod_stats->ms_snaptime.fmds_value.ui64 = gethrtime();
		bcopy(mp->mod_stats, rms->rms_buf.rms_buf_val,
		    sizeof (fmd_modstat_t));
	} else {
		free(rms->rms_buf.rms_buf_val);
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_MODFAIL;
	}

	(void) pthread_mutex_unlock(&mp->mod_stats_lock);
	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_modgstat_1_svc(struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	const size_t size = sizeof (fmd_statistics_t);

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
	} else if ((rms->rms_buf.rms_buf_val = malloc(size)) != NULL) {
		/*
		 * Note: the bcopy() here is valid only if no FMD_TYPE_STRING
		 * stats are present in fmd.d_stats (see definition in fmd.c).
		 */
		(void) pthread_mutex_lock(&fmd.d_stats_lock);
		bcopy(fmd.d_stats, rms->rms_buf.rms_buf_val, size);
		(void) pthread_mutex_unlock(&fmd.d_stats_lock);
		rms->rms_buf.rms_buf_len = size / sizeof (fmd_stat_t);
		rms->rms_err = 0;
	} else {
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_NOMEM;
	}

	return (TRUE);
}

bool_t
fmd_adm_modload_1_svc(char *path, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	const char *p;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/*
	 * Before we endure the expense of constructing a module and attempting
	 * to load it, do a quick check to see if the pathname is valid.
	 */
	if (access(path, F_OK) != 0) {
		*rvp = FMD_ADM_ERR_MODNOENT;
		return (TRUE);
	}

	if ((p = strrchr(path, '.')) != NULL && strcmp(p, ".so") == 0)
		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_rtld_ops);
	else
		mp = fmd_modhash_load(fmd.d_mod_hash, path, &fmd_proc_ops);

	if (mp == NULL) {
		switch (errno) {
		case EFMD_MOD_LOADED:
			err = FMD_ADM_ERR_MODEXIST;
			break;
		case EFMD_MOD_INIT:
			err = FMD_ADM_ERR_MODINIT;
			break;
		default:
			err = FMD_ADM_ERR_MODLOAD;
			break;
		}
	}

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modunload_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modreset_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else if (mp == fmd.d_self)
		err = FMD_ADM_ERR_MODBUSY;
	else if (fmd_modhash_unload(fmd.d_mod_hash, name) != 0)
		err = FMD_ADM_ERR_MODSRCH;

	if (err == 0)
		fmd_ckpt_delete(mp); /* erase any saved checkpoints */

	if (err == 0 && fmd_modhash_load(fmd.d_mod_hash,
	    mp->mod_path, mp->mod_ops) == NULL) {
		if (errno == EFMD_MOD_INIT)
			err = FMD_ADM_ERR_MODINIT;
		else
			err = FMD_ADM_ERR_MODLOAD;
	}

	if (mp != NULL)
		fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_modgc_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL)
		err = FMD_ADM_ERR_MODSRCH;
	else {
		fmd_module_gc(mp);
		fmd_module_rele(mp);
	}

	*rvp = err;
	return (TRUE);
}

/*
 * Unlike our other RPC callbacks, fmd_adm_rsrclist_1 can return large amounts
 * of data that may exceed the underlying RPC transport buffer size if the
 * resource cache is heavily populated and/or all resources are requested.
 * To minimize the likelihood of running out of RPC buffer space and having to
 * fail the client request, fmd_adm_rsrclist_1 returns a snapshot of the
 * relevant FMRI strings only: the client can use fmd_adm_rsrcinfo_1 on an
 * individual FMRI if more information is needed.  To further reduce the XDR
 * overhead, the string list is represented as XDR-opaque data where the
 * entire list is returned as a string table (e.g. "fmriA\0fmriB\0...").
 */
static void
fmd_adm_rsrclist_asru(fmd_asru_t *ap, void *arg)
{
	struct fmd_rpc_rsrclist *rrl = arg;
	size_t name_len, buf_len;
	void *p;

	/*
	 * Skip the ASRU if this fault is marked as invisible.  If rrl_all is
	 * false, we take a quick look at asru_flags with no lock held to see
	 * if the ASRU is not faulty.  If so, we don't want to report it by
	 * default and can just skip this ASRU.  This helps keep overhead low
	 * in the common case, as the call to fmd_asru_getstate() can be
	 * expensive depending on the scheme.
	 */
	if (ap->asru_flags & FMD_ASRU_INVISIBLE)
		return;
	if (rrl->rrl_all == B_FALSE && !(ap->asru_flags & FMD_ASRU_FAULTY))
		return;

	if (rrl->rrl_err != 0 || fmd_asru_getstate(ap) == 0)
		return; /* error has occurred or resource is in 'ok' state */

	/*
	 * Lock the ASRU and reallocate rrl_buf[] to be large enough to hold
	 * another string, doubling it as needed.  Then copy the new string
	 * on to the end, and increment rrl_len to indicate the used space.
	 */
	(void) pthread_mutex_lock(&ap->asru_lock);
	name_len = strlen(ap->asru_name) + 1;

	while (rrl->rrl_len + name_len > rrl->rrl_buf.rrl_buf_len) {
		if (rrl->rrl_buf.rrl_buf_len != 0)
			buf_len = rrl->rrl_buf.rrl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rrl->rrl_buf.rrl_buf_val, buf_len)) != NULL) {
			bzero((char *)p + rrl->rrl_buf.rrl_buf_len,
			    buf_len - rrl->rrl_buf.rrl_buf_len);
			rrl->rrl_buf.rrl_buf_val = p;
			rrl->rrl_buf.rrl_buf_len = buf_len;
		} else {
			rrl->rrl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rrl->rrl_err == 0) {
		bcopy(ap->asru_name, (char *)rrl->rrl_buf.rrl_buf_val +
		    rrl->rrl_len, name_len);
		rrl->rrl_len += name_len;
		rrl->rrl_cnt++;
	}

	(void) pthread_mutex_unlock(&ap->asru_lock);
}

bool_t
fmd_adm_rsrclist_1_svc(bool_t all,
    struct fmd_rpc_rsrclist *rvp, struct svc_req *req)
{
	rvp->rrl_buf.rrl_buf_len = 0;
	rvp->rrl_buf.rrl_buf_val = NULL;
	rvp->rrl_len = 0;
	rvp->rrl_cnt = 0;
	rvp->rrl_err = 0;
	rvp->rrl_all = all;

	if (fmd_rpc_deny(req))
		rvp->rrl_err = FMD_ADM_ERR_PERM;
	else
		fmd_asru_hash_apply(fmd.d_asrus, fmd_adm_rsrclist_asru, rvp);

	return (TRUE);
}

bool_t
fmd_adm_rsrcinfo_1_svc(char *fmri,
    struct fmd_rpc_rsrcinfo *rvp, struct svc_req *req)
{
	fmd_asru_t *ap;
	fmd_case_impl_t *cip;
	int state;

	bzero(rvp, sizeof (struct fmd_rpc_rsrcinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rri_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((ap = fmd_asru_hash_lookup_name(fmd.d_asrus, fmri)) == NULL) {
		rvp->rri_err = FMD_ADM_ERR_RSRCSRCH;
		return (TRUE);
	}

	state = fmd_asru_getstate(ap);
	(void) pthread_mutex_lock(&ap->asru_lock);
	cip = (fmd_case_impl_t *)ap->asru_case;

	rvp->rri_fmri = strdup(ap->asru_name);
	rvp->rri_uuid = strdup(ap->asru_uuid);
	rvp->rri_case = cip ? strdup(cip->ci_uuid) : NULL;
	rvp->rri_faulty = (state & FMD_ASRU_FAULTY) != 0;
	rvp->rri_unusable = (state & FMD_ASRU_UNUSABLE) != 0;
	rvp->rri_invisible = (ap->asru_flags & FMD_ASRU_INVISIBLE) != 0;

	(void) pthread_mutex_unlock(&ap->asru_lock);
	fmd_asru_hash_release(fmd.d_asrus, ap);

	if (rvp->rri_fmri == NULL || rvp->rri_uuid == NULL)
		rvp->rri_err = FMD_ADM_ERR_NOMEM;

	return (TRUE);
}

static void
fmd_adm_do_repair(char *name, struct svc_req *req, int *errp, uint8_t reason,
    char *uuid)
{
	if (fmd_rpc_deny(req))
		*errp = FMD_ADM_ERR_PERM;
	else {
		fmd_asru_rep_arg_t fara;

		fara.fara_reason = reason;
		fara.fara_rval = errp;
		fara.fara_uuid = uuid;
		fara.fara_bywhat = FARA_BY_ASRU;
		fmd_asru_hash_apply_by_asru(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		fara.fara_bywhat = FARA_BY_LABEL;
		fmd_asru_hash_apply_by_label(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		fara.fara_bywhat = FARA_BY_FRU;
		fmd_asru_hash_apply_by_fru(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
		fara.fara_bywhat = FARA_BY_RSRC;
		fmd_asru_hash_apply_by_rsrc(fmd.d_asrus, name,
		    fmd_asru_repaired, &fara);
	}
}

bool_t
fmd_adm_rsrcflush_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPAIRED, NULL);
	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_rsrcrepaired_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPAIRED, NULL);
	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_rsrcreplaced_1_svc(char *name, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	fmd_adm_do_repair(name, req, &err, FMD_ASRU_REPLACED, NULL);
	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_rsrcacquit_1_svc(char *name, char *uuid, int *rvp, struct svc_req *req)
{
	int err = FMD_ADM_ERR_RSRCNOTF;

	fmd_adm_do_repair(name, req, &err, FMD_ASRU_ACQUITTED, uuid);
	*rvp = err;
	return (TRUE);
}

static void
fmd_adm_serdinfo_eng(fmd_serd_eng_t *sgp, void *arg)
{
	struct fmd_rpc_serdlist *rsl = arg;
	struct fmd_rpc_serdinfo *rsi = malloc(sizeof (struct fmd_rpc_serdinfo));
	uint64_t old, now = fmd_time_gethrtime();
	const fmd_serd_elem_t *oep;

	if (rsi == NULL || (rsi->rsi_name = strdup(sgp->sg_name)) == NULL) {
		rsl->rsl_err = FMD_ADM_ERR_NOMEM;
		free(rsi);
		return;
	}

	if ((oep = fmd_list_next(&sgp->sg_list)) != NULL)
		old = fmd_event_hrtime(oep->se_event);
	else
		old = now;

	rsi->rsi_delta = now >= old ? now - old : (UINT64_MAX - old) + now + 1;
	rsi->rsi_count = sgp->sg_count;
	rsi->rsi_fired = fmd_serd_eng_fired(sgp) != 0;
	rsi->rsi_n = sgp->sg_n;
	rsi->rsi_t = sgp->sg_t;
	rsi->rsi_next = rsl->rsl_list;

	rsl->rsl_list = rsi;
	rsl->rsl_len++;
}

bool_t
fmd_adm_serdinfo_1_svc(char *name,
    struct fmd_rpc_serdlist *rvp, struct svc_req *req)
{
	fmd_module_t *mp;

	rvp->rsl_list = NULL;
	rvp->rsl_err = 0;
	rvp->rsl_len = 0;

	if (fmd_rpc_deny(req)) {
		rvp->rsl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, name)) == NULL) {
		rvp->rsl_err = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);
	fmd_serd_hash_apply(&mp->mod_serds, fmd_adm_serdinfo_eng, rvp);
	fmd_module_unlock(mp);

	fmd_module_rele(mp);
	return (TRUE);
}

bool_t
fmd_adm_serdreset_1_svc(char *mname, char *sname, int *rvp, struct svc_req *req)
{
	fmd_module_t *mp;
	fmd_serd_eng_t *sgp;
	int err = 0;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((mp = fmd_modhash_lookup(fmd.d_mod_hash, mname)) == NULL) {
		*rvp = FMD_ADM_ERR_MODSRCH;
		return (TRUE);
	}

	fmd_module_lock(mp);

	if ((sgp = fmd_serd_eng_lookup(&mp->mod_serds, sname)) != NULL) {
		if (fmd_serd_eng_fired(sgp)) {
			err = FMD_ADM_ERR_SERDFIRED;
		} else {
			fmd_serd_eng_reset(sgp);
			fmd_module_setdirty(mp);
		}
	} else
		err = FMD_ADM_ERR_SERDSRCH;

	fmd_module_unlock(mp);
	fmd_module_rele(mp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_logrotate_1_svc(char *name, int *rvp, struct svc_req *req)
{
	fmd_log_t **lpp, *old, *new;
	int try = 1, trylimit = 1;
	hrtime_t nsec = 0;
	timespec_t tv;

	if (fmd_rpc_deny(req)) {
		*rvp = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if (strcmp(name, "errlog") == 0)
		lpp = &fmd.d_errlog;
	else if (strcmp(name, "fltlog") == 0)
		lpp = &fmd.d_fltlog;
	else {
		*rvp = FMD_ADM_ERR_ROTSRCH;
		return (TRUE);
	}

	(void) fmd_conf_getprop(fmd.d_conf, "log.tryrotate", &trylimit);
	(void) fmd_conf_getprop(fmd.d_conf, "log.waitrotate", &nsec);

	tv.tv_sec = nsec / NANOSEC;
	tv.tv_nsec = nsec % NANOSEC;

	/*
	 * To rotate a log file, grab d_log_lock as writer to make sure no
	 * one else can discover the current log pointer.  Then try to rotate
	 * the log.  If we're successful, release the old log pointer.
	 */
	do {
		if (try > 1)
			(void) nanosleep(&tv, NULL); /* wait for checkpoints */

		(void) pthread_rwlock_wrlock(&fmd.d_log_lock);
		old = *lpp;

		if ((new = fmd_log_rotate(old)) != NULL) {
			fmd_log_rele(old);
			*lpp = new;
		}

		(void) pthread_rwlock_unlock(&fmd.d_log_lock);

	} while (new == NULL && errno == EFMD_LOG_ROTBUSY && try++ < trylimit);

	if (new != NULL)
		*rvp = 0;
	else if (errno == EFMD_LOG_ROTBUSY)
		*rvp = FMD_ADM_ERR_ROTBUSY;
	else
		*rvp = FMD_ADM_ERR_ROTFAIL;

	return (TRUE);
}

bool_t
fmd_adm_caserepair_1_svc(char *uuid, int *rvp, struct svc_req *req)
{
	fmd_case_t *cp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
		err = FMD_ADM_ERR_CASESRCH;
	else if (fmd_case_repair(cp) != 0) {
		err = errno == EFMD_CASE_OWNER ?
		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
	}

	if (cp != NULL)
		fmd_case_rele(cp);

	*rvp = err;
	return (TRUE);
}

bool_t
fmd_adm_caseacquit_1_svc(char *uuid, int *rvp, struct svc_req *req)
{
	fmd_case_t *cp = NULL;
	int err = 0;

	if (fmd_rpc_deny(req))
		err = FMD_ADM_ERR_PERM;
	else if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL)
		err = FMD_ADM_ERR_CASESRCH;
	else if (fmd_case_acquit(cp) != 0) {
		err = errno == EFMD_CASE_OWNER ?
		    FMD_ADM_ERR_CASEXPRT : FMD_ADM_ERR_CASEOPEN;
	}

	if (cp != NULL)
		fmd_case_rele(cp);

	*rvp = err;
	return (TRUE);
}

void
fmd_adm_caselist_case(fmd_case_t *cp, void *arg)
{
	fmd_case_impl_t *cip = (fmd_case_impl_t *)cp;
	struct fmd_rpc_caselist *rcl = arg;
	size_t uuid_len, buf_len;
	void *p;

	if (rcl->rcl_err != 0)
		return;

	/*
	 * Skip invisible cases, and only report cases that have been solved.
	 */
	if (cip->ci_flags & FMD_CF_INVISIBLE)
		return;
	if (!(cip->ci_flags & FMD_CF_SOLVED))
		return;

	/*
	 * Lock the case and reallocate rcl_buf[] to be large enough to hold
	 * another string, doubling it as needed.  Then copy the new string
	 * on to the end, and increment rcl_len to indicate the used space.
	 */
	(void) pthread_mutex_lock(&cip->ci_lock);
	uuid_len = cip->ci_uuidlen + 1;

	while (rcl->rcl_len + uuid_len > rcl->rcl_buf.rcl_buf_len) {
		if (rcl->rcl_buf.rcl_buf_len != 0)
			buf_len = rcl->rcl_buf.rcl_buf_len * 2;
		else
			buf_len = 1024; /* default buffer size */

		if ((p = realloc(rcl->rcl_buf.rcl_buf_val, buf_len)) != NULL) {
			bzero((char *)p + rcl->rcl_buf.rcl_buf_len,
			    buf_len - rcl->rcl_buf.rcl_buf_len);
			rcl->rcl_buf.rcl_buf_val = p;
			rcl->rcl_buf.rcl_buf_len = buf_len;
		} else {
			rcl->rcl_err = FMD_ADM_ERR_NOMEM;
			break;
		}
	}

	if (rcl->rcl_err == 0) {
		bcopy(cip->ci_uuid, (char *)rcl->rcl_buf.rcl_buf_val +
		    rcl->rcl_len, uuid_len);
		rcl->rcl_len += uuid_len;
		rcl->rcl_cnt++;
	}

	(void) pthread_mutex_unlock(&cip->ci_lock);
}

bool_t
fmd_adm_caselist_1_svc(struct fmd_rpc_caselist *rvp, struct svc_req *req)
{
	rvp->rcl_buf.rcl_buf_len = 0;
	rvp->rcl_buf.rcl_buf_val = NULL;
	rvp->rcl_len = 0;
	rvp->rcl_cnt = 0;
	rvp->rcl_err = 0;

	if (fmd_rpc_deny(req))
		rvp->rcl_err = FMD_ADM_ERR_PERM;
	else
		fmd_case_hash_apply(fmd.d_cases, fmd_adm_caselist_case, rvp);

	return (TRUE);
}

bool_t
fmd_adm_caseinfo_1_svc(char *uuid, struct fmd_rpc_caseinfo *rvp,
    struct svc_req *req)
{
	fmd_case_t *cp;
	nvlist_t *nvl;
	int err = 0;

	bzero(rvp, sizeof (struct fmd_rpc_caseinfo));

	if (fmd_rpc_deny(req)) {
		rvp->rci_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	if ((cp = fmd_case_hash_lookup(fmd.d_cases, uuid)) == NULL) {
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	if (!(((fmd_case_impl_t *)cp)->ci_flags & FMD_CF_SOLVED)) {
		fmd_case_rele(cp);
		rvp->rci_err = FMD_ADM_ERR_CASESRCH;
		return (TRUE);
	}

	nvl = fmd_case_mkevent(cp, FM_LIST_SUSPECT_CLASS);

	err = nvlist_pack(nvl, &rvp->rci_evbuf.rci_evbuf_val,
	    &rvp->rci_evbuf.rci_evbuf_len, NV_ENCODE_XDR, 0);

	nvlist_free(nvl);

	if (err != 0)
		rvp->rci_err = FMD_ADM_ERR_NOMEM;

	fmd_case_rele(cp);

	return (TRUE);
}

/*ARGSUSED*/
static void
fmd_adm_xprtlist_one(fmd_idspace_t *ids, id_t id, void *arg)
{
	struct fmd_rpc_xprtlist *rvp = arg;

	if (rvp->rxl_len < rvp->rxl_buf.rxl_buf_len)
		rvp->rxl_buf.rxl_buf_val[rvp->rxl_len++] = id;
}

bool_t
fmd_adm_xprtlist_1_svc(struct fmd_rpc_xprtlist *rvp, struct svc_req *req)
{
	if (fmd_rpc_deny(req)) {
		rvp->rxl_buf.rxl_buf_len = 0;
		rvp->rxl_buf.rxl_buf_val = NULL;
		rvp->rxl_len = 0;
		rvp->rxl_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	/*
	 * Since we're taking a snapshot of the transports, and these could
	 * change after we return our result, there's no need to hold any kind
	 * of lock between retrieving ids_count and taking the snapshot.  We'll
	 * just capture up to a maximum of whatever ids_count value we sampled.
	 */
	rvp->rxl_buf.rxl_buf_len = fmd.d_xprt_ids->ids_count;
	rvp->rxl_buf.rxl_buf_val = malloc(sizeof (int32_t) *
	    rvp->rxl_buf.rxl_buf_len);
	rvp->rxl_len = 0;
	rvp->rxl_err = 0;

	if (rvp->rxl_buf.rxl_buf_val == NULL) {
		rvp->rxl_err = FMD_ADM_ERR_NOMEM;
		return (TRUE);
	}

	fmd_idspace_apply(fmd.d_xprt_ids, fmd_adm_xprtlist_one, rvp);
	return (TRUE);
}

bool_t
fmd_adm_xprtstat_1_svc(int32_t id,
    struct fmd_rpc_modstat *rms, struct svc_req *req)
{
	fmd_xprt_impl_t *xip;
	fmd_stat_t *sp, *ep, *cp;

	if (fmd_rpc_deny(req)) {
		rms->rms_buf.rms_buf_val = NULL;
		rms->rms_buf.rms_buf_len = 0;
		rms->rms_err = FMD_ADM_ERR_PERM;
		return (TRUE);
	}

	rms->rms_buf.rms_buf_val = malloc(sizeof (fmd_xprt_stat_t));
	rms->rms_buf.rms_buf_len = sizeof (fmd_xprt_stat_t) /
	    sizeof (fmd_stat_t);
	rms->rms_err = 0;

	if (rms->rms_buf.rms_buf_val == NULL) {
		rms->rms_err = FMD_ADM_ERR_NOMEM;
		rms->rms_buf.rms_buf_len = 0;
		return (TRUE);
	}

	if ((xip = fmd_idspace_hold(fmd.d_xprt_ids, id)) == NULL) {
		rms->rms_err = FMD_ADM_ERR_XPRTSRCH;
		return (TRUE);
	}

	/*
	 * Grab the stats lock and bcopy the entire transport stats array in
	 * one shot.  Then go back through and duplicate any string values.
	 */
	(void) pthread_mutex_lock(&xip->xi_stats_lock);

	sp = (fmd_stat_t *)xip->xi_stats;
	ep = sp + rms->rms_buf.rms_buf_len;
	cp = rms->rms_buf.rms_buf_val;

	bcopy(sp, cp, sizeof (fmd_xprt_stat_t));

	for (; sp < ep; sp++, cp++) {
		if (sp->fmds_type == FMD_TYPE_STRING &&
		    sp->fmds_value.str != NULL)
			cp->fmds_value.str = strdup(sp->fmds_value.str);
	}

	(void) pthread_mutex_unlock(&xip->xi_stats_lock);
	fmd_idspace_rele(fmd.d_xprt_ids, id);

	return (TRUE);
}

int
fmd_adm_1_freeresult(SVCXPRT *xprt, xdrproc_t proc, caddr_t data)
{
	xdr_free(proc, data);
	svc_done(xprt);
	return (TRUE);
}

/*
 * Custom XDR routine for our API structure fmd_stat_t.  This function must
 * match the definition of fmd_stat_t in <fmd_api.h> and must also match
 * the corresponding routine in usr/src/lib/fm/libfmd_adm/common/fmd_adm.c.
 */
bool_t
xdr_fmd_stat(XDR *xp, fmd_stat_t *sp)
{
	bool_t rv = TRUE;

	rv &= xdr_opaque(xp, sp->fmds_name, sizeof (sp->fmds_name));
	rv &= xdr_u_int(xp, &sp->fmds_type);
	rv &= xdr_opaque(xp, sp->fmds_desc, sizeof (sp->fmds_desc));

	switch (sp->fmds_type) {
	case FMD_TYPE_BOOL:
		rv &= xdr_int(xp, &sp->fmds_value.bool);
		break;
	case FMD_TYPE_INT32:
		rv &= xdr_int32_t(xp, &sp->fmds_value.i32);
		break;
	case FMD_TYPE_UINT32:
		rv &= xdr_uint32_t(xp, &sp->fmds_value.ui32);
		break;
	case FMD_TYPE_INT64:
		rv &= xdr_int64_t(xp, &sp->fmds_value.i64);
		break;
	case FMD_TYPE_UINT64:
	case FMD_TYPE_TIME:
	case FMD_TYPE_SIZE:
		rv &= xdr_uint64_t(xp, &sp->fmds_value.ui64);
		break;
	case FMD_TYPE_STRING:
		rv &= xdr_string(xp, &sp->fmds_value.str, ~0);
		break;
	}

	return (rv);
}