/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <stdlib.h>
#include <stdio.h>

#include <strings.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <time.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>
#include <umem.h>
#include <alloca.h>
#include <sys/processor.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <values.h>
#include <libscf.h>

#include <ctype.h>

#include "ldmsvcs_utils.h"
#include "ldom_alloc.h"

#define	ASSERT(cnd) \
	((void) ((cnd) || ((void) fprintf(stderr, \
		"assertion failure in %s:%d: %s\n", \
		__FILE__, __LINE__, #cnd), 0)))

#define	FDS_VLDC \
	"/devices/virtual-devices@100/channel-devices@200/" \
	"/virtual-channel-client@1:ldmfma"

/* allow timeouts in sec that are nearly forever but small enough for an int */
#define	LDM_TIMEOUT_CEILING	(MAXINT / 2)

#define	MIN(x, y)	((x) < (y) ? (x) : (y))

/*
 * functions in this file are for version 1.0 of FMA domain services
 */
static ds_ver_t ds_vers[] = {
	{ 1, 0 }
};

#define	DS_NUM_VER	(sizeof (ds_vers) / sizeof (ds_ver_t))

/*
 * information for each channel
 */
struct ldmsvcs_info {
	pthread_mutex_t mt;
	pthread_cond_t cv;
	fds_channel_t fds_chan;
	fds_reg_svcs_t fmas_svcs;
	int cv_twait;
};

/*
 * struct listdata_s and struct poller_s are used to maintain the state of
 * the poller thread.  this thread is used to manage incoming messages and
 * pass those messages onto the correct requesting thread.  see the "poller
 * functions" section for more details.
 */
struct listdata_s {
	enum {
		UNUSED,
		PENDING,
		ARRIVED
	} status;
	uint64_t req_num;
	int fd;
	size_t datalen;
};

static struct poller_s {
	pthread_mutex_t mt;
	pthread_cond_t cv;
	pthread_t polling_tid;
	int doreset;
	int doexit;
	int nclients;
	struct listdata_s **list;
	int list_len;
	int pending_count;
} pollbase = {
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_COND_INITIALIZER,
	0,
	1,
	0,
	0,
	NULL,
	0,
	0
};


static struct ldmsvcs_info *channel_init(struct ldom_hdl *lhp);
static int channel_openreset(struct ldmsvcs_info *lsp);
static int read_msg(struct ldmsvcs_info *lsp);

static int
get_smf_int_val(char *prop_nm, int min, int max, int default_val)
{
	scf_simple_prop_t *prop;	/* SMF property */
	int64_t *valp;			/* prop value ptr */
	int64_t val;			/* prop value to return */

	val = default_val;
	if ((prop = scf_simple_prop_get(NULL, LDM_SVC_NM, LDM_PROP_GROUP_NM,
	    prop_nm)) != NULL) {
		if ((valp = scf_simple_prop_next_integer(prop)) != NULL) {
			val = *valp;
			if (val < min)
				val = min;
			else if (val > max)
				val = max;
		}
		scf_simple_prop_free(prop);
	}
	return ((int)val);
}

static void
channel_close(struct ldmsvcs_info *lsp)
{
	(void) pthread_mutex_lock(&lsp->mt);

	if (lsp->fds_chan.state == CHANNEL_OPEN ||
	    lsp->fds_chan.state == CHANNEL_READY) {
		(void) close(lsp->fds_chan.fd);
		lsp->cv_twait = get_smf_int_val(LDM_INIT_TO_PROP_NM,
		    0, LDM_TIMEOUT_CEILING, LDM_INIT_WAIT_TIME);
		lsp->fds_chan.state = CHANNEL_CLOSED;
	}

	(void) pthread_mutex_unlock(&lsp->mt);
}

/*
 * read size bytes of data from a streaming fd into buf
 */
static int
read_stream(int fd, void *buf, size_t size)
{
	pollfd_t pollfd;
	ssize_t rv;
	size_t data_left;
	ptrdiff_t currentp;

	pollfd.events = POLLIN;
	pollfd.revents = 0;
	pollfd.fd = fd;

	currentp = (ptrdiff_t)buf;
	data_left = size;

	/*
	 * data may come in bits and pieces
	 */
	do {
		if ((rv = read(fd, (void *)currentp, data_left)) < 0) {
			if (errno == EAGAIN && poll(&pollfd, 1, -1) > 0)
				continue;	/* retry */
			else
				return (1);
		}

		data_left -= rv;
		currentp += rv;
	} while (data_left > 0);

	return (0);
}


/*
 * poller functions
 *
 * at init time, a thread is created for the purpose of monitoring incoming
 * messages and doing one of the following:
 *
 * 1. doing the initial handshake and version negotiation
 *
 * 2. handing incoming data off to the requesting thread (which is an fmd
 *    module or scheme thread)
 */
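/*
 * a rough sketch of the handoff between a requesting thread and the
 * poller thread (see poller_add_pending(), poller_handle_data() and
 * poller_recv_data() below):
 *
 *   requesting thread                      poller thread
 *   -----------------                      -------------
 *   index = poller_add_pending(req_num)
 *   fds_send(lsp, msg, msglen)
 *   poller_recv_data(...)                  poller_handle_data(fd, len)
 *     waits for ARRIVED                      marks entry ARRIVED, wakes waiter
 *     reads payload from fd                  waits until entry goes UNUSED
 *     marks entry UNUSED, wakes poller
 *   poller_delete_pending() on error
 */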
static int
poller_handle_data(int fd, size_t payloadsize)
{
	uint64_t *req_num;
	void *pr;
	size_t prlen;
	int i;

	prlen = sizeof (ds_data_handle_t) + sizeof (uint64_t);

	if (payloadsize < prlen)
		return (1);

	pr = alloca(prlen);

	if (read_stream(fd, pr, prlen) != 0)
		return (1);

	req_num = (uint64_t *)((ptrdiff_t)pr + sizeof (ds_data_handle_t));

	(void) pthread_mutex_lock(&pollbase.mt);

	for (i = 0; i < pollbase.list_len; i++) {
		if (pollbase.list[i]->req_num == *req_num) {
			ASSERT(pollbase.list[i]->status == PENDING);

			pollbase.list[i]->status = ARRIVED;
			pollbase.list[i]->fd = fd;
			pollbase.list[i]->datalen = payloadsize - prlen;

			pollbase.pending_count--;
			(void) pthread_cond_broadcast(&pollbase.cv);
			break;
		}
	}

	/*
	 * now wait for receiving thread to read in the data
	 */
	if (i < pollbase.list_len) {
		while (pollbase.list[i]->status == ARRIVED)
			(void) pthread_cond_wait(&pollbase.cv, &pollbase.mt);
	}

	(void) pthread_mutex_unlock(&pollbase.mt);

	return (0);
}


/*
 * note that this function is meant to handle only DS_DATA messages
 */
static int
poller_recv_data(struct ldom_hdl *lhp, uint64_t req_num, int index,
    void **resp, size_t *resplen)
{
	struct timespec twait;
	int ier;

	ier = 0;
	twait.tv_sec = time(NULL) + lhp->lsinfo->cv_twait;
	twait.tv_nsec = 0;

	(void) pthread_mutex_lock(&pollbase.mt);

	ASSERT(pollbase.list[index]->req_num == req_num);

	while (pollbase.list[index]->status == PENDING &&
	    pollbase.doreset == 0 && ier == 0)
		ier = pthread_cond_timedwait(&pollbase.cv, &pollbase.mt,
		    &twait);

	if (ier == 0) {
		if (pollbase.doreset == 0) {
			ASSERT(pollbase.list[index]->status == ARRIVED);

			/*
			 * need to add req_num to beginning of resp
			 */
			*resplen = pollbase.list[index]->datalen +
			    sizeof (uint64_t);
			*resp = lhp->allocp(*resplen);
			*((uint64_t *)*resp) = req_num;

			if (read_stream(pollbase.list[index]->fd,
			    (void *)((ptrdiff_t)*resp + sizeof (uint64_t)),
			    *resplen - sizeof (uint64_t)) != 0)
				ier = ETIMEDOUT;

			pollbase.list[index]->status = UNUSED;
			pollbase.list[index]->req_num = 0;
			(void) pthread_cond_broadcast(&pollbase.cv);
		} else {
			if (--(pollbase.pending_count) == 0)
				(void) pthread_cond_broadcast(&pollbase.cv);
		}
	}

	(void) pthread_mutex_unlock(&pollbase.mt);

	ASSERT(ier == 0 || ier == ETIMEDOUT);

	return (ier);
}


static void
poller_add_client(void)
{
	(void) pthread_mutex_lock(&pollbase.mt);
	pollbase.nclients++;
	(void) pthread_mutex_unlock(&pollbase.mt);
}


static void
poller_remove_client(void)
{
	(void) pthread_mutex_lock(&pollbase.mt);
	pollbase.nclients--;
	ASSERT(pollbase.nclients >= 0);
	(void) pthread_mutex_unlock(&pollbase.mt);
}

static int
poller_add_pending(uint64_t req_num)
{
	int newlen, index, i, j;

	(void) pthread_mutex_lock(&pollbase.mt);
	pollbase.pending_count++;

	for (j = 0, index = -1; j < 2 && index == -1; j++) {
		for (i = 0; i < pollbase.list_len; i++) {
			if (pollbase.list[i]->status == UNUSED) {
				pollbase.list[i]->status = PENDING;
				pollbase.list[i]->req_num = req_num;
				pollbase.list[i]->datalen = 0;
				index = i;
				break;
			}
		}

		if (index == -1) {
			struct listdata_s **newlist, **oldlist;

			/*
			 * get to this point if list is not long enough.
			 * check for a runaway list.  since requests are
			 * synchronous (clients send a request and need to
			 * wait for the result before returning) the size
			 * of the list cannot be much more than the number
			 * of clients.
			 */
			ASSERT(pollbase.list_len < pollbase.nclients + 1);

			newlen = pollbase.list_len + 5;
			newlist = ldom_alloc(newlen *
			    sizeof (struct listdata_s *));

			for (i = 0; i < pollbase.list_len; i++)
				newlist[i] = pollbase.list[i];

			oldlist = pollbase.list;
			pollbase.list = newlist;
			ldom_free(oldlist, pollbase.list_len *
			    sizeof (struct listdata_s *));

			for (i = pollbase.list_len; i < newlen; i++) {
				pollbase.list[i] =
				    ldom_alloc(sizeof (struct listdata_s));
				pollbase.list[i]->status = UNUSED;
			}

			pollbase.list_len = newlen;
		}
	}

	(void) pthread_mutex_unlock(&pollbase.mt);
	ASSERT(index != -1);

	return (index);
}


static void
poller_delete_pending(uint64_t req_num, int index)
{
	(void) pthread_mutex_lock(&pollbase.mt);

	ASSERT(pollbase.list[index]->req_num == req_num);
	pollbase.list[index]->status = UNUSED;

	if (--(pollbase.pending_count) == 0 && pollbase.doreset == 1)
		(void) pthread_cond_broadcast(&pollbase.cv);

	(void) pthread_mutex_unlock(&pollbase.mt);
}


static void
poller_shutdown(boolean_t wait)
{
	(void) pthread_mutex_lock(&pollbase.mt);

	pollbase.doexit = 1;

	(void) pthread_mutex_unlock(&pollbase.mt);

	if (wait == B_TRUE) {
		/* stop the poller thread and wait for it to end */
		(void) pthread_kill(pollbase.polling_tid, SIGTERM);
		(void) pthread_join(pollbase.polling_tid, NULL);
	}
}

/*
 * perform the polling of incoming messages.  manage any resets (usually
 * due to one end of the connection being closed) as well as exit
 * conditions.
 */
static void *
poller_loop(void *arg)
{
	struct ldmsvcs_info *lsp;
	pollfd_t pollfd;
	int ier;

	lsp = (struct ldmsvcs_info *)arg;

	for (;;) {
		(void) pthread_mutex_lock(&pollbase.mt);

		if (pollbase.doexit) {
			(void) pthread_mutex_unlock(&pollbase.mt);
			break;
		}

		if (pollbase.doreset) {
			int i;

			while (pollbase.pending_count > 0)
				(void) pthread_cond_wait(&pollbase.cv,
				    &pollbase.mt);

			ASSERT(pollbase.pending_count == 0);
			for (i = 0; i < pollbase.list_len; i++)
				pollbase.list[i]->status = UNUSED;

			pollbase.doreset = 0;
		}
		(void) pthread_mutex_unlock(&pollbase.mt);

		if ((ier = channel_openreset(lsp)) == 1) {
			continue;
		} else if (ier == 2) {
			/*
			 * start exit preparations
			 */
			poller_shutdown(B_FALSE);
			continue;
		}

		pollfd.events = POLLIN;
		pollfd.revents = 0;
		pollfd.fd = lsp->fds_chan.fd;

		if (poll(&pollfd, 1, -1) <= 0 || read_msg(lsp) != 0) {
			/*
			 * read error and/or fd got closed
			 */
			(void) pthread_mutex_lock(&pollbase.mt);
			pollbase.doreset = 1;
			(void) pthread_mutex_unlock(&pollbase.mt);

			channel_close(lsp);
		}
	}

	return (NULL);
}


/*
 * create the polling thread
 */
static int
poller_init(struct ldmsvcs_info *lsp)
{
	int rc = 0;

	(void) pthread_mutex_lock(&pollbase.mt);

	if (pollbase.polling_tid == 0) {
		pthread_attr_t *attr = NULL;

		/*
		 * create a joinable polling thread for receiving messages
		 */
		if (pthread_create(&pollbase.polling_tid, attr,
		    poller_loop, lsp) != 0)
			rc = 1;
	}

	(void) pthread_mutex_unlock(&pollbase.mt);

	return (rc);
}

/*
 * Cleanup the polling thread
 */
static void
poller_fini(void)
{
	int i;

	/* stop the poller thread */
	poller_shutdown(B_TRUE);

	(void) pthread_mutex_lock(&pollbase.mt);

	/* Free up the list of outstanding requests */
	if (pollbase.list != NULL) {
		for (i = 0; i < pollbase.list_len; i++) {
			if (pollbase.list[i]) {
				ldom_free(pollbase.list[i],
				    sizeof (struct listdata_s));
			}
		}
		ldom_free(pollbase.list, pollbase.list_len *
		    sizeof (struct listdata_s *));
		pollbase.list = NULL;
		pollbase.list_len = 0;
	}

	(void) pthread_mutex_unlock(&pollbase.mt);
}

/*
 * utilities for message handlers
 */
static int
fds_send(struct ldmsvcs_info *lsp, void *msg, size_t msglen)
{
	static pthread_mutex_t mt = PTHREAD_MUTEX_INITIALIZER;

	(void) pthread_mutex_lock(&mt);

	if (write(lsp->fds_chan.fd, msg, msglen) != msglen) {
		channel_close(lsp);
		(void) pthread_mutex_unlock(&mt);
		return (ETIMEDOUT);
	}

	(void) pthread_mutex_unlock(&mt);
	return (0);
}


/*
 * Find the max and min version supported
 */
static void
fds_min_max_versions(uint16_t *min_major, uint16_t *max_major)
{
	int i;

	*min_major = ds_vers[0].major;
	*max_major = *min_major;

	for (i = 1; i < DS_NUM_VER; i++) {
		if (ds_vers[i].major < *min_major)
			*min_major = ds_vers[i].major;

		if (ds_vers[i].major > *max_major)
			*max_major = ds_vers[i].major;
	}
}

/*
 * check whether the major and minor numbers requested by remote ds client
 * can be satisfied.  if the requested major is supported, true is
 * returned, and the agreed minor is returned in new_minor.  if the
 * requested major is not supported, the routine returns false, and the
 * closest major is returned in *new_major, upon which the ds client should
 * renegotiate.  the closest major is the next one lower than the requested
 * major number.
 */
static boolean_t
fds_negotiate_version(uint16_t req_major, uint16_t *new_majorp,
    uint16_t *new_minorp)
{
	int i = 0;
	uint16_t major, lower_major;
	uint16_t min_major, max_major;
	boolean_t found_match = B_FALSE;

	fds_min_max_versions(&min_major, &max_major);

	/*
	 * if the minimum version supported is greater than the version
	 * requested, return the lowest version supported
	 */
	if (min_major > req_major) {
		*new_majorp = min_major;
		return (B_FALSE);
	}

	/*
	 * if the largest version supported is lower than the version
	 * requested, return the largest version supported
	 */
	if (max_major < req_major) {
		*new_majorp = max_major;
		return (B_FALSE);
	}

	/*
	 * now we know that the requested version lies between the min and
	 * max versions supported.  check if the requested major can be
	 * found in supported versions.
	 */
	lower_major = min_major;
	for (i = 0; i < DS_NUM_VER; i++) {
		major = ds_vers[i].major;
		if (major == req_major) {
			found_match = B_TRUE;
			*new_minorp = ds_vers[i].minor;
			*new_majorp = major;
			break;
		} else if ((major < req_major) && (major > lower_major))
			lower_major = major;
	}

	/*
	 * If no match is found, return the closest available number
	 */
	if (!found_match)
		*new_majorp = lower_major;

	return (found_match);
}

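/*
 * for example, with the current ds_vers[] table containing only { 1, 0 },
 * a request for major 1 succeeds with *new_minorp set to 0, while a
 * request for major 2 fails and suggests major 1 in *new_majorp so that
 * the remote end can renegotiate.
 */
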
/*
 * return 0 if service is added; 1 if service is a duplicate
 */
static int
fds_svc_add(struct ldmsvcs_info *lsp, ds_reg_req_t *req, int minor)
{
	fds_svc_t *svc;
	int i, rc;

	svc = NULL;
	for (i = 0; i < lsp->fmas_svcs.nsvcs; i++) {
		if (strcmp(lsp->fmas_svcs.tbl[i]->name, req->svc_id) == 0) {
			svc = lsp->fmas_svcs.tbl[i];
			break;
		}
	}

	if (svc == NULL)
		return (0);	/* we don't need this service */

	(void) pthread_mutex_lock(&lsp->fmas_svcs.mt);

	/*
	 * duplicate registration is OK --- we retain the previous entry
	 * (which has not been unregistered anyway)
	 */
	if (svc->state == DS_SVC_ACTIVE) {
		rc = 1;
	} else {
		svc->state = DS_SVC_ACTIVE;
		svc->hdl = req->svc_handle;
		svc->ver.major = req->major_vers;
		svc->ver.minor = minor;

		rc = 0;
		(void) pthread_cond_broadcast(&lsp->fmas_svcs.cv);
	}

	(void) pthread_mutex_unlock(&lsp->fmas_svcs.mt);

	return (rc);
}


static void
fds_svc_reset(struct ldmsvcs_info *lsp, int index)
{
	int i, start, end;

	if (index >= 0) {
		start = index;
		end = index + 1;
	} else {
		start = 0;
		end = lsp->fmas_svcs.nsvcs;
	}

	(void) pthread_mutex_lock(&lsp->fmas_svcs.mt);

	for (i = start; i < end; i++) {
		lsp->fmas_svcs.tbl[i]->hdl = 0;
		lsp->fmas_svcs.tbl[i]->state = DS_SVC_INVAL;
		lsp->fmas_svcs.tbl[i]->ver.major =
		    ds_vers[DS_NUM_VER - 1].major;
		lsp->fmas_svcs.tbl[i]->ver.minor =
		    ds_vers[DS_NUM_VER - 1].minor;
	}

	(void) pthread_mutex_unlock(&lsp->fmas_svcs.mt);
}


static int
fds_svc_remove(struct ldmsvcs_info *lsp, ds_svc_hdl_t svc_handle)
{
	int i;

	for (i = 0; i < lsp->fmas_svcs.nsvcs; i++) {
		if (lsp->fmas_svcs.tbl[i]->hdl == svc_handle) {
			fds_svc_reset(lsp, i);
			return (0);
		}
	}

	return (1);
}

/*
 * message handlers
 */
/*ARGSUSED*/
static void
ds_handle_msg_noop(struct ldmsvcs_info *lsp, void *buf, size_t len)
{
}

static void
ds_handle_init_req(struct ldmsvcs_info *lsp, void *buf, size_t len)
{
	ds_init_req_t *req;
	uint16_t new_major, new_minor;
	size_t msglen;

	req = (ds_init_req_t *)buf;

	/* sanity check the incoming message */
	if (len != sizeof (ds_init_req_t)) {
		channel_close(lsp);
		return;
	}

	/*
	 * Check version info.  ACK only if the major numbers exactly
	 * match.  The service entity can retry with a new minor
	 * based on the response sent as part of the NACK.
	 */
	if (fds_negotiate_version(req->major_vers, &new_major, &new_minor)) {
		ds_hdr_t *H;
		ds_init_ack_t *R;

		msglen = sizeof (ds_hdr_t) + sizeof (ds_init_ack_t);
		H = alloca(msglen);
		R = (void *)((ptrdiff_t)H + sizeof (ds_hdr_t));

		H->msg_type = DS_INIT_ACK;
		H->payload_len = sizeof (ds_init_ack_t);
		R->minor_vers = MIN(new_minor, req->minor_vers);

		if (fds_send(lsp, H, msglen) != 0)
			return;

		(void) pthread_mutex_lock(&lsp->mt);
		ASSERT(lsp->fds_chan.state == CHANNEL_OPEN);
		lsp->fds_chan.state = CHANNEL_READY;

		/*
		 * Now the channel is ready after the handshake completes.
		 * Reset the timeout to a smaller value for receiving messages
		 * from the domain services.
		 */
		lsp->cv_twait = get_smf_int_val(LDM_RUNNING_TO_PROP_NM,
		    0, LDM_TIMEOUT_CEILING, LDM_RUNNING_WAIT_TIME);

		(void) pthread_mutex_unlock(&lsp->mt);
	} else {
		ds_hdr_t *H;
		ds_init_nack_t *R;

		msglen = sizeof (ds_hdr_t) + sizeof (ds_init_nack_t);
		H = alloca(msglen);
		R = (void *)((ptrdiff_t)H + sizeof (ds_hdr_t));

		H->msg_type = DS_INIT_NACK;
		H->payload_len = sizeof (ds_init_nack_t);
		R->major_vers = new_major;

		(void) fds_send(lsp, H, msglen);
		/*
		 * do not update state; remote end may attempt to initiate
		 * connection with a different version
		 */
	}
}

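/*
 * outbound replies built by these handlers all share one layout (the
 * message types are assumed to come from ldmsvcs_utils.h): a ds_hdr_t
 * carrying msg_type and payload_len, immediately followed by the payload
 * itself, e.g.
 *
 *   +----------+-----------------------------+
 *   | ds_hdr_t | ds_init_ack_t / ds_reg_ack_t |
 *   +----------+-----------------------------+
 *
 * the whole buffer is written to the channel in a single fds_send() call.
 */
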
/*ARGSUSED*/
static void
ds_handle_reg_req(struct ldmsvcs_info *lsp, void *buf, size_t len)
{
	ds_reg_req_t *req;
	char *msg;
	uint16_t new_major, new_minor;
	size_t msglen;
	int dup_svcreg = 0;

	req = (ds_reg_req_t *)buf;
	msg = (char *)req->svc_id;

	/*
	 * Service must be NULL terminated
	 */
	if (req->svc_id == NULL || strlen(req->svc_id) == 0 ||
	    msg[strlen(req->svc_id)] != '\0') {
		channel_close(lsp);
		return;
	}

	if (fds_negotiate_version(req->major_vers, &new_major, &new_minor) &&
	    (dup_svcreg = fds_svc_add(lsp, req,
	    MIN(new_minor, req->minor_vers))) == 0) {

		/*
		 * Check version info.  ACK only if the major numbers
		 * exactly match.  The service entity can retry with a new
		 * minor based on the response sent as part of the NACK.
		 */
		ds_hdr_t *H;
		ds_reg_ack_t *R;

		msglen = sizeof (ds_hdr_t) + sizeof (ds_reg_ack_t);
		H = alloca(msglen);
		bzero(H, msglen);
		R = (void *)((ptrdiff_t)H + sizeof (ds_hdr_t));

		H->msg_type = DS_REG_ACK;
		H->payload_len = sizeof (ds_reg_ack_t);
		R->svc_handle = req->svc_handle;
		R->minor_vers = MIN(new_minor, req->minor_vers);

		(void) fds_send(lsp, H, msglen);
	} else {
		ds_hdr_t *H;
		ds_reg_nack_t *R;

		msglen = sizeof (ds_hdr_t) + sizeof (ds_reg_nack_t);
		H = alloca(msglen);
		bzero(H, msglen);
		R = (void *)((ptrdiff_t)H + sizeof (ds_hdr_t));

		H->msg_type = DS_REG_NACK;
		H->payload_len = sizeof (ds_reg_nack_t);
		R->svc_handle = req->svc_handle;
		R->major_vers = new_major;

		if (dup_svcreg)
			R->result = DS_REG_DUP;
		else
			R->result = DS_REG_VER_NACK;

		(void) fds_send(lsp, H, msglen);
	}
}


/*ARGSUSED*/
static void
ds_handle_unreg(struct ldmsvcs_info *lsp, void *buf, size_t len)
{
	ds_unreg_req_t *req;
	size_t msglen;

	req = (ds_unreg_req_t *)buf;

	if (fds_svc_remove(lsp, req->svc_handle) == 0) {
		ds_hdr_t *H;
		ds_unreg_ack_t *R;

		msglen = sizeof (ds_hdr_t) + sizeof (ds_unreg_ack_t);
		H = alloca(msglen);
		R = (void *)((ptrdiff_t)H + sizeof (ds_hdr_t));

		H->msg_type = DS_REG_ACK;
		H->payload_len = sizeof (ds_unreg_ack_t);
		R->svc_handle = req->svc_handle;

		(void) fds_send(lsp, H, msglen);
	} else {
		ds_hdr_t *H;
		ds_unreg_nack_t *R;

		msglen = sizeof (ds_hdr_t) + sizeof (ds_unreg_nack_t);
		H = alloca(msglen);
		R = (void *)((ptrdiff_t)H + sizeof (ds_hdr_t));

		H->msg_type = DS_REG_NACK;
		H->payload_len = sizeof (ds_unreg_nack_t);
		R->svc_handle = req->svc_handle;

		(void) fds_send(lsp, H, msglen);
	}
}

/*
 * Message handler lookup table (v1.0 only for now).  Future
 * versions can add their own lookup table.
 */
typedef void (*ds_msg_handler_t)(struct ldmsvcs_info *lsp,
    void *buf, size_t len);

static const ds_msg_handler_t ds_msg_handlers[] = {
	ds_handle_init_req,	/* DS_INIT_REQ */
	ds_handle_msg_noop,	/* DS_INIT_ACK */
	ds_handle_msg_noop,	/* DS_INIT_NACK */
	ds_handle_reg_req,	/* DS_REG_REQ */
	ds_handle_msg_noop,	/* DS_REG_ACK */
	ds_handle_msg_noop,	/* DS_REG_NACK */
	ds_handle_unreg,	/* DS_UNREG */
	ds_handle_msg_noop,	/* DS_UNREG_ACK */
	ds_handle_msg_noop,	/* DS_UNREG_NACK */
	ds_handle_msg_noop,	/* DS_DATA */
	ds_handle_msg_noop	/* DS_NACK */
};


/*
 * message and service internal functions
 */
static void
fds_svc_alloc(struct ldmsvcs_info *lsp)
{
	int i;
	static char *name[] = { LDM_DS_NAME_CPU, LDM_DS_NAME_MEM,
	    LDM_DS_NAME_PRI, LDM_DS_NAME_IOD, NULL };

	(void) pthread_mutex_init(&lsp->fmas_svcs.mt, NULL);
	(void) pthread_cond_init(&lsp->fmas_svcs.cv, NULL);

	for (lsp->fmas_svcs.nsvcs = 0; name[lsp->fmas_svcs.nsvcs] != NULL;
	    lsp->fmas_svcs.nsvcs++)
		;

	lsp->fmas_svcs.tbl = (fds_svc_t **)ldom_alloc(sizeof (fds_svc_t *) *
	    lsp->fmas_svcs.nsvcs);

	for (i = 0; i < lsp->fmas_svcs.nsvcs; i++) {
		lsp->fmas_svcs.tbl[i] =
		    (fds_svc_t *)ldom_alloc(sizeof (fds_svc_t));
		bzero(lsp->fmas_svcs.tbl[i], sizeof (fds_svc_t));
		lsp->fmas_svcs.tbl[i]->name = name[i];
	}
}

static fds_svc_t *
fds_svc_lookup(struct ldmsvcs_info *lsp, char *name)
{
	struct timespec twait;
	fds_svc_t *svc;
	int i, ier;

	if (pthread_mutex_lock(&lsp->fmas_svcs.mt) == EINVAL)
		return (NULL);	/* uninitialized or destroyed mutex */

	svc = NULL;
	for (i = 0; i < lsp->fmas_svcs.nsvcs; i++) {
		if (strcmp(lsp->fmas_svcs.tbl[i]->name, name) == 0) {
			svc = lsp->fmas_svcs.tbl[i];
			break;
		}
	}

	ASSERT(svc != NULL);

	if (svc->state == DS_SVC_INACTIVE) {
		/* service is not registered */
		ier = ETIMEDOUT;
	} else {
		ier = 0;
		twait.tv_sec = time(NULL) + lsp->cv_twait;
		twait.tv_nsec = 0;

		while (svc->state != DS_SVC_ACTIVE && ier == 0 &&
		    lsp->fds_chan.state != CHANNEL_UNUSABLE)
			ier = pthread_cond_timedwait(&lsp->fmas_svcs.cv,
			    &lsp->fmas_svcs.mt, &twait);

		/*
		 * By now, the ds service should have registered already.
		 * If it has not, ldmd probably does not support this
		 * service, so mark the service state as inactive.
		 */
		if (ier == ETIMEDOUT) {
			svc->state = DS_SVC_INACTIVE;
		}
	}

	(void) pthread_mutex_unlock(&lsp->fmas_svcs.mt);

	if (ier == 0)
		return (svc);
	else
		return (NULL);
}


static uint64_t
fds_svc_req_num(void)
{
	static uint64_t req_num = 1;

	return (req_num++);
}


/*
 * return 0 if successful, 1 if otherwise
 */
static int
read_msg(struct ldmsvcs_info *lsp)
{
	ds_hdr_t header;
	void *msg_buf;

	/*
	 * read the header
	 */
	if (read_stream(lsp->fds_chan.fd, &header, sizeof (ds_hdr_t)) != 0)
		return (1);

	if (header.msg_type >=
	    sizeof (ds_msg_handlers) / sizeof (ds_msg_handler_t))
		return (1);

	/*
	 * handle data as a special case
	 */
	if (header.msg_type == 9)
		return (poller_handle_data(lsp->fds_chan.fd,
		    header.payload_len));

	/*
	 * all other types of messages should be small
	 */
	ASSERT(header.payload_len < 1024);
	msg_buf = alloca(header.payload_len);

	/*
	 * read the payload
	 */
	if (read_stream(lsp->fds_chan.fd, msg_buf, header.payload_len) != 0)
		return (1);

	(*ds_msg_handlers[header.msg_type])(lsp, msg_buf, header.payload_len);

	return (0);
}


/*
 * return values:
 *	0 - success
 *	1 - problem with opening the channel
 *	2 - channel not opened; request to exit has been detected
 */
static int
channel_openreset(struct ldmsvcs_info *lsp)
{
	int ier;

	ier = pthread_mutex_lock(&lsp->mt);

	if (ier == EINVAL || lsp->fds_chan.state == CHANNEL_EXIT ||
	    lsp->fds_chan.state == CHANNEL_UNUSABLE) {
		(void) pthread_mutex_unlock(&lsp->mt);
		return (2);
	}

	if (lsp->fds_chan.state == CHANNEL_UNINITIALIZED ||
	    lsp->fds_chan.state == CHANNEL_CLOSED) {
		(void) pthread_cond_broadcast(&lsp->cv);

		if ((lsp->fds_chan.fd = open(FDS_VLDC, O_RDWR)) < 0) {
			lsp->fds_chan.state = CHANNEL_UNUSABLE;
			lsp->cv_twait = get_smf_int_val(LDM_RUNNING_TO_PROP_NM,
			    0, LDM_TIMEOUT_CEILING, LDM_RUNNING_WAIT_TIME);
			(void) pthread_mutex_unlock(&lsp->mt);
			(void) pthread_cond_broadcast(&lsp->fmas_svcs.cv);

			return (2);
		} else {
			vldc_opt_op_t op;

			op.op_sel = VLDC_OP_SET;
			op.opt_sel = VLDC_OPT_MODE;
			op.opt_val = LDC_MODE_RELIABLE;

			if (ioctl(lsp->fds_chan.fd, VLDC_IOCTL_OPT_OP,
			    &op) != 0) {
				(void) close(lsp->fds_chan.fd);
				(void) pthread_mutex_unlock(&lsp->mt);
				return (1);
			}
		}
		lsp->fds_chan.state = CHANNEL_OPEN;
	}

	if (lsp->fds_chan.state == CHANNEL_OPEN) {
		/*
		 * reset various channel parameters
		 */
		lsp->fds_chan.ver.major = 0;
		lsp->fds_chan.ver.minor = 0;
		fds_svc_reset(lsp, -1);
	}
	(void) pthread_mutex_unlock(&lsp->mt);

	return (0);
}

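/*
 * summary of the channel states used above (the CHANNEL_* constants are
 * presumed to be defined in ldmsvcs_utils.h):
 *
 *   UNINITIALIZED/CLOSED --open() ok--> OPEN --init handshake--> READY
 *   open() failure  ------------------> UNUSABLE
 *   read/write error -----------------> CLOSED (poller reopens later)
 *   process teardown -----------------> EXIT
 */
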
static void
channel_fini(void)
{
	int i;
	struct ldmsvcs_info *lsp;

	/*
	 * End the poller thread
	 */
	poller_fini();

	if ((lsp = channel_init(NULL)) == NULL)
		return;

	(void) pthread_mutex_lock(&lsp->mt);

	lsp->fds_chan.state = CHANNEL_EXIT;
	(void) close(lsp->fds_chan.fd);

	(void) pthread_mutex_unlock(&lsp->mt);

	/* Free the ldom service structure */
	for (i = 0; i < lsp->fmas_svcs.nsvcs; i++) {
		ldom_free(lsp->fmas_svcs.tbl[i], sizeof (fds_svc_t));
	}
	ldom_free(lsp->fmas_svcs.tbl,
	    lsp->fmas_svcs.nsvcs * sizeof (fds_svc_t *));
	ldom_free(lsp, sizeof (struct ldmsvcs_info));
}


static struct ldmsvcs_info *
channel_init(struct ldom_hdl *lhp)
{
	static pthread_mutex_t mt = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
	static struct ldmsvcs_info *root = NULL;
	static int busy_init = 0;

	struct timespec twait;
	int expired;

	(void) pthread_mutex_lock(&mt);

	while (busy_init == 1)
		(void) pthread_cond_wait(&cv, &mt);

	if (root != NULL || (lhp == NULL && root == NULL)) {
		(void) pthread_mutex_unlock(&mt);
		return (root);
	}

	/*
	 * get to this point if we need to open the channel
	 */
	busy_init = 1;
	(void) pthread_mutex_unlock(&mt);

	root = (struct ldmsvcs_info *)
	    ldom_alloc(sizeof (struct ldmsvcs_info));
	bzero(root, sizeof (struct ldmsvcs_info));

	root->fds_chan.state = CHANNEL_UNINITIALIZED;
	root->cv_twait = get_smf_int_val(LDM_INIT_TO_PROP_NM,
	    0, LDM_TIMEOUT_CEILING, LDM_INIT_WAIT_TIME);

	if (pthread_mutex_init(&root->mt, NULL) != 0 ||
	    pthread_cond_init(&root->cv, NULL) != 0) {
		ldom_free(root, sizeof (struct ldmsvcs_info));
		return (NULL);
	}

	fds_svc_alloc(root);
	fds_svc_reset(root, -1);

	(void) poller_init(root);

	expired = 0;
	twait.tv_sec = time(NULL) + 10;
	twait.tv_nsec = 0;

	(void) pthread_mutex_lock(&root->mt);

	/*
	 * wait for the channel to leave the uninitialized state.  this
	 * should be quick.
	 */
	while (root->fds_chan.state == CHANNEL_UNINITIALIZED && expired == 0)
		expired = pthread_cond_timedwait(&root->cv, &root->mt, &twait);

	if (root->fds_chan.state == CHANNEL_UNUSABLE)
		expired = 1;

	(void) pthread_mutex_unlock(&root->mt);

	(void) pthread_mutex_lock(&mt);
	busy_init = 0;
	(void) pthread_mutex_unlock(&mt);
	(void) pthread_cond_broadcast(&cv);

	(void) atexit(channel_fini);

	if (expired == 0)
		return (root);
	else
		return (NULL);
}

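/*
 * request framing used by the senders below (field layout is assumed from
 * the structures declared in ldmsvcs_utils.h):
 *
 *   ds_hdr_t | ds_data_handle_t | service-specific request
 *
 * every service-specific request begins with a unique req_num obtained
 * from fds_svc_req_num(), and the reply echoes it back;
 * poller_handle_data() assumes the echoed req_num is the first 64-bit
 * word after the ds_data_handle_t so it can match a reply with its
 * waiting requester.
 */
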
static int
sendrecv(struct ldom_hdl *lhp, uint64_t req_num,
    void *msg, size_t msglen, ds_svc_hdl_t *svc_hdl, char *svcname,
    void **resp, size_t *resplen)
{
	struct ldmsvcs_info *lsp;
	fds_svc_t *svc;
	int maxretries, index, i, ier;

	lsp = lhp->lsinfo;
	i = 0;
	maxretries = 1;

	do {
		/*
		 * if any of the calls in this loop fail, retry some number
		 * of times before giving up.
		 */
		if ((svc = fds_svc_lookup(lsp, svcname)) == NULL) {
			(void) pthread_mutex_lock(&lsp->mt);

			if (lsp->fds_chan.state != CHANNEL_READY)
				ier = ETIMEDOUT;	/* channel not ready */
			else
				ier = ENOTSUP;		/* service not ready */

			(void) pthread_mutex_unlock(&lsp->mt);

			continue;
		} else {
			ier = 0;
			*svc_hdl = svc->hdl;
		}

		index = poller_add_pending(req_num);

		if ((ier = fds_send(lsp, msg, msglen)) != 0 ||
		    (ier = poller_recv_data(lhp, req_num, index, resp,
		    resplen)) != 0)
			poller_delete_pending(req_num, index);

	} while (i++ < maxretries && ier != 0);

	ASSERT(ier == 0 || ier == ETIMEDOUT || ier == ENOTSUP);

	return (ier);
}


/*
 * input:
 *	msg_type - requested operation: FMA_CPU_REQ_STATUS or FMA_CPU_REQ_OFFLINE
 *	cpuid - physical cpu id
 *
 * normal return values:
 *	P_OFFLINE - cpu is offline
 *	P_ONLINE - cpu is online
 *
 * abnormal return values:
 *	ETIMEDOUT - LDOM manager is not responding
 *	ENOTSUP - LDOM service for cpu offlining/status is not available
 *	ENOMSG - got an unexpected response from the LDOM cpu service
 */
static int
cpu_request(struct ldom_hdl *lhp, uint32_t msg_type, uint32_t cpuid)
{
	ds_hdr_t *H;
	ds_data_handle_t *D;
	fma_cpu_service_req_t *R;

	char *svcname = LDM_DS_NAME_CPU;
	fma_cpu_resp_t *respmsg;
	void *resp;
	size_t resplen, reqmsglen;
	int rc;

	if (lhp->lsinfo == NULL)
		return (ENOMSG);

	reqmsglen = sizeof (ds_hdr_t) + sizeof (ds_data_handle_t) +
	    sizeof (fma_cpu_service_req_t);

	H = lhp->allocp(reqmsglen);
	D = (void *)((ptrdiff_t)H + sizeof (ds_hdr_t));
	R = (void *)((ptrdiff_t)D + sizeof (ds_data_handle_t));

	H->msg_type = DS_DATA;
	H->payload_len = sizeof (ds_data_handle_t) +
	    sizeof (fma_cpu_service_req_t);

	R->req_num = fds_svc_req_num();
	R->msg_type = msg_type;
	R->cpu_id = cpuid;

	if ((rc = sendrecv(lhp, R->req_num, H, reqmsglen,
	    &D->svc_handle, svcname, &resp, &resplen)) != 0) {
		lhp->freep(H, reqmsglen);
		return (rc);
	}

	lhp->freep(H, reqmsglen);

	ASSERT(resplen == sizeof (fma_cpu_resp_t));
	respmsg = (fma_cpu_resp_t *)resp;

	rc = ENOMSG;
	if (respmsg->result == FMA_CPU_RESP_OK) {
		if (respmsg->status == FMA_CPU_STAT_ONLINE)
			rc = P_ONLINE;
		else if (respmsg->status == FMA_CPU_STAT_OFFLINE)
			rc = P_OFFLINE;
	} else {
		if (msg_type == FMA_CPU_REQ_OFFLINE &&
		    respmsg->status == FMA_CPU_STAT_OFFLINE)
			rc = P_OFFLINE;
	}

	lhp->freep(resp, resplen);

	return (rc);
}

/*
 * input:
 *	msg_type - requested operation: FMA_MEM_REQ_STATUS or FMA_MEM_REQ_RETIRE
 *	pa - starting address of memory page
 *	pgsize - memory page size in bytes
 *
 * normal return values for msg_type == FMA_MEM_REQ_STATUS:
 *	0 - page is retired
 *	EAGAIN - page is scheduled for retirement
 *	EIO - page not scheduled for retirement
 *	EINVAL - error
 *
 * normal return values for msg_type == FMA_MEM_REQ_RETIRE:
 *	0 - success in retiring page
 *	EIO - page is already retired
 *	EAGAIN - page is scheduled for retirement
 *	EINVAL - error
 *
 * abnormal return values (regardless of msg_type)
 *	ETIMEDOUT - LDOM manager is not responding
 *	ENOTSUP - LDOM service for memory retirement/status is not available
 *	ENOMSG - got an unexpected response from the LDOM memory service
 */
static int
mem_request(struct ldom_hdl *lhp, uint32_t msg_type, uint64_t pa,
    uint64_t pgsize)
{
	ds_hdr_t *H;
	ds_data_handle_t *D;
	fma_mem_service_req_t *R;

	char *svcname = LDM_DS_NAME_MEM;
	fma_mem_resp_t *respmsg;
	void *resp;
	size_t resplen, reqmsglen;
	int rc;

	if (lhp->lsinfo == NULL)
		return (ENOMSG);

	reqmsglen = sizeof (ds_hdr_t) + sizeof (ds_data_handle_t) +
	    sizeof (fma_mem_service_req_t);

	H = lhp->allocp(reqmsglen);
	bzero(H, reqmsglen);
	D = (void *)((ptrdiff_t)H + sizeof (ds_hdr_t));
	R = (void *)((ptrdiff_t)D + sizeof (ds_data_handle_t));

	H->msg_type = DS_DATA;
	H->payload_len = sizeof (ds_data_handle_t) +
	    sizeof (fma_mem_service_req_t);

	R->req_num = fds_svc_req_num();
	R->msg_type = msg_type;
	R->real_addr = pa;
	R->length = pgsize;

	if ((rc = sendrecv(lhp, R->req_num, H, reqmsglen,
	    &D->svc_handle, svcname, &resp, &resplen)) != 0) {
		lhp->freep(H, reqmsglen);
		return (rc);
	}

	lhp->freep(H, reqmsglen);

	ASSERT(resplen == sizeof (fma_mem_resp_t));
	respmsg = (fma_mem_resp_t *)resp;

	rc = ENOMSG;
	if (msg_type == FMA_MEM_REQ_STATUS) {
		if (respmsg->result == FMA_MEM_RESP_OK) {
			if (respmsg->status == FMA_MEM_STAT_RETIRED)
				rc = 0;		/* page is retired */
			else if (respmsg->status == FMA_MEM_STAT_NOTRETIRED)
				rc = EIO;	/* page is not scheduled */
		} else if (respmsg->result == FMA_MEM_RESP_FAILURE) {
			if (respmsg->status == FMA_MEM_STAT_NOTRETIRED)
				rc = EAGAIN;	/* page is scheduled */
			else if (respmsg->status == FMA_MEM_STAT_ILLEGAL)
				rc = EINVAL;
		}
	} else if (msg_type == FMA_MEM_REQ_RETIRE) {
		if (respmsg->result == FMA_MEM_RESP_OK) {
			if (respmsg->status == FMA_MEM_STAT_RETIRED)
				rc = 0;		/* is successfully retired */
		} else if (respmsg->result == FMA_MEM_RESP_FAILURE) {
			if (respmsg->status == FMA_MEM_STAT_RETIRED)
				rc = EIO;	/* is already retired */
			else if (respmsg->status == FMA_MEM_STAT_NOTRETIRED)
				rc = EAGAIN;	/* is scheduled to retire */
			else if (respmsg->status == FMA_MEM_STAT_ILLEGAL)
				rc = EINVAL;
		}
	} else if (msg_type == FMA_MEM_REQ_RESURRECT) {
		if (respmsg->result == FMA_MEM_RESP_OK) {
			if (respmsg->status == FMA_MEM_STAT_NOTRETIRED)
				rc = 0;		/* is successfully unretired */
		} else if (respmsg->result == FMA_MEM_RESP_FAILURE) {
			if (respmsg->status == FMA_MEM_STAT_RETIRED)
				rc = EAGAIN;	/* page couldn't be locked */
			else if (respmsg->status == FMA_MEM_STAT_NOTRETIRED)
				rc = EIO;	/* page isn't retired already */
			else if (respmsg->status == FMA_MEM_STAT_ILLEGAL)
				rc = EINVAL;
		}
	}

	lhp->freep(resp, resplen);

	return (rc);
}


/*
 * APIs
 */
int
ldmsvcs_check_channel(void)
{
	struct stat buf;

	if (stat(FDS_VLDC, &buf) == 0)
		return (0);	/* vldc exists */
	else if (errno == ENOENT || errno == ENOTDIR)
		return (1);	/* vldc does not exist */
	else
		return (-1);	/* miscellaneous error */
}

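/*
 * a typical caller (for example an fmd module using libldom) is expected
 * to drive these entry points roughly as follows; the ldom_hdl setup
 * itself is outside the scope of this file:
 *
 *	ldmsvcs_init(lhp);
 *	status = ldmsvcs_cpu_req_status(lhp, cpuid);
 *	...
 *	ldmsvcs_fini(lhp);
 */
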
/*ARGSUSED*/
void
ldmsvcs_init(struct ldom_hdl *lhp)
{
	if (ldmsvcs_check_channel() != 0)
		return;

	lhp->lsinfo = channel_init(lhp);
	poller_add_client();
}


/*ARGSUSED*/
void
ldmsvcs_fini(struct ldom_hdl *lhp)
{
	if (ldmsvcs_check_channel() != 0)
		return;

	poller_remove_client();
}


/*ARGSUSED*/
ssize_t
ldmsvcs_get_core_md(struct ldom_hdl *lhp, uint64_t **buf)
{
	ds_hdr_t *H;
	ds_data_handle_t *D;
	fma_req_pri_t *R;

	char *svcname = LDM_DS_NAME_PRI;
	void *resp;
	size_t resplen, reqmsglen;
	ssize_t buflen;
	int rc;

	if (lhp->lsinfo == NULL)
		return (-1);

	reqmsglen = sizeof (ds_hdr_t) + sizeof (ds_data_handle_t) +
	    sizeof (fma_req_pri_t);

	H = lhp->allocp(reqmsglen);
	D = (void *)((ptrdiff_t)H + sizeof (ds_hdr_t));
	R = (void *)((ptrdiff_t)D + sizeof (ds_data_handle_t));

	H->msg_type = DS_DATA;
	H->payload_len = sizeof (ds_data_handle_t) +
	    sizeof (fma_req_pri_t);

	R->req_num = fds_svc_req_num();

	if ((rc = sendrecv(lhp, R->req_num, H, reqmsglen,
	    &D->svc_handle, svcname, &resp, &resplen)) != 0) {
		lhp->freep(H, reqmsglen);
		errno = rc;
		return (-1);
	}

	lhp->freep(H, reqmsglen);

	/*
	 * resp should contain the req_num immediately followed by the PRI
	 * (the latter may or may not be present).  unfortunately, the
	 * current compiler flags cause a warning for the following
	 * definition
	 *
	 * typedef struct {
	 *	uint64_t req_num;
	 *	uint8_t pri[];
	 * } fma_pri_resp_t;
	 *
	 * so we do not use the struct here.
	 */
	if (resplen <= sizeof (uint64_t)) {
		lhp->freep(resp, resplen);
		if (resplen == sizeof (uint64_t))
			return (0);
		else
			return (-1);
	}

	buflen = resplen - sizeof (uint64_t);
	*buf = lhp->allocp(buflen);

	bcopy((void *)((ptrdiff_t)resp + sizeof (uint64_t)), *buf, buflen);
	lhp->freep(resp, resplen);

	return (buflen);
}


/*
 * see cpu_request() for a description of return values
 */
int
ldmsvcs_cpu_req_status(struct ldom_hdl *lhp, uint32_t cpuid)
{
	return (cpu_request(lhp, FMA_CPU_REQ_STATUS, cpuid));
}


int
ldmsvcs_cpu_req_offline(struct ldom_hdl *lhp, uint32_t cpuid)
{
	return (cpu_request(lhp, FMA_CPU_REQ_OFFLINE, cpuid));
}

int
ldmsvcs_cpu_req_online(struct ldom_hdl *lhp, uint32_t cpuid)
{
	return (cpu_request(lhp, FMA_CPU_REQ_ONLINE, cpuid));
}

/*
 * see mem_request() for a description of return values
 */
int
ldmsvcs_mem_req_status(struct ldom_hdl *lhp, uint64_t pa)
{
	return (mem_request(lhp, FMA_MEM_REQ_STATUS, pa, getpagesize()));
}

int
ldmsvcs_mem_req_retire(struct ldom_hdl *lhp, uint64_t pa)
{
	return (mem_request(lhp, FMA_MEM_REQ_RETIRE, pa, getpagesize()));
}

int
ldmsvcs_mem_req_unretire(struct ldom_hdl *lhp, uint64_t pa)
{
	return (mem_request(lhp, FMA_MEM_REQ_RESURRECT, pa, getpagesize()));
}

int
ldmsvcs_io_req_id(struct ldom_hdl *lhp, uint64_t addr, uint_t type,
    uint64_t *virt_addr, char *name, int name_len, uint64_t *did)
{

	ds_hdr_t *H;
	ds_data_handle_t *D;
	fma_io_req_t *R;

	char *svcname = LDM_DS_NAME_IOD;
	void *resp;
	fma_io_resp_t *iop;
	size_t resplen, reqmsglen;
	int offset;
	int rc;

	if (lhp->lsinfo == NULL)
		return (-1);

	reqmsglen = sizeof (ds_hdr_t) + sizeof (ds_data_handle_t) +
	    sizeof (fma_io_req_t);

	H = lhp->allocp(reqmsglen);
	D = (void *)((ptrdiff_t)H + sizeof (ds_hdr_t));
	R = (void *)((ptrdiff_t)D + sizeof (ds_data_handle_t));

	H->msg_type = DS_DATA;
	H->payload_len = sizeof (ds_data_handle_t) + sizeof (fma_io_req_t);

	R->req_num = fds_svc_req_num();
	R->msg_type = type;
	R->rsrc_address = addr;

	rc = ENOMSG;
	if ((rc = sendrecv(lhp, R->req_num, H, reqmsglen,
	    &D->svc_handle, svcname, &resp, &resplen)) != 0) {
		lhp->freep(H, reqmsglen);
		return (rc);
	}
	lhp->freep(H, reqmsglen);

	/*
	 * resp should contain the req_num, status, virtual addr, domain id
	 * and the domain name.  The domain name may or may not be present.
	 */
	offset = sizeof (fma_io_resp_t);
	if (resplen < offset) {
		lhp->freep(resp, resplen);
		return (-1);
	}

	iop = (fma_io_resp_t *)resp;
	switch (iop->result) {
	case FMA_IO_RESP_OK:
		/* success */
		rc = 0;
		*virt_addr = iop->virt_rsrc_address;
		*did = iop->domain_id;
		if (name == NULL || name_len <= 0)
			break;
		*name = '\0';
		if (resplen > offset) {
			(void) strncpy(name,
			    (char *)((ptrdiff_t)resp + offset), name_len);
		}
		break;
	default:
		rc = -1;
		break;
	}

	lhp->freep(resp, resplen);
	return (rc);
}

/* end file */