1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 #pragma ident "%Z%%M% %I% %E% SMI" 28 29 /* 30 * Interface for Serengeti IOSRAM mailbox 31 * OS <-> SC communication protocol 32 */ 33 34 #include <sys/types.h> 35 #include <sys/systm.h> 36 #include <sys/ddi.h> 37 #include <sys/sunddi.h> 38 #include <sys/kmem.h> 39 #include <sys/uadmin.h> 40 #include <sys/machsystm.h> 41 #include <sys/disp.h> 42 #include <sys/taskq.h> 43 44 #include <sys/sgevents.h> 45 #include <sys/sgsbbc_priv.h> 46 #include <sys/sgsbbc_iosram_priv.h> 47 #include <sys/sgsbbc_mailbox_priv.h> 48 #include <sys/plat_ecc_unum.h> 49 #include <sys/plat_ecc_dimm.h> 50 #include <sys/serengeti.h> 51 #include <sys/fm/util.h> 52 #include <sys/promif.h> 53 #include <sys/plat_datapath.h> 54 55 sbbc_mailbox_t *master_mbox = NULL; 56 57 /* 58 * Panic Shutdown event support 59 */ 60 static kmutex_t panic_hdlr_lock; 61 62 /* 63 * The ID of the soft interrupt which triggers the bringing down of a Domain 64 * when a PANIC_SHUTDOWN event is received. 65 */ 66 static ddi_softintr_t panic_softintr_id = 0; 67 68 static sg_panic_shutdown_t panic_payload; 69 static sbbc_msg_t panic_payload_msg; 70 71 /* 72 * A queue for making sure outgoing messages are in order as ScApp 73 * does not support interleaving messages. 74 */ 75 static kcondvar_t outbox_queue; 76 static kmutex_t outbox_queue_lock; 77 78 /* 79 * Handle unsolicited capability message. 80 */ 81 static plat_capability_data_t cap_payload; 82 static sbbc_msg_t cap_payload_msg; 83 static kmutex_t cap_msg_hdlr_lock; 84 85 /* 86 * Datapath error and fault messages arrive unsolicited. The message data 87 * is contained in a plat_datapath_info_t structure. 88 */ 89 typedef struct { 90 uint8_t type; /* CDS, DX, CP */ 91 uint8_t pad; /* for alignment */ 92 uint16_t cpuid; /* Safari ID of base CPU */ 93 uint32_t t_value; /* SERD timeout threshold (seconds) */ 94 } plat_datapath_info_t; 95 96 /* 97 * Unsolicited datapath error messages are processed via a soft interrupt, 98 * triggered in unsolicited interrupt processing. 99 */ 100 static ddi_softintr_t dp_softintr_id = 0; 101 static kmutex_t dp_hdlr_lock; 102 103 static plat_datapath_info_t dp_payload; 104 static sbbc_msg_t dp_payload_msg; 105 106 static char *dperrtype[] = { 107 DP_ERROR_CDS, 108 DP_ERROR_DX, 109 DP_ERROR_RP 110 }; 111 112 /* 113 * Variable indicating if we are already processing requests. 114 * Setting this value must be protected by outbox_queue_lock. 
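 */

/*
 * Illustrative sketch only (not part of the driver): how a handler might
 * decode the unsolicited datapath payload defined above.  The function
 * name is hypothetical; the real datapath handlers appear later in this
 * file.
 */
#ifdef SGSBBC_MBOX_EXAMPLE
static void
dp_payload_example(plat_datapath_info_t *dpmsg)
{
	/* dpmsg->type indexes dperrtype[]: CDS, DX or RP */
	cmn_err(CE_NOTE, "datapath %s error: cpuid %d, SERD threshold %u sec",
	    dperrtype[dpmsg->type], dpmsg->cpuid, dpmsg->t_value);
}
#endif	/* SGSBBC_MBOX_EXAMPLE */

/*
 * outbox_busy is nonzero while a request owns the outbox.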
115 */ 116 static int outbox_busy = 0; 117 118 /* 119 * local stuff 120 */ 121 static int sbbc_mbox_send_msg(sbbc_msg_t *, int, uint_t, time_t, clock_t); 122 static int sbbc_mbox_recv_msg(); 123 static int mbox_write(struct sbbc_mbox_header *, 124 struct sbbc_fragment *, sbbc_msg_t *); 125 static int mbox_read(struct sbbc_mbox_header *, struct sbbc_fragment *, 126 sbbc_msg_t *); 127 static int mbox_has_free_space(struct sbbc_mbox_header *); 128 static void mbox_skip_next_msg(struct sbbc_mbox_header *); 129 static int mbox_read_header(uint32_t, struct sbbc_mbox_header *); 130 static void mbox_update_header(uint32_t, struct sbbc_mbox_header *); 131 static int mbox_read_frag(struct sbbc_mbox_header *, struct sbbc_fragment *); 132 static struct sbbc_msg_waiter *mbox_find_waiter(uint16_t, uint32_t); 133 static void wakeup_next(void); 134 static uint_t sbbc_panic_shutdown_handler(char *arg); 135 static uint_t sbbc_do_fast_shutdown(char *arg); 136 static void sbbc_mbox_post_reg(sbbc_softstate_t *softsp); 137 static uint_t cap_ecc_msg_handler(char *); 138 static uint_t sbbc_datapath_error_msg_handler(char *arg); 139 static uint_t sbbc_datapath_fault_msg_handler(char *arg); 140 static uint_t sbbc_dp_trans_event(char *arg); 141 142 143 /* 144 * Interrupt handlers 145 */ 146 static int sbbc_mbox_msgin(void); 147 static int sbbc_mbox_msgout(void); 148 static int sbbc_mbox_spacein(void); 149 static int sbbc_mbox_spaceout(void); 150 151 /* 152 * ECC event mailbox message taskq and parameters 153 */ 154 static taskq_t *sbbc_ecc_mbox_taskq = NULL; 155 static int sbbc_ecc_mbox_taskq_errs = 0; 156 static int sbbc_ecc_mbox_send_errs = 0; 157 static int sbbc_ecc_mbox_inval_errs = 0; 158 static int sbbc_ecc_mbox_other_errs = 0; 159 int sbbc_ecc_mbox_err_throttle = ECC_MBOX_TASKQ_ERR_THROTTLE; 160 161 /* 162 * Called when SBBC driver is loaded 163 * Initialise global mailbox stuff, etc 164 */ 165 void 166 sbbc_mbox_init() 167 { 168 int i; 169 170 master_mbox = kmem_zalloc(sizeof (sbbc_mailbox_t), KM_NOSLEEP); 171 if (master_mbox == NULL) { 172 cmn_err(CE_PANIC, "Can't allocate memory for mailbox\n"); 173 } 174 175 /* 176 * mutex'es for the wait-lists 177 */ 178 for (i = 0; i < SBBC_MBOX_MSG_TYPES; i++) { 179 mutex_init(&master_mbox->mbox_wait_lock[i], 180 NULL, MUTEX_DEFAULT, NULL); 181 master_mbox->mbox_wait_list[i] = NULL; 182 } 183 184 for (i = 0; i < SBBC_MBOX_MSG_TYPES; i++) 185 master_mbox->intrs[i] = NULL; 186 187 /* 188 * Two mailbox channels SC -> OS , read-only 189 * OS -> SC, read/write 190 */ 191 master_mbox->mbox_in = kmem_zalloc(sizeof (sbbc_mbox_t), KM_NOSLEEP); 192 if (master_mbox->mbox_in == NULL) { 193 cmn_err(CE_PANIC, 194 "Can't allocate memory for inbound mailbox\n"); 195 } 196 197 master_mbox->mbox_out = kmem_zalloc(sizeof (sbbc_mbox_t), KM_NOSLEEP); 198 if (master_mbox->mbox_out == NULL) { 199 cmn_err(CE_PANIC, 200 "Can't allocate memory for outbound mailbox\n"); 201 } 202 203 mutex_init(&master_mbox->mbox_in->mb_lock, NULL, 204 MUTEX_DEFAULT, NULL); 205 mutex_init(&master_mbox->mbox_out->mb_lock, NULL, 206 MUTEX_DEFAULT, NULL); 207 208 /* 209 * Add PANIC_SHUTDOWN Event mutex 210 */ 211 mutex_init(&panic_hdlr_lock, NULL, MUTEX_DEFAULT, NULL); 212 213 /* Initialize datapath error message handler mutex */ 214 mutex_init(&dp_hdlr_lock, NULL, MUTEX_DEFAULT, NULL); 215 216 /* Initialize capability message handler event mutex */ 217 mutex_init(&cap_msg_hdlr_lock, NULL, MUTEX_DEFAULT, NULL); 218 219 /* 220 * NOT USED YET 221 */ 222 master_mbox->mbox_in->mb_type = 223 
	    master_mbox->mbox_out->mb_type = 0;

	cv_init(&outbox_queue, NULL, CV_DEFAULT, NULL);
	mutex_init(&outbox_queue_lock, NULL, MUTEX_DEFAULT, NULL);

}

/*
 * Called when the SBBC driver is unloaded.
 */
void
sbbc_mbox_fini()
{
	int i;
	int err;

	/*
	 * destroy ECC event mailbox taskq
	 */
	if (sbbc_ecc_mbox_taskq != NULL) {
		taskq_destroy(sbbc_ecc_mbox_taskq);
		sbbc_ecc_mbox_taskq = NULL;
		sbbc_ecc_mbox_taskq_errs = 0;
	}

	/*
	 * unregister interrupts
	 */
	(void) iosram_unreg_intr(SBBC_MAILBOX_IN);
	(void) iosram_unreg_intr(SBBC_MAILBOX_OUT);
	(void) iosram_unreg_intr(SBBC_MAILBOX_SPACE_IN);
	(void) iosram_unreg_intr(SBBC_MAILBOX_SPACE_OUT);

	/*
	 * Remove Panic Shutdown and Datapath Error event support.
	 *
	 * NOTE: If we have not added the soft interrupt handlers for these
	 * then we know that we have not registered the event handlers either.
	 */
	if (panic_softintr_id != 0) {
		ddi_remove_softintr(panic_softintr_id);

		err = sbbc_mbox_unreg_intr(MBOX_EVENT_PANIC_SHUTDOWN,
		    sbbc_panic_shutdown_handler);
		if (err != 0) {
			cmn_err(CE_WARN, "Failed to unreg Panic Shutdown "
			    "handler. Err=%d", err);
		}
	}
	if (dp_softintr_id != 0) {
		ddi_remove_softintr(dp_softintr_id);

		err = sbbc_mbox_unreg_intr(MBOX_EVENT_DP_ERROR,
		    sbbc_datapath_error_msg_handler);
		err |= sbbc_mbox_unreg_intr(MBOX_EVENT_DP_FAULT,
		    sbbc_datapath_fault_msg_handler);
		if (err != 0) {
			cmn_err(CE_WARN, "Failed to unreg Datapath Error "
			    "handler. Err=%d", err);
		}
	}

	/*
	 * Unregister the capability message handler while the mailbox
	 * structures it walks are still valid.
	 */
	err = sbbc_mbox_unreg_intr(INFO_MBOX, cap_ecc_msg_handler);
	if (err != 0) {
		cmn_err(CE_WARN, "Failed to unregister capability message "
		    "handler. Err=%d", err);
	}

	/*
	 * destroy all the mutexes, lists, etc.
	 */

	/*
	 * mutexes for the wait-lists
	 */
	for (i = 0; i < SBBC_MBOX_MSG_TYPES; i++) {
		mutex_destroy(&master_mbox->mbox_wait_lock[i]);
	}

	mutex_destroy(&master_mbox->mbox_in->mb_lock);
	mutex_destroy(&master_mbox->mbox_out->mb_lock);

	mutex_destroy(&panic_hdlr_lock);
	mutex_destroy(&dp_hdlr_lock);
	mutex_destroy(&cap_msg_hdlr_lock);

	kmem_free(master_mbox->mbox_in, sizeof (sbbc_mbox_t));
	kmem_free(master_mbox->mbox_out, sizeof (sbbc_mbox_t));
	kmem_free(master_mbox, sizeof (sbbc_mailbox_t));

	cv_destroy(&outbox_queue);
	mutex_destroy(&outbox_queue_lock);
}

/*
 * Update iosram_sbbc to the new softstate after a tunnel switch.
 * Move software interrupts from the old dip to the new dip.
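 */

/*
 * Illustrative sketch only: the usual pattern for hooking an unsolicited
 * message type, mirroring the panic/datapath/capability registrations in
 * this file.  The handler, payload and lock below are placeholders, not
 * part of this driver.
 */
#ifdef SGSBBC_MBOX_EXAMPLE
static sbbc_msg_t example_payload_msg;
static kmutex_t example_hdlr_lock;	/* mutex_init() this first, as */
					/* sbbc_mbox_init() does for */
					/* cap_msg_hdlr_lock */

static uint_t
example_msg_handler(char *arg)
{
	sbbc_msg_t *msg = (sbbc_msg_t *)arg;

	if (msg == NULL || msg->msg_buf == NULL)
		return (DDI_INTR_UNCLAIMED);

	/* inspect msg->msg_buf / msg->msg_len here */
	return (DDI_INTR_CLAIMED);
}

static void
example_reg_unreg(void)
{
	int err;

	err = sbbc_mbox_reg_intr(INFO_MBOX, example_msg_handler,
	    &example_payload_msg, NULL, &example_hdlr_lock);
	if (err != 0)
		cmn_err(CE_WARN, "example handler registration failed (%d)",
		    err);

	/* ... and on teardown ... */
	(void) sbbc_mbox_unreg_intr(INFO_MBOX, example_msg_handler);
}
#endif	/* SGSBBC_MBOX_EXAMPLE */

/*
 * sbbc_mbox_switch()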
321 */ 322 int 323 sbbc_mbox_switch(sbbc_softstate_t *softsp) 324 { 325 sbbc_intrs_t *intr; 326 int msg_type; 327 int rc = 0; 328 int err; 329 330 if (master_mbox == NULL) 331 return (ENXIO); 332 333 ASSERT(MUTEX_HELD(&master_iosram->iosram_lock)); 334 335 for (msg_type = 0; msg_type < SBBC_MBOX_MSG_TYPES; msg_type++) { 336 337 for (intr = master_mbox->intrs[msg_type]; intr != NULL; 338 intr = intr->sbbc_intr_next) { 339 340 if (intr->sbbc_intr_id) { 341 ddi_remove_softintr(intr->sbbc_intr_id); 342 343 if (ddi_add_softintr(softsp->dip, 344 DDI_SOFTINT_HIGH, 345 &intr->sbbc_intr_id, NULL, NULL, 346 intr->sbbc_handler, intr->sbbc_arg) 347 != DDI_SUCCESS) { 348 349 cmn_err(CE_WARN, 350 "Can't add SBBC mailbox " 351 "softint for msg_type %x\n", 352 msg_type); 353 rc = ENXIO; 354 } 355 } 356 } 357 } 358 359 /* 360 * Add PANIC_SHUTDOWN Event handler 361 */ 362 if (panic_softintr_id) { 363 ddi_remove_softintr(panic_softintr_id); 364 365 err = ddi_add_softintr(softsp->dip, DDI_SOFTINT_LOW, 366 &panic_softintr_id, NULL, NULL, 367 sbbc_do_fast_shutdown, NULL); 368 369 if (err != DDI_SUCCESS) { 370 cmn_err(CE_WARN, "Failed to register Panic " 371 "Shutdown handler. Err=%d", err); 372 (void) sbbc_mbox_unreg_intr(MBOX_EVENT_PANIC_SHUTDOWN, 373 sbbc_panic_shutdown_handler); 374 rc = ENXIO; 375 } 376 377 } 378 /* 379 * Add Datapath Error Event handler 380 */ 381 if (dp_softintr_id) { 382 ddi_remove_softintr(dp_softintr_id); 383 384 err = ddi_add_softintr(softsp->dip, DDI_SOFTINT_LOW, 385 &dp_softintr_id, NULL, NULL, 386 sbbc_dp_trans_event, NULL); 387 388 if (err != DDI_SUCCESS) { 389 cmn_err(CE_WARN, "Failed to register Datapath " 390 "Error Event handler. Err=%d", err); 391 (void) sbbc_mbox_unreg_intr(MBOX_EVENT_DP_ERROR, 392 sbbc_datapath_error_msg_handler); 393 (void) sbbc_mbox_unreg_intr(MBOX_EVENT_DP_FAULT, 394 sbbc_datapath_fault_msg_handler); 395 rc = ENXIO; 396 } 397 398 } 399 400 return (rc); 401 } 402 403 /* 404 * Called when the IOSRAM tunnel is created for the 'chosen' node. 405 * 406 * Read the mailbox header from the IOSRAM 407 * tunnel[SBBC_MAILBOX_KEY] 408 * Register the mailbox interrupt handlers 409 * for messages in/space etc 410 */ 411 int 412 sbbc_mbox_create(sbbc_softstate_t *softsp) 413 { 414 struct sbbc_mbox_header header; 415 416 int i; 417 int err; 418 int rc = 0; 419 420 /* 421 * This function should only be called once when 422 * the chosen node is initialized. 
423 */ 424 ASSERT(MUTEX_HELD(&chosen_lock)); 425 426 if (master_mbox == NULL) 427 return (ENXIO); 428 429 /* 430 * read the header at offset 0 431 * check magic/version etc 432 */ 433 if (rc = iosram_read(SBBC_MAILBOX_KEY, 0, (caddr_t)&header, 434 sizeof (struct sbbc_mbox_header))) { 435 436 return (rc); 437 } 438 439 /* 440 * add the interrupt handlers for the mailbox 441 * interrupts 442 */ 443 for (i = 0; i < MBOX_INTRS; i++) { 444 sbbc_intrfunc_t intr_handler; 445 uint_t *state; 446 kmutex_t *lock; 447 uint32_t intr_num; 448 449 switch (i) { 450 case MBOX_MSGIN_INTR: 451 intr_handler = (sbbc_intrfunc_t)sbbc_mbox_msgin; 452 intr_num = SBBC_MAILBOX_IN; 453 break; 454 case MBOX_MSGOUT_INTR: 455 intr_handler = (sbbc_intrfunc_t)sbbc_mbox_msgout; 456 intr_num = SBBC_MAILBOX_OUT; 457 break; 458 case MBOX_SPACEIN_INTR: 459 intr_handler = (sbbc_intrfunc_t)sbbc_mbox_spacein; 460 intr_num = SBBC_MAILBOX_SPACE_IN; 461 break; 462 case MBOX_SPACEOUT_INTR: 463 intr_handler = (sbbc_intrfunc_t)sbbc_mbox_spaceout; 464 intr_num = SBBC_MAILBOX_SPACE_OUT; 465 break; 466 } 467 state = (uint_t *)&master_mbox->intr_state[i].mbox_intr_state; 468 lock = &master_mbox->intr_state[i].mbox_intr_lock; 469 if (iosram_reg_intr(intr_num, intr_handler, (caddr_t)NULL, 470 state, lock)) { 471 472 cmn_err(CE_WARN, 473 "Can't register Mailbox interrupts \n"); 474 } 475 } 476 477 /* 478 * Add PANIC_SHUTDOWN Event handler 479 */ 480 panic_payload_msg.msg_buf = (caddr_t)&panic_payload; 481 panic_payload_msg.msg_len = sizeof (panic_payload); 482 483 err = ddi_add_softintr(softsp->dip, DDI_SOFTINT_LOW, &panic_softintr_id, 484 NULL, NULL, sbbc_do_fast_shutdown, NULL); 485 486 if (err == DDI_SUCCESS) { 487 err = sbbc_mbox_reg_intr(MBOX_EVENT_PANIC_SHUTDOWN, 488 sbbc_panic_shutdown_handler, &panic_payload_msg, 489 NULL, &panic_hdlr_lock); 490 if (err != 0) { 491 cmn_err(CE_WARN, "Failed to register Panic " 492 "Shutdown handler. Err=%d", err); 493 } 494 495 } else { 496 cmn_err(CE_WARN, "Failed to add Panic Shutdown " 497 "softintr handler"); 498 } 499 500 /* 501 * Add Unsolicited Datapath Error Events handler 502 */ 503 dp_payload_msg.msg_buf = (caddr_t)&dp_payload; 504 dp_payload_msg.msg_len = sizeof (dp_payload); 505 506 err = ddi_add_softintr(softsp->dip, DDI_SOFTINT_LOW, &dp_softintr_id, 507 NULL, NULL, sbbc_dp_trans_event, NULL); 508 509 if (err == DDI_SUCCESS) { 510 err = sbbc_mbox_reg_intr(MBOX_EVENT_DP_ERROR, 511 sbbc_datapath_error_msg_handler, &dp_payload_msg, 512 NULL, &dp_hdlr_lock); 513 err |= sbbc_mbox_reg_intr(MBOX_EVENT_DP_FAULT, 514 sbbc_datapath_fault_msg_handler, &dp_payload_msg, 515 NULL, &dp_hdlr_lock); 516 if (err != 0) { 517 cmn_err(CE_WARN, "Failed to register Datapath " 518 "error handler. Err=%d", err); 519 } 520 521 } else { 522 cmn_err(CE_WARN, "Failed to add Datapath error " 523 "softintr handler"); 524 } 525 526 /* 527 * Register an interrupt handler with the sgbbc driver for the 528 * unsolicited INFO_MBOX response for the capability bitmap. 529 * This message is expected whenever the SC is (re)booted or 530 * failed over. 531 */ 532 cap_payload_msg.msg_buf = (caddr_t)&cap_payload; 533 cap_payload_msg.msg_len = sizeof (cap_payload); 534 535 err = sbbc_mbox_reg_intr(INFO_MBOX, cap_ecc_msg_handler, 536 &cap_payload_msg, NULL, &cap_msg_hdlr_lock); 537 if (err != 0) { 538 cmn_err(CE_WARN, "Failed to register capability message" 539 " handler with Err=%d", err); 540 } 541 542 /* 543 * Now is the opportunity to register 544 * the deferred mbox intrs. 
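 * Handlers hooked via sbbc_mbox_reg_intr() before the chosen IOSRAM
 * existed had their soft interrupt setup deferred; it is completed
 * here by sbbc_mbox_post_reg().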
545 */ 546 sbbc_mbox_post_reg(softsp); 547 548 return (rc); 549 } 550 551 /* 552 * Called when chosen IOSRAM is initialized 553 * to register the deferred mbox intrs. 554 */ 555 static void 556 sbbc_mbox_post_reg(sbbc_softstate_t *softsp) 557 { 558 uint32_t msg_type; 559 sbbc_intrs_t *intr; 560 561 ASSERT(master_mbox); 562 for (msg_type = 0; msg_type < SBBC_MBOX_MSG_TYPES; msg_type++) { 563 intr = master_mbox->intrs[msg_type]; 564 while (intr != NULL) { 565 if (!intr->registered) { 566 SGSBBC_DBG_INTR(CE_CONT, "sbbc_mbox_post_reg: " 567 "postreg for msgtype=%x\n", msg_type); 568 if (ddi_add_softintr(softsp->dip, 569 DDI_SOFTINT_HIGH, &intr->sbbc_intr_id, 570 NULL, NULL, intr->sbbc_handler, 571 (caddr_t)intr->sbbc_arg) 572 != DDI_SUCCESS) { 573 cmn_err(CE_WARN, "Can't add SBBC " 574 "deferred mailbox softint \n"); 575 } else 576 intr->registered = 1; 577 } 578 intr = intr->sbbc_intr_next; 579 } 580 } 581 } 582 583 /* 584 * Register a handler for a message type 585 * NB NB NB 586 * arg must be either NULL or the address of a sbbc_fragment 587 * pointer 588 */ 589 int 590 sbbc_mbox_reg_intr(uint32_t msg_type, sbbc_intrfunc_t intr_handler, 591 sbbc_msg_t *arg, uint_t *state, kmutex_t *lock) 592 { 593 sbbc_intrs_t *intr, *previntr; 594 int rc = 0; 595 596 /* 597 * Validate arguments 598 */ 599 if (msg_type >= SBBC_MBOX_MSG_TYPES) 600 return (EINVAL); 601 602 /* 603 * Verify that we have already set up the master sbbc 604 */ 605 if (master_iosram == NULL || master_mbox == NULL) 606 return (ENXIO); 607 608 mutex_enter(&master_iosram->iosram_lock); 609 msg_type &= SBBC_MSG_TYPE_MASK; 610 previntr = intr = master_mbox->intrs[msg_type]; 611 612 /* Find the end of the link list */ 613 while (intr != NULL && intr->sbbc_handler != intr_handler) { 614 615 previntr = intr; 616 intr = intr->sbbc_intr_next; 617 } 618 619 /* Return if the handler has been registered */ 620 if (intr != NULL) { 621 mutex_exit(&master_iosram->iosram_lock); 622 return (EBUSY); 623 } 624 625 /* 626 * The requested handler has not been installed. 627 * Allocate some memory. 628 */ 629 intr = kmem_zalloc(sizeof (sbbc_intrs_t), KM_SLEEP); 630 631 intr->sbbc_handler = intr_handler; 632 intr->sbbc_arg = (caddr_t)arg; 633 intr->sbbc_intr_state = state; 634 intr->sbbc_intr_lock = lock; 635 intr->sbbc_intr_next = NULL; 636 /* not registered yet */ 637 intr->registered = 0; 638 639 if (previntr != NULL) 640 previntr->sbbc_intr_next = intr; 641 else 642 master_mbox->intrs[msg_type] = intr; 643 644 /* 645 * register only if the chosen IOSRAM is 646 * initialized, otherwise defer the registration 647 * until IOSRAM initialization. 
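 * Deferred registrations are completed later by sbbc_mbox_post_reg()
 * once the chosen IOSRAM tunnel has been set up.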
648 */ 649 if (master_iosram->iosram_sbbc) { 650 if (ddi_add_softintr(master_iosram->iosram_sbbc->dip, 651 DDI_SOFTINT_HIGH, 652 &intr->sbbc_intr_id, NULL, NULL, 653 intr_handler, (caddr_t)arg) != DDI_SUCCESS) { 654 cmn_err(CE_WARN, "Can't add SBBC mailbox softint \n"); 655 rc = ENXIO; 656 } else 657 intr->registered = 1; 658 } else { 659 SGSBBC_DBG_INTR(CE_CONT, "sbbc_mbox_reg_intr: " 660 "deferring msg=%x registration\n", msg_type); 661 } 662 663 mutex_exit(&master_iosram->iosram_lock); 664 665 return (rc); 666 } 667 668 /* 669 * Unregister a handler for a message type 670 */ 671 int 672 sbbc_mbox_unreg_intr(uint32_t msg_type, sbbc_intrfunc_t intr_handler) 673 { 674 sbbc_intrs_t *intr, *previntr, *nextintr; 675 676 /* 677 * Verify that we have already set up the master sbbc 678 */ 679 if (master_iosram == NULL || master_mbox == NULL) 680 return (ENXIO); 681 682 msg_type &= SBBC_MSG_TYPE_MASK; 683 684 if (msg_type >= SBBC_MBOX_MSG_TYPES || 685 intr_handler == (sbbc_intrfunc_t)NULL) { 686 687 return (EINVAL); 688 } 689 690 mutex_enter(&master_iosram->iosram_lock); 691 692 previntr = intr = master_mbox->intrs[msg_type]; 693 694 /* 695 * No handlers installed 696 */ 697 if (intr == NULL) { 698 mutex_exit(&master_iosram->iosram_lock); 699 return (EINVAL); 700 } 701 702 while (intr != NULL) { 703 704 /* Save the next pointer */ 705 nextintr = intr->sbbc_intr_next; 706 707 /* Found a match. Remove it from the link list */ 708 if (intr->sbbc_handler == intr_handler) { 709 710 if (intr->sbbc_intr_id) 711 ddi_remove_softintr(intr->sbbc_intr_id); 712 713 kmem_free(intr, sizeof (sbbc_intrs_t)); 714 715 if (previntr != master_mbox->intrs[msg_type]) 716 previntr->sbbc_intr_next = nextintr; 717 else 718 master_mbox->intrs[msg_type] = nextintr; 719 720 break; 721 } 722 723 /* update pointers */ 724 previntr = intr; 725 intr = nextintr; 726 } 727 728 mutex_exit(&master_iosram->iosram_lock); 729 730 return (0); 731 } 732 /* 733 * Interrupt handlers - one for each mailbox 734 * interrupt type 735 */ 736 737 /* 738 * mailbox message received 739 */ 740 static int 741 sbbc_mbox_msgin() 742 { 743 mutex_enter(&master_mbox->intr_state[MBOX_MSGIN_INTR].mbox_intr_lock); 744 master_mbox->intr_state[MBOX_MSGIN_INTR].mbox_intr_state = 745 SBBC_INTR_RUNNING; 746 mutex_exit(&master_mbox->intr_state[MBOX_MSGIN_INTR].mbox_intr_lock); 747 748 /* 749 * We are only locking the InBox here, not the whole 750 * mailbox. This is based on the assumption of 751 * complete separation of mailboxes - outbox is 752 * read/write, inbox is read-only. 753 * We only ever update the producer for the 754 * outbox and the consumer for the inbox. 755 */ 756 mutex_enter(&master_mbox->mbox_in->mb_lock); 757 758 for (;;) { 759 /* 760 * Get as many incoming messages as possible 761 */ 762 while (sbbc_mbox_recv_msg() == 0) 763 /* empty */; 764 765 /* 766 * send interrupt to SC to let it know that 767 * space is available over here 768 */ 769 (void) iosram_send_intr(SBBC_MAILBOX_SPACE_IN); 770 771 mutex_enter(&master_mbox->intr_state[MBOX_MSGIN_INTR]. 772 mbox_intr_lock); 773 /* 774 * Read the inbox one more time to see if new messages 775 * has come in after we exit the loop. 776 */ 777 if (sbbc_mbox_recv_msg() == 0) { 778 mutex_exit(&master_mbox->intr_state[MBOX_MSGIN_INTR]. 779 mbox_intr_lock); 780 } else { 781 master_mbox->intr_state[MBOX_MSGIN_INTR]. 782 mbox_intr_state = SBBC_INTR_IDLE; 783 mutex_exit(&master_mbox->intr_state[MBOX_MSGIN_INTR]. 
784 mbox_intr_lock); 785 break; 786 } 787 } 788 789 mutex_exit(&master_mbox->mbox_in->mb_lock); 790 791 return (DDI_INTR_CLAIMED); 792 } 793 794 /* 795 * mailbox message sent 796 */ 797 static int 798 sbbc_mbox_msgout() 799 { 800 /* 801 * Should never get this 802 */ 803 804 return (DDI_INTR_CLAIMED); 805 } 806 807 /* 808 * space in the inbox 809 */ 810 static int 811 sbbc_mbox_spacein() 812 { 813 /* 814 * Should never get this 815 */ 816 817 return (DDI_INTR_CLAIMED); 818 } 819 820 /* 821 * space in the outbox 822 */ 823 static int 824 sbbc_mbox_spaceout() 825 { 826 /* 827 * cv_broadcast() the threads waiting on the 828 * outbox's mb_full 829 */ 830 831 mutex_enter(&master_mbox->mbox_out->mb_lock); 832 833 cv_broadcast(&master_mbox->mbox_out->mb_full); 834 835 mutex_exit(&master_mbox->mbox_out->mb_lock); 836 837 return (DDI_INTR_CLAIMED); 838 } 839 840 /* 841 * Client Interface 842 * 843 * The main interface will be 844 * 845 * sbbc_mbox_request_response(sbbc_msg_t *request, 846 * sbbc_msg_t *response, time_t wait_time) 847 * 848 * 1) the client calls request_response 849 * 2) a new unique msg ID is assigned for that msg 850 * 3) if there is space available in the outbox 851 * - the request msg is written to the mbox_out mailbox 852 * and the mailbox info updated. 853 * - allocate a sbbc_msg_waiter struct for this 854 * message, initialise the w_cv condvar. 855 * - get the mailbox mbox_wait_lock mutex for this 856 * message type 857 * - the response msg is put on the mbox_wait_list for 858 * that message type to await the SC's response 859 * - wait on the w_cv condvar protected by the 860 * mbox_wait_lock 861 * - SBBC_MAILBOX_OUT interrupt is sent to the SC 862 * 863 * 4) if no space in the outbox, 864 * - the request message blocks waiting 865 * for a SBBC_MAILBOX_SPACE_OUT interrupt 866 * It will block on the mailbox mb_full condvar. 867 * - go to (3) above 868 * 5) When we get a SBBC_MAILBOX_IN interrupt. 869 * - read the message ID of the next message (FIFO) 870 * - find that ID on the wait list 871 * - no wait list entry => unsolicited message. If theres 872 * a handler, trigger it 873 * - if someone is waiting, read the message in from 874 * SRAM, handling fragmentation, wraparound, etc 875 * - if the whole message has been read, signal 876 * the waiter 877 * - read next message until mailbox empty 878 * - send SBBC_MAILBOX_SPACE_IN interrupt to the SC 879 * 880 * 6) If a response is required and none is received, the client 881 * will timeout after <wait_time> seconds and the message 882 * status will be set to ETIMEDOUT. 
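 *
 * A minimal client sketch of this interface follows.  It is illustrative
 * only: the INFO_MBOX message type and the single-word payload are
 * assumptions made for the example, not a real mailbox client.
 */
#ifdef SGSBBC_MBOX_EXAMPLE
static int
example_request_response(void)
{
	sbbc_msg_t request, response;
	uint32_t req_data = 0, resp_data = 0;

	bzero(&request, sizeof (request));
	bzero(&response, sizeof (response));

	request.msg_type.type = INFO_MBOX;	/* placeholder type */
	request.msg_buf = (caddr_t)&req_data;
	request.msg_len = sizeof (req_data);

	response.msg_type.type = INFO_MBOX;
	response.msg_buf = (caddr_t)&resp_data;
	response.msg_len = sizeof (resp_data);

	/* Block for up to 30 seconds waiting for the SC to respond */
	return (sbbc_mbox_request_response(&request, &response, 30));
}
#endif	/* SGSBBC_MBOX_EXAMPLE */

/*
 * sbbc_mbox_request_response()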
883 */ 884 int 885 sbbc_mbox_request_response(sbbc_msg_t *request, 886 sbbc_msg_t *response, time_t wait_time) 887 { 888 889 struct sbbc_msg_waiter *waiter; 890 uint_t msg_id; 891 int rc = 0; 892 int flags; 893 uint16_t msg_type; 894 clock_t stop_time; 895 clock_t clockleft; 896 kmutex_t *mbox_wait_lock; 897 kmutex_t *mb_lock; 898 static fn_t f = "sbbc_mbox_request_response"; 899 900 if ((request == NULL) || 901 (request->msg_type.type >= SBBC_MBOX_MSG_TYPES) || 902 ((response != NULL) && 903 (response->msg_type.type >= SBBC_MBOX_MSG_TYPES))) 904 return (EINVAL); 905 906 msg_type = request->msg_type.type; 907 908 /* 909 * Verify that we have already set up the master sbbc 910 */ 911 if (master_mbox == NULL) 912 return (ENXIO); 913 mbox_wait_lock = &master_mbox->mbox_wait_lock[msg_type]; 914 915 flags = WAIT_FOR_REPLY|WAIT_FOR_SPACE; 916 917 /* 918 * We want to place a lower limit on the shortest amount of time we 919 * will wait before timing out while communicating with the SC via 920 * the mailbox. 921 */ 922 if (wait_time < sbbc_mbox_min_timeout) 923 wait_time = sbbc_mbox_default_timeout; 924 925 stop_time = ddi_get_lbolt() + wait_time * drv_usectohz(MICROSEC); 926 927 /* 928 * If there is a message being processed, sleep until it is our turn. 929 */ 930 mutex_enter(&outbox_queue_lock); 931 932 /* 933 * allocate an ID for this message, let it wrap 934 * around transparently. 935 * msg_id == 0 is unsolicited message 936 */ 937 msg_id = ++(master_mbox->mbox_msg_id); 938 if (msg_id == 0) 939 msg_id = ++(master_mbox->mbox_msg_id); 940 941 SGSBBC_DBG_MBOX("%s: msg_id = 0x%x, msg_len = 0x%x\n", 942 f, msg_id, request->msg_len); 943 944 /* 945 * A new message can actually grab the lock before the thread 946 * that has just been signaled. Therefore, we need to double 947 * check to make sure that outbox_busy is not already set 948 * after we wake up. 949 * 950 * Potentially this could mean starvation for certain unfortunate 951 * threads that keep getting woken up and putting back to sleep. 952 * But the window of such contention is very small to begin with. 953 */ 954 while (outbox_busy) { 955 956 clockleft = cv_timedwait(&outbox_queue, &outbox_queue_lock, 957 stop_time); 958 959 SGSBBC_DBG_MBOX("%s: msg_id = 0x%x is woken up\n", f, msg_id); 960 961 /* 962 * If we have timed out, set status to ETIMEOUT and return. 963 */ 964 if (clockleft < 0) { 965 SGSBBC_DBG_MBOX("%s: msg_id = 0x%x has timed out\n", 966 f, msg_id); 967 cmn_err(CE_NOTE, 968 "Timed out obtaining SBBC outbox lock"); 969 request->msg_status = ETIMEDOUT; 970 if (response != NULL) 971 response->msg_status = ETIMEDOUT; 972 mutex_exit(&outbox_queue_lock); 973 return (ETIMEDOUT); 974 } 975 } 976 977 outbox_busy = 1; 978 mutex_exit(&outbox_queue_lock); 979 980 /* 981 * We are only locking the OutBox from here, not the whole 982 * mailbox. This is based on the assumption of 983 * complete separation of mailboxes - outbox is 984 * read/write, inbox is read-only. 985 * We only ever update the producer for the 986 * outbox and the consumer for the inbox. 987 */ 988 mb_lock = &master_mbox->mbox_out->mb_lock; 989 mutex_enter(mb_lock); 990 991 /* 992 * No response expected ? 
Just send the message and return 993 */ 994 if (response == NULL) { 995 rc = sbbc_mbox_send_msg(request, flags, msg_id, wait_time, 996 stop_time); 997 SGSBBC_DBG_MBOX("%s: msg_id = 0x%x send rc = %d\n", 998 f, msg_id, rc); 999 1000 wakeup_next(); 1001 1002 mutex_exit(mb_lock); 1003 request->msg_status = rc; 1004 return (rc); 1005 } 1006 1007 /* 1008 * allocate/initialise a waiter 1009 */ 1010 waiter = kmem_zalloc(sizeof (struct sbbc_msg_waiter), KM_NOSLEEP); 1011 1012 if (waiter == (struct sbbc_msg_waiter *)NULL) { 1013 cmn_err(CE_WARN, "SBBC Mailbox can't allocate waiter\n"); 1014 1015 wakeup_next(); 1016 1017 mutex_exit(mb_lock); 1018 return (ENOMEM); 1019 } 1020 1021 waiter->w_id = 0; /* Until we get an ID from the send */ 1022 waiter->w_msg = response; 1023 waiter->w_msg->msg_status = EINPROGRESS; 1024 1025 cv_init(&waiter->w_cv, NULL, CV_DEFAULT, NULL); 1026 1027 rc = sbbc_mbox_send_msg(request, flags, msg_id, wait_time, stop_time); 1028 1029 wakeup_next(); 1030 1031 if (rc != 0) { 1032 1033 request->msg_status = response->msg_status = rc; 1034 mutex_exit(mb_lock); 1035 1036 /* Free the waiter */ 1037 cv_destroy(&waiter->w_cv); 1038 kmem_free(waiter, sizeof (struct sbbc_msg_waiter)); 1039 1040 SGSBBC_DBG_MBOX("%s: msg_id = 0x%x send rc = %d\n", 1041 f, msg_id, rc); 1042 1043 return (rc); 1044 } 1045 1046 waiter->w_id = msg_id; 1047 1048 /* 1049 * Lock this waiter list and add the waiter 1050 */ 1051 mutex_enter(mbox_wait_lock); 1052 1053 if (master_mbox->mbox_wait_list[msg_type] == NULL) { 1054 master_mbox->mbox_wait_list[msg_type] = waiter; 1055 waiter->w_next = NULL; 1056 } else { 1057 struct sbbc_msg_waiter *tmp; 1058 tmp = master_mbox->mbox_wait_list[msg_type]; 1059 master_mbox->mbox_wait_list[msg_type] = waiter; 1060 waiter->w_next = tmp; 1061 } 1062 1063 mutex_exit(mb_lock); 1064 1065 /* 1066 * wait here for a response to our message 1067 * holding the mbox_wait_lock for the list ensures 1068 * that the interrupt handler can't get in before 1069 * we block. 1070 * NOTE: We use the request msg_type for the 1071 * the wait_list. This ensures that the 1072 * msg_type won't change. 1073 */ 1074 clockleft = cv_timedwait(&waiter->w_cv, mbox_wait_lock, stop_time); 1075 1076 SGSBBC_DBG_MBOX("%s: msg_id = 0x%x is woken up for response\n", 1077 f, msg_id); 1078 1079 /* 1080 * If we have timed out, set msg_status to ETIMEDOUT, 1081 * and remove the waiter from the waiter list. 1082 */ 1083 if (clockleft < 0) { 1084 /* 1085 * Remove the waiter from the waiter list. 1086 * If we can't find the waiter in the list, 1087 * 1. msg_status == EINPROGRESS 1088 * It is being processed. We will give it 1089 * a chance to finish. 1090 * 2. msg_status != EINPROGRESS 1091 * It is done processing. We can safely 1092 * remove it. 1093 * If we can find the waiter, it has timed out. 
1094 */ 1095 SGSBBC_DBG_MBOX("%s: msg_id = 0x%x has timed out\n", 1096 f, msg_id); 1097 if (mbox_find_waiter(msg_type, msg_id) == NULL) { 1098 if (waiter->w_msg->msg_status == EINPROGRESS) { 1099 SGSBBC_DBG_MBOX("%s: Waiting for msg_id = 0x%x " 1100 "complete.\n", f, msg_id); 1101 cv_wait(&waiter->w_cv, mbox_wait_lock); 1102 } 1103 } else { 1104 SGSBBC_DBG_MBOX("%s: setting msg_id = 0x%x " 1105 "to ETIMEDOUT\n", f, msg_id); 1106 cmn_err(CE_NOTE, "Timed out waiting for SC response"); 1107 rc = waiter->w_msg->msg_status = ETIMEDOUT; 1108 } 1109 } 1110 1111 /* 1112 * lose the waiter 1113 */ 1114 cv_destroy(&waiter->w_cv); 1115 kmem_free(waiter, sizeof (struct sbbc_msg_waiter)); 1116 1117 mutex_exit(mbox_wait_lock); 1118 1119 return (rc); 1120 1121 } 1122 1123 static void 1124 wakeup_next() 1125 { 1126 /* 1127 * Done sending the current message or encounter an error. 1128 * Wake up the one request in the outbox_queue. 1129 */ 1130 mutex_enter(&outbox_queue_lock); 1131 outbox_busy = 0; 1132 cv_signal(&outbox_queue); 1133 mutex_exit(&outbox_queue_lock); 1134 } 1135 1136 1137 /* ARGSUSED */ 1138 int 1139 sbbc_mbox_send_msg(sbbc_msg_t *msg, int flags, uint_t msg_id, 1140 time_t wait_time, clock_t stop_time) 1141 { 1142 struct sbbc_mbox_header header; 1143 struct sbbc_fragment frag; 1144 int rc = 0; 1145 int bytes_written; 1146 uint32_t intr_enabled; 1147 clock_t clockleft; 1148 static fn_t f = "sbbc_mbox_send_msg"; 1149 1150 /* 1151 * First check that the SC has enabled its mailbox 1152 */ 1153 rc = iosram_read(SBBC_INTR_SC_ENABLED_KEY, 0, 1154 (caddr_t)&intr_enabled, sizeof (intr_enabled)); 1155 1156 if (rc) 1157 return (rc); 1158 1159 if (!(intr_enabled & SBBC_MAILBOX_OUT)) 1160 return (ENOTSUP); 1161 1162 /* 1163 * read the mailbox header 1164 */ 1165 if (rc = mbox_read_header(SBBC_OUTBOX, &header)) 1166 return (rc); 1167 1168 /* 1169 * Allocate/initialise a fragment for this message 1170 */ 1171 frag.f_id = msg_id; 1172 frag.f_type = msg->msg_type; 1173 frag.f_status = 0; 1174 frag.f_total_len = msg->msg_len; 1175 frag.f_frag_offset = 0; 1176 /* 1177 * Throw in the message data 1178 */ 1179 bcopy(&msg->msg_data, &frag.f_data, sizeof (msg->msg_data)); 1180 1181 /* 1182 * If not enough space is available 1183 * write what we can and wait for 1184 * an interrupt to tell us that more 1185 * space is available 1186 */ 1187 1188 bytes_written = 0; 1189 do { 1190 rc = mbox_write(&header, &frag, msg); 1191 1192 if (rc != 0 && rc != ENOSPC) { 1193 return (rc); 1194 } 1195 1196 if (rc == 0) { 1197 /* 1198 * Always tell the SC when there is a message. 1199 * Ignore returned value as not being able to 1200 * signal the SC about space available does 1201 * not stop the SC from processing input. 
1202 */ 1203 (void) iosram_send_intr(SBBC_MAILBOX_OUT); 1204 } 1205 1206 bytes_written += frag.f_frag_len; 1207 frag.f_frag_offset += frag.f_frag_len; 1208 if ((bytes_written < msg->msg_len) || (rc == ENOSPC)) { 1209 1210 if (mbox_has_free_space(&header) <= 1211 sizeof (struct sbbc_fragment)) { 1212 1213 int tmprc; 1214 1215 clockleft = cv_timedwait( 1216 &master_mbox->mbox_out->mb_full, 1217 &master_mbox->mbox_out->mb_lock, 1218 stop_time); 1219 1220 /* Return ETIMEDOUT if we timed out */ 1221 if (clockleft < 0) { 1222 SGSBBC_DBG_MBOX("%s: msg_id = 0x%x " 1223 "has timed out\n", f, msg_id); 1224 cmn_err(CE_NOTE, 1225 "Timed out sending message " 1226 "to SC"); 1227 return (ETIMEDOUT); 1228 } 1229 1230 /* Read updated header from IOSRAM */ 1231 if (tmprc = mbox_read_header(SBBC_OUTBOX, 1232 &header)) { 1233 1234 return (tmprc); 1235 } 1236 } 1237 } 1238 1239 SGSBBC_DBG_MBOX("%s: msg_id = 0x%x, bytes_written = 0x%x, " 1240 "msg_len = 0x%x\n", f, 1241 msg_id, bytes_written, msg->msg_len); 1242 } while ((bytes_written < msg->msg_len) || (rc == ENOSPC)); 1243 1244 /* 1245 * this could be a spurious interrupt 1246 * as the SC may be merrily readings its 1247 * mail even as send, but what can you do ? No 1248 * synchronization method between SC <-> OS 1249 * SRAM data eaters means that this is inevitable. 1250 * It would take a bigger brain to fix this. 1251 * 1252 */ 1253 (void) iosram_send_intr(SBBC_MAILBOX_OUT); 1254 1255 return (rc); 1256 } 1257 1258 1259 /* 1260 * get next message 1261 * Read the next message from SRAM 1262 * Check if theres an entry on the wait queue 1263 * for this message 1264 * If yes, read the message in and signal 1265 * the waiter (if all the message has been received) 1266 * No, its unsolicited, if theres a handler installed for 1267 * this message type trigger it, otherwise toss 1268 * the message 1269 */ 1270 int 1271 sbbc_mbox_recv_msg() 1272 { 1273 struct sbbc_mbox_header header; 1274 struct sbbc_fragment frag; 1275 sbbc_msg_t tmpmsg; /* Temporary msg storage */ 1276 int rc = 0, i, first_hdlr, last_hdlr; 1277 uint32_t intr_enabled; 1278 sbbc_intrs_t *intr; 1279 struct sbbc_msg_waiter *waiter; 1280 uint16_t type; /* frag.f_type.type */ 1281 uint32_t f_id; /* frag.f_id */ 1282 uint32_t f_frag_offset, f_frag_len; 1283 kmutex_t *mbox_wait_lock; 1284 static fn_t f = "sbbc_mbox_recv_msg"; 1285 1286 /* 1287 * First check that the OS has enabled its mailbox 1288 */ 1289 rc = iosram_read(SBBC_SC_INTR_ENABLED_KEY, 0, 1290 (caddr_t)&intr_enabled, sizeof (intr_enabled)); 1291 1292 if (rc) { 1293 return (rc); 1294 } 1295 1296 if (!(intr_enabled & SBBC_MAILBOX_IN)) 1297 return (ENOTSUP); 1298 1299 /* 1300 * read the mailbox header 1301 */ 1302 if (rc = mbox_read_header(SBBC_INBOX, &header)) 1303 return (rc); 1304 1305 /* 1306 * check if any messages available. If 1307 * consumer == producer then no more 1308 * messages 1309 */ 1310 if ((header.mailboxes[SBBC_INBOX].mbox_consumer == 1311 header.mailboxes[SBBC_INBOX].mbox_producer)) { 1312 1313 return (-1); 1314 } 1315 1316 /* 1317 * read the fragment header for this message 1318 */ 1319 if (rc = mbox_read_frag(&header, &frag)) { 1320 1321 return (rc); 1322 } 1323 1324 /* Save to local variable for easy reading */ 1325 type = frag.f_type.type; 1326 f_id = frag.f_id; 1327 1328 SGSBBC_DBG_MBOX("%s: f_id = 0x%x\n", f, f_id); 1329 1330 /* 1331 * check the message type. 
If it's invalid, we will
	 * just toss the message.
	 */
	if (type >= SBBC_MBOX_MSG_TYPES) {
		goto done;
	}

	/*
	 * if there are no waiters for this message type, and there's
	 * no message handler installed, toss it.
	 *
	 * Unsolicited messages (f_id == 0) are tricky because we won't know
	 * when the handler has finished so that we can
	 * remove the message, so, given the small brains in operation
	 * here, what we do is restrict junk mail to zero-length
	 * messages, then we allocate a fragment using kmem,
	 * make a copy of the fragment in this memory,
	 * pass this pointer to the fragment, then skip the message.
	 * So even if there is data associated with the junkmail,
	 * the message handler doesn't get to see it.
	 * We expect the message handler to free the memory.
	 */
	if (type == SBBC_BROADCAST_MSG) {
		/*
		 * Broadcast message, trigger all handlers
		 */
		first_hdlr = 0;
		last_hdlr = SBBC_MBOX_MSG_TYPES - 1;
	} else if ((master_mbox->mbox_wait_list[type] == NULL) || (f_id == 0)) {
		/*
		 * There are no waiters, or it's unsolicited anyway
		 */
		first_hdlr = last_hdlr = type;
	} else {
		/*
		 * check the fragment message type, look at the wait list for
		 * that type to find its associated message
		 *
		 * First find the message. If we get it, take it off
		 * the waiter list and read the data. We will
		 * put it back on the list if necessary.
		 * This avoids the problem of a second message-in
		 * interrupt playing with this waiter.
		 * This will cut down on mutex spinning on the wait
		 * list locks; also, we expect the next fragment to be
		 * for this message, so we might as well have it at the
		 * start of the list.
		 *
		 * It's possible that a return message has a different type
		 * (possible but not recommended!). So, if we don't find
		 * it on the list pointed to by the request type,
		 * go look at all the other lists.
		 */

		mbox_wait_lock = &master_mbox->mbox_wait_lock[type];

		mutex_enter(mbox_wait_lock);
		if ((waiter = mbox_find_waiter(type, f_id)) == NULL) {
			for (i = 0; i < SBBC_MBOX_MSG_TYPES; i++) {
				if (i == type)
					continue;
				if ((waiter = mbox_find_waiter(i, f_id))
				    != NULL)
					break;
			}
		}
		mutex_exit(mbox_wait_lock);

		if (waiter == NULL) {
			rc = -1;
			/*
			 * there's no waiter for this message, but that
			 * could mean that this message is the start of
			 * a send/receive to us, and every 'first' request
			 * must by definition be unsolicited,
			 * so trigger the handler
			 */
			first_hdlr = last_hdlr = type;
		} else {
			SGSBBC_DBG_MBOX("%s: f_id = 0x%x, msg_id = 0x%x, "
			    "msg_len = 0x%x\n",
			    f, f_id, waiter->w_id,
			    waiter->w_msg->msg_len);

			rc = mbox_read(&header, &frag, waiter->w_msg);

			SGSBBC_DBG_MBOX("%s: f_id = 0x%x, offset = 0x%x, "
			    "len = 0x%x, total_len = 0x%x\n",
			    f, frag.f_id, frag.f_frag_offset,
			    frag.f_frag_len, frag.f_total_len);

			if (rc || ((frag.f_frag_offset + frag.f_frag_len) ==
			    frag.f_total_len)) {
				/*
				 * failed or all the message has been read in
				 */
				mutex_enter(mbox_wait_lock);
				waiter->w_msg->msg_status = (rc == ENOMEM)?
1429 rc : frag.f_status; 1430 SGSBBC_DBG_MBOX("%s: msg_status = %d\n", 1431 f, waiter->w_msg->msg_status); 1432 cv_signal(&waiter->w_cv); 1433 mutex_exit(mbox_wait_lock); 1434 1435 } else { 1436 /* 1437 * back on the wait list 1438 */ 1439 mutex_enter(mbox_wait_lock); 1440 if (waiter->w_msg->msg_status == ETIMEDOUT) { 1441 cv_signal(&waiter->w_cv); 1442 mutex_exit(mbox_wait_lock); 1443 goto done; 1444 } 1445 1446 if (master_mbox->mbox_wait_list[type] == NULL) { 1447 master_mbox->mbox_wait_list[type] = 1448 waiter; 1449 waiter->w_next = NULL; 1450 } else { 1451 struct sbbc_msg_waiter *tmp; 1452 tmp = master_mbox->mbox_wait_list[type]; 1453 master_mbox->mbox_wait_list[type] = 1454 waiter; 1455 waiter->w_next = tmp; 1456 } 1457 mutex_exit(mbox_wait_lock); 1458 } 1459 goto done; 1460 } 1461 } 1462 1463 /* 1464 * Set msg_len to f_frag_len so msg_buf will be large enough 1465 * to contain what is in the fragment. 1466 */ 1467 f_frag_len = tmpmsg.msg_len = frag.f_frag_len; 1468 /* 1469 * Save the f_frag_offset for copying into client's space. 1470 * Set frag.f_frag_offset to 0 so we don't have to allocate 1471 * too much space for reading in the message. 1472 */ 1473 f_frag_offset = frag.f_frag_offset; 1474 frag.f_frag_offset = 0; 1475 1476 /* Allocate space for msg_buf */ 1477 if (f_frag_len != 0 && (tmpmsg.msg_buf = 1478 kmem_alloc(f_frag_len, KM_NOSLEEP)) == NULL) { 1479 1480 rc = ENOMEM; 1481 cmn_err(CE_WARN, "Can't allocate memory" 1482 " for unsolicited messages\n"); 1483 } else { 1484 /* Save the incoming message in tmpmsg */ 1485 rc = mbox_read(&header, &frag, &tmpmsg); 1486 1487 for (i = first_hdlr; rc == 0 && i <= last_hdlr; i++) { 1488 1489 intr = master_mbox->intrs[i]; 1490 if ((intr == NULL) || (intr->sbbc_intr_id == 0)) { 1491 continue; 1492 } 1493 1494 while (intr != NULL) { 1495 /* 1496 * If the client has allocated enough space 1497 * for incoming message, copy into the 1498 * client buffer. 1499 */ 1500 sbbc_msg_t *arg = (sbbc_msg_t *)intr->sbbc_arg; 1501 if (arg != (void *)NULL) { 1502 if (arg->msg_len >= frag.f_total_len) { 1503 if (f_frag_len > 0) 1504 bcopy(tmpmsg.msg_buf, 1505 arg->msg_buf + 1506 f_frag_offset, 1507 f_frag_len); 1508 } else { 1509 arg->msg_status = ENOMEM; 1510 } 1511 } 1512 1513 /* 1514 * Only trigger the interrupt when we 1515 * have received the whole message. 
1516 */ 1517 if (f_frag_offset + f_frag_len == 1518 frag.f_total_len) { 1519 1520 ddi_trigger_softintr( 1521 intr->sbbc_intr_id); 1522 } 1523 intr = intr->sbbc_intr_next; 1524 } 1525 } 1526 1527 if (f_frag_len != 0) { 1528 /* Don't forget to free the buffer */ 1529 kmem_free(tmpmsg.msg_buf, f_frag_len); 1530 } 1531 } 1532 done: 1533 mbox_skip_next_msg(&header); 1534 return (rc); 1535 } 1536 1537 /* 1538 * available free space in the outbox 1539 */ 1540 static int 1541 mbox_has_free_space(struct sbbc_mbox_header *header) 1542 { 1543 uint32_t space = 0; 1544 1545 ASSERT(MUTEX_HELD(&master_mbox->mbox_out->mb_lock)); 1546 1547 if (header->mailboxes[SBBC_OUTBOX].mbox_producer == 1548 header->mailboxes[SBBC_OUTBOX].mbox_consumer) { 1549 /* 1550 * mailbox is empty 1551 */ 1552 space += header->mailboxes[SBBC_OUTBOX].mbox_len - 1553 header->mailboxes[SBBC_OUTBOX].mbox_producer; 1554 space += 1555 header->mailboxes[SBBC_OUTBOX].mbox_producer; 1556 } else if (header->mailboxes[SBBC_OUTBOX].mbox_producer > 1557 header->mailboxes[SBBC_OUTBOX].mbox_consumer) { 1558 space += header->mailboxes[SBBC_OUTBOX].mbox_len - 1559 header->mailboxes[SBBC_OUTBOX].mbox_producer; 1560 space += header->mailboxes[SBBC_OUTBOX].mbox_consumer; 1561 } else { 1562 /* 1563 * mailbox wrapped around 1564 */ 1565 space += header->mailboxes[SBBC_OUTBOX].mbox_consumer - 1566 header->mailboxes[SBBC_OUTBOX].mbox_producer; 1567 } 1568 1569 /* 1570 * Need to make sure that the mailbox never 1571 * gets completely full, as consumer == producer is 1572 * our test for empty, so we drop MBOX_ALIGN_BYTES. 1573 */ 1574 1575 if (space >= MBOX_ALIGN_BYTES) 1576 space -= MBOX_ALIGN_BYTES; 1577 else 1578 space = 0; 1579 1580 return (space); 1581 1582 } 1583 /* 1584 * Write the data to IOSRAM 1585 * Update the SRAM mailbox header 1586 * Update the local mailbox pointers 1587 * Only write a single fragment. If possible, 1588 * put the whole message into a fragment. 1589 * 1590 * Note: We assume that there is no 'max' message 1591 * size. We will just keep fragmenting. 1592 * Note: We always write to SBBC_OUTBOX and 1593 * read from SBBC_INBOX 1594 * 1595 * If we get an error at any time, return immediately 1596 * without updating the mailbox header in SRAM 1597 */ 1598 static int 1599 mbox_write(struct sbbc_mbox_header *header, 1600 struct sbbc_fragment *frag, sbbc_msg_t *msg) 1601 { 1602 int bytes_written, bytes_remaining, free_space; 1603 int rc = 0; 1604 caddr_t src; 1605 uint32_t sram_dst; 1606 int space_at_end, space_at_start; 1607 uint32_t mbox_offset, mbox_len; 1608 uint32_t mbox_producer, mbox_consumer; 1609 uint32_t f_total_len, f_frag_offset; 1610 uint32_t frag_header_size; 1611 static fn_t f = "mbox_write"; 1612 1613 ASSERT(MUTEX_HELD(&master_mbox->mbox_out->mb_lock)); 1614 1615 /* 1616 * Save to local variables to make code more readable 1617 */ 1618 mbox_offset = header->mailboxes[SBBC_OUTBOX].mbox_offset; 1619 mbox_len = header->mailboxes[SBBC_OUTBOX].mbox_len; 1620 mbox_producer = header->mailboxes[SBBC_OUTBOX].mbox_producer; 1621 mbox_consumer = header->mailboxes[SBBC_OUTBOX].mbox_consumer; 1622 f_total_len = frag->f_total_len; 1623 f_frag_offset = frag->f_frag_offset; 1624 frag_header_size = sizeof (struct sbbc_fragment); 1625 1626 SGSBBC_DBG_MBOX("%s: mbox_consumer = 0x%x, " 1627 "mbox_producer = 0x%x\n", f, mbox_consumer, mbox_producer); 1628 1629 /* 1630 * Write pointer in SRAM 1631 */ 1632 sram_dst = mbox_offset + mbox_producer; 1633 1634 /* 1635 * NB We assume that the consumer stays constant 1636 * during the write. 
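 * (sbbc_mbox_send_msg() only refreshes the header, and hence the
 * consumer, when it has to wait for more space between fragments.)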
It may not necessarily 1637 * be the case but it won't cause us any problems, just means 1638 * we fragment more than is absolutely necessary 1639 * 1640 * possible cases 1641 * 1) consumer == producer, mailbox empty 1642 * space_at_end == mailbox end - producer 1643 * space_at_start == producer - MBOX_ALIGN_BYTES 1644 * 2) producer < consumer 1645 * space_at_end = (consumer - producer - MBOX_ALIGN_BYTES) 1646 * space_at_start == 0 1647 * 3) producer > consumer 1648 * space_at_end = mailbox end - producer 1649 * space_at_start = consumer - MBOX_ALIGN_BYTES 1650 * 1651 * (space - MBOX_ALIGN_BYTES) because we need to avoid the 1652 * scenario where the producer wraps around completely and 1653 * producer == consumer, as this is our test for 'empty'. 1654 * Also we want it to be 8-byte aligned. 1655 * Note: start is assumed = 0 1656 */ 1657 if (mbox_producer < mbox_consumer) { 1658 space_at_end = mbox_consumer - mbox_producer - MBOX_ALIGN_BYTES; 1659 if (space_at_end < 0) 1660 space_at_end = 0; 1661 space_at_start = 0; 1662 } else { 1663 space_at_end = mbox_len - mbox_producer; 1664 if (mbox_consumer == 0) 1665 space_at_end -= MBOX_ALIGN_BYTES; 1666 space_at_start = mbox_consumer - MBOX_ALIGN_BYTES; 1667 if (space_at_start < 0) 1668 space_at_start = 0; 1669 } 1670 1671 SGSBBC_DBG_MBOX("%s: space_at_end = 0x%x, space_at_start = 0x%x\n", 1672 f, space_at_end, space_at_start); 1673 1674 free_space = space_at_end + space_at_start; 1675 1676 if (free_space < frag_header_size) { 1677 /* 1678 * can't even write a fragment header, so just return 1679 * the caller will block waiting for space 1680 */ 1681 frag->f_frag_len = 0; 1682 return (ENOSPC); 1683 } 1684 1685 /* 1686 * How many bytes will be in the fragment ? 1687 */ 1688 bytes_remaining = f_total_len - f_frag_offset; 1689 frag->f_frag_len = min(bytes_remaining, free_space - frag_header_size); 1690 1691 SGSBBC_DBG_MBOX("%s: writing header:sram_dst = 0x%x\n", 1692 f, sram_dst); 1693 1694 /* 1695 * we can write the fragment header and some data 1696 * First, the fragment header 1697 */ 1698 if (space_at_end >= frag_header_size) { 1699 rc = iosram_write(SBBC_MAILBOX_KEY, sram_dst, (caddr_t)frag, 1700 frag_header_size); 1701 if (rc) 1702 return (rc); 1703 1704 sram_dst = (uint32_t)(sram_dst + frag_header_size); 1705 /* 1706 * Wrap around if we reach the end 1707 */ 1708 if (sram_dst >= (mbox_len + mbox_offset)) { 1709 sram_dst = mbox_offset; 1710 } 1711 space_at_end -= frag_header_size; 1712 } else { 1713 /* wraparound */ 1714 if (space_at_end) { 1715 rc = iosram_write(SBBC_MAILBOX_KEY, sram_dst, 1716 (caddr_t)frag, space_at_end); 1717 if (rc) 1718 return (rc); 1719 sram_dst = (uint32_t)mbox_offset; 1720 } 1721 rc = iosram_write(SBBC_MAILBOX_KEY, sram_dst, 1722 (caddr_t)((caddr_t)frag + space_at_end), 1723 (frag_header_size - space_at_end)); 1724 if (rc) 1725 return (rc); 1726 sram_dst += frag_header_size - space_at_end; 1727 space_at_start -= (frag_header_size - space_at_end); 1728 space_at_end = 0; 1729 } 1730 1731 SGSBBC_DBG_MBOX("%s: space_at_end = 0x%x, space_at_start = 0x%x\n", 1732 f, space_at_end, space_at_start); 1733 1734 /* 1735 * Now the fragment data 1736 */ 1737 free_space -= frag_header_size; 1738 src = (caddr_t)(msg->msg_buf + f_frag_offset); 1739 bytes_written = 0; 1740 if (space_at_end) { 1741 SGSBBC_DBG_MBOX("%s: writing data:sram_dst = 0x%x, " 1742 "bytes_remaining = 0x%x\n", 1743 f, sram_dst, bytes_remaining); 1744 1745 if (space_at_end < bytes_remaining) 1746 bytes_written = space_at_end; 1747 else 1748 bytes_written = 
bytes_remaining; 1749 rc = iosram_write(SBBC_MAILBOX_KEY, sram_dst, src, 1750 bytes_written); 1751 if (rc) 1752 return (rc); 1753 1754 sram_dst = (uint32_t)(sram_dst + bytes_written); 1755 /* 1756 * Wrap around if we reach the end 1757 */ 1758 if (sram_dst >= (mbox_len + mbox_offset)) { 1759 sram_dst = mbox_offset; 1760 } 1761 src = (caddr_t)(src + bytes_written); 1762 bytes_remaining -= bytes_written; 1763 } 1764 1765 if ((bytes_remaining > 0) && space_at_start) { 1766 SGSBBC_DBG_MBOX("%s: writing the rest:sram_dst = 0x%x, " 1767 "bytes_remaining = 0x%x\n", 1768 f, sram_dst, bytes_remaining); 1769 if (space_at_start < bytes_remaining) { 1770 rc = iosram_write(SBBC_MAILBOX_KEY, sram_dst, src, 1771 space_at_start); 1772 bytes_written += space_at_start; 1773 } else { 1774 rc = iosram_write(SBBC_MAILBOX_KEY, sram_dst, src, 1775 bytes_remaining); 1776 bytes_written += bytes_remaining; 1777 } 1778 if (rc) 1779 return (rc); 1780 } 1781 1782 frag->f_frag_len = bytes_written; 1783 1784 /* 1785 * update header->mbox_producer (bytes_written + frag_size) 1786 */ 1787 sram_dst = mbox_producer + bytes_written + frag_header_size; 1788 if (sram_dst >= mbox_len) { 1789 sram_dst = sram_dst % mbox_len; 1790 } 1791 1792 SGSBBC_DBG_MBOX("%s: after writing data:sram_dst = 0x%x, " 1793 "bytes_written = 0x%x\n", f, sram_dst, bytes_written); 1794 1795 header->mailboxes[SBBC_OUTBOX].mbox_producer = sram_dst; 1796 1797 mbox_update_header(SBBC_OUTBOX, header); 1798 1799 1800 return (rc); 1801 } 1802 1803 1804 /* 1805 * Get the next frag from IOSRAM. 1806 * Write it to the corresponding msg buf. 1807 * The caller must update the SRAM pointers etc. 1808 */ 1809 static int 1810 mbox_read(struct sbbc_mbox_header *header, 1811 struct sbbc_fragment *frag, sbbc_msg_t *msg) 1812 { 1813 int rc = 0; 1814 uint32_t sram_src, sram_end; 1815 caddr_t msg_buf; 1816 int bytes_at_start, bytes_at_end; 1817 int bytes_to_read; 1818 uint32_t frag_header_size, frag_total_size; 1819 uint32_t f_frag_offset, f_frag_len; 1820 uint32_t mbox_producer, mbox_consumer; 1821 uint32_t mbox_len, mbox_offset; 1822 static fn_t f = "mbox_read"; 1823 1824 ASSERT(MUTEX_HELD(&master_mbox->mbox_in->mb_lock)); 1825 1826 /* 1827 * Save to local variables to make code more readable 1828 */ 1829 mbox_producer = header->mailboxes[SBBC_INBOX].mbox_producer; 1830 mbox_consumer = header->mailboxes[SBBC_INBOX].mbox_consumer; 1831 mbox_len = header->mailboxes[SBBC_INBOX].mbox_len; 1832 mbox_offset = header->mailboxes[SBBC_INBOX].mbox_offset; 1833 frag_header_size = sizeof (struct sbbc_fragment); 1834 f_frag_offset = frag->f_frag_offset; 1835 f_frag_len = frag->f_frag_len; 1836 frag_total_size = frag_header_size + f_frag_len; 1837 1838 /* 1839 * If the message buffer size is smaller than the fragment 1840 * size, return an error. 1841 */ 1842 if (msg->msg_len < f_frag_len) { 1843 rc = ENOMEM; 1844 goto done; 1845 } 1846 1847 msg_buf = (caddr_t)(msg->msg_buf + f_frag_offset); 1848 1849 /* 1850 * Throw in the message data 1851 */ 1852 bcopy(&frag->f_data, &msg->msg_data, sizeof (msg->msg_data)); 1853 1854 /* 1855 * We have it all, waiter, message, so lets 1856 * go get that puppy! 
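 * (mbox_read_frag() has already pulled in the fragment header, so
 * sram_src below points at the fragment data, just past that header,
 * while the consumer pointer is still unmoved.)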
1857 * Message could be in one or two chunks - 1858 * consumer < producer: 1 chunk, (producer - consumer) 1859 * consumer > producer: 2 chunks, (end - consumer) 1860 * (producer - start) 1861 */ 1862 sram_end = (uint32_t)(mbox_offset + mbox_len); 1863 sram_src = (uint32_t)(mbox_offset + mbox_consumer + frag_header_size); 1864 1865 /* 1866 * wraparound 1867 */ 1868 if (sram_src >= sram_end) 1869 sram_src -= mbox_len; 1870 1871 /* 1872 * find where the data is 1873 * possible cases 1874 * 1) consumer == producer, mailbox empty 1875 * error 1876 * 2) producer < consumer 1877 * bytes_at_end = mailbox end - consumer 1878 * bytes_at_start = producer 1879 * 3) producer > consumer 1880 * bytes_at_end = producer - consumer 1881 * bytes_at_start = 0 1882 */ 1883 1884 SGSBBC_DBG_MBOX("%s: mbox_consumer = 0x%x, mbox_producer = 0x%x, " 1885 "frag_len = 0x%x\n", 1886 f, mbox_consumer, mbox_producer, f_frag_len); 1887 1888 if (mbox_producer == mbox_consumer) { 1889 bytes_at_end = bytes_at_start = 0; 1890 } else if (mbox_producer < mbox_consumer) { 1891 bytes_at_end = mbox_len - mbox_consumer; 1892 bytes_at_start = mbox_producer; 1893 } else { 1894 bytes_at_end = mbox_producer - mbox_consumer; 1895 bytes_at_start = 0; 1896 } 1897 1898 SGSBBC_DBG_MBOX("%s: bytes_at_end = 0x%x, " 1899 "bytes_at_start = 0x%x\n", f, bytes_at_end, bytes_at_start); 1900 1901 if ((bytes_at_end + bytes_at_start) < frag_total_size) { 1902 1903 /* 1904 * mailbox is corrupt 1905 * but what to do ? 1906 */ 1907 cmn_err(CE_PANIC, "Corrupt INBOX!\n" 1908 "producer = %x, consumer = %x, bytes_at_start = %x, " 1909 "bytes_at_end = %x\n", mbox_producer, mbox_consumer, 1910 bytes_at_start, bytes_at_end); 1911 } 1912 1913 /* 1914 * If bytes_at_end is greater than header size, read the 1915 * part at the end of the mailbox, and then update the 1916 * pointers and bytes_to_read. 1917 */ 1918 if (bytes_at_end > frag_header_size) { 1919 /* 1920 * We are only interested in the data segment. 1921 */ 1922 bytes_at_end -= frag_header_size; 1923 bytes_to_read = (bytes_at_end >= f_frag_len)? 1924 f_frag_len : bytes_at_end; 1925 SGSBBC_DBG_MBOX("%s: reading data: sram_src = 0x%x, " 1926 "bytes_to_read = 0x%x\n", f, sram_src, bytes_to_read); 1927 rc = iosram_read(SBBC_MAILBOX_KEY, sram_src, msg_buf, 1928 bytes_to_read); 1929 if (rc) { 1930 goto done; 1931 } 1932 1933 /* 1934 * Update pointers in SRAM and message buffer. 
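 * Whatever is left of this fragment wrapped around to the start of
 * the circular mailbox, so continue reading from mbox_offset.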
1935 */ 1936 sram_src = (uint32_t)mbox_offset; 1937 msg_buf = (caddr_t)(msg_buf + bytes_to_read); 1938 bytes_to_read = f_frag_len - bytes_to_read; 1939 } else { 1940 bytes_to_read = f_frag_len; 1941 } 1942 1943 /* 1944 * wraparound to start of mailbox 1945 */ 1946 if (bytes_to_read > 0) { 1947 SGSBBC_DBG_MBOX("%s: reading the rest: sram_src = 0x%x, " 1948 "bytes_to_read = 0x%x\n", f, sram_src, bytes_to_read); 1949 rc = iosram_read(SBBC_MAILBOX_KEY, sram_src, msg_buf, 1950 bytes_to_read); 1951 } 1952 1953 done: 1954 msg->msg_bytes += f_frag_len; 1955 1956 return (rc); 1957 } 1958 1959 /* 1960 * move past the next message in the inbox 1961 */ 1962 static void 1963 mbox_skip_next_msg(struct sbbc_mbox_header *header) 1964 { 1965 struct sbbc_fragment frag; 1966 uint32_t next_msg; 1967 1968 ASSERT(MUTEX_HELD(&master_mbox->mbox_in->mb_lock)); 1969 1970 if (mbox_read_frag(header, &frag)) { 1971 cmn_err(CE_PANIC, "INBOX is Corrupt !\n"); 1972 } 1973 1974 /* 1975 * Move on to the next message 1976 */ 1977 next_msg = header->mailboxes[SBBC_INBOX].mbox_consumer; 1978 next_msg += sizeof (struct sbbc_fragment); 1979 next_msg += frag.f_frag_len; 1980 if (next_msg >= header->mailboxes[SBBC_INBOX].mbox_len) { 1981 next_msg = (next_msg + 1982 header->mailboxes[SBBC_INBOX].mbox_len) % 1983 header->mailboxes[SBBC_INBOX].mbox_len; 1984 } 1985 header->mailboxes[SBBC_INBOX].mbox_consumer = 1986 next_msg; 1987 1988 mbox_update_header(SBBC_INBOX, header); 1989 1990 return; 1991 1992 } 1993 1994 static struct sbbc_msg_waiter * 1995 mbox_find_waiter(uint16_t msg_type, uint32_t msg_id) 1996 { 1997 struct sbbc_msg_waiter *waiter, *prev; 1998 1999 prev = NULL; 2000 for (waiter = master_mbox->mbox_wait_list[msg_type]; 2001 waiter != NULL; waiter = waiter->w_next) { 2002 2003 if (waiter->w_id == msg_id) { 2004 if (prev != NULL) { 2005 prev->w_next = waiter->w_next; 2006 } else { 2007 master_mbox->mbox_wait_list[msg_type] = 2008 waiter->w_next; 2009 } 2010 break; 2011 } 2012 prev = waiter; 2013 } 2014 2015 return (waiter); 2016 } 2017 2018 static int 2019 mbox_read_header(uint32_t mailbox, struct sbbc_mbox_header *header) 2020 { 2021 struct sbbc_mbox_header *hd; 2022 uint32_t offset; 2023 int rc; 2024 2025 /* 2026 * Initialize a sbbc_mbox_header pointer to 0 so that we 2027 * can use it to calculate the offsets of fields inside 2028 * the structure. 2029 */ 2030 hd = (struct sbbc_mbox_header *)0; 2031 2032 if (rc = iosram_read(SBBC_MAILBOX_KEY, 0, (caddr_t)header, 2033 sizeof (struct sbbc_mbox_header))) 2034 return (rc); 2035 2036 /* 2037 * Since the header is read in a byte-by-byte fashion 2038 * using ddi_rep_get8, we need to re-read the producer 2039 * or consumer pointer as integer in case it has changed 2040 * after part of the previous value has been read. 
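 * The field offset is computed from the NULL-based template pointer
 * 'hd' above, which is simply a hand-rolled offsetof().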
	 */
	switch (mailbox) {

	case SBBC_INBOX:
		offset = (uint32_t)(uintptr_t)
		    (&hd->mailboxes[SBBC_INBOX].mbox_producer);
		rc = iosram_read(SBBC_MAILBOX_KEY, offset,
		    (caddr_t)&header->mailboxes[SBBC_INBOX].mbox_producer,
		    sizeof (uint32_t));
		break;
	case SBBC_OUTBOX:
		offset = (uint32_t)(uintptr_t)
		    (&hd->mailboxes[SBBC_OUTBOX].mbox_consumer);
		rc = iosram_read(SBBC_MAILBOX_KEY, offset,
		    (caddr_t)&header->mailboxes[SBBC_OUTBOX].mbox_consumer,
		    sizeof (uint32_t));
		break;
	default:
		cmn_err(CE_PANIC, "Invalid Mbox header type\n");
		break;

	}

	return (rc);
}

/*
 * There are only two fields updated by the domain,
 * the inbox consumer field and the outbox producer
 * field. These fields are protected by the respective
 * mbox_{in|out}->mb_lock so that accesses will
 * be serialised. The only coherency issue is writing
 * back the header, so we do it here after grabbing
 * the global mailbox lock.
 */
static void
mbox_update_header(uint32_t mailbox, struct sbbc_mbox_header *header)
{
	struct sbbc_mbox_header *hd;
	uint32_t value, offset, mbox_len;

	/*
	 * Initialize a sbbc_mbox_header pointer to 0 so that we
	 * can use it to calculate the offsets of fields inside
	 * the structure.
	 */
	hd = (struct sbbc_mbox_header *)0;

	switch (mailbox) {

	case SBBC_INBOX:
		value = header->mailboxes[SBBC_INBOX].mbox_consumer;
		offset = (uint32_t)(uintptr_t)
		    (&hd->mailboxes[SBBC_INBOX].mbox_consumer);

		mbox_len = header->mailboxes[SBBC_INBOX].mbox_len;
		break;
	case SBBC_OUTBOX:
		value = header->mailboxes[SBBC_OUTBOX].mbox_producer;
		offset = (uint32_t)(uintptr_t)
		    (&hd->mailboxes[SBBC_OUTBOX].mbox_producer);
		mbox_len = header->mailboxes[SBBC_OUTBOX].mbox_len;
		break;
	default:
		cmn_err(CE_PANIC, "Invalid Mbox header type\n");
		break;

	}

	/*
	 * If the last read/write would cause the next read/write
	 * to be unaligned, round the pointer up to the next
	 * MBOX_ALIGN_BYTES boundary, modulo the mailbox length.
	 * This is OK because all the mailbox handlers will
	 * conform to this.
	 */
	if (value % MBOX_ALIGN_BYTES) {
		value += (MBOX_ALIGN_BYTES - (value % MBOX_ALIGN_BYTES));
		value %= mbox_len;
	}

	if (iosram_write(SBBC_MAILBOX_KEY, offset, (caddr_t)&value,
	    sizeof (uint32_t))) {
		cmn_err(CE_PANIC, "Mailbox Corrupt ! \n");
	}

	/*
	 * Update internal pointers so they won't be out of sync with
	 * the values in IOSRAM.
	 */
	switch (mailbox) {

	case SBBC_INBOX:
		header->mailboxes[SBBC_INBOX].mbox_consumer = value;
		break;
	case SBBC_OUTBOX:
		header->mailboxes[SBBC_OUTBOX].mbox_producer = value;
		break;
	}
}

static int
mbox_read_frag(struct sbbc_mbox_header *header,
    struct sbbc_fragment *frag)
{
	int rc = 0;
	uint32_t sram_src, bytes;
	caddr_t dst;

	ASSERT(MUTEX_HELD(&master_mbox->mbox_in->mb_lock));
	/*
	 * read the fragment header for this message
	 */
	sram_src = (uint32_t)(header->mailboxes[SBBC_INBOX].mbox_offset +
	    header->mailboxes[SBBC_INBOX].mbox_consumer);

	/*
	 * wraparound ?
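	 * The fragment header itself may straddle the end of the circular
	 * mailbox.  Illustration (hypothetical numbers): with mbox_len =
	 * 0x1000, mbox_consumer = 0xff8 and a 0x20-byte sbbc_fragment, the
	 * first 8 bytes sit at the end of the inbox area and the remaining
	 * 0x18 bytes at its start, hence the split into two iosram_read()
	 * calls below.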
	 */
	if ((header->mailboxes[SBBC_INBOX].mbox_consumer +
	    sizeof (struct sbbc_fragment)) >=
	    header->mailboxes[SBBC_INBOX].mbox_len) {

		dst = (caddr_t)frag;
		bytes = header->mailboxes[SBBC_INBOX].mbox_len -
		    header->mailboxes[SBBC_INBOX].mbox_consumer;

		if (rc = iosram_read(SBBC_MAILBOX_KEY, sram_src, dst, bytes)) {
			return (rc);
		}

		dst += bytes;
		sram_src = header->mailboxes[SBBC_INBOX].mbox_offset;
		bytes = (header->mailboxes[SBBC_INBOX].mbox_consumer +
		    sizeof (struct sbbc_fragment)) %
		    header->mailboxes[SBBC_INBOX].mbox_len;

		if (rc = iosram_read(SBBC_MAILBOX_KEY, sram_src,
		    dst, bytes)) {
			return (rc);
		}
	} else {
		if (rc = iosram_read(SBBC_MAILBOX_KEY, sram_src, (caddr_t)frag,
		    sizeof (struct sbbc_fragment))) {
			return (rc);
		}
	}

	return (0);
}


/*
 * This function is triggered by a soft interrupt and its purpose is to call
 * kadmin() to shut down the Domain.
 */
/*ARGSUSED0*/
static uint_t
sbbc_do_fast_shutdown(char *arg)
{
	(void) kadmin(A_SHUTDOWN, AD_POWEROFF, NULL, kcred);

	/*
	 * If kadmin fails for some reason then we bring the system down
	 * via power_down(), or failing that using halt().
	 */
	power_down("kadmin() failed, trying power_down()");

	halt("power_down() failed, trying halt()");

	/*
	 * We should never make it this far, so something must have gone
	 * horribly, horribly wrong.
	 */
	/*NOTREACHED*/
	return (DDI_INTR_UNCLAIMED);
}


/*
 * This function handles unsolicited PANIC_SHUTDOWN events
 */
static uint_t
sbbc_panic_shutdown_handler(char *arg)
{
	static fn_t f = "sbbc_panic_shutdown_handler()";

	sg_panic_shutdown_t *payload = NULL;
	sbbc_msg_t *msg = NULL;

	if (arg == NULL) {
		SGSBBC_DBG_EVENT(CE_NOTE, "%s: arg == NULL", f);
		return (DDI_INTR_UNCLAIMED);
	}

	msg = (sbbc_msg_t *)arg;

	if (msg->msg_buf == NULL) {
		SGSBBC_DBG_EVENT(CE_NOTE, "%s: msg_buf == NULL", f);
		return (DDI_INTR_UNCLAIMED);
	}

	payload = (sg_panic_shutdown_t *)msg->msg_buf;

	switch (*payload) {
	case SC_EVENT_PANIC_ENV:

		/*
		 * Let the user know why the domain is going down.
		 */
		cmn_err(CE_WARN, "%s", PANIC_ENV_EVENT_MSG);

		/*
		 * trigger sbbc_do_fast_shutdown().
		 */
		ddi_trigger_softintr(panic_softintr_id);

		/*NOTREACHED*/
		break;

	case SC_EVENT_PANIC_KEYSWITCH:
		/*
		 * The SC warns a user if they try a destructive keyswitch
		 * command on a Domain which is currently running Solaris.
		 * If the user chooses to continue despite our best advice
		 * then we bring down the Domain immediately without trying
		 * to shut the system down gracefully.
		 */
		break;

	default:
		SGSBBC_DBG_EVENT(CE_NOTE, "%s: Unknown payload:%d", f,
		    *payload);
		return (DDI_INTR_UNCLAIMED);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * dp_get_cores()
 *
 * Checks cpu implementation for the input cpuid and returns
 * the number of cores.
 * If implementation cannot be determined, returns 1
 */
static int
dp_get_cores(uint16_t cpuid)
{
	int bd, ii, impl = 0, nc;

	bd = cpuid / 4;
	nc = SG_MAX_CPUS_PER_BD;

	/* find first with valid implementation */
	for (ii = 0; ii < nc; ii++)
		if (cpu[MAKE_CPUID(bd, ii)]) {
			impl = cpunodes[MAKE_CPUID(bd, ii)].implementation;
			break;
		}

	if (IS_JAGUAR(impl) || IS_PANTHER(impl))
		return (2);
	else
		return (1);
}

/*
 * dp_payload_add_cpus()
 *
 * From the datapath mailbox message, determines the number of and Safari IDs
 * for the affected cpus, then adds this info to the datapath ereport.
 *
 */
static int
dp_payload_add_cpus(plat_datapath_info_t *dpmsg, nvlist_t *erp)
{
	int jj = 0, numcpus = 0;
	int bd, procpos, ii, num, ncores, ret;
	uint16_t *dparray, cpuid;
	uint64_t *snarray;

	/* check for multiple core architectures */
	ncores = dp_get_cores(dpmsg->cpuid);

	switch (dpmsg->type) {
	case DP_CDS_TYPE:
		numcpus = ncores;
		break;

	case DP_DX_TYPE:
		numcpus = 2 * ncores;
		break;

	case DP_RP_TYPE:
		numcpus = SG_MAX_CPUS_PER_BD;
		break;

	default:
		ASSERT(0);
		return (-1);
	}

	num = numcpus;

	/*
	 * populate dparray with impacted cores (only those present)
	 */
	dparray = kmem_zalloc(num * sizeof (uint16_t *), KM_SLEEP);
	bd = SG_PORTID_TO_BOARD_NUM(SG_CPUID_TO_PORTID(dpmsg->cpuid));
	procpos = SG_CPUID_TO_PORTID(dpmsg->cpuid) & 0x3;

	mutex_enter(&cpu_lock);

	switch (dpmsg->type) {

	case DP_CDS_TYPE:
		/*
		 * For a CDS error, it's the reporting cpuid
		 * and its other core (if present)
		 */
		cpuid = dpmsg->cpuid & 0x1FF;		/* core 0 */
		if (cpu[cpuid])
			dparray[jj++] = cpuid;

		cpuid = dpmsg->cpuid | SG_CORE_ID_MASK;	/* core 1 */
		if (cpu[cpuid])
			dparray[jj++] = cpuid;
		break;

	case DP_DX_TYPE:
		/*
		 * For a DX error, it's the reporting cpuid (all
		 * cores) and the other CPU sharing the same
		 * DX<-->DCDS interface (all cores)
		 */

		/* reporting cpuid */
		cpuid = dpmsg->cpuid & 0x1FF;		/* core 0 */
		if (cpu[cpuid])
			dparray[jj++] = cpuid;

		cpuid = dpmsg->cpuid | SG_CORE_ID_MASK;	/* core 1 */
		if (cpu[cpuid])
			dparray[jj++] = cpuid;

		/* find partner cpuid */
		if (procpos == 0 || procpos == 2)
			cpuid = dpmsg->cpuid + 1;
		else
			cpuid = dpmsg->cpuid - 1;

		/* add partner cpuid */
		cpuid &= 0x1FF;				/* core 0 */
		if (cpu[cpuid])
			dparray[jj++] = cpuid;

		cpuid |= SG_CORE_ID_MASK;		/* core 1 */
		if (cpu[cpuid])
			dparray[jj++] = cpuid;
		break;

	case DP_RP_TYPE:
		/*
		 * For a RP error, it's all cpuids (all cores) on
		 * the reporting board
		 */
		for (ii = 0; ii < SG_MAX_CMPS_PER_BD; ii++) {
			cpuid = MAKE_CPUID(bd, ii);
			if (cpu[cpuid])			/* core 0 */
				dparray[jj++] = cpuid;
			cpuid |= SG_CORE_ID_MASK;
			if (cpu[cpuid])			/* core 1 */
				dparray[jj++] = cpuid;
		}
		break;
	}

	mutex_exit(&cpu_lock);

	/*
	 * The datapath message could not be associated with any
	 * configured CPU.
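	 * (For reference on the selection above: masking a cpuid with 0x1FF
	 * picks core 0 of a processor and OR-ing in SG_CORE_ID_MASK picks
	 * core 1, so at this point dparray[0 .. jj - 1] holds the Safari IDs
	 * of the affected cpus that are actually configured; jj may be
	 * smaller than num, or even zero.)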
	 */
	if (!jj) {
		kmem_free(dparray, num * sizeof (uint16_t *));
		ret = nvlist_add_uint32(erp, DP_LIST_SIZE, jj);
		ASSERT(ret == 0);
		return (-1);
	}

	snarray = kmem_zalloc(jj * sizeof (uint64_t), KM_SLEEP);
	for (ii = 0; ii < jj; ii++)
		snarray[ii] = cpunodes[dparray[ii]].device_id;

	ret = nvlist_add_uint32(erp, DP_LIST_SIZE, jj);
	ret |= nvlist_add_uint16_array(erp, DP_LIST, dparray, jj);
	ret |= nvlist_add_uint64_array(erp, SN_LIST, snarray, jj);
	ASSERT(ret == 0);

	kmem_free(dparray, num * sizeof (uint16_t *));
	kmem_free(snarray, jj * sizeof (uint64_t));

	return (0);
}

/*
 * sbbc_dp_trans_event() - datapath message handler.
 *
 * Process datapath error and fault messages received from the SC. Checks
 * for, and disregards, messages associated with I/O boards. Otherwise,
 * extracts message info to produce a datapath ereport.
 */
/*ARGSUSED*/
static uint_t
sbbc_dp_trans_event(char *arg)
{
	const char *f = "sbbc_dp_trans_event()";
	nvlist_t *erp, *detector, *hcelem;
	char buf[FM_MAX_CLASS];
	int board;
	plat_datapath_info_t *dpmsg;
	sbbc_msg_t *msg;
	int msgtype;

	/* set i/f message and payload pointers */
	msg = &dp_payload_msg;
	dpmsg = &dp_payload;
	msgtype = msg->msg_type.type;

	cmn_err(CE_NOTE, "%s: msgtype=0x%x\n", f, msgtype);
	cmn_err(CE_NOTE, "type=0x%x cpuid=0x%x t_value=0x%x\n", dpmsg->type,
	    dpmsg->cpuid, dpmsg->t_value);

	/* check for valid type */
	if (dpmsg->type > DP_RP_TYPE) {
		cmn_err(CE_WARN, "%s: dpmsg type 0x%x invalid\n",
		    f, dpmsg->type);
		return (DDI_INTR_CLAIMED);
	}

	/* check for I/O board message - Schizo AIDs are 25 - 30 */
	if (dpmsg->cpuid > 23) {
		cmn_err(CE_NOTE, "%s: ignore I/O board msg\n", f);
		return (DDI_INTR_CLAIMED);
	}

	/* allocate space for ereport */
	erp = fm_nvlist_create(NULL);

	/*
	 * Member Name		Data Type	Comments
	 * -----------		---------	-----------
	 * version		uint8		0
	 * class		string		"asic"
	 * ENA			uint64		ENA Format 1
	 * detector		fmri		aggregated ID data for SC-DE
	 *
	 * Datapath ereport subclasses and data payloads:
	 * There will be two types of ereports (error and fault) which will be
	 * identified by the "type" member.
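	 * (In this handler the distinction simply mirrors which unsolicited
	 * message arrived: sbbc_datapath_error_msg_handler() tags the soft
	 * interrupt with MBOX_EVENT_DP_ERROR and the fault handler with
	 * MBOX_EVENT_DP_FAULT, which is then mapped to the erptype payload
	 * member below.)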
	 *
	 * ereport.asic.serengeti.cds.cds-dp
	 * ereport.asic.serengeti.dx.dx-dp	(board)
	 * ereport.asic.serengeti.rp.rp-dp	(centerplane)
	 *
	 * Member Name		Data Type	Comments
	 * -----------		---------	-----------
	 * erptype		uint16		derived from message type: error or
	 *					fault
	 * t-value		uint32		SC's datapath SERD timeout threshold
	 * dp-list-sz		uint8		number of dp-list array elements
	 * dp-list		array of uint16	Safari IDs of affected cpus
	 * sn-list		array of uint64	Serial numbers of affected cpus
	 */

	/* compose common ereport elements */
	detector = fm_nvlist_create(NULL);

	/*
	 * Create legacy FMRI for the detector
	 */
	board = SG_PORTID_TO_BOARD_NUM(SG_CPUID_TO_PORTID(dpmsg->cpuid));
	switch (dpmsg->type) {
	case DP_CDS_TYPE:
	case DP_DX_TYPE:
		(void) snprintf(buf, FM_MAX_CLASS, "SB%d", board);
		break;
	case DP_RP_TYPE:
		(void) snprintf(buf, FM_MAX_CLASS, "RP");
		break;
	default:
		(void) snprintf(buf, FM_MAX_CLASS, "UNKNOWN");
		break;
	}

	hcelem = fm_nvlist_create(NULL);

	(void) nvlist_add_string(hcelem, FM_FMRI_HC_NAME, FM_FMRI_LEGACY_HC);
	(void) nvlist_add_string(hcelem, FM_FMRI_HC_ID, buf);

	(void) nvlist_add_uint8(detector, FM_VERSION, FM_HC_SCHEME_VERSION);
	(void) nvlist_add_string(detector, FM_FMRI_SCHEME, FM_FMRI_SCHEME_HC);
	(void) nvlist_add_string(detector, FM_FMRI_HC_ROOT, "");
	(void) nvlist_add_uint32(detector, FM_FMRI_HC_LIST_SZ, 1);
	(void) nvlist_add_nvlist_array(detector, FM_FMRI_HC_LIST, &hcelem, 1);

	/* build ereport class name */
	(void) snprintf(buf, FM_MAX_CLASS, "asic.serengeti.%s.%s-%s",
	    dperrtype[dpmsg->type], dperrtype[dpmsg->type],
	    FM_ERROR_DATAPATH);

	fm_ereport_set(erp, FM_EREPORT_VERSION, buf,
	    fm_ena_generate(0, FM_ENA_FMT1), detector, NULL);

	/* add payload elements */
	if (msgtype == MBOX_EVENT_DP_ERROR)
		fm_payload_set(erp,
		    DP_EREPORT_TYPE, DATA_TYPE_UINT16, DP_ERROR, NULL);
	else
		fm_payload_set(erp,
		    DP_EREPORT_TYPE, DATA_TYPE_UINT16, DP_FAULT, NULL);

	fm_payload_set(erp, DP_TVALUE, DATA_TYPE_UINT32, dpmsg->t_value, NULL);

	dp_payload_add_cpus(dpmsg, erp);

	/* post ereport */
	fm_ereport_post(erp, EVCH_SLEEP);

	/* free ereport memory */
	fm_nvlist_destroy(erp, FM_NVA_FREE);
	fm_nvlist_destroy(detector, FM_NVA_FREE);

	return (DDI_INTR_CLAIMED);
}

static uint_t
sbbc_datapath_error_msg_handler(char *arg)
{
	static fn_t f = "sbbc_datapath_error_msg_handler()";
	sbbc_msg_t *msg = NULL;

	if (arg == NULL) {
		SGSBBC_DBG_EVENT(CE_NOTE, "%s: arg == NULL", f);
		return (DDI_INTR_UNCLAIMED);
	}

	msg = (sbbc_msg_t *)arg;

	if (msg->msg_buf == NULL) {
		SGSBBC_DBG_EVENT(CE_NOTE, "%s: msg_buf == NULL", f);
		return (DDI_INTR_UNCLAIMED);
	}

	msg->msg_type.type = MBOX_EVENT_DP_ERROR;

	/* trigger sbbc_dp_trans_event() */
	ddi_trigger_softintr(dp_softintr_id);

	return (DDI_INTR_CLAIMED);
}

static uint_t
sbbc_datapath_fault_msg_handler(char *arg)
{

	static fn_t f = "sbbc_datapath_fault_msg_handler()";

	sbbc_msg_t *msg = NULL;

	if (arg == NULL) {
		SGSBBC_DBG_EVENT(CE_NOTE, "%s: arg == NULL", f);
		return (DDI_INTR_UNCLAIMED);
	}

	msg = (sbbc_msg_t *)arg;

	if (msg->msg_buf == NULL) {
		SGSBBC_DBG_EVENT(CE_NOTE, "%s: msg_buf == NULL", f);
		return (DDI_INTR_UNCLAIMED);
	}

	msg->msg_type.type = MBOX_EVENT_DP_FAULT;

	/* trigger sbbc_dp_trans_event() */
	ddi_trigger_softintr(dp_softintr_id);

	return (DDI_INTR_CLAIMED);
}

/*
 * Log an ECC event message to the SC. This is called from the
 * sbbc_ecc_mbox_taskq or directly from plat_send_ecc_mailbox_msg
 * for indictment messages.
 */
int
sbbc_mbox_ecc_output(sbbc_ecc_mbox_t *msgp)
{
	int rv;
	plat_capability_data_t *cap;
	plat_dimm_sid_board_data_t *ddata;
	plat_ecc_msg_hdr_t *hdr;

	rv = sbbc_mbox_request_response(&msgp->ecc_req, &msgp->ecc_resp,
	    sbbc_mbox_default_timeout);

	if (rv != 0) {
		/*
		 * Indictment messages use the return value to indicate a
		 * problem in the mailbox. For Error mailbox messages, we'll
		 * have to use a syslog message.
		 */
		if (msgp->ecc_log_error) {
			if (sbbc_ecc_mbox_send_errs == 0) {
				cmn_err(CE_NOTE, "!Solaris failed to send a "
				    "message (0x%x/0x%x) to the System "
				    "Controller. Error: %d, Message Status: %d",
				    msgp->ecc_resp.msg_type.type,
				    msgp->ecc_resp.msg_type.sub_type,
				    rv, msgp->ecc_resp.msg_status);
			}

			if (++sbbc_ecc_mbox_send_errs >=
			    sbbc_ecc_mbox_err_throttle) {
				sbbc_ecc_mbox_send_errs = 0;
			}
		}

	} else if (msgp->ecc_resp.msg_status != 0) {
		if (msgp->ecc_resp.msg_type.type == INFO_MBOX) {
			switch (msgp->ecc_resp.msg_type.sub_type) {
			case INFO_MBOX_ECC:
				hdr = (plat_ecc_msg_hdr_t *)
				    msgp->ecc_req.msg_buf;
				if (hdr->emh_msg_type ==
				    PLAT_ECC_DIMM_SID_MESSAGE) {
					rv = msgp->ecc_resp.msg_status;
					break;
				}
				/*FALLTHROUGH*/
			case INFO_MBOX_ECC_CAP:
				/*
				 * The positive response comes only
				 * from the AVL FS1 updated SC.
				 * If the firmware is either downgraded
				 * or fails over to an older version,
				 * then reset the SC capability to
				 * the default.
				 */
				plat_ecc_capability_sc_set
				    (PLAT_ECC_CAPABILITY_SC_DEFAULT);
				break;
			default:
				break;
			}
		}
		if (msgp->ecc_log_error) {
			if (sbbc_ecc_mbox_inval_errs == 0) {
				cmn_err(CE_NOTE, "!An internal error (%d) "
				    "occurred in the System Controller while "
				    "processing this message (0x%x/0x%x)",
				    msgp->ecc_resp.msg_status,
				    msgp->ecc_resp.msg_type.type,
				    msgp->ecc_resp.msg_type.sub_type);
			}
			if (msgp->ecc_resp.msg_status == EINVAL) {
				if (++sbbc_ecc_mbox_inval_errs >=
				    sbbc_ecc_mbox_err_throttle) {
					sbbc_ecc_mbox_inval_errs = 0;
				}
				rv = ENOMSG;
			} else {
				if (++sbbc_ecc_mbox_other_errs >=
				    sbbc_ecc_mbox_err_throttle) {
					sbbc_ecc_mbox_other_errs = 0;
				}
				rv = msgp->ecc_resp.msg_status;
			}
		}

	} else {
		if (msgp->ecc_resp.msg_type.type == INFO_MBOX) {
			switch (msgp->ecc_resp.msg_type.sub_type) {
			case INFO_MBOX_ECC_CAP:
				/*
				 * Successfully received the response
				 * for the capability message, so update
				 * the SC ECC messaging capability.
				 */
				cap = (plat_capability_data_t *)
				    msgp->ecc_resp.msg_buf;
				plat_ecc_capability_sc_set
				    (cap->capd_capability);
				break;

			case INFO_MBOX_ECC:
				hdr = (plat_ecc_msg_hdr_t *)
				    msgp->ecc_resp.msg_buf;
				if (hdr && (hdr->emh_msg_type ==
				    PLAT_ECC_DIMM_SID_MESSAGE)) {
					/*
					 * Successfully received a response
					 * to a request for DIMM serial ids.
					 */
					ddata = (plat_dimm_sid_board_data_t *)
					    msgp->ecc_resp.msg_buf;
					(void) plat_store_mem_sids(ddata);
				}
				break;

			default:
				break;
			}
		}
	}

	if (msgp->ecc_resp.msg_buf)
		kmem_free((void *)msgp->ecc_resp.msg_buf,
		    (size_t)msgp->ecc_resp.msg_len);

	kmem_free((void *)msgp->ecc_req.msg_buf, (size_t)msgp->ecc_req.msg_len);
	kmem_free(msgp, sizeof (sbbc_ecc_mbox_t));
	return (rv);
}

/*
 * Enqueue ECC event message on taskq to SC. This is invoked from
 * plat_send_ecc_mailbox_msg() for each ECC event generating a message.
 */
void
sbbc_mbox_queue_ecc_event(sbbc_ecc_mbox_t *sbbc_ecc_msgp)
{
	/*
	 * Create the ECC event mailbox taskq, if it does not yet exist.
	 * This must be done here rather than in sbbc_mbox_init(). The
	 * sgsbbc driver is loaded very early in the boot flow. Calling
	 * taskq_create() from sbbc_mbox_init could lead to a boot deadlock.
	 *
	 * There might be a tiny probability that two ECC handlers on
	 * different processors could arrive here simultaneously. If
	 * the taskq has not been created previously, then these two
	 * simultaneous events could cause the creation of an extra taskq.
	 * Given the extremely small likelihood (if not outright impossibility)
	 * of this occurrence, sbbc_ecc_mbox_taskq is not protected by a lock.
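	 * (Even in that unlikely race the consequence should be benign: one
	 * of the two task queues would simply be leaked, and subsequent
	 * events would be dispatched to whichever pointer ended up stored
	 * in sbbc_ecc_mbox_taskq.)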
	 */

	if (sbbc_ecc_mbox_taskq == NULL) {
		sbbc_ecc_mbox_taskq = taskq_create("ECC_event_mailbox", 1,
		    minclsyspri, ECC_MBOX_TASKQ_MIN, ECC_MBOX_TASKQ_MAX,
		    TASKQ_PREPOPULATE);
		if (sbbc_ecc_mbox_taskq == NULL) {
			if (sbbc_ecc_mbox_taskq_errs == 0) {
				cmn_err(CE_NOTE, "Unable to create mailbox "
				    "task queue for ECC event logging to "
				    "System Controller");
			}
			if (++sbbc_ecc_mbox_taskq_errs >=
			    sbbc_ecc_mbox_err_throttle) {
				sbbc_ecc_mbox_taskq_errs = 0;
			}

			kmem_free((void *)sbbc_ecc_msgp->ecc_req.msg_buf,
			    (size_t)sbbc_ecc_msgp->ecc_req.msg_len);
			kmem_free((void *)sbbc_ecc_msgp,
			    sizeof (sbbc_ecc_mbox_t));
			return;
		}

		/*
		 * Reset error counter so that first taskq_dispatch
		 * error will be output
		 */
		sbbc_ecc_mbox_taskq_errs = 0;
	}

	/*
	 * Enqueue the message
	 */

	if (taskq_dispatch(sbbc_ecc_mbox_taskq,
	    (task_func_t *)sbbc_mbox_ecc_output, sbbc_ecc_msgp,
	    TQ_NOSLEEP) == NULL) {

		if (sbbc_ecc_mbox_taskq_errs == 0) {
			cmn_err(CE_NOTE, "Unable to send ECC event "
			    "message to System Controller");
		}
		if (++sbbc_ecc_mbox_taskq_errs >= sbbc_ecc_mbox_err_throttle) {
			sbbc_ecc_mbox_taskq_errs = 0;
		}

		kmem_free((void *)sbbc_ecc_msgp->ecc_req.msg_buf,
		    (size_t)sbbc_ecc_msgp->ecc_req.msg_len);
		kmem_free((void *)sbbc_ecc_msgp, sizeof (sbbc_ecc_mbox_t));
	}
}

static uint_t
cap_ecc_msg_handler(char *addr)
{
	sbbc_msg_t *msg = NULL;
	plat_capability_data_t *cap = NULL;
	static fn_t f = "cap_ecc_msg_handler";

	msg = (sbbc_msg_t *)addr;

	if (msg == NULL) {
		SGSBBC_DBG_EVENT(CE_WARN, "cap_ecc_msg_handler() called with "
		    "null addr");
		return (DDI_INTR_CLAIMED);
	}

	if (msg->msg_buf == NULL) {
		SGSBBC_DBG_EVENT(CE_WARN, "cap_ecc_msg_handler() called with "
		    "null data buffer");
		return (DDI_INTR_CLAIMED);
	}

	cap = (plat_capability_data_t *)msg->msg_buf;
	switch (cap->capd_msg_type) {
	case PLAT_ECC_CAPABILITY_MESSAGE:
		SGSBBC_DBG_MBOX("%s: capability 0x%x\n", f,
		    cap->capd_capability);
		plat_ecc_capability_sc_set(cap->capd_capability);
		break;
	default:
		SGSBBC_DBG_MBOX("%s: Unknown message type = 0x%x\n", f,
		    cap->capd_msg_type);
		break;
	}

	return (DDI_INTR_CLAIMED);
}
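/*
 * Usage sketch for the ECC/capability path above (illustrative only; the
 * real caller is plat_send_ecc_mailbox_msg() in the platform ECC code and
 * the exact request layout is defined there, not here):
 *
 *	sbbc_ecc_mbox_t *msgp;
 *
 *	msgp = kmem_zalloc(sizeof (sbbc_ecc_mbox_t), KM_SLEEP);
 *	msgp->ecc_req.msg_type.type = INFO_MBOX;
 *	msgp->ecc_req.msg_type.sub_type = INFO_MBOX_ECC;
 *	msgp->ecc_req.msg_buf = (caddr_t)payload;	(hypothetical buffer)
 *	msgp->ecc_req.msg_len = payload_len;
 *	msgp->ecc_log_error = 1;
 *	sbbc_mbox_queue_ecc_event(msgp);
 *
 * Once queued, sbbc_mbox_ecc_output() owns msgp: it sends the request,
 * examines the response and frees the request buffer, any response buffer
 * and msgp itself, as seen above.
 */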