1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 24 * Use is subject to license terms. 25 */ 26 27 /* 28 * hermon_event.c 29 * Hermon Interrupt and Event Processing Routines 30 * 31 * Implements all the routines necessary for allocating, freeing, and 32 * handling all of the various event types that the Hermon hardware can 33 * generate. 34 * These routines include the main Hermon interrupt service routine 35 * (hermon_isr()) as well as all the code necessary to setup and handle 36 * events from each of the many event queues used by the Hermon device. 
37 */ 38 39 #include <sys/types.h> 40 #include <sys/conf.h> 41 #include <sys/ddi.h> 42 #include <sys/sunddi.h> 43 #include <sys/modctl.h> 44 45 #include <sys/ib/adapters/hermon/hermon.h> 46 47 static void hermon_eq_poll(hermon_state_t *state, hermon_eqhdl_t eq); 48 static void hermon_eq_catastrophic(hermon_state_t *state); 49 static int hermon_eq_alloc(hermon_state_t *state, uint32_t log_eq_size, 50 uint_t intr, hermon_eqhdl_t *eqhdl); 51 static int hermon_eq_free(hermon_state_t *state, hermon_eqhdl_t *eqhdl); 52 static int hermon_eq_handler_init(hermon_state_t *state, hermon_eqhdl_t eq, 53 uint_t evt_type_mask, int (*eqfunc)(hermon_state_t *state, 54 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe)); 55 static int hermon_eq_handler_fini(hermon_state_t *state, hermon_eqhdl_t eq); 56 static int hermon_port_state_change_handler(hermon_state_t *state, 57 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe); 58 static int hermon_comm_estbl_handler(hermon_state_t *state, 59 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe); 60 static int hermon_local_wq_cat_err_handler(hermon_state_t *state, 61 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe); 62 static int hermon_invreq_local_wq_err_handler(hermon_state_t *state, 63 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe); 64 static int hermon_local_acc_vio_wq_err_handler(hermon_state_t *state, 65 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe); 66 static int hermon_sendq_drained_handler(hermon_state_t *state, 67 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe); 68 static int hermon_path_mig_handler(hermon_state_t *state, 69 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe); 70 static int hermon_path_mig_err_handler(hermon_state_t *state, 71 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe); 72 static int hermon_catastrophic_handler(hermon_state_t *state, 73 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe); 74 static int hermon_srq_last_wqe_reached_handler(hermon_state_t *state, 75 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe); 76 static int hermon_ecc_detection_handler(hermon_state_t *state, 77 hermon_eqhdl_t eq, 
hermon_hw_eqe_t *eqe); 78 static int hermon_no_eqhandler(hermon_state_t *state, hermon_eqhdl_t eq, 79 hermon_hw_eqe_t *eqe); 80 static int hermon_eq_demux(hermon_state_t *state, hermon_eqhdl_t eq, 81 hermon_hw_eqe_t *eqe); 82 83 /* 84 * hermon_eq_init_all 85 * Context: Only called from attach() path context 86 */ 87 int 88 hermon_eq_init_all(hermon_state_t *state) 89 { 90 uint_t log_eq_size, intr_num; 91 uint_t num_eq, num_eq_init, num_eq_unmap, num_eq_rsvd; 92 uint32_t event_mask; /* used for multiple event types */ 93 int status, i, num_extra; 94 uint64_t offset; 95 ddi_acc_handle_t uarhdl = hermon_get_uarhdl(state); 96 97 /* initialize the FMA retry loop */ 98 hermon_pio_init(fm_loop_cnt, fm_status, fm_test); 99 100 /* 101 * For now, all Event Queues default to the same size (pulled from 102 * the current configuration profile) and are all assigned to the 103 * same interrupt or MSI. In the future we may support assigning 104 * EQs to specific interrupts or MSIs XXX 105 */ 106 log_eq_size = state->hs_cfg_profile->cp_log_eq_sz; 107 108 /* 109 * Total number of supported EQs is fixed. Hermon hardware 110 * supports up to 512 EQs, though in theory they will one day be 111 * alloc'd to virtual HCA's. We are currently using only 47 of them 112 * - that is, in Arbel and Tavor, before HERMON, where 113 * we had set aside the first 32 for use with Completion Queues (CQ) 114 * and reserved a few of the other 32 for each specific class of event 115 * 116 * However, with the coming of vitualization, we'll have only 4 per 117 * potential guest - so, we'll try alloc'ing them differntly 118 * (see below for more details). 119 */ 120 num_eq = HERMON_NUM_EQ_USED; 121 num_eq_rsvd = state->hs_devlim.num_rsvd_eq; 122 123 /* 124 * If MSI is to be used, then set intr_num to the MSI number. 125 * Otherwise, for fixed (i.e. 'legacy') interrupts, 126 * it is what the card tells us in 'inta_pin'. 
127 */ 128 if (state->hs_intr_type_chosen == DDI_INTR_TYPE_FIXED) { 129 intr_num = state->hs_adapter.inta_pin; 130 num_extra = 0; 131 } else { 132 /* If we have more than one MSI-X vector, init them. */ 133 for (i = 0; i + 1 < state->hs_intrmsi_allocd; i++) { 134 status = hermon_eq_alloc(state, log_eq_size, i, 135 &state->hs_eqhdl[i + num_eq_rsvd]); 136 if (status != DDI_SUCCESS) { 137 while (--i >= 0) { 138 (void) hermon_eq_handler_fini(state, 139 state->hs_eqhdl[i + num_eq_rsvd]); 140 (void) hermon_eq_free(state, 141 &state->hs_eqhdl[i + num_eq_rsvd]); 142 } 143 return (DDI_FAILURE); 144 } 145 146 (void) hermon_eq_handler_init(state, 147 state->hs_eqhdl[i + num_eq_rsvd], 148 HERMON_EVT_NO_MASK, hermon_cq_handler); 149 } 150 intr_num = i; 151 num_extra = i; 152 } 153 154 /* 155 * Allocate and initialize the rest of the Event Queues to be used. 156 * If any of these EQ allocations fail then jump to the end, cleanup 157 * what had been successfully initialized, and return an error. 158 */ 159 for (i = 0; i < num_eq; i++) { 160 status = hermon_eq_alloc(state, log_eq_size, intr_num, 161 &state->hs_eqhdl[num_eq_rsvd + num_extra + i]); 162 if (status != DDI_SUCCESS) { 163 num_eq_init = i; 164 goto all_eq_init_fail; 165 } 166 } 167 num_eq_init = num_eq; 168 /* 169 * The "num_eq_unmap" variable is used in any possible failure 170 * cleanup (below) to indicate which events queues might require 171 * possible event class unmapping. 172 */ 173 num_eq_unmap = 0; 174 /* 175 * Setup EQ0 (first avail) for use with Completion Queues. Note: We can 176 * cast the return value to void here because, when we use the 177 * HERMON_EVT_NO_MASK flag, it is not possible for 178 * hermon_eq_handler_init() to return an error. 179 */ 180 181 (void) hermon_eq_handler_init(state, 182 state->hs_eqhdl[num_eq_unmap + num_extra + num_eq_rsvd], 183 HERMON_EVT_NO_MASK, hermon_cq_handler); 184 185 num_eq_unmap++; 186 187 /* 188 * Setup EQ1 for handling Completion Queue Error Events. 
189 * 190 * These events include things like CQ overflow or CQ access 191 * violation errors. If this setup fails for any reason (which, in 192 * general, it really never should), then jump to the end, cleanup 193 * everything that has been successfully initialized, and return an 194 * error. 195 */ 196 status = hermon_eq_handler_init(state, 197 state->hs_eqhdl[num_eq_unmap + num_extra + num_eq_rsvd], 198 HERMON_EVT_MSK_CQ_ERRORS, hermon_cq_err_handler); 199 if (status != DDI_SUCCESS) { 200 goto all_eq_init_fail; 201 } 202 num_eq_unmap++; 203 204 205 /* 206 * Setup EQ2 for handling most other things including: 207 * 208 * Port State Change Events 209 * These events include things like Port Up and Port Down events. 210 * 211 * Communication Established Events 212 * These events correspond to the IB affiliated asynchronous events 213 * that are used for connection management 214 * 215 * Path Migration Succeeded Events 216 * These evens corresponid to the IB affiliated asynchronous events 217 * that are used to indicate successful completion of a 218 * Path Migration. 219 * 220 * Command Completion Events 221 * These events correspond to the Arbel generated events that are used 222 * to indicate Arbel firmware command completion. 223 * 224 * Local WQ Catastrophic Error Events 225 * Invalid Req Local WQ Error Events 226 * Local Access Violation WQ Error Events 227 * SRQ Catastrophic Error Events 228 * SRQ Last WQE Reached Events 229 * ECC error detection events 230 * These events also correspond to the similarly-named IB affiliated 231 * asynchronous error type. 232 * 233 * Send Queue Drained Events 234 * These events correspond to the IB affiliated asynchronous events 235 * that are used to indicate completion of a Send Queue Drained QP 236 * state transition. 237 * 238 * Path Migration Failed Events 239 * These events correspond to the IB affiliated asynchronous events 240 * that are used to indicate that path migration was not successful. 
241 * 242 * NOTE: When an event fires on this EQ, it will demux the type and 243 * send it to the right specific handler routine 244 * 245 */ 246 247 248 event_mask = 249 HERMON_EVT_MSK_PORT_STATE_CHANGE | 250 HERMON_EVT_MSK_COMM_ESTABLISHED | 251 HERMON_EVT_MSK_COMMAND_INTF_COMP | 252 HERMON_EVT_MSK_LOCAL_WQ_CAT_ERROR | 253 HERMON_EVT_MSK_INV_REQ_LOCAL_WQ_ERROR | 254 HERMON_EVT_MSK_LOCAL_ACC_VIO_WQ_ERROR | 255 HERMON_EVT_MSK_SEND_QUEUE_DRAINED | 256 HERMON_EVT_MSK_PATH_MIGRATED | 257 HERMON_EVT_MSK_PATH_MIGRATE_FAILED | 258 HERMON_EVT_MSK_SRQ_CATASTROPHIC_ERROR | 259 HERMON_EVT_MSK_SRQ_LAST_WQE_REACHED | 260 HERMON_EVT_MSK_ECC_DETECTION; 261 262 status = hermon_eq_handler_init(state, 263 state->hs_eqhdl[num_eq_unmap + num_extra + num_eq_rsvd], 264 event_mask, hermon_eq_demux); 265 if (status != DDI_SUCCESS) { 266 goto all_eq_init_fail; 267 } 268 num_eq_unmap++; 269 270 /* 271 * Setup EQ3 to catch all other types of events. Specifically, we 272 * do not catch the "Local EEC Catastrophic Error Event" because we 273 * should have no EEC (the Arbel driver does not support RD). We also 274 * choose not to handle any of the address translation page fault 275 * event types. Since we are not doing any page fault handling (and 276 * since the Arbel firmware does not currently support any such 277 * handling), we allow these events to go to the catch-all handler. 278 */ 279 status = hermon_eq_handler_init(state, 280 state->hs_eqhdl[num_eq_unmap + num_extra + num_eq_rsvd], 281 HERMON_EVT_CATCHALL_MASK, hermon_no_eqhandler); 282 if (status != DDI_SUCCESS) { 283 goto all_eq_init_fail; 284 } 285 num_eq_unmap++; 286 287 /* the FMA retry loop starts. */ 288 hermon_pio_start(state, uarhdl, all_eq_init_fail, fm_loop_cnt, 289 fm_status, fm_test); 290 291 /* 292 * Run through and initialize the Consumer Index for each EQC. 
293 */ 294 for (i = 0; i < num_eq + num_extra; i++) { 295 offset = ARM_EQ_INDEX(i + num_eq_rsvd); 296 ddi_put32(uarhdl, 297 (uint32_t *)((uintptr_t)state->hs_reg_uar_baseaddr + 298 (uint32_t)offset), 0x0); 299 } 300 301 /* the FMA retry loop ends. */ 302 hermon_pio_end(state, uarhdl, all_eq_init_fail, fm_loop_cnt, 303 fm_status, fm_test); 304 305 return (DDI_SUCCESS); 306 307 all_eq_init_fail: 308 309 /* Unmap any of the partially mapped EQs from above */ 310 for (i = 0; i < num_eq_unmap + num_extra; i++) { 311 (void) hermon_eq_handler_fini(state, 312 state->hs_eqhdl[i + num_eq_rsvd]); 313 } 314 315 /* Free up any of the partially allocated EQs from above */ 316 for (i = 0; i < num_eq_init + num_extra; i++) { 317 (void) hermon_eq_free(state, &state->hs_eqhdl[i]); 318 } 319 320 /* If a HW error happen during ddi_pio, return DDI_FAILURE */ 321 if (fm_status == HCA_PIO_PERSISTENT) { 322 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_NON_FATAL); 323 status = DDI_FAILURE; 324 } 325 326 return (status); 327 } 328 329 330 /* 331 * hermon_eq_fini_all 332 * Context: Only called from attach() and/or detach() path contexts 333 */ 334 int 335 hermon_eq_fini_all(hermon_state_t *state) 336 { 337 uint_t num_eq, num_eq_rsvd; 338 int status, i; 339 340 /* 341 * Grab the total number of supported EQs again. This is the same 342 * hardcoded value that was used above (during the event queue 343 * initialization.) 344 */ 345 num_eq = HERMON_NUM_EQ_USED + state->hs_intrmsi_allocd - 1; 346 num_eq_rsvd = state->hs_devlim.num_rsvd_eq; 347 348 /* 349 * For each of the event queues that we initialized and mapped 350 * earlier, attempt to unmap the events from the EQ. 351 */ 352 for (i = 0; i < num_eq; i++) { 353 status = hermon_eq_handler_fini(state, 354 state->hs_eqhdl[i + num_eq_rsvd]); 355 if (status != DDI_SUCCESS) { 356 return (DDI_FAILURE); 357 } 358 } 359 360 /* 361 * Teardown and free up all the Event Queues that were allocated 362 * earlier. 
363 */ 364 for (i = 0; i < num_eq; i++) { 365 status = hermon_eq_free(state, 366 &state->hs_eqhdl[i + num_eq_rsvd]); 367 if (status != DDI_SUCCESS) { 368 return (DDI_FAILURE); 369 } 370 } 371 372 return (DDI_SUCCESS); 373 } 374 375 /* 376 * hermon_eq_arm() 377 * Context: called from interrupt 378 * 379 * Arms a single eq - eqn is the __logical__ eq number 0-based 380 */ 381 void 382 hermon_eq_arm(hermon_state_t *state, int eqn) 383 { 384 uint64_t offset; 385 hermon_eqhdl_t eq; 386 uint32_t eq_ci; 387 ddi_acc_handle_t uarhdl = hermon_get_uarhdl(state); 388 389 /* initialize the FMA retry loop */ 390 hermon_pio_init(fm_loop_cnt, fm_status, fm_test); 391 392 offset = ARM_EQ_INDEX(eqn + state->hs_devlim.num_rsvd_eq); 393 eq = state->hs_eqhdl[eqn + state->hs_devlim.num_rsvd_eq]; 394 eq_ci = (eq->eq_consindx & HERMON_EQ_CI_MASK) | EQ_ARM_BIT; 395 396 /* the FMA retry loop starts. */ 397 hermon_pio_start(state, uarhdl, pio_error, fm_loop_cnt, fm_status, 398 fm_test); 399 400 ddi_put32(uarhdl, 401 (uint32_t *)((uintptr_t)state->hs_reg_uar_baseaddr + 402 (uint32_t)offset), eq_ci); 403 404 /* the FMA retry loop ends. */ 405 hermon_pio_end(state, uarhdl, pio_error, fm_loop_cnt, fm_status, 406 fm_test); 407 408 return; 409 410 pio_error: 411 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_FATAL); 412 } 413 414 415 /* 416 * hermon_eq_arm_all 417 * Context: Only called from attach() and/or detach() path contexts 418 * Arbel calls in interrupt, currently (initial impl) in Hermon as well 419 */ 420 int 421 hermon_eq_arm_all(hermon_state_t *state) 422 { 423 uint_t num_eq, num_eq_rsvd; 424 uint64_t offset; 425 hermon_eqhdl_t eq; 426 uint32_t eq_ci; 427 int i; 428 ddi_acc_handle_t uarhdl = hermon_get_uarhdl(state); 429 430 /* initialize the FMA retry loop */ 431 hermon_pio_init(fm_loop_cnt, fm_status, fm_test); 432 433 num_eq = HERMON_NUM_EQ_USED; 434 num_eq_rsvd = state->hs_devlim.num_rsvd_eq; 435 436 /* the FMA retry loop starts. 
*/ 437 hermon_pio_start(state, uarhdl, pio_error, fm_loop_cnt, fm_status, 438 fm_test); 439 440 for (i = 0; i < num_eq; i++) { 441 offset = ARM_EQ_INDEX(i + num_eq_rsvd); 442 eq = state->hs_eqhdl[i + num_eq_rsvd]; 443 eq_ci = (eq->eq_consindx & HERMON_EQ_CI_MASK) | EQ_ARM_BIT; 444 ddi_put32(uarhdl, 445 (uint32_t *)((uintptr_t)state->hs_reg_uar_baseaddr + 446 (uint32_t)offset), eq_ci); 447 } 448 449 /* the FMA retry loop ends. */ 450 hermon_pio_end(state, uarhdl, pio_error, fm_loop_cnt, fm_status, 451 fm_test); 452 453 return (DDI_SUCCESS); 454 455 pio_error: 456 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_NON_FATAL); 457 return (DDI_FAILURE); 458 } 459 460 461 /* 462 * hermon_isr() 463 * Context: Only called from interrupt context (and during panic) 464 */ 465 uint_t 466 hermon_isr(caddr_t arg1, caddr_t arg2) 467 { 468 hermon_state_t *state; 469 int i, r; 470 int intr; 471 472 /* 473 * Grab the Hermon softstate pointer from the input parameter 474 */ 475 state = (hermon_state_t *)(void *)arg1; 476 477 /* Get the interrupt number */ 478 intr = (int)(uintptr_t)arg2; 479 480 /* 481 * Clear the interrupt. Note: This is only needed for 482 * fixed interrupts as the framework does what is needed for 483 * MSI-X interrupts. 484 */ 485 if (state->hs_intr_type_chosen == DDI_INTR_TYPE_FIXED) { 486 ddi_acc_handle_t cmdhdl = hermon_get_cmdhdl(state); 487 488 /* initialize the FMA retry loop */ 489 hermon_pio_init(fm_loop_cnt, fm_status, fm_test); 490 491 /* the FMA retry loop starts. */ 492 hermon_pio_start(state, cmdhdl, pio_error, fm_loop_cnt, 493 fm_status, fm_test); 494 495 ddi_put64(cmdhdl, state->hs_cmd_regs.clr_intr, 496 (uint64_t)1 << state->hs_adapter.inta_pin); 497 498 /* the FMA retry loop ends. */ 499 hermon_pio_end(state, cmdhdl, pio_error, fm_loop_cnt, fm_status, 500 fm_test); 501 } 502 503 /* 504 * Loop through all the EQs looking for ones that have "fired". 
505 * To determine if an EQ is fired, the ownership will be the SW 506 * (the HW will set the owner appropriately). Update the Consumer Index 507 * of the Event Queue Entry (EQE) and pass it to HW by writing it 508 * to the respective Set CI DB Register. 509 * 510 * The "else" case handles the extra EQs used only for completion 511 * events, whereas the "if" case deals with the required interrupt 512 * vector that is used for all classes of events. 513 */ 514 r = state->hs_devlim.num_rsvd_eq; 515 516 if (intr + 1 == state->hs_intrmsi_allocd) { /* last intr */ 517 r += state->hs_intrmsi_allocd - 1; 518 for (i = 0; i < HERMON_NUM_EQ_USED; i++) { 519 hermon_eq_poll(state, state->hs_eqhdl[i + r]); 520 } 521 } else { /* only poll the one EQ */ 522 hermon_eq_poll(state, state->hs_eqhdl[intr + r]); 523 } 524 525 return (DDI_INTR_CLAIMED); 526 527 pio_error: 528 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_FATAL); 529 return (DDI_INTR_UNCLAIMED); 530 } 531 532 533 /* 534 * hermon_eq_poll 535 * Context: Only called from interrupt context (and during panic) 536 */ 537 static void 538 hermon_eq_poll(hermon_state_t *state, hermon_eqhdl_t eq) 539 { 540 hermon_hw_eqe_t *eqe; 541 uint64_t offset; 542 int polled_some; 543 uint32_t cons_indx, wrap_around_mask; 544 int (*eqfunction)(hermon_state_t *state, hermon_eqhdl_t eq, 545 hermon_hw_eqe_t *eqe); 546 ddi_acc_handle_t uarhdl = hermon_get_uarhdl(state); 547 548 /* initialize the FMA retry loop */ 549 hermon_pio_init(fm_loop_cnt, fm_status, fm_test); 550 551 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*eq)) 552 553 /* Get the consumer pointer index */ 554 cons_indx = eq->eq_consindx; 555 556 /* 557 * Calculate the wrap around mask. 
Note: This operation only works 558 * because all Hermon event queues have power-of-2 sizes 559 */ 560 wrap_around_mask = (eq->eq_bufsz - 1); 561 562 /* Calculate the pointer to the first EQ entry */ 563 eqe = &eq->eq_buf[(cons_indx & wrap_around_mask)]; 564 565 566 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*eqe)) 567 568 /* 569 * Pull the handler function for this EQ from the Hermon Event Queue 570 * handle 571 */ 572 eqfunction = eq->eq_func; 573 574 for (;;) { 575 polled_some = 0; 576 while (HERMON_EQE_OWNER_IS_SW(eq, eqe)) { 577 578 /* 579 * Call the EQ handler function. But only call if we 580 * are not in polled I/O mode (i.e. not processing 581 * because of a system panic). Note: We don't call 582 * the EQ handling functions from a system panic 583 * because we are primarily concerned only with 584 * ensuring that the event queues do not overflow (or, 585 * more specifically, the event queue associated with 586 * the CQ that is being used in the sync/dump process). 587 * Also, we don't want to make any upcalls (to the 588 * IBTF) because we can't guarantee when/if those 589 * calls would ever return. And, if we're in panic, 590 * then we reached here through a PollCQ() call (from 591 * hermon_cq_poll()), and we need to ensure that we 592 * successfully return any work completions to the 593 * caller. 594 */ 595 if (ddi_in_panic() == 0) { 596 eqfunction(state, eq, eqe); 597 } 598 599 /* Reset to hardware ownership is implicit */ 600 601 eq->eq_nexteqe++; /* for next time through */ 602 603 /* Increment the consumer index */ 604 cons_indx++; 605 606 /* Update the pointer to the next EQ entry */ 607 eqe = &eq->eq_buf[(cons_indx & wrap_around_mask)]; 608 609 polled_some = 1; 610 } 611 612 /* 613 * write consumer index via EQ set CI Doorbell, to keep overflow 614 * from occuring during poll 615 */ 616 617 eq->eq_consindx = cons_indx; 618 619 offset = ARM_EQ_INDEX(eq->eq_eqnum); 620 621 /* the FMA retry loop starts. 
*/ 622 hermon_pio_start(state, uarhdl, pio_error, fm_loop_cnt, 623 fm_status, fm_test); 624 625 ddi_put32(uarhdl, 626 (uint32_t *)((uintptr_t)state->hs_reg_uar_baseaddr + 627 (uint32_t)offset), (cons_indx & HERMON_EQ_CI_MASK) | 628 EQ_ARM_BIT); 629 630 /* the FMA retry loop starts. */ 631 hermon_pio_end(state, uarhdl, pio_error, fm_loop_cnt, 632 fm_status, fm_test); 633 634 if (polled_some == 0) 635 break; 636 }; 637 return; 638 639 pio_error: 640 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_FATAL); 641 } 642 643 644 /* 645 * hermon_eq_catastrophic 646 * Context: Only called from interrupt context (and during panic) 647 */ 648 static void 649 hermon_eq_catastrophic(hermon_state_t *state) 650 { 651 ddi_acc_handle_t cmdhdl = hermon_get_cmdhdl(state); 652 ibt_async_code_t type; 653 ibc_async_event_t event; 654 uint32_t *base_addr; 655 uint32_t buf_size; 656 uint32_t word; 657 uint8_t err_type; 658 uint32_t err_buf; 659 int i; 660 661 /* initialize the FMA retry loop */ 662 hermon_pio_init(fm_loop_cnt, fm_status, fm_test); 663 664 bzero(&event, sizeof (ibc_async_event_t)); 665 base_addr = state->hs_cmd_regs.fw_err_buf; 666 667 buf_size = state->hs_fw.error_buf_sz; /* in #dwords */ 668 669 /* the FMA retry loop starts. */ 670 hermon_pio_start(state, cmdhdl, pio_error, fm_loop_cnt, fm_status, 671 fm_test); 672 673 word = ddi_get32(cmdhdl, base_addr); 674 675 /* the FMA retry loop ends. 
*/ 676 hermon_pio_end(state, cmdhdl, pio_error, fm_loop_cnt, fm_status, 677 fm_test); 678 679 err_type = (word & 0xFF000000) >> 24; 680 type = IBT_ERROR_LOCAL_CATASTROPHIC; 681 682 switch (err_type) { 683 case HERMON_CATASTROPHIC_INTERNAL_ERROR: 684 cmn_err(CE_WARN, "Catastrophic Internal Error: 0x%02x", 685 err_type); 686 687 break; 688 689 case HERMON_CATASTROPHIC_UPLINK_BUS_ERROR: 690 cmn_err(CE_WARN, "Catastrophic Uplink Bus Error: 0x%02x", 691 err_type); 692 693 break; 694 695 case HERMON_CATASTROPHIC_DDR_DATA_ERROR: 696 cmn_err(CE_WARN, "Catastrophic DDR Data Error: 0x%02x", 697 err_type); 698 699 break; 700 701 case HERMON_CATASTROPHIC_INTERNAL_PARITY_ERROR: 702 cmn_err(CE_WARN, "Catastrophic Internal Parity Error: 0x%02x", 703 err_type); 704 705 break; 706 707 default: 708 /* Unknown type of Catastrophic error */ 709 cmn_err(CE_WARN, "Catastrophic Unknown Error: 0x%02x", 710 err_type); 711 712 break; 713 } 714 715 /* the FMA retry loop starts. */ 716 hermon_pio_start(state, cmdhdl, pio_error, fm_loop_cnt, fm_status, 717 fm_test); 718 719 /* 720 * Read in the catastrophic error buffer from the hardware. 721 */ 722 for (i = 0; i < buf_size; i++) { 723 base_addr = 724 (state->hs_cmd_regs.fw_err_buf + i); 725 err_buf = ddi_get32(cmdhdl, base_addr); 726 cmn_err(CE_NOTE, "hermon%d: catastrophic_error[%02x]: %08X", 727 state->hs_instance, i, err_buf); 728 } 729 730 /* the FMA retry loop ends. */ 731 hermon_pio_end(state, cmdhdl, pio_error, fm_loop_cnt, fm_status, 732 fm_test); 733 734 /* 735 * We also call the IBTF here to inform it of the catastrophic error. 736 * Note: Since no event information (i.e. QP handles, CQ handles, 737 * etc.) is necessary, we pass a NULL pointer instead of a pointer to 738 * an empty ibc_async_event_t struct. 739 * 740 * But we also check if "hs_ibtfpriv" is NULL. 
If it is then it 741 * means that we've have either received this event before we 742 * finished attaching to the IBTF or we've received it while we 743 * are in the process of detaching. 744 */ 745 if (state->hs_ibtfpriv != NULL) { 746 HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event); 747 } 748 749 pio_error: 750 /* ignore these errors but log them because they're harmless. */ 751 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_NON_FATAL); 752 } 753 754 755 /* 756 * hermon_eq_alloc() 757 * Context: Only called from attach() path context 758 */ 759 static int 760 hermon_eq_alloc(hermon_state_t *state, uint32_t log_eq_size, uint_t intr, 761 hermon_eqhdl_t *eqhdl) 762 { 763 hermon_rsrc_t *eqc, *rsrc; 764 hermon_hw_eqc_t eqc_entry; 765 hermon_eqhdl_t eq; 766 ibt_mr_attr_t mr_attr; 767 hermon_mr_options_t op; 768 hermon_pdhdl_t pd; 769 hermon_mrhdl_t mr; 770 hermon_hw_eqe_t *buf; 771 int status; 772 773 /* Use the internal protection domain (PD) for setting up EQs */ 774 pd = state->hs_pdhdl_internal; 775 776 /* Increment the reference count on the protection domain (PD) */ 777 hermon_pd_refcnt_inc(pd); 778 779 /* 780 * Allocate an EQ context entry. This will be filled in with all 781 * the necessary parameters to define the Event Queue. And then 782 * ownership will be passed to the hardware in the final step 783 * below. If we fail here, we must undo the protection domain 784 * reference count. 785 */ 786 status = hermon_rsrc_alloc(state, HERMON_EQC, 1, HERMON_SLEEP, &eqc); 787 if (status != DDI_SUCCESS) { 788 status = DDI_FAILURE; 789 goto eqalloc_fail1; 790 } 791 792 /* 793 * Allocate the software structure for tracking the event queue (i.e. 794 * the Hermon Event Queue handle). If we fail here, we must undo the 795 * protection domain reference count and the previous resource 796 * allocation. 
797 */ 798 status = hermon_rsrc_alloc(state, HERMON_EQHDL, 1, HERMON_SLEEP, &rsrc); 799 if (status != DDI_SUCCESS) { 800 status = DDI_FAILURE; 801 goto eqalloc_fail2; 802 } 803 804 eq = (hermon_eqhdl_t)rsrc->hr_addr; 805 806 /* 807 * Allocate the memory for Event Queue. 808 */ 809 eq->eq_eqinfo.qa_size = (1 << log_eq_size) * sizeof (hermon_hw_eqe_t); 810 eq->eq_eqinfo.qa_alloc_align = PAGESIZE; 811 eq->eq_eqinfo.qa_bind_align = PAGESIZE; 812 813 eq->eq_eqinfo.qa_location = HERMON_QUEUE_LOCATION_NORMAL; 814 status = hermon_queue_alloc(state, &eq->eq_eqinfo, HERMON_SLEEP); 815 if (status != DDI_SUCCESS) { 816 status = DDI_FAILURE; 817 goto eqalloc_fail3; 818 } 819 820 buf = (hermon_hw_eqe_t *)eq->eq_eqinfo.qa_buf_aligned; 821 /* 822 * Initializing each of the Event Queue Entries (EQE) by setting their 823 * ownership to hardware ("owner" bit set to HW) is now done by HW 824 * when the transfer of ownership (below) of the 825 * EQ context itself is done. 826 */ 827 828 /* 829 * Register the memory for the EQ. 830 * 831 * Because we are in the attach path we use NOSLEEP here so that we 832 * SPIN in the HCR since the event queues are not setup yet, and we 833 * cannot NOSPIN at this point in time. 834 */ 835 836 mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf; 837 mr_attr.mr_len = eq->eq_eqinfo.qa_size; 838 mr_attr.mr_as = NULL; 839 mr_attr.mr_flags = IBT_MR_NOSLEEP | IBT_MR_ENABLE_LOCAL_WRITE; 840 op.mro_bind_type = state->hs_cfg_profile->cp_iommu_bypass; 841 op.mro_bind_dmahdl = eq->eq_eqinfo.qa_dmahdl; 842 op.mro_bind_override_addr = 0; 843 status = hermon_mr_register(state, pd, &mr_attr, &mr, &op, 844 HERMON_EQ_CMPT); 845 if (status != DDI_SUCCESS) { 846 status = DDI_FAILURE; 847 goto eqalloc_fail4; 848 } 849 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr)) 850 851 /* Sync entire EQ for use by the hardware */ 852 eq->eq_sync = 1; 853 854 (void) ddi_dma_sync(mr->mr_bindinfo.bi_dmahdl, 0, 855 eq->eq_eqinfo.qa_size, DDI_DMA_SYNC_FORDEV); 856 857 /* 858 * Fill in the EQC entry. 
This is the final step before passing 859 * ownership of the EQC entry to the Hermon hardware. We use all of 860 * the information collected/calculated above to fill in the 861 * requisite portions of the EQC. Note: We create all EQs in the 862 * "fired" state. We will arm them later (after our interrupt 863 * routine had been registered.) 864 */ 865 bzero(&eqc_entry, sizeof (hermon_hw_eqc_t)); 866 eqc_entry.state = HERMON_EQ_ARMED; 867 eqc_entry.log_eq_sz = log_eq_size; 868 eqc_entry.intr = intr; 869 eqc_entry.log2_pgsz = mr->mr_log2_pgsz; 870 eqc_entry.pg_offs = eq->eq_eqinfo.qa_pgoffs >> 5; 871 eqc_entry.mtt_base_addrh = (uint32_t)((mr->mr_mttaddr >> 32) & 0xFF); 872 eqc_entry.mtt_base_addrl = mr->mr_mttaddr >> 3; 873 eqc_entry.cons_indx = 0x0; 874 eqc_entry.prod_indx = 0x0; 875 876 /* 877 * Write the EQC entry to hardware. Lastly, we pass ownership of 878 * the entry to the hardware (using the Hermon SW2HW_EQ firmware 879 * command). Note: in general, this operation shouldn't fail. But 880 * if it does, we have to undo everything we've done above before 881 * returning error. 882 */ 883 status = hermon_cmn_ownership_cmd_post(state, SW2HW_EQ, &eqc_entry, 884 sizeof (hermon_hw_eqc_t), eqc->hr_indx, HERMON_CMD_NOSLEEP_SPIN); 885 if (status != HERMON_CMD_SUCCESS) { 886 cmn_err(CE_NOTE, "hermon%d: SW2HW_EQ command failed: %08x\n", 887 state->hs_instance, status); 888 if (status == HERMON_CMD_INVALID_STATUS) { 889 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST); 890 } 891 status = ibc_get_ci_failure(0); 892 goto eqalloc_fail5; 893 } 894 895 /* 896 * Fill in the rest of the Hermon Event Queue handle. Having 897 * successfully transferred ownership of the EQC, we can update the 898 * following fields for use in further operations on the EQ. 
899 */ 900 eq->eq_eqcrsrcp = eqc; 901 eq->eq_rsrcp = rsrc; 902 eq->eq_consindx = 0; 903 eq->eq_eqnum = eqc->hr_indx; 904 eq->eq_buf = buf; 905 eq->eq_bufsz = (1 << log_eq_size); 906 eq->eq_log_eqsz = log_eq_size; 907 eq->eq_nexteqe = 0; 908 eq->eq_mrhdl = mr; 909 *eqhdl = eq; 910 911 return (DDI_SUCCESS); 912 913 /* 914 * The following is cleanup for all possible failure cases in this routine 915 */ 916 eqalloc_fail5: 917 if (hermon_mr_deregister(state, &mr, HERMON_MR_DEREG_ALL, 918 HERMON_NOSLEEP) != DDI_SUCCESS) { 919 HERMON_WARNING(state, "failed to deregister EQ memory"); 920 } 921 eqalloc_fail4: 922 hermon_queue_free(&eq->eq_eqinfo); 923 eqalloc_fail3: 924 hermon_rsrc_free(state, &rsrc); 925 eqalloc_fail2: 926 hermon_rsrc_free(state, &eqc); 927 eqalloc_fail1: 928 hermon_pd_refcnt_dec(pd); 929 eqalloc_fail: 930 return (status); 931 } 932 933 934 /* 935 * hermon_eq_free() 936 * Context: Only called from attach() and/or detach() path contexts 937 */ 938 static int 939 hermon_eq_free(hermon_state_t *state, hermon_eqhdl_t *eqhdl) 940 { 941 hermon_rsrc_t *eqc, *rsrc; 942 hermon_hw_eqc_t eqc_entry; 943 hermon_pdhdl_t pd; 944 hermon_mrhdl_t mr; 945 hermon_eqhdl_t eq; 946 uint32_t eqnum; 947 int status; 948 949 /* 950 * Pull all the necessary information from the Hermon Event Queue 951 * handle. This is necessary here because the resource for the 952 * EQ handle is going to be freed up as part of this operation. 953 */ 954 eq = *eqhdl; 955 eqc = eq->eq_eqcrsrcp; 956 rsrc = eq->eq_rsrcp; 957 pd = state->hs_pdhdl_internal; 958 mr = eq->eq_mrhdl; 959 eqnum = eq->eq_eqnum; 960 961 /* 962 * Reclaim EQC entry from hardware (using the Hermon HW2SW_EQ 963 * firmware command). If the ownership transfer fails for any reason, 964 * then it is an indication that something (either in HW or SW) has 965 * gone seriously wrong. 
966 */ 967 status = hermon_cmn_ownership_cmd_post(state, HW2SW_EQ, &eqc_entry, 968 sizeof (hermon_hw_eqc_t), eqnum, HERMON_CMD_NOSLEEP_SPIN); 969 if (status != HERMON_CMD_SUCCESS) { 970 HERMON_WARNING(state, "failed to reclaim EQC ownership"); 971 cmn_err(CE_CONT, "Hermon: HW2SW_EQ command failed: %08x\n", 972 status); 973 return (DDI_FAILURE); 974 } 975 976 /* 977 * Deregister the memory for the Event Queue. If this fails 978 * for any reason, then it is an indication that something (either 979 * in HW or SW) has gone seriously wrong. So we print a warning 980 * message and continue. 981 */ 982 status = hermon_mr_deregister(state, &mr, HERMON_MR_DEREG_ALL, 983 HERMON_NOSLEEP); 984 if (status != DDI_SUCCESS) { 985 HERMON_WARNING(state, "failed to deregister EQ memory"); 986 } 987 988 /* Free the memory for the EQ */ 989 hermon_queue_free(&eq->eq_eqinfo); 990 991 /* Free the Hermon Event Queue handle */ 992 hermon_rsrc_free(state, &rsrc); 993 994 /* Free up the EQC entry resource */ 995 hermon_rsrc_free(state, &eqc); 996 997 /* Decrement the reference count on the protection domain (PD) */ 998 hermon_pd_refcnt_dec(pd); 999 1000 /* Set the eqhdl pointer to NULL and return success */ 1001 *eqhdl = NULL; 1002 1003 return (DDI_SUCCESS); 1004 } 1005 1006 1007 /* 1008 * hermon_eq_handler_init 1009 * Context: Only called from attach() path context 1010 */ 1011 static int 1012 hermon_eq_handler_init(hermon_state_t *state, hermon_eqhdl_t eq, 1013 uint_t evt_type_mask, int (*eq_func)(hermon_state_t *state, 1014 hermon_eqhdl_t eq, hermon_hw_eqe_t *eqe)) 1015 { 1016 int status; 1017 1018 /* 1019 * Save away the EQ handler function and the event type mask. These 1020 * will be used later during interrupt and event queue processing. 1021 */ 1022 eq->eq_func = eq_func; 1023 eq->eq_evttypemask = evt_type_mask; 1024 1025 /* 1026 * Map the EQ to a specific class of event (or events) depending 1027 * on the mask value passed in. 
	 * The HERMON_EVT_NO_MASK means not
	 * to attempt associating the EQ with any specific class of event.
	 * This is particularly useful when initializing the events queues
	 * used for CQ events.  The mapping is done using the Hermon MAP_EQ
	 * firmware command.  Note: This command should not, in general, fail.
	 * If it does, then something (probably HW related) has gone seriously
	 * wrong.
	 */
	if (evt_type_mask != HERMON_EVT_NO_MASK) {
		status = hermon_map_eq_cmd_post(state,
		    HERMON_CMD_MAP_EQ_EVT_MAP, eq->eq_eqnum, evt_type_mask,
		    HERMON_CMD_NOSLEEP_SPIN);
		if (status != HERMON_CMD_SUCCESS) {
			cmn_err(CE_NOTE, "hermon%d: MAP_EQ command failed: "
			    "%08x\n", state->hs_instance, status);
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_eq_handler_fini
 *    Context: Only called from attach() and/or detach() path contexts
 *
 *    Undoes hermon_eq_handler_init(): unmaps the EQ from the event classes
 *    recorded in eq_evttypemask (if any) via the MAP_EQ firmware command.
 *    Returns DDI_SUCCESS/DDI_FAILURE.
 */
static int
hermon_eq_handler_fini(hermon_state_t *state, hermon_eqhdl_t eq)
{
	int			status;

	/*
	 * Unmap the EQ from the event class to which it had been previously
	 * mapped.  The unmapping is done using the Hermon MAP_EQ (in much
	 * the same way that the initial mapping was done).  The difference,
	 * however, is in the HERMON_EQ_EVT_UNMAP flag that is passed to the
	 * MAP_EQ firmware command.  The HERMON_EVT_NO_MASK (which may have
	 * been passed in at init time) still means that no association has
	 * been made between the EQ and any specific class of event (and,
	 * hence, no unmapping is necessary).  Note: This command should not,
	 * in general, fail.  If it does, then something (probably HW related)
	 * has gone seriously wrong.
	 */
	if (eq->eq_evttypemask != HERMON_EVT_NO_MASK) {
		status = hermon_map_eq_cmd_post(state,
		    HERMON_CMD_MAP_EQ_EVT_UNMAP, eq->eq_eqnum,
		    eq->eq_evttypemask, HERMON_CMD_NOSLEEP_SPIN);
		if (status != HERMON_CMD_SUCCESS) {
			cmn_err(CE_NOTE, "hermon%d: MAP_EQ command failed: "
			    "%08x\n", state->hs_instance, status);
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_eq_demux()
 *    Context: Called only from interrupt context
 *    Usage:  to demux the various event types reported on one EQ
 *
 *    Dispatches a single event queue entry to the handler for its event
 *    type.  Returns the handler's status; an unrecognized event type
 *    falls through and returns DDI_FAILURE.
 */
static int
hermon_eq_demux(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	uint_t			eqe_evttype;
	int			status = DDI_FAILURE;

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	switch (eqe_evttype) {

	case HERMON_EVT_PORT_STATE_CHANGE:
		status = hermon_port_state_change_handler(state, eq, eqe);
		break;

	case HERMON_EVT_COMM_ESTABLISHED:
		status = hermon_comm_estbl_handler(state, eq, eqe);
		break;

	case HERMON_EVT_COMMAND_INTF_COMP:
		status = hermon_cmd_complete_handler(state, eq, eqe);
		break;

	case HERMON_EVT_LOCAL_WQ_CAT_ERROR:
		HERMON_FMANOTE(state, HERMON_FMA_LOCCAT);
		status = hermon_local_wq_cat_err_handler(state, eq, eqe);
		break;

	case HERMON_EVT_INV_REQ_LOCAL_WQ_ERROR:
		HERMON_FMANOTE(state, HERMON_FMA_LOCINV);
		status = hermon_invreq_local_wq_err_handler(state, eq, eqe);
		break;

	case HERMON_EVT_LOCAL_ACC_VIO_WQ_ERROR:
		HERMON_FMANOTE(state, HERMON_FMA_LOCACEQ);
		IBTF_DPRINTF_L2("async", HERMON_FMA_LOCACEQ);
		status = hermon_local_acc_vio_wq_err_handler(state, eq, eqe);
		break;
	case HERMON_EVT_SEND_QUEUE_DRAINED:
		status = hermon_sendq_drained_handler(state, eq, eqe);
		break;

	case HERMON_EVT_PATH_MIGRATED:
		status = hermon_path_mig_handler(state, eq, eqe);
		break;

	case HERMON_EVT_PATH_MIGRATE_FAILED:
		HERMON_FMANOTE(state, HERMON_FMA_PATHMIG);
		status = hermon_path_mig_err_handler(state, eq, eqe);
		break;

	case HERMON_EVT_SRQ_CATASTROPHIC_ERROR:
		HERMON_FMANOTE(state, HERMON_FMA_SRQCAT);
		status = hermon_catastrophic_handler(state, eq, eqe);
		break;

	case HERMON_EVT_SRQ_LAST_WQE_REACHED:
		status = hermon_srq_last_wqe_reached_handler(state, eq, eqe);
		break;

	case HERMON_EVT_ECC_DETECTION:
		status = hermon_ecc_detection_handler(state, eq, eqe);
		break;

	default:
		/* Unknown event type: leave status as DDI_FAILURE */
		break;
	}
	return (status);
}

/*
 * hermon_port_state_change_handler()
 *    Context: Only called from interrupt context
 *
 *    Translates a Port State Change EQE into the corresponding IBTF async
 *    event (IBT_EVENT_PORT_UP or IBT_ERROR_PORT_DOWN), reports the link
 *    transition to the DDI fault framework, and delivers the event to the
 *    IBTF if attach has completed.  Returns DDI_SUCCESS, or DDI_FAILURE on
 *    EQ overflow, an invalid port number, or an unknown subtype.
 */
static int
hermon_port_state_change_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	ibc_async_event_t	event;
	ibt_async_code_t	type;
	uint_t			subtype;
	uint8_t			port;
	uint_t			eqe_evttype;
	char			link_msg[24];

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_PORT_STATE_CHANGE ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/*
	 * Depending on the type of Port State Change event, pass the
	 * appropriate asynch event to the IBTF.
	 */
	port = (uint8_t)HERMON_EQE_PORTNUM_GET(eq, eqe);

	/* Check for valid port number in event (ports are 1-based) */
	if ((port == 0) || (port > state->hs_cfg_profile->cp_num_ports)) {
		HERMON_WARNING(state, "Unexpected port number in port state "
		    "change event");
		cmn_err(CE_CONT, " Port number: %02x\n", port);
		return (DDI_FAILURE);
	}

	subtype = HERMON_EQE_EVTSUBTYPE_GET(eq, eqe);
	if (subtype == HERMON_PORT_LINK_ACTIVE) {
		event.ev_port	= port;
		type		= IBT_EVENT_PORT_UP;

		/* link_msg[24] comfortably holds "port %d up/down" + NUL */
		(void) snprintf(link_msg, 23, "port %d up", port);
		ddi_dev_report_fault(state->hs_dip, DDI_SERVICE_RESTORED,
		    DDI_EXTERNAL_FAULT, link_msg);
	} else if (subtype == HERMON_PORT_LINK_DOWN) {
		event.ev_port	= port;
		type		= IBT_ERROR_PORT_DOWN;

		(void) snprintf(link_msg, 23, "port %d down", port);
		ddi_dev_report_fault(state->hs_dip, DDI_SERVICE_LOST,
		    DDI_EXTERNAL_FAULT, link_msg);
	} else {
		HERMON_WARNING(state, "Unexpected subtype in port state change "
		    "event");
		cmn_err(CE_CONT, " Event type: %02x, subtype: %02x\n",
		    HERMON_EQE_EVTTYPE_GET(eq, eqe), subtype);
		return (DDI_FAILURE);
	}

	/*
	 * Deliver the event to the IBTF.  Note: If "hs_ibtfpriv" is NULL,
	 * then we have either received this event before we finished
	 * attaching to the IBTF or we've received it while we are in the
	 * process of detaching.
	 */
	if (state->hs_ibtfpriv != NULL) {
		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_comm_estbl_handler()
 *    Context: Only called from interrupt context
 *
 *    Delivers IBT_EVENT_COM_EST_QP to the IBTF for the QP named in the
 *    EQE (if the QP still exists and attach has completed).  Returns
 *    DDI_SUCCESS, or DDI_FAILURE on EQ overflow.
 */
static int
hermon_comm_estbl_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	hermon_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	ibt_async_code_t	type;
	uint_t			eqe_evttype;

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_COMM_ESTABLISHED ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
	qp = hermon_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.  By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * Lastly, we check if "hs_ibtfpriv" is NULL.  If it is then it
	 * means that we have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->hs_ibtfpriv != NULL)) {
		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
		type		= IBT_EVENT_COM_EST_QP;

		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_local_wq_cat_err_handler()
 *    Context: Only called from interrupt context
 *
 *    Delivers IBT_ERROR_CATASTROPHIC_QP to the IBTF for the QP named in
 *    the EQE (if the QP still exists and attach has completed).  Returns
 *    DDI_SUCCESS, or DDI_FAILURE on EQ overflow.
 */
static int
hermon_local_wq_cat_err_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	hermon_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	ibt_async_code_t	type;
	uint_t			eqe_evttype;

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_LOCAL_WQ_CAT_ERROR ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
	qp = hermon_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.  By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * Lastly, we check if "hs_ibtfpriv" is NULL.  If it is then it
	 * means that we have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->hs_ibtfpriv != NULL)) {
		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
		type		= IBT_ERROR_CATASTROPHIC_QP;

		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_invreq_local_wq_err_handler()
 *    Context: Only called from interrupt context
 *
 *    Delivers IBT_ERROR_INVALID_REQUEST_QP to the IBTF for the QP named
 *    in the EQE (if the QP still exists and attach has completed).
 *    Returns DDI_SUCCESS, or DDI_FAILURE on EQ overflow.
 */
static int
hermon_invreq_local_wq_err_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	hermon_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	ibt_async_code_t	type;
	uint_t			eqe_evttype;

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_INV_REQ_LOCAL_WQ_ERROR ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
	qp = hermon_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.
	 * By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * Lastly, we check if "hs_ibtfpriv" is NULL.  If it is then it
	 * means that we have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->hs_ibtfpriv != NULL)) {
		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
		type		= IBT_ERROR_INVALID_REQUEST_QP;

		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_local_acc_vio_wq_err_handler()
 *    Context: Only called from interrupt context
 *
 *    Delivers IBT_ERROR_ACCESS_VIOLATION_QP to the IBTF for the QP named
 *    in the EQE (if the QP still exists and attach has completed).
 *    Returns DDI_SUCCESS, or DDI_FAILURE on EQ overflow.
 */
static int
hermon_local_acc_vio_wq_err_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	hermon_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	ibt_async_code_t	type;
	uint_t			eqe_evttype;

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_LOCAL_ACC_VIO_WQ_ERROR ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
	qp = hermon_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.  By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * Lastly, we check if "hs_ibtfpriv" is NULL.  If it is then it
	 * means that we have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->hs_ibtfpriv != NULL)) {
		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
		type		= IBT_ERROR_ACCESS_VIOLATION_QP;

		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_sendq_drained_handler()
 *    Context: Only called from interrupt context
 *
 *    Handles the Send Queue Drained event: updates the QP's SQD state
 *    under qp_lock and, if the consumer asked for the notification,
 *    delivers IBT_EVENT_SQD to the IBTF.  Returns DDI_SUCCESS, or
 *    DDI_FAILURE on EQ overflow.
 */
static int
hermon_sendq_drained_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	hermon_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	uint_t			forward_sqd_event;
	ibt_async_code_t	type;
	uint_t			eqe_evttype;

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_SEND_QUEUE_DRAINED ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
	qp = hermon_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.  By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * And then we check if "hs_ibtfpriv" is NULL.  If it is then it
	 * means that we have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->hs_ibtfpriv != NULL)) {
		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
		type		= IBT_EVENT_SQD;

		/*
		 * Grab the QP lock and update the QP state to reflect that
		 * the Send Queue Drained event has arrived.  Also determine
		 * whether the event is intended to be forwarded on to the
		 * consumer or not.  This information is used below in
		 * determining whether or not to call the IBTF.
		 */
		mutex_enter(&qp->qp_lock);
		forward_sqd_event = qp->qp_forward_sqd_event;
		qp->qp_forward_sqd_event  = 0;
		qp->qp_sqd_still_draining = 0;
		mutex_exit(&qp->qp_lock);

		if (forward_sqd_event != 0) {
			HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
		}
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_path_mig_handler()
 *    Context: Only called from interrupt context
 *
 *    Delivers IBT_EVENT_PATH_MIGRATED_QP to the IBTF for the QP named in
 *    the EQE (if the QP still exists and attach has completed).  Returns
 *    DDI_SUCCESS, or DDI_FAILURE on EQ overflow.
 */
static int
hermon_path_mig_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	hermon_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	ibt_async_code_t	type;
	uint_t			eqe_evttype;

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_PATH_MIGRATED ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
	qp = hermon_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.  By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * Lastly, we check if "hs_ibtfpriv" is NULL.
	 * If it is then it
	 * means that we have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->hs_ibtfpriv != NULL)) {
		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
		type		= IBT_EVENT_PATH_MIGRATED_QP;

		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_path_mig_err_handler()
 *    Context: Only called from interrupt context
 *
 *    Delivers IBT_ERROR_PATH_MIGRATE_REQ_QP to the IBTF for the QP named
 *    in the EQE (if the QP still exists and attach has completed).
 *    Returns DDI_SUCCESS, or DDI_FAILURE on EQ overflow.
 */
static int
hermon_path_mig_err_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	hermon_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	ibt_async_code_t	type;
	uint_t			eqe_evttype;

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_PATH_MIGRATE_FAILED ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
	qp = hermon_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.  By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * Lastly, we check if "hs_ibtfpriv" is NULL.  If it is then it
	 * means that we have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->hs_ibtfpriv != NULL)) {
		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
		type		= IBT_ERROR_PATH_MIGRATE_REQ_QP;

		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_catastrophic_handler()
 *    Context: Only called from interrupt context
 *
 *    Two roles: if this EQ is the local-catastrophic-error EQ, escalate to
 *    hermon_eq_catastrophic().  Otherwise treat the EQE as an SRQ
 *    catastrophic error: mark the SRQ in error state and deliver
 *    IBT_ERROR_CATASTROPHIC_SRQ to the IBTF.  Returns DDI_SUCCESS, or
 *    DDI_FAILURE on EQ overflow.
 */
static int
hermon_catastrophic_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	hermon_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	ibt_async_code_t	type;
	uint_t			eqe_evttype;

	if (eq->eq_evttypemask == HERMON_EVT_MSK_LOCAL_CAT_ERROR) {
		HERMON_FMANOTE(state, HERMON_FMA_INTERNAL);
		hermon_eq_catastrophic(state);
		return (DDI_SUCCESS);
	}

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_SRQ_CATASTROPHIC_ERROR ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
	qp = hermon_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.  By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * Lastly, we check if "hs_ibtfpriv" is NULL.  If it is then it
	 * means that we have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->hs_ibtfpriv != NULL)) {
		event.ev_srq_hdl = (ibt_srq_hdl_t)qp->qp_srqhdl->srq_hdlrarg;
		type		 = IBT_ERROR_CATASTROPHIC_SRQ;

		/* Mark the associated SRQ as being in the error state */
		mutex_enter(&qp->qp_srqhdl->srq_lock);
		qp->qp_srqhdl->srq_state = HERMON_SRQ_STATE_ERROR;
		mutex_exit(&qp->qp_srqhdl->srq_lock);

		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_srq_last_wqe_reached_handler()
 *    Context: Only called from interrupt context
 *
 *    Delivers IBT_EVENT_EMPTY_CHAN to the IBTF for the QP named in the
 *    EQE (if the QP still exists and attach has completed).  Returns
 *    DDI_SUCCESS, or DDI_FAILURE on EQ overflow.
 */
static int
hermon_srq_last_wqe_reached_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	hermon_qphdl_t		qp;
	uint_t			qpnum;
	ibc_async_event_t	event;
	ibt_async_code_t	type;
	uint_t			eqe_evttype;

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_SRQ_LAST_WQE_REACHED ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/* Get the QP handle from QP number in event descriptor */
	qpnum = HERMON_EQE_QPNUM_GET(eq, eqe);
	qp = hermon_qphdl_from_qpnum(state, qpnum);

	/*
	 * If the QP handle is NULL, this is probably an indication
	 * that the QP has been freed already.  In which case, we
	 * should not deliver this event.
	 *
	 * We also check that the QP number in the handle is the
	 * same as the QP number in the event queue entry.  This
	 * extra check allows us to handle the case where a QP was
	 * freed and then allocated again in the time it took to
	 * handle the event queue processing.  By constantly incrementing
	 * the non-constrained portion of the QP number every time
	 * a new QP is allocated, we mitigate (somewhat) the chance
	 * that a stale event could be passed to the client's QP
	 * handler.
	 *
	 * Lastly, we check if "hs_ibtfpriv" is NULL.  If it is then it
	 * means that we have either received this event before we
	 * finished attaching to the IBTF or we've received it while we
	 * are in the process of detaching.
	 */
	if ((qp != NULL) && (qp->qp_qpnum == qpnum) &&
	    (state->hs_ibtfpriv != NULL)) {
		event.ev_qp_hdl = (ibtl_qp_hdl_t)qp->qp_hdlrarg;
		type		= IBT_EVENT_EMPTY_CHAN;

		HERMON_DO_IBTF_ASYNC_CALLB(state, type, &event);
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_ecc_detection_handler()
 *    Context: Only called from interrupt context
 *
 *    Logs a warning (with a raw dump of the EQE contents) when the
 *    hardware reports a correctable single-bit ECC error in the attached
 *    DDR memory.  Returns DDI_SUCCESS, or DDI_FAILURE on EQ overflow.
 */
static int
hermon_ecc_detection_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	uint_t			eqe_evttype;
	uint_t			data;
	int			i;

	eqe_evttype = HERMON_EQE_EVTTYPE_GET(eq, eqe);

	ASSERT(eqe_evttype == HERMON_EVT_ECC_DETECTION ||
	    eqe_evttype == HERMON_EVT_EQ_OVERFLOW);

	if (eqe_evttype == HERMON_EVT_EQ_OVERFLOW) {
		hermon_eq_overflow_handler(state, eq, eqe);
		return (DDI_FAILURE);
	}

	/*
	 * The "ECC Detection Event" indicates that a correctable single-bit
	 * error has occurred with the attached DDR.  The EQE provides some
	 * additional information about the errored EQ.  So we print a warning
	 * message here along with that additional information.
	 */
	HERMON_WARNING(state, "ECC Correctable Error Event Detected");
	/* Dump the EQE one 32-bit word at a time */
	for (i = 0; i < sizeof (hermon_hw_eqe_t) >> 2; i++) {
		data = ((uint_t *)eqe)[i];
		cmn_err(CE_CONT, "! EQE[%02x]: %08x\n", i, data);
	}

	return (DDI_SUCCESS);
}


/*
 * hermon_eq_overflow_handler()
 *    Context: Only called from interrupt context
 *
 *    Logs a warning with the operational error type/data extracted from
 *    the EQE.  An EQ overflow indicates that events were lost because the
 *    queue filled; it is treated as a serious (likely HW) failure.
 */
/* ARGSUSED */
void
hermon_eq_overflow_handler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	uint_t		error_type, data;

	ASSERT(HERMON_EQE_EVTTYPE_GET(eq, eqe) == HERMON_EVT_EQ_OVERFLOW);

	/*
	 * The "Event Queue Overflow Event" indicates that something has
	 * probably gone seriously wrong with some hardware (or, perhaps,
	 * with the software... though it's unlikely in this case).  The EQE
	 * provides some additional information about the errored EQ.  So we
	 * print a warning message here along with that additional information.
	 */
	error_type = HERMON_EQE_OPERRTYPE_GET(eq, eqe);
	data	   = HERMON_EQE_OPERRDATA_GET(eq, eqe);

	HERMON_WARNING(state, "Event Queue overflow");
	cmn_err(CE_CONT, " Error type: %02x, data: %08x\n", error_type, data);
}


/*
 * hermon_no_eqhandler
 *    Context: Only called from interrupt context
 *
 *    Catch-all handler for events with no registered handler.  Logs a
 *    warning with the event type/subtype and a raw dump of the EQE
 *    contents.  Always returns DDI_SUCCESS.
 */
/* ARGSUSED */
static int
hermon_no_eqhandler(hermon_state_t *state, hermon_eqhdl_t eq,
    hermon_hw_eqe_t *eqe)
{
	uint_t		data;
	int		i;

	/*
	 * This "unexpected event" handler (or "catch-all" handler) will
	 * receive all events for which no other handler has been registered.
	 * If we end up here, then something has probably gone seriously wrong
	 * with the Hermon hardware (or, perhaps, with the software... though
	 * it's unlikely in this case).  The EQE provides all the information
	 * about the event.  So we print a warning message here along with
	 * the contents of the EQE.
	 */
	HERMON_WARNING(state, "Unexpected Event handler");
	cmn_err(CE_CONT, " Event type: %02x, subtype: %02x\n",
	    HERMON_EQE_EVTTYPE_GET(eq, eqe),
	    HERMON_EQE_EVTSUBTYPE_GET(eq, eqe));
	/* Dump the EQE one 32-bit word at a time */
	for (i = 0; i < sizeof (hermon_hw_eqe_t) >> 2; i++) {
		data = ((uint_t *)eqe)[i];
		cmn_err(CE_CONT, " EQE[%02x]: %08x\n", i, data);
	}

	return (DDI_SUCCESS);
}