1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 22 /* 23 * Copyright 2014 QLogic Corporation 24 * The contents of this file are subject to the terms of the 25 * QLogic End User License (the "License"). 26 * You may not use this file except in compliance with the License. 27 * 28 * You can obtain a copy of the License at 29 * http://www.qlogic.com/Resources/Documents/DriverDownloadHelp/ 30 * QLogic_End_User_Software_License.txt 31 * See the License for the specific language governing permissions 32 * and limitations under the License. 33 */ 34 35 /* 36 * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved. 37 */ 38 39 #include "bnxe.h" 40 41 /* 42 * The interrupt status bit vector is as follows: 43 * 44 * bit 0: default interrupt 45 * 46 * Single Mode: 47 * 48 * bits 1-16: Function 1 (RSS 0-15) 49 * 50 * Multi-Function Mode: 51 * 52 * bits 1-4: Virtual Function 1 (RSS 0-3) 53 * bits 5-8: Virtual Function 2 (RSS 4-7) 54 * bits 9-12: Virtual Function 3 (RSS 8-11) 55 * bits 13-16: Virtual Function 4 (RSS 12-15) 56 * 57 * While processing interrupts the programmatic index used for the default 58 * status block is 16 and the RSS status blocks are shifted down one (i.e. 59 * 0-15). 60 * 61 * Solaris defaults to 2 MSIX interrupts per function so only the default 62 * interrupt plus one RSS interrupt is used. This default behavior can be 63 * modified via the /etc/system configuration file. 64 */ 65 66 67 static inline char * BnxeIntrTypeName(int intrType) 68 { 69 return (intrType == DDI_INTR_TYPE_MSIX) ? "MSIX" : 70 (intrType == DDI_INTR_TYPE_MSI) ? "MSI" : 71 (intrType == DDI_INTR_TYPE_FIXED) ? "FIXED" : 72 "UNKNOWN"; 73 } 74 75 76 static void BnxeFindDmaHandles(um_device_t * pUM) 77 { 78 lm_address_t physAddr; 79 BnxeMemDma * pTmp; 80 u32_t idx; 81 82 BNXE_LOCK_ENTER_MEM(pUM); 83 84 /* find the RSS status blocks */ 85 86 LM_FOREACH_SB_ID(&pUM->lm_dev, idx) 87 { 88 if (CHIP_IS_E1x(&pUM->lm_dev)) 89 { 90 physAddr.as_u32.low = 91 pUM->lm_dev.vars.status_blocks_arr[idx].hc_status_block_data.e1x_sb_data.common.host_sb_addr.lo; 92 physAddr.as_u32.high = 93 pUM->lm_dev.vars.status_blocks_arr[idx].hc_status_block_data.e1x_sb_data.common.host_sb_addr.hi; 94 } 95 else 96 { 97 physAddr.as_u32.low = 98 pUM->lm_dev.vars.status_blocks_arr[idx].hc_status_block_data.e2_sb_data.common.host_sb_addr.lo; 99 physAddr.as_u32.high = 100 pUM->lm_dev.vars.status_blocks_arr[idx].hc_status_block_data.e2_sb_data.common.host_sb_addr.hi; 101 } 102 103 pTmp = (BnxeMemDma *)d_list_peek_head(&pUM->memDmaList); 104 while (pTmp) 105 { 106 if (pTmp->physAddr.as_ptr == physAddr.as_ptr) 107 { 108 break; 109 } 110 111 pTmp = (BnxeMemDma *)d_list_next_entry(&pTmp->link); 112 } 113 114 if (pTmp == NULL) 115 { 116 BnxeLogWarn(pUM, "Failed to find DMA handle for RSS status block %d", idx); 117 } 118 119 pUM->statusBlocks[idx] = pTmp; 120 } 121 122 /* find the default status block */ 123 124 pTmp = (BnxeMemDma *)d_list_peek_head(&pUM->memDmaList); 125 while (pTmp) 126 { 127 if (pTmp->physAddr.as_ptr == 128 pUM->lm_dev.vars.gen_sp_status_block.blk_phy_address.as_ptr) 129 { 130 break; 131 } 132 133 pTmp = (BnxeMemDma *)d_list_next_entry(&pTmp->link); 134 } 135 136 if (pTmp == NULL) 137 { 138 BnxeLogWarn(pUM, "Failed to find DMA handle for default status block"); 139 } 140 141 pUM->statusBlocks[DEF_STATUS_BLOCK_IGU_INDEX] = pTmp; 142 143 BNXE_LOCK_EXIT_MEM(pUM); 144 } 145 146 147 void BnxeIntrIguSbEnable(um_device_t * pUM, 148 u32_t idx, 149 boolean_t fromISR) 150 { 151 RxQueue * pRxQ = &pUM->rxq[idx]; 152 u32_t igu_id = (FCOE_CID(&pUM->lm_dev) == idx) ? 153 LM_NON_RSS_SB(&pUM->lm_dev) : idx; 154 155 BNXE_LOCK_ENTER_INTR_FLIP(pUM, igu_id); 156 157 if (fromISR) 158 { 159 /* 160 * If in an ISR and poll mode is ON then poll mode was flipped in the 161 * ISR which can occur during Rx processing. If this is the case then 162 * don't do anything. Only re-enable the IGU when poll mode is OFF. 163 */ 164 if (!pRxQ->inPollMode) 165 { 166 lm_int_ack_sb_enable(&pUM->lm_dev, igu_id); 167 } 168 } 169 else 170 { 171 if (!pRxQ->inPollMode) 172 { 173 /* Why are intrs getting enabled on the ring twice...? */ 174 cmn_err(CE_PANIC, 175 "%s: Ring %d, enable intrs and NOT in poll mode!", 176 BnxeDevName(pUM), igu_id); 177 } 178 179 atomic_swap_32(&pRxQ->inPollMode, B_FALSE); 180 pRxQ->intrEnableCnt++; 181 182 lm_int_ack_sb_enable(&pUM->lm_dev, igu_id); 183 } 184 185 BNXE_LOCK_EXIT_INTR_FLIP(pUM, igu_id); 186 } 187 188 189 void BnxeIntrIguSbDisable(um_device_t * pUM, 190 u32_t idx, 191 boolean_t fromISR) 192 { 193 RxQueue * pRxQ = &pUM->rxq[idx]; 194 u32_t igu_id = (FCOE_CID(&pUM->lm_dev) == idx) ? 195 LM_NON_RSS_SB(&pUM->lm_dev) : idx; 196 197 BNXE_LOCK_ENTER_INTR_FLIP(pUM, igu_id); 198 199 if (fromISR) 200 { 201 /* we should never get here when in poll mode... */ 202 ASSERT(pRxQ->inPollMode == B_FALSE); 203 lm_int_ack_sb_disable(&pUM->lm_dev, igu_id); 204 } 205 else 206 { 207 if (pRxQ->inPollMode) 208 { 209 /* Why are intrs getting disabled on the ring twice...? */ 210 cmn_err(CE_PANIC, 211 "%s: Ring %d, disable intrs and ALREADY in poll mode!", 212 BnxeDevName(pUM), igu_id); 213 } 214 215 /* 216 * Note here that the interrupt can already be disabled if GLDv3 217 * is disabling the interrupt under the context of an ISR. This is 218 * OK as the inPollMode flag will tell the ISR not to re-enable the 219 * interrupt upon return. 220 */ 221 222 lm_int_ack_sb_disable(&pUM->lm_dev, igu_id); 223 224 atomic_swap_32(&pRxQ->inPollMode, B_TRUE); 225 pRxQ->intrDisableCnt++; 226 } 227 228 BNXE_LOCK_EXIT_INTR_FLIP(pUM, igu_id); 229 } 230 231 232 static void BnxeServiceDefSbIntr(um_device_t * pUM, 233 boolean_t * pPktsRxed, 234 boolean_t * pPktsTxed) 235 { 236 lm_device_t * pLM = (lm_device_t *)pUM; 237 u32_t activity_flg = 0; 238 u16_t lcl_attn_bits = 0; 239 u16_t lcl_attn_ack = 0; 240 u16_t asserted_proc_grps = 0; 241 u16_t deasserted_proc_grps = 0; 242 243 *pPktsRxed = B_FALSE; 244 *pPktsTxed = B_FALSE; 245 246 BnxeLogDbg(pUM, "Default INTR: Handling default status block %d", DEF_STATUS_BLOCK_INDEX); 247 248 ddi_dma_sync(pUM->statusBlocks[DEF_STATUS_BLOCK_IGU_INDEX]->dmaHandle, 249 0, 0, DDI_DMA_SYNC_FORKERNEL); 250 251 if (pUM->fmCapabilities && 252 BnxeCheckDmaHandle(pUM->statusBlocks[DEF_STATUS_BLOCK_IGU_INDEX]->dmaHandle) != DDI_FM_OK) 253 { 254 ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED); 255 } 256 257 pUM->intrSbCnt[DEF_STATUS_BLOCK_IGU_INDEX]++; 258 259 if (lm_is_def_sb_updated(pLM) == 0) 260 { 261 BnxeLogDbg(pUM, "Default INTR: No change in default status index so bail!"); 262 pUM->intrSbNoChangeCnt[DEF_STATUS_BLOCK_IGU_INDEX]++; 263 264 if (pUM->fmCapabilities && 265 BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK) 266 { 267 ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED); 268 } 269 270 return; 271 } 272 273 /* get a local copy of the indices from the status block */ 274 lm_update_def_hc_indices(pLM, DEF_STATUS_BLOCK_INDEX, &activity_flg); 275 276 BnxeDbgBreakIfFastPath(pUM, !(activity_flg & LM_DEF_EVENT_MASK)); 277 278 BnxeLogDbg(pUM, "Default INTR: processing events on sb: %x, events: 0x%x", 279 DEF_STATUS_BLOCK_INDEX, activity_flg); 280 281 if (activity_flg & LM_DEF_ATTN_ACTIVE) 282 { 283 /* Attentions! Usually these are bad things we don't want to see */ 284 285 lm_get_attn_info(pLM, &lcl_attn_bits, &lcl_attn_ack); 286 287 // NOTE: in case the status index of the attention has changed 288 // already (while processing), we could override with it our local 289 // copy. However, this is not a must, since it will be caught at the 290 // end of the loop with the call to lm_is_sb_updated(). In case the 291 // dpc_loop_cnt has exhausted, no worry, since will get an interrupt 292 // for that at a later time. 293 294 // find out which lines are asserted/deasserted with account to 295 // their states, ASSERT if necessary. 296 GET_ATTN_CHNG_GROUPS(pLM, lcl_attn_bits, lcl_attn_ack, 297 &asserted_proc_grps, &deasserted_proc_grps); 298 299 BnxeLogDbg(pUM, "Default INTR: asserted_proc_grps: 0x%x, deasserted_proc_grps:0x%x", 300 asserted_proc_grps, deasserted_proc_grps); 301 302 if (asserted_proc_grps) 303 { 304 lm_handle_assertion_processing(pLM, asserted_proc_grps); 305 306 if (pUM->fmCapabilities && 307 BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK) 308 { 309 ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED); 310 } 311 } 312 313 // keep in mind that in the same round, it is possible that this 314 // func will have processing to do regarding deassertion on bits 315 // that are different than the ones processed earlier for assertion 316 // processing. 317 318 if (deasserted_proc_grps) 319 { 320 lm_handle_deassertion_processing(pLM, deasserted_proc_grps); 321 322 if (pUM->fmCapabilities && 323 BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK) 324 { 325 ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED); 326 } 327 } 328 } 329 330 if (activity_flg & LM_DEF_USTORM_ACTIVE) 331 { 332 /* Check for L4 TOE/iSCSI/FCoE Rx completions. */ 333 334 if (lm_is_rx_completion(pLM, ISCSI_CID(pLM))) 335 { 336 BnxeDbgBreakMsg(pUM, "Unknown iSCSI Rx completion!"); 337 } 338 339 if (lm_is_rx_completion(pLM, FCOE_CID(pLM))) 340 { 341 *pPktsRxed = B_TRUE; 342 } 343 } 344 345 if (activity_flg & LM_DEF_CSTORM_ACTIVE) 346 { 347 if (lm_is_eq_completion(pLM)) 348 { 349 lm_service_eq_intr(pLM); 350 } 351 352 if (lm_is_tx_completion(pLM, FWD_CID(pLM))) 353 { 354 /* XXX Possible? */ 355 *pPktsTxed = B_TRUE; 356 } 357 358 if (lm_is_tx_completion(pLM, ISCSI_CID(pLM))) 359 { 360 /* XXX iSCSI Tx. NO! */ 361 BnxeDbgBreakMsg(pUM, "Unknown iSCSI Tx completion!"); 362 } 363 364 if (lm_is_tx_completion(pLM, FCOE_CID(pLM))) 365 { 366 *pPktsTxed = B_TRUE; 367 } 368 } 369 } 370 371 372 /* 373 * This is the polling path for an individual Rx Ring. Here we simply pull 374 * any pending packets out of the hardware and put them into the wait queue. 375 * Note that there might already be packets in the wait queue which is OK as 376 * the caller will call BnxeRxRingProcess() next to process the queue. 377 */ 378 void BnxePollRxRing(um_device_t * pUM, 379 u32_t idx, 380 boolean_t * pPktsRxed, 381 boolean_t * pPktsTxed) 382 { 383 lm_device_t * pLM = (lm_device_t *)pUM; 384 u32_t activity_flg = 0; 385 u8_t drv_rss_id = (u8_t)idx; 386 387 *pPktsRxed = B_FALSE; 388 *pPktsTxed = B_FALSE; 389 390 BnxeLogDbg(pUM, "Ring Poll: Handling status block sb_id:%d drv_rss_id:%d", 391 idx, drv_rss_id); 392 393 /* use drv_rss_id for mapping into status block array (from LM) */ 394 ddi_dma_sync(pUM->statusBlocks[drv_rss_id]->dmaHandle, 395 0, 0, DDI_DMA_SYNC_FORKERNEL); 396 397 if (pUM->fmCapabilities && 398 BnxeCheckDmaHandle(pUM->statusBlocks[drv_rss_id]->dmaHandle) != DDI_FM_OK) 399 { 400 ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED); 401 } 402 403 pUM->intrSbPollCnt[drv_rss_id]++; 404 405 if (lm_is_sb_updated(pLM, drv_rss_id) == 0) 406 { 407 BnxeLogDbg(pUM, "Ring Poll: No change in status index so bail!"); 408 pUM->intrSbPollNoChangeCnt[drv_rss_id]++; 409 return; 410 } 411 412 /* get a local copy of the indices from the status block */ 413 lm_update_fp_hc_indices(pLM, drv_rss_id, &activity_flg, &drv_rss_id); 414 415 BnxeDbgBreakIf(pUM, !(activity_flg & LM_NON_DEF_EVENT_MASK)); 416 417 BnxeLogDbg(pUM, "Ring Poll: processing events on sb: %x, events: 0x%x", 418 drv_rss_id, activity_flg); 419 420 if (activity_flg & LM_NON_DEF_USTORM_ACTIVE) 421 { 422 /* Rx Completions */ 423 if (lm_is_rx_completion(pLM, drv_rss_id)) 424 { 425 *pPktsRxed = B_TRUE; 426 } 427 428 /* XXX Check for L4 TOE/FCoE Rx completions. NO! */ 429 } 430 431 if (activity_flg & LM_NON_DEF_CSTORM_ACTIVE) 432 { 433 /* Tx completions */ 434 if (lm_is_tx_completion(pLM, drv_rss_id)) 435 { 436 *pPktsTxed = B_TRUE; 437 } 438 439 /* XXX Check for L4 Tx and L5 EQ completions. NO! */ 440 } 441 } 442 443 444 /* 445 * This is the polling path for the FCoE Ring. Here we don't pull any 446 * pending packets out of the hardware. We only care about FCoE Fast Path 447 * completions. FCoE slow path L2 packets are processed via the default 448 * status block not the LM_NON_RSS_SB. In this path we're assuming that 449 * the FCoE driver is performing a crashdump. 450 */ 451 void BnxePollRxRingFCOE(um_device_t * pUM) 452 { 453 lm_device_t * pLM = (lm_device_t *)pUM; 454 u32_t activity_flg = 0; 455 456 u8_t sb_id = LM_NON_RSS_SB(pLM); 457 u8_t drv_rss_id = FCOE_CID(pLM); 458 459 BnxeLogDbg(pUM, "Ring Poll FCoE: Handling status block sb_id:%d drv_rss_id:%d", 460 sb_id, drv_rss_id); 461 462 /* use sb_id for mapping into status block array (from LM) */ 463 ddi_dma_sync(pUM->statusBlocks[sb_id]->dmaHandle, 464 0, 0, DDI_DMA_SYNC_FORKERNEL); 465 466 if (pUM->fmCapabilities && 467 BnxeCheckDmaHandle(pUM->statusBlocks[sb_id]->dmaHandle) != DDI_FM_OK) 468 { 469 ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED); 470 } 471 472 pUM->intrSbPollCnt[sb_id]++; 473 474 if (lm_is_sb_updated(pLM, sb_id) == 0) 475 { 476 BnxeLogDbg(pUM, "Ring Poll FCoE: No change in status index so bail!"); 477 pUM->intrSbPollNoChangeCnt[sb_id]++; 478 return; 479 } 480 481 /* get a local copy of the indices from the status block */ 482 lm_update_fp_hc_indices(pLM, sb_id, &activity_flg, &drv_rss_id); 483 484 BnxeDbgBreakIf(pUM, !(activity_flg & LM_NON_DEF_EVENT_MASK)); 485 486 BnxeLogDbg(pUM, "Ring Poll FCoE: processing events on sb: %x, events: 0x%x", 487 sb_id, activity_flg); 488 489 if (activity_flg & LM_NON_DEF_USTORM_ACTIVE) 490 { 491 if (lm_fc_is_eq_completion(pLM, drv_rss_id)) 492 { 493 lm_fc_service_eq_intr(pLM, drv_rss_id); 494 } 495 } 496 } 497 498 499 static void BnxeServiceSbIntr(um_device_t * pUM, 500 u8_t sb_id, 501 boolean_t * pPktsRxed, 502 boolean_t * pPktsTxed) 503 { 504 lm_device_t * pLM = (lm_device_t *)pUM; 505 u32_t activity_flg = 0; 506 u8_t drv_rss_id; 507 508 *pPktsRxed = B_FALSE; 509 *pPktsTxed = B_FALSE; 510 511 drv_rss_id = lm_map_igu_sb_id_to_drv_rss(pLM, sb_id); 512 513 BnxeLogDbg(pUM, "Ring INTR: Handling status block sb_id:%d drv_rss_id:%d", 514 sb_id, drv_rss_id); 515 516 /* use sb_id for mapping into status block array (from LM) */ 517 ddi_dma_sync(pUM->statusBlocks[sb_id]->dmaHandle, 518 0, 0, DDI_DMA_SYNC_FORKERNEL); 519 520 if (pUM->fmCapabilities && 521 BnxeCheckDmaHandle(pUM->statusBlocks[sb_id]->dmaHandle) != DDI_FM_OK) 522 { 523 ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED); 524 } 525 526 pUM->intrSbCnt[sb_id]++; 527 528 if (lm_is_sb_updated(pLM, sb_id) == 0) 529 { 530 BnxeLogDbg(pUM, "Ring INTR: No change in status index so bail!"); 531 pUM->intrSbNoChangeCnt[sb_id]++; 532 return; 533 } 534 535 /* 536 * get a local copy of the indices from the status block 537 * XXX note that here drv_rss_id is assigned to the sb_id 538 */ 539 lm_update_fp_hc_indices(pLM, sb_id, &activity_flg, &drv_rss_id); 540 541 BnxeDbgBreakIf(pUM, !(activity_flg & LM_NON_DEF_EVENT_MASK)); 542 543 BnxeLogDbg(pUM, "Ring INTR: processing events on sb: %x, events: 0x%x", 544 drv_rss_id, activity_flg); 545 546 if (activity_flg & LM_NON_DEF_USTORM_ACTIVE) 547 { 548 /* Rx Completions */ 549 if (lm_is_rx_completion(pLM, drv_rss_id)) 550 { 551 *pPktsRxed = B_TRUE; 552 } 553 554 if (lm_fc_is_eq_completion(pLM, drv_rss_id)) 555 { 556 lm_fc_service_eq_intr(pLM, drv_rss_id); 557 } 558 559 /* XXX Check for ISCSI-OOO and L4 TOE Rx completions. NO! */ 560 } 561 562 if (activity_flg & LM_NON_DEF_CSTORM_ACTIVE) 563 { 564 /* Tx completions */ 565 if (lm_is_tx_completion(pLM, drv_rss_id)) 566 { 567 *pPktsTxed = B_TRUE; 568 } 569 570 /* XXX Check for L4 Tx and L5 EQ completions. NO! */ 571 572 /* L4 Tx completions */ 573 if (lm_toe_is_tx_completion(pLM, drv_rss_id)) 574 { 575 BnxeDbgBreakMsg(pUM, "Unknown TOE Tx completion!"); 576 } 577 578 /* L5 EQ completions */ 579 if (lm_sc_is_eq_completion(pLM, drv_rss_id)) 580 { 581 BnxeDbgBreakMsg(pUM, "Unknown iSCSI EQ completion!"); 582 //lm_sc_service_eq_intr(pLM, drv_rss_id); 583 } 584 } 585 } 586 587 588 uint_t BnxeIntrISR(caddr_t arg1, caddr_t arg2) 589 { 590 um_device_t * pUM = (um_device_t *)arg1; 591 lm_device_t * pLM = &pUM->lm_dev; 592 lm_interrupt_status_t intrStatus = 0; 593 boolean_t pktsRxed = 0; 594 boolean_t pktsTxed = 0; 595 u32_t rss_id = 0; 596 int idx = (int)(uintptr_t)arg2; 597 598 BNXE_LOCK_ENTER_INTR(pUM, idx); 599 600 if (!pUM->intrEnabled) 601 { 602 pLM->vars.dbg_intr_in_wrong_state++; 603 604 BNXE_LOCK_EXIT_INTR(pUM, idx); 605 return DDI_INTR_UNCLAIMED; 606 } 607 608 BnxeLogDbg(pUM, "-> BNXE INTA Interrupt <-"); 609 610 if (pLM->vars.enable_intr) 611 { 612 intrStatus = lm_get_interrupt_status(pLM); 613 614 if (pUM->fmCapabilities && 615 BnxeCheckAccHandle(pLM->vars.reg_handle[BAR_0]) != DDI_FM_OK) 616 { 617 ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED); 618 } 619 620 if (intrStatus == 0) 621 { 622 pLM->vars.dbg_intr_zero_status++; 623 624 BNXE_LOCK_EXIT_INTR(pUM, idx); 625 return DDI_INTR_UNCLAIMED; 626 } 627 } 628 else 629 { 630 pLM->vars.dbg_intr_in_disabled++; 631 BnxeLogDbg(pUM, "INTA INTR: we got an interrupt when disabled"); 632 633 BNXE_LOCK_EXIT_INTR(pUM, idx); 634 return DDI_INTR_CLAIMED; 635 } 636 637 atomic_add_64((volatile uint64_t *)&pUM->intrFired, 1); 638 639 while (intrStatus) 640 { 641 if (intrStatus & 0x1) 642 { 643 if (rss_id == 0) 644 { 645 lm_int_ack_def_sb_disable(pLM); 646 647 BnxeServiceDefSbIntr(pUM, &pktsRxed, &pktsTxed); 648 649 /* 650 * Default sb only handles FCoE only right now. If this changes 651 * BnxeServiceDefSbIntr will have to change to return which CIDs 652 * have packets pending. 653 */ 654 655 if (pktsTxed) BnxeTxRingProcess(pUM, FCOE_CID(pLM)); 656 if (pktsRxed) BnxeRxRingProcess(pUM, FCOE_CID(pLM), B_FALSE, 0); 657 658 lm_sq_post_pending(pLM); 659 660 lm_int_ack_def_sb_enable(pLM); 661 } 662 else 663 { 664 /* 665 * (rss_id - 1) is used because the non-default sbs are located 666 * in lm_device at indices 0-15. 667 */ 668 669 lm_int_ack_sb_disable(pLM, (rss_id - 1)); 670 671 BnxeServiceSbIntr(pUM, (rss_id - 1), &pktsRxed, &pktsTxed); 672 673 if (pktsTxed) BnxeTxRingProcess(pUM, (rss_id - 1)); 674 if (pktsRxed) BnxeRxRingProcess(pUM, (rss_id - 1), B_FALSE, 0); 675 676 lm_sq_post_pending(pLM); 677 678 lm_int_ack_sb_enable(pLM, (rss_id - 1)); 679 } 680 } 681 682 intrStatus >>= 1; 683 rss_id++; 684 } 685 686 BNXE_LOCK_EXIT_INTR(pUM, idx); 687 688 return DDI_INTR_CLAIMED; 689 } 690 691 692 uint_t BnxeIntrMISR(caddr_t arg1, caddr_t arg2) 693 { 694 um_device_t * pUM = (um_device_t *)arg1; 695 lm_device_t * pLM = &pUM->lm_dev; 696 boolean_t pktsRxed = 0; 697 boolean_t pktsTxed = 0; 698 int sb_id = (int)(uintptr_t)arg2; 699 u32_t idx; 700 701 BNXE_LOCK_ENTER_INTR(pUM, sb_id); 702 703 if (!pUM->intrEnabled) 704 { 705 pLM->vars.dbg_intr_in_wrong_state++; 706 707 BNXE_LOCK_EXIT_INTR(pUM, sb_id); 708 return DDI_INTR_UNCLAIMED; 709 } 710 711 BnxeLogDbg(pUM, "-> BNXE MSIX Interrupt SB %d <-", sb_id); 712 713 if (!pLM->vars.enable_intr) 714 { 715 pLM->vars.dbg_intr_in_disabled++; 716 BnxeLogDbg(pUM, "MISR INTR: we got an interrupt when disabled"); 717 718 BNXE_LOCK_EXIT_INTR(pUM, sb_id); 719 return DDI_INTR_CLAIMED; 720 } 721 722 atomic_add_64((volatile uint64_t *)&pUM->intrFired, 1); 723 724 if (sb_id == DEF_STATUS_BLOCK_IGU_INDEX) 725 { 726 lm_int_ack_def_sb_disable(pLM); 727 728 BnxeServiceDefSbIntr(pUM, &pktsRxed, &pktsTxed); 729 730 /* 731 * Default sb only handles FCoE only right now. If this changes 732 * BnxeServiceDefSbIntr will have to change to return which CIDs 733 * have packets pending. 734 */ 735 736 if (pktsTxed) BnxeTxRingProcess(pUM, FCOE_CID(pLM)); 737 if (pktsRxed) BnxeRxRingProcess(pUM, FCOE_CID(pLM), FALSE, 0); 738 739 lm_sq_post_pending(pLM); 740 741 lm_int_ack_def_sb_enable(pLM); 742 } 743 else 744 { 745 /* 746 * Note that polling is not allowed by GLDv3 on the LM_NON_RSS_SB when 747 * overlapped with FCoE. This is enforced by the BnxeRxRingIntrEnable 748 * and BnxeRxRingIntrDisable routines. The FCoE driver IS ALLOWED to 749 * put the SB into poll mode. FCoE trumps GLDv3/L2 and it's assumed 750 * the FCoE driver is performing a crashdump in this case. 751 */ 752 753 idx = ((sb_id == LM_NON_RSS_SB(pLM)) && 754 CLIENT_BOUND(pUM, LM_CLI_IDX_FCOE) && 755 (pUM->rssIntr.intrCount == LM_MAX_RSS_CHAINS(&pUM->lm_dev))) ? 756 FCOE_CID(pLM) : sb_id; 757 758 if (pUM->rxq[idx].inPollMode) 759 { 760 /* Shouldn't be here! */ 761 cmn_err(CE_PANIC, 762 "%s: Interupt on RSS/MSIX ring %d when in poll mode!", 763 BnxeDevName(pUM), idx); 764 } 765 766 /* accounts for poll mode */ 767 BnxeIntrIguSbDisable(pUM, idx, B_TRUE); 768 769 BnxeServiceSbIntr(pUM, sb_id, &pktsRxed, &pktsTxed); 770 771 if (pktsTxed) BnxeTxRingProcess(pUM, sb_id); 772 if (pktsRxed) BnxeRxRingProcess(pUM, sb_id, B_FALSE, 0); 773 774 lm_sq_post_pending(pLM); 775 776 /* accounts for poll mode */ 777 BnxeIntrIguSbEnable(pUM, idx, B_TRUE); 778 } 779 780 BNXE_LOCK_EXIT_INTR(pUM, sb_id); 781 782 return DDI_INTR_CLAIMED; 783 } 784 785 786 static int BnxeGetInterruptCount(dev_info_t * pDev, int type, int intrTypes) 787 { 788 int nintrs = 0; 789 790 if (intrTypes & type) 791 { 792 return (ddi_intr_get_nintrs(pDev, type, &nintrs) != DDI_SUCCESS) ? 793 -1 : nintrs; 794 } 795 796 return -1; 797 } 798 799 800 static boolean_t BnxeIntrBlockAlloc(um_device_t * pUM, 801 int intrInum, 802 int intrCnt, 803 BnxeIntrBlock * pBlock) 804 805 { 806 dev_info_t * pDev = pUM->pDev; 807 int intrRequest; 808 int intrActual; 809 int rc, i; 810 811 if ((pUM->intrType == DDI_INTR_TYPE_FIXED) && (intrCnt != 1)) 812 { 813 return B_FALSE; 814 } 815 816 intrRequest = intrCnt; 817 intrActual = 0; 818 819 /* 820 * We need to allocate an interrupt block array of maximum size which is 821 * MAX_RSS_CHAINS plus one for the default interrupt. Even though we 822 * won't allocate all of those handlers the "inum" value passed to 823 * ddi_intr_alloc() determines the starting index where the handlers 824 * will be allocated. See the multi-function block offset documentation 825 * at the top of this file. 826 */ 827 pBlock->intrHandleBlockSize = 828 ((MAX_RSS_CHAINS + 1) * sizeof(ddi_intr_handle_t)); 829 830 if ((pBlock->pIntrHandleBlockAlloc = 831 (ddi_intr_handle_t *)kmem_zalloc(pBlock->intrHandleBlockSize, 832 KM_SLEEP)) == NULL) 833 { 834 BnxeLogWarn(pUM, "Memory alloc failed for isr handle block (inum=%d)!", 835 intrInum); 836 return B_FALSE; 837 } 838 839 if ((rc = ddi_intr_alloc(pDev, 840 pBlock->pIntrHandleBlockAlloc, 841 pUM->intrType, 842 intrInum, 843 intrRequest, 844 &intrActual, 845 DDI_INTR_ALLOC_NORMAL)) != DDI_SUCCESS) 846 { 847 BnxeLogWarn(pUM, "Failed to allocate isr handle block (%d) (inum=%d cnt=%d)!", 848 rc, intrInum, intrRequest); 849 kmem_free(pBlock->pIntrHandleBlockAlloc, pBlock->intrHandleBlockSize); 850 return B_FALSE; 851 } 852 853 /* 854 * Point 'pIntrHandleBlock' to the starting interrupt index in the 855 * allocated interrupt block array. This is done so we can easily enable, 856 * disable, free, etc the interrupts. For 10u8 and beyond the inum value 857 * is also used as an index into the interrupt block so we point 858 * pIntrHandleBlock to the inum'th index. For 10u7 and below all 859 * interrupt allocations start at index 0 per block. 860 */ 861 #if 0 862 863 #ifdef DDI_INTR_IRM 864 pBlock->pIntrHandleBlock = 865 &pBlock->pIntrHandleBlockAlloc[intrInum]; 866 #else 867 pBlock->pIntrHandleBlock = 868 &pBlock->pIntrHandleBlockAlloc[0]; 869 #endif 870 871 #else 872 873 if (pBlock->pIntrHandleBlockAlloc[0]) 874 { 875 pBlock->pIntrHandleBlock = 876 &pBlock->pIntrHandleBlockAlloc[0]; 877 } 878 else 879 { 880 pBlock->pIntrHandleBlock = 881 &pBlock->pIntrHandleBlockAlloc[intrInum]; 882 } 883 884 #endif 885 886 if (intrRequest != intrActual) 887 { 888 BnxeLogWarn(pUM, "Failed to allocate desired isr count (%d/%d)!", 889 intrActual, intrRequest); 890 891 #if 0 892 for (i = 0; i < intrActual; i++) 893 { 894 ddi_intr_free(pBlock->pIntrHandleBlock[i]); 895 } 896 897 kmem_free(pBlock->pIntrHandleBlockAlloc, pBlock->intrHandleBlockSize); 898 return B_FALSE; 899 #else 900 if (intrActual == 0) 901 { 902 kmem_free(pBlock->pIntrHandleBlockAlloc, pBlock->intrHandleBlockSize); 903 return B_FALSE; 904 } 905 #endif 906 } 907 908 pBlock->intrCount = intrActual; 909 910 if ((rc = ddi_intr_get_cap(pBlock->pIntrHandleBlock[0], 911 &pBlock->intrCapability)) != DDI_SUCCESS) 912 { 913 BnxeLogWarn(pUM, "Failed to get isr capability (%d)!", rc); 914 goto BnxeIntrBlockAlloc_fail; 915 } 916 917 if ((rc = ddi_intr_get_pri(pBlock->pIntrHandleBlock[0], 918 &pBlock->intrPriority)) != DDI_SUCCESS) 919 { 920 BnxeLogWarn(pUM, "Failed to get isr priority (%d)!", rc); 921 goto BnxeIntrBlockAlloc_fail; 922 } 923 924 if (pBlock->intrPriority >= ddi_intr_get_hilevel_pri()) 925 { 926 BnxeLogWarn(pUM, "Interrupt priority is too high!"); 927 goto BnxeIntrBlockAlloc_fail; 928 } 929 930 return B_TRUE; 931 932 BnxeIntrBlockAlloc_fail: 933 934 for (i = 0; i < intrActual; i++) 935 { 936 ddi_intr_free(pBlock->pIntrHandleBlock[i]); 937 } 938 939 kmem_free(pBlock->pIntrHandleBlockAlloc, pBlock->intrHandleBlockSize); 940 941 memset(pBlock, 0, sizeof(BnxeIntrBlock)); 942 943 return B_FALSE; 944 } 945 946 947 static void BnxeIntrBlockFree(um_device_t * pUM, 948 BnxeIntrBlock * pBlock) 949 950 { 951 int i; 952 953 if (pBlock->intrCount == 0) 954 { 955 memset(pBlock, 0, sizeof(BnxeIntrBlock)); 956 return; 957 } 958 959 for (i = 0; i < pBlock->intrCount; i++) 960 { 961 ddi_intr_free(pBlock->pIntrHandleBlock[i]); 962 } 963 964 kmem_free(pBlock->pIntrHandleBlockAlloc, pBlock->intrHandleBlockSize); 965 966 memset(pBlock, 0, sizeof(BnxeIntrBlock)); 967 } 968 969 970 static boolean_t BnxeIntrAddHandlers(um_device_t * pUM) 971 { 972 int rc, i, j; 973 974 switch (pUM->intrType) 975 { 976 case DDI_INTR_TYPE_MSIX: 977 978 if ((rc = ddi_intr_add_handler( 979 pUM->defIntr.pIntrHandleBlock[0], 980 BnxeIntrMISR, 981 (caddr_t)pUM, 982 (caddr_t)(uintptr_t)DEF_STATUS_BLOCK_IGU_INDEX)) != 983 DDI_SUCCESS) 984 { 985 BnxeLogWarn(pUM, "Failed to add the MSIX default isr handler (%d)", rc); 986 return B_FALSE; 987 } 988 989 for (i = 0; i < pUM->rssIntr.intrCount; i++) 990 { 991 if ((rc = ddi_intr_add_handler( 992 pUM->rssIntr.pIntrHandleBlock[i], 993 BnxeIntrMISR, 994 (caddr_t)pUM, 995 (caddr_t)(uintptr_t)i)) != 996 DDI_SUCCESS) 997 { 998 BnxeLogWarn(pUM, "Failed to add the MSIX RSS isr handler %d (%d)", 999 (i + NDIS_CID(&pUM->lm_dev)), rc); 1000 1001 ddi_intr_remove_handler(pUM->defIntr.pIntrHandleBlock[0]); 1002 1003 for (j = 0; j < i; j++) /* unwind */ 1004 { 1005 ddi_intr_remove_handler(pUM->rssIntr.pIntrHandleBlock[j]); 1006 } 1007 1008 return B_FALSE; 1009 } 1010 } 1011 1012 /* 1013 * fcoeIntr.intrCount == 1 implies LM_NON_RSS_SB (last) status block 1014 * was allocated for FCoE and there was no overlap with the RSS 1015 * allocation. 1016 */ 1017 if (pUM->fcoeIntr.intrCount == 1) 1018 { 1019 if ((rc = ddi_intr_add_handler( 1020 pUM->fcoeIntr.pIntrHandleBlock[0], 1021 BnxeIntrMISR, 1022 (caddr_t)pUM, 1023 (caddr_t)(uintptr_t)LM_NON_RSS_SB(&pUM->lm_dev))) != 1024 DDI_SUCCESS) 1025 { 1026 BnxeLogWarn(pUM, "Failed to add the MSIX FCoE isr handler (%d)", rc); 1027 1028 ddi_intr_remove_handler(pUM->defIntr.pIntrHandleBlock[0]); 1029 1030 for (i = 0; i < pUM->rssIntr.intrCount; i++) 1031 { 1032 ddi_intr_remove_handler(pUM->rssIntr.pIntrHandleBlock[i]); 1033 } 1034 1035 return B_FALSE; 1036 } 1037 } 1038 1039 break; 1040 1041 case DDI_INTR_TYPE_FIXED: 1042 1043 if ((rc = ddi_intr_add_handler( 1044 pUM->defIntr.pIntrHandleBlock[0], 1045 BnxeIntrISR, 1046 (caddr_t)pUM, 1047 (caddr_t)(uintptr_t)DEF_STATUS_BLOCK_IGU_INDEX)) != 1048 DDI_SUCCESS) 1049 { 1050 BnxeLogWarn(pUM, "Failed to add the fixed default isr handler (%d)", rc); 1051 return B_FALSE; 1052 } 1053 1054 break; 1055 1056 default: 1057 1058 BnxeLogWarn(pUM, "Failed to add isr handler (unsupported type %d)!", 1059 pUM->intrType); 1060 return B_FALSE; 1061 } 1062 1063 return B_TRUE; 1064 } 1065 1066 1067 static void BnxeIntrBlockRemoveHandler(um_device_t * pUM, 1068 BnxeIntrBlock * pBlock) 1069 { 1070 int i; 1071 1072 (void)pUM; 1073 1074 if (pBlock->intrCount == 0) 1075 { 1076 return; 1077 } 1078 1079 for (i = 0; i < pBlock->intrCount; i++) 1080 { 1081 ddi_intr_remove_handler(pBlock->pIntrHandleBlock[i]); 1082 } 1083 } 1084 1085 1086 static boolean_t BnxeIntrBlockEnable(um_device_t * pUM, 1087 BnxeIntrBlock * pBlock) 1088 { 1089 int rc, i, j; 1090 1091 if (pBlock->intrCount == 0) 1092 { 1093 return B_TRUE; 1094 } 1095 1096 if (pBlock->intrCapability & DDI_INTR_FLAG_BLOCK) 1097 { 1098 if ((rc = ddi_intr_block_enable(pBlock->pIntrHandleBlock, 1099 pBlock->intrCount)) != DDI_SUCCESS) 1100 { 1101 BnxeLogWarn(pUM, "Failed to enable isr block (%d)", rc); 1102 return B_FALSE; 1103 } 1104 } 1105 else 1106 { 1107 for (i = 0; i < pBlock->intrCount; i++) 1108 { 1109 if ((rc = ddi_intr_enable(pBlock->pIntrHandleBlock[i])) != 1110 DDI_SUCCESS) 1111 { 1112 BnxeLogWarn(pUM, "Failed to enable isr %d (%d)", i, rc); 1113 1114 for (j = 0; j < i; j++) /* unwind */ 1115 { 1116 ddi_intr_disable(pBlock->pIntrHandleBlock[j]); 1117 } 1118 1119 return B_FALSE; 1120 } 1121 } 1122 } 1123 1124 return B_TRUE; 1125 } 1126 1127 1128 static void BnxeIntrBlockDisable(um_device_t * pUM, 1129 BnxeIntrBlock * pBlock) 1130 { 1131 int i; 1132 1133 if (pBlock->intrCount == 0) 1134 { 1135 return; 1136 } 1137 1138 if (pBlock->intrCapability & DDI_INTR_FLAG_BLOCK) 1139 { 1140 ddi_intr_block_disable(pBlock->pIntrHandleBlock, pBlock->intrCount); 1141 } 1142 else 1143 { 1144 for (i = 0; i < pBlock->intrCount; i++) 1145 { 1146 ddi_intr_disable(pBlock->pIntrHandleBlock[i]); 1147 } 1148 } 1149 } 1150 1151 1152 int BnxeIntrEnable(um_device_t * pUM) 1153 { 1154 BnxeMemDma * pDma; 1155 int rc, i, j; 1156 1157 atomic_swap_64((volatile uint64_t *)&pUM->intrFired, 0); 1158 1159 for (i = 0; i < (MAX_RSS_CHAINS + 1); i++) 1160 { 1161 pUM->intrSbCnt[i] = 0; 1162 pUM->intrSbNoChangeCnt[i] = 0; 1163 } 1164 1165 /* get the DMA handles for quick access to the status blocks for sync */ 1166 BnxeFindDmaHandles(pUM); 1167 1168 /* Enable the default interrupt... */ 1169 1170 if (!BnxeIntrBlockEnable(pUM, &pUM->defIntr)) 1171 { 1172 BnxeLogWarn(pUM, "Failed to enable the default interrupt"); 1173 return -1; 1174 } 1175 1176 /* Enable the FCoE interrupt... */ 1177 1178 if (!BnxeIntrBlockEnable(pUM, &pUM->fcoeIntr)) 1179 { 1180 BnxeLogWarn(pUM, "Failed to enable the FCoE interrupt"); 1181 BnxeIntrBlockDisable(pUM, &pUM->defIntr); 1182 return -1; 1183 } 1184 1185 /* Enable the RSS interrupts... */ 1186 1187 if (!BnxeIntrBlockEnable(pUM, &pUM->rssIntr)) 1188 { 1189 BnxeLogWarn(pUM, "Failed to enable the RSS interrupt"); 1190 BnxeIntrBlockDisable(pUM, &pUM->defIntr); 1191 BnxeIntrBlockDisable(pUM, &pUM->fcoeIntr); 1192 return -1; 1193 } 1194 1195 /* allow the hardware to generate interrupts */ 1196 atomic_swap_32(&pUM->intrEnabled, B_TRUE); 1197 lm_enable_int(&pUM->lm_dev); 1198 1199 if (pUM->fmCapabilities && 1200 BnxeCheckAccHandle(pUM->lm_dev.vars.reg_handle[BAR_0]) != DDI_FM_OK) 1201 { 1202 ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED); 1203 } 1204 1205 /* XXX do not remove this... edavis */ 1206 drv_usecwait(1000000); /* :-( */ 1207 1208 return 0; 1209 } 1210 1211 1212 void BnxeIntrDisable(um_device_t * pUM) 1213 { 1214 int rc, i; 1215 1216 /* stop the device from generating any interrupts */ 1217 lm_disable_int(&pUM->lm_dev); 1218 1219 if (pUM->fmCapabilities && 1220 BnxeCheckAccHandle(pUM->lm_dev.vars.reg_handle[BAR_0]) != DDI_FM_OK) 1221 { 1222 ddi_fm_service_impact(pUM->pDev, DDI_SERVICE_DEGRADED); 1223 } 1224 1225 atomic_swap_32(&pUM->intrEnabled, B_FALSE); 1226 1227 /* 1228 * Ensure the ISR no longer touches the hardware by making sure the ISR 1229 * is not running or the current run completes. Since interrupts were 1230 * disabled before here and intrEnabled is FALSE, we can be sure 1231 * interrupts will no longer be processed. 1232 */ 1233 for (i = 0; i < (MAX_RSS_CHAINS + 1); i++) 1234 { 1235 BNXE_LOCK_ENTER_INTR(pUM, i); 1236 BNXE_LOCK_EXIT_INTR(pUM, i); 1237 } 1238 1239 /* Disable the default interrupt... */ 1240 1241 BnxeIntrBlockDisable(pUM, &pUM->defIntr); 1242 1243 /* Disable the FCoE interrupt... */ 1244 1245 BnxeIntrBlockDisable(pUM, &pUM->fcoeIntr); 1246 1247 /* Disable the RSS interrupts... */ 1248 1249 BnxeIntrBlockDisable(pUM, &pUM->rssIntr); 1250 } 1251 1252 1253 boolean_t BnxeIntrInit(um_device_t * pUM) 1254 { 1255 dev_info_t * pDev; 1256 int intrTypes = 0; 1257 int intrTotalAlloc = 0; 1258 int numMSIX, numMSI, numFIX; 1259 int rc, i; 1260 1261 pDev = pUM->pDev; 1262 1263 atomic_swap_32(&pUM->intrEnabled, B_FALSE); 1264 1265 if ((rc = ddi_intr_get_supported_types(pDev, &intrTypes)) != DDI_SUCCESS) 1266 { 1267 BnxeLogWarn(pUM, "Failed to get supported interrupt types (%d)", rc); 1268 return B_FALSE; 1269 } 1270 1271 numMSIX = BnxeGetInterruptCount(pDev, DDI_INTR_TYPE_MSIX, intrTypes); 1272 numMSI = BnxeGetInterruptCount(pDev, DDI_INTR_TYPE_MSI, intrTypes); 1273 numFIX = BnxeGetInterruptCount(pDev, DDI_INTR_TYPE_FIXED, intrTypes); 1274 1275 if (numFIX <= 0) 1276 { 1277 BnxeLogWarn(pUM, "Fixed interrupt not supported!"); 1278 return B_FALSE; 1279 } 1280 1281 memset(&pUM->defIntr, 0, sizeof(BnxeIntrBlock)); 1282 memset(&pUM->rssIntr, 0, sizeof(BnxeIntrBlock)); 1283 memset(&pUM->fcoeIntr, 0, sizeof(BnxeIntrBlock)); 1284 1285 if (pUM->devParams.disableMsix) 1286 { 1287 BnxeLogInfo(pUM, "Forcing fixed level interrupts."); 1288 pUM->lm_dev.params.interrupt_mode = LM_INT_MODE_INTA; 1289 pUM->intrType = DDI_INTR_TYPE_FIXED; 1290 } 1291 else if (numMSIX > 0) 1292 { 1293 pUM->lm_dev.params.interrupt_mode = LM_INT_MODE_MIMD; 1294 pUM->intrType = DDI_INTR_TYPE_MSIX; 1295 } 1296 else /* numFIX */ 1297 { 1298 pUM->lm_dev.params.interrupt_mode = LM_INT_MODE_INTA; 1299 pUM->intrType = DDI_INTR_TYPE_FIXED; 1300 } 1301 1302 while (1) 1303 { 1304 /* allocate the default interrupt */ 1305 1306 if (!BnxeIntrBlockAlloc(pUM, 1307 0, 1308 1, 1309 &pUM->defIntr)) 1310 { 1311 BnxeLogWarn(pUM, "Failed to allocate default %s interrupt!", 1312 BnxeIntrTypeName(pUM->intrType)); 1313 goto BnxeIntrInit_alloc_fail; 1314 } 1315 1316 intrTotalAlloc++; 1317 1318 if (pUM->intrType == DDI_INTR_TYPE_FIXED) 1319 { 1320 /* only one interrupt allocated for fixed (default) */ 1321 break; 1322 } 1323 1324 if (BnxeProtoFcoeAfex(pUM)) 1325 { 1326 pUM->devParams.numRings = 0; 1327 } 1328 else 1329 { 1330 /* allocate the RSS interrupts */ 1331 1332 while (pUM->devParams.numRings > 0) 1333 { 1334 if (!BnxeIntrBlockAlloc(pUM, 1335 (NDIS_CID(&pUM->lm_dev) + 1), 1336 pUM->devParams.numRings, 1337 &pUM->rssIntr)) 1338 { 1339 BnxeLogWarn(pUM, "Failed to allocate %d RSS %s interrupts!", 1340 pUM->devParams.numRings, 1341 BnxeIntrTypeName(pUM->intrType)); 1342 pUM->devParams.numRings >>= 1; 1343 continue; 1344 } 1345 1346 break; 1347 } 1348 1349 if (pUM->devParams.numRings == 0) 1350 { 1351 BnxeIntrBlockFree(pUM, &pUM->defIntr); 1352 goto BnxeIntrInit_alloc_fail; 1353 } 1354 1355 BnxeLogInfo(pUM, "Allocated %d RSS %s interrupts.", 1356 pUM->rssIntr.intrCount, 1357 BnxeIntrTypeName(pUM->intrType)); 1358 1359 intrTotalAlloc += pUM->rssIntr.intrCount; /* intrCount <= numRings */ 1360 } 1361 1362 /* 1363 * Allocate the FCoE interrupt only if all available status blocks 1364 * were not taken up by the RSS chains. If they were then the last 1365 * status block (LM_NON_RSS_SB) is overloaded for both RSS and FCoE. 1366 */ 1367 1368 if (BNXE_FCOE(pUM)) 1369 { 1370 if (pUM->rssIntr.intrCount < LM_MAX_RSS_CHAINS(&pUM->lm_dev)) 1371 { 1372 if (!BnxeIntrBlockAlloc(pUM, 1373 (LM_NON_RSS_SB(&pUM->lm_dev) + 1), 1374 1, 1375 &pUM->fcoeIntr)) 1376 { 1377 BnxeLogWarn(pUM, "Failed to allocate FCoE %s interrupt!", 1378 BnxeIntrTypeName(pUM->intrType)); 1379 BnxeIntrBlockFree(pUM, &pUM->defIntr); 1380 BnxeIntrBlockFree(pUM, &pUM->rssIntr); 1381 goto BnxeIntrInit_alloc_fail; 1382 } 1383 1384 intrTotalAlloc++; 1385 } 1386 else 1387 { 1388 /* to be safe, sets fcoeIntr.intrCount to 0 */ 1389 memset(&pUM->fcoeIntr, 0, sizeof(BnxeIntrBlock)); 1390 } 1391 } 1392 1393 break; 1394 1395 BnxeIntrInit_alloc_fail: 1396 1397 if (pUM->intrType == DDI_INTR_TYPE_FIXED) 1398 { 1399 return B_FALSE; 1400 } 1401 1402 /* fall back to fixed a retry allocation */ 1403 intrTotalAlloc = 0; 1404 pUM->lm_dev.params.interrupt_mode = LM_INT_MODE_INTA; 1405 pUM->intrType = DDI_INTR_TYPE_FIXED; 1406 } 1407 1408 if (pUM->intrType == DDI_INTR_TYPE_MSIX) 1409 { 1410 pUM->devParams.numRings = pUM->rssIntr.intrCount; 1411 pUM->lm_dev.params.rss_chain_cnt = pUM->rssIntr.intrCount; 1412 pUM->lm_dev.params.tss_chain_cnt = pUM->rssIntr.intrCount; 1413 } 1414 else 1415 { 1416 /* fixed level (no rings)... */ 1417 pUM->devParams.numRings = 0; 1418 pUM->lm_dev.params.rss_chain_cnt = 1; 1419 pUM->lm_dev.params.tss_chain_cnt = 1; 1420 1421 BnxeLogWarn(pUM, "Using Fixed Level Interrupts! (set ddi_msix_alloc_limit in /etc/system)"); 1422 } 1423 1424 BnxeLogInfo(pUM, "Interrupts (Supported - %d Fixed / %d MSI / %d MSIX) (Allocated - %d %s)", 1425 numFIX, numMSI, numMSIX, intrTotalAlloc, BnxeIntrTypeName(pUM->intrType)); 1426 1427 if (!BnxeIntrAddHandlers(pUM)) 1428 { 1429 BnxeLogWarn(pUM, "Failed to add interrupts!"); 1430 BnxeIntrBlockFree(pUM, &pUM->defIntr); 1431 BnxeIntrBlockFree(pUM, &pUM->fcoeIntr); 1432 BnxeIntrBlockFree(pUM, &pUM->rssIntr); 1433 return B_FALSE; 1434 } 1435 1436 /* copy default priority and assume rest are the same (for mutex) */ 1437 pUM->intrPriority = pUM->defIntr.intrPriority; 1438 1439 return B_TRUE; 1440 } 1441 1442 1443 void BnxeIntrFini(um_device_t * pUM) 1444 { 1445 int i; 1446 1447 BnxeIntrBlockDisable(pUM, &pUM->defIntr); 1448 BnxeIntrBlockRemoveHandler(pUM, &pUM->defIntr); 1449 BnxeIntrBlockFree(pUM, &pUM->defIntr); 1450 1451 BnxeIntrBlockDisable(pUM, &pUM->fcoeIntr); 1452 BnxeIntrBlockRemoveHandler(pUM, &pUM->fcoeIntr); 1453 BnxeIntrBlockFree(pUM, &pUM->fcoeIntr); 1454 1455 BnxeIntrBlockDisable(pUM, &pUM->rssIntr); 1456 BnxeIntrBlockRemoveHandler(pUM, &pUM->rssIntr); 1457 BnxeIntrBlockFree(pUM, &pUM->rssIntr); 1458 } 1459 1460