/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

const char *const port_state_str[] = {
	"Unknown",
	"UNCONFIGURED",
	"DEAD",
	"LOST",
	"ONLINE"
};

static void qla24xx_purex_iocb(scsi_qla_host_t *vha, void *pkt,
	void (*process_item)(struct scsi_qla_host *vha, void *pkt))
{
	struct purex_list *list = &vha->purex_list;
	struct purex_item *item;
	ulong flags;

	/* May be called from interrupt context; do not sleep. */
	item = kzalloc(sizeof(*item), GFP_ATOMIC);
	if (!item) {
		ql_log(ql_log_warn, vha, 0x5092,
		    ">> Failed to allocate purex list item.\n");
		return;
	}

	item->vha = vha;
	item->process_item = process_item;
	memcpy(&item->iocb, pkt, sizeof(item->iocb));

	spin_lock_irqsave(&list->lock, flags);
	list_add_tail(&item->list, &list->head);
	spin_unlock_irqrestore(&list->lock, flags);

	set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
}

static void
qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt)
{
	struct abts_entry_24xx *abts = pkt;
	struct qla_hw_data *ha = vha->hw;
	struct els_entry_24xx *rsp_els;
	struct abts_entry_24xx *abts_rsp;
	dma_addr_t dma;
	uint32_t fctl;
	int rval;

	ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);

	ql_log(ql_log_warn, vha, 0x0287,
	    "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
	    abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
	    abts->seq_id, abts->seq_cnt);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    "-------- ABTS RCV -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    (uint8_t *)abts, sizeof(*abts));

	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
	    GFP_KERNEL);
	if (!rsp_els) {
		ql_log(ql_log_warn, vha, 0x0287,
		    "Failed to allocate DMA buffer for ABTS/ELS RSP.\n");
		return;
	}
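	/*
	 * The ABTS is handled in two steps: first an ELS IOCB tells the
	 * firmware to terminate the exchange being aborted, then an
	 * explicit BA_ACC response frame is built from the fields of the
	 * received ABTS and sent back to the originator.
	 */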
	/* terminate exchange */
	rsp_els->entry_type = ELS_IOCB_TYPE;
	rsp_els->entry_count = 1;
	rsp_els->nport_handle = ~0;
	rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
	rsp_els->control_flags = EPD_RX_XCHG;
	ql_dbg(ql_dbg_init, vha, 0x0283,
	    "Sending ELS Response to terminate exchange %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    (uint8_t *)rsp_els, sizeof(*rsp_els));
	rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0288,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (rsp_els->comp_status) {
		ql_log(ql_log_warn, vha, 0x0289,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, rsp_els->comp_status,
		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028a,
		    "%s: abort exchange done.\n", __func__);
	}

	/* send ABTS response */
	abts_rsp = (void *)rsp_els;
	memset(abts_rsp, 0, sizeof(*abts_rsp));
	abts_rsp->entry_type = ABTS_RSP_TYPE;
	abts_rsp->entry_count = 1;
	abts_rsp->nport_handle = abts->nport_handle;
	abts_rsp->vp_idx = abts->vp_idx;
	abts_rsp->sof_type = abts->sof_type & 0xf0;
	abts_rsp->rx_xch_addr = abts->rx_xch_addr;
	abts_rsp->d_id[0] = abts->s_id[0];
	abts_rsp->d_id[1] = abts->s_id[1];
	abts_rsp->d_id[2] = abts->s_id[2];
	abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
	abts_rsp->s_id[0] = abts->d_id[0];
	abts_rsp->s_id[1] = abts->d_id[1];
	abts_rsp->s_id[2] = abts->d_id[2];
	abts_rsp->cs_ctl = abts->cs_ctl;
	/* include flipping bit23 in fctl */
	fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
	    FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
	abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
	abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
	abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
	abts_rsp->type = FC_TYPE_BLD;
	abts_rsp->rx_id = abts->rx_id;
	abts_rsp->ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
	abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.high_seq_cnt = ~0;
	abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
	ql_dbg(ql_dbg_init, vha, 0x028b,
	    "Sending BA ACC response to ABTS %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    (uint8_t *)abts_rsp, sizeof(*abts_rsp));
	rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x028c,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (abts_rsp->comp_status) {
		ql_log(ql_log_warn, vha, 0x028d,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, abts_rsp->comp_status,
		    abts_rsp->payload.error.subcode1,
		    abts_rsp->payload.error.subcode2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028e,
		    "%s: done.\n", __func__);
	}

	dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
}

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and the
			 * DPC thread can be shutdown cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
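	/*
	 * mboxes is a bitmap of the outgoing mailbox registers the caller
	 * expects; mailbox 0 already arrived in mb0, so shift the bitmap
	 * once and walk mailboxes 1..mbx_count-1, latching only the
	 * requested registers into ha->mailbox_out[].
	 */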
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
	    { "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1)

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *   (LSW = mb[2], MSW = mb[6])
			 *    Bits 0-7   = protocol-engine ID
			 *    Bits 8-28  = f/w error code
			 *    Bits 29-31 = Error-level
			 *      Error-level 0x1 = Non-Fatal error
			 *      Error-level 0x2 = Recoverable Fatal error
			 *      Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *   (LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
"Recoverable Fatal error: Chip reset " 587 "required.\n"); 588 qla83xx_schedule_work(vha, 589 QLA83XX_NIC_CORE_RESET); 590 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 591 ql_log(ql_log_fatal, vha, 0x5065, 592 "Unrecoverable Fatal error: Set FAILED " 593 "state, reboot required.\n"); 594 qla83xx_schedule_work(vha, 595 QLA83XX_NIC_CORE_UNRECOVERABLE); 596 } 597 } 598 599 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 600 uint16_t peg_fw_state, nw_interface_link_up; 601 uint16_t nw_interface_signal_detect, sfp_status; 602 uint16_t htbt_counter, htbt_monitor_enable; 603 uint16_t sfp_additional_info, sfp_multirate; 604 uint16_t sfp_tx_fault, link_speed, dcbx_status; 605 606 /* 607 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 608 * - PEG-to-FC Status Register: 609 * (LSW = mb[2], MSW = mb[6]) 610 * Bits 0-7 = Peg-Firmware state 611 * Bit 8 = N/W Interface Link-up 612 * Bit 9 = N/W Interface signal detected 613 * Bits 10-11 = SFP Status 614 * SFP Status 0x0 = SFP+ transceiver not expected 615 * SFP Status 0x1 = SFP+ transceiver not present 616 * SFP Status 0x2 = SFP+ transceiver invalid 617 * SFP Status 0x3 = SFP+ transceiver present and 618 * valid 619 * Bits 12-14 = Heartbeat Counter 620 * Bit 15 = Heartbeat Monitor Enable 621 * Bits 16-17 = SFP Additional Info 622 * SFP info 0x0 = Unregocnized transceiver for 623 * Ethernet 624 * SFP info 0x1 = SFP+ brand validation failed 625 * SFP info 0x2 = SFP+ speed validation failed 626 * SFP info 0x3 = SFP+ access error 627 * Bit 18 = SFP Multirate 628 * Bit 19 = SFP Tx Fault 629 * Bits 20-22 = Link Speed 630 * Bits 23-27 = Reserved 631 * Bits 28-30 = DCBX Status 632 * DCBX Status 0x0 = DCBX Disabled 633 * DCBX Status 0x1 = DCBX Enabled 634 * DCBX Status 0x2 = DCBX Exchange error 635 * Bit 31 = Reserved 636 */ 637 peg_fw_state = (mb[2] & 0x00ff); 638 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 639 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 640 sfp_status = ((mb[2] & 0x0c00) >> 10); 641 htbt_counter = ((mb[2] & 0x7000) >> 12); 642 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 643 sfp_additional_info = (mb[6] & 0x0003); 644 sfp_multirate = ((mb[6] & 0x0004) >> 2); 645 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 646 link_speed = ((mb[6] & 0x0070) >> 4); 647 dcbx_status = ((mb[6] & 0x7000) >> 12); 648 649 ql_log(ql_log_warn, vha, 0x5066, 650 "Peg-to-Fc Status Register:\n" 651 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 652 "nw_interface_signal_detect=0x%x" 653 "\nsfp_statis=0x%x.\n ", peg_fw_state, 654 nw_interface_link_up, nw_interface_signal_detect, 655 sfp_status); 656 ql_log(ql_log_warn, vha, 0x5067, 657 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 658 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", 659 htbt_counter, htbt_monitor_enable, 660 sfp_additional_info, sfp_multirate); 661 ql_log(ql_log_warn, vha, 0x5068, 662 "sfp_tx_fault=0x%x, link_state=0x%x, " 663 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 664 dcbx_status); 665 666 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 667 } 668 669 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 670 ql_log(ql_log_warn, vha, 0x5069, 671 "Heartbeat Failure encountered, chip reset " 672 "required.\n"); 673 674 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 675 } 676 } 677 678 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 679 ql_log(ql_log_info, vha, 0x506a, 680 "IDC Device-State changed = 0x%x.\n", mb[4]); 681 if (ha->flags.nic_core_reset_owner) 682 return; 683 qla83xx_schedule_work(vha, MBA_IDC_AEN); 684 } 685 } 686 687 int 688 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t 
int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}

fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t handle_cnt;
	uint16_t cnt, mbx;
	uint32_t handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t rscn_entry, host_pid;
	unsigned long flags;
	fc_port_t *fcport = NULL;

	if (!vha->hw->flags.fw_started)
		return;

	/* Setup to process RIO completion. */
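	/*
	 * RIO (Reduced Interrupt Operation) packs up to five completed
	 * command handles into one AEN; normalize every variant below to
	 * MBA_SCSI_COMPLETION with handles[] and handle_cnt filled in.
	 */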
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = 0;
		if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
		    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			u16 m[4];

			m[0] = RD_REG_WORD(&reg24->mailbox4);
			m[1] = RD_REG_WORD(&reg24->mailbox5);
			m[2] = RD_REG_WORD(&reg24->mailbox6);
			mbx = m[3] = RD_REG_WORD(&reg24->mailbox7);

			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n",
			    mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]);
		} else
			ql_log(ql_log_warn, vha, 0x5003,
			    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
			    mb[1], mb[2], mb[3]);

		ha->fw_dump_mpi =
		    (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
		    RD_REG_WORD(&reg24->mailbox7) & BIT_8;
		ha->isp_ops->fw_dump(vha, 1);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			if (mb[2] & BIT_0)
				ql_log(ql_log_info, vha, 0x11a0,
				    "FEC=enabled (link up).\n");
		}

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		SAVE_TOPO(ha);
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;
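		/*
		 * Pick up the extended loop-down reason code from the
		 * chip-specific mailbox register, where available.
		 */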
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
			 * Restore for Physical Port only
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled &&
				    (ha->current_topology == ISP_CFG_F)) {
					void *wwpn = ha->init_cb->port_name;

					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			if (!N2N_TOPO(ha))
				qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *         OR 0xffff for global event
		 * mb[2] = New login state
		 *         7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *       vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		     (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
"unavailable" : "logout", 1144 mb[1], mb[2], mb[3]); 1145 1146 if (mb[1] == 0xffff) 1147 goto global_port_update; 1148 1149 if (mb[1] == NPH_SNS_LID(ha)) { 1150 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1151 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1152 break; 1153 } 1154 1155 /* use handle_cnt for loop id/nport handle */ 1156 if (IS_FWI2_CAPABLE(ha)) 1157 handle_cnt = NPH_SNS; 1158 else 1159 handle_cnt = SIMPLE_NAME_SERVER; 1160 if (mb[1] == handle_cnt) { 1161 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1162 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1163 break; 1164 } 1165 1166 /* Port logout */ 1167 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 1168 if (!fcport) 1169 break; 1170 if (atomic_read(&fcport->state) != FCS_ONLINE) 1171 break; 1172 ql_dbg(ql_dbg_async, vha, 0x508a, 1173 "Marking port lost loopid=%04x portid=%06x.\n", 1174 fcport->loop_id, fcport->d_id.b24); 1175 if (qla_ini_mode_enabled(vha)) { 1176 fcport->logout_on_delete = 0; 1177 qlt_schedule_sess_for_deletion(fcport); 1178 } 1179 break; 1180 1181 global_port_update: 1182 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1183 atomic_set(&vha->loop_state, LOOP_DOWN); 1184 atomic_set(&vha->loop_down_timer, 1185 LOOP_DOWN_TIME); 1186 vha->device_flags |= DFLG_NO_CABLE; 1187 qla2x00_mark_all_devices_lost(vha); 1188 } 1189 1190 if (vha->vp_idx) { 1191 atomic_set(&vha->vp_state, VP_FAILED); 1192 fc_vport_set_state(vha->fc_vport, 1193 FC_VPORT_FAILED); 1194 qla2x00_mark_all_devices_lost(vha); 1195 } 1196 1197 vha->flags.management_server_logged_in = 0; 1198 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1199 break; 1200 } 1201 1202 /* 1203 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 1204 * event etc. earlier indicating loop is down) then process 1205 * it. Otherwise ignore it and Wait for RSCN to come in. 1206 */ 1207 atomic_set(&vha->loop_down_timer, 0); 1208 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1209 !ha->flags.n2n_ae && 1210 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1211 ql_dbg(ql_dbg_async, vha, 0x5011, 1212 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1213 mb[1], mb[2], mb[3]); 1214 break; 1215 } 1216 1217 ql_dbg(ql_dbg_async, vha, 0x5012, 1218 "Port database changed %04x %04x %04x.\n", 1219 mb[1], mb[2], mb[3]); 1220 1221 /* 1222 * Mark all devices as missing so we will login again. 1223 */ 1224 atomic_set(&vha->loop_state, LOOP_UP); 1225 vha->scan.scan_retry = 0; 1226 1227 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1228 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1229 set_bit(VP_CONFIG_OK, &vha->vp_flags); 1230 break; 1231 1232 case MBA_RSCN_UPDATE: /* State Change Registration */ 1233 /* Check if the Vport has issued a SCR */ 1234 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1235 break; 1236 /* Only handle SCNs for our Vport index. */ 1237 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1238 break; 1239 1240 ql_dbg(ql_dbg_async, vha, 0x5013, 1241 "RSCN database changed -- %04x %04x %04x.\n", 1242 mb[1], mb[2], mb[3]); 1243 1244 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1245 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1246 | vha->d_id.b.al_pa; 1247 if (rscn_entry == host_pid) { 1248 ql_dbg(ql_dbg_async, vha, 0x5014, 1249 "Ignoring RSCN update to local host " 1250 "port ID (%06x).\n", host_pid); 1251 break; 1252 } 1253 1254 /* Ignore reserved bits from RSCN-payload. 
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_handle_rscn(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		/* fall through */
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		/* Fallthru */
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			ha->flags.fw_init_done = 0;
			ql_log(ql_log_warn, vha, 0xffff,
			    "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
			    mb[0], mb[1], mb[2], mb[3]);

			if ((mb[1] & BIT_8) ||
			    (mb[2] & BIT_8)) {
				ql_log(ql_log_warn, vha, 0xd013,
				    "MPI Heartbeat stop. FW dump needed\n");
				ha->fw_dump_mpi = 1;
				ha->isp_ops->fw_dump(vha, 1);
			}
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		} else if (IS_QLA83XX(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			mb[5] = RD_REG_WORD(&reg24->mailbox5);
			mb[6] = RD_REG_WORD(&reg24->mailbox6);
			mb[7] = RD_REG_WORD(&reg24->mailbox7);
			qla83xx_handle_8200_aen(vha, mb);
		} else {
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n",
			    mb[0], mb[1], mb[2], mb[3]);
		}
		break;

	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
		memcpy(vha->dport_data, mb, sizeof(vha->dport_data));
		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
			static char *results[] = {
			    "start", "done(pass)", "done(error)", "undefined" };
			static char *types[] = {
			    "none", "dynamic", "static", "other" };
			uint result = mb[1] >> 0 & 0x3;
			uint type = mb[1] >> 6 & 0x3;
			uint sw = mb[1] >> 15 & 0x1;
			ql_dbg(ql_dbg_async, vha, 0x5052,
			    "D-Port Diagnostics: result=%s type=%s [sw=%u]\n",
			    results[result], types[type], sw);
			if (result == 2) {
				static char *reasons[] = {
				    "reserved", "unexpected reject",
				    "unexpected phase", "retry exceeded",
				    "timed out", "not supported",
				    "user stopped" };
				uint reason = mb[2] >> 0 & 0xf;
				uint phase = mb[2] >> 12 & 0xf;
				ql_dbg(ql_dbg_async, vha, 0x5052,
				    "D-Port Diagnostics: reason=%s phase=%u\n",
				    reason < 7 ? reasons[reason] : "other",
				    phase >> 1);
			}
		}
		break;

	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
		set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
		break;

	case MBA_TRANS_REMOVE:
		ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n");
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
    struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x) type %8ph.\n",
		    index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;
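	/*
	 * Preload a failure status; it is overwritten below once the
	 * IOCB is known to have completed successfully.
	 */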
	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}

static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the
		 * ELS/CT fc payload to the caller
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(pkt->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	int res;
	struct srb_iocb *els;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		if (iocb_type != ELS_IOCB_TYPE) {
			ql_dbg(ql_dbg_user, vha, 0x5047,
			    "Completing %s: (%p) type=%d.\n",
			    type, sp, sp->type);
			sp->done(sp, 0);
			return;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	if (iocb_type == ELS_IOCB_TYPE) {
		els = &sp->u.iocb_cmd;
		els->u.els_plogi.fw_status[0] = fw_status[0];
		els->u.els_plogi.fw_status[1] = fw_status[1];
		els->u.els_plogi.fw_status[2] = fw_status[2];
		els->u.els_plogi.comp_status = fw_status[0];
		if (comp_status == CS_COMPLETE) {
			res = DID_OK << 16;
		} else {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				els->u.els_plogi.len =
				    le16_to_cpu(((struct els_sts_entry_24xx *)
				    pkt)->total_byte_count);
			} else {
				els->u.els_plogi.len = 0;
				res = DID_ERROR << 16;
			}
		}
		ql_dbg(ql_dbg_user, vha, 0x503f,
		    "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
		    type, sp->handle, comp_status, fw_status[1], fw_status[2],
		    le16_to_cpu(((struct els_sts_entry_24xx *)
		    pkt)->total_byte_count));
		goto els_ct_done;
	}

	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
			    pkt)->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
			    pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
			    pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
		    fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}
els_ct_done:

	sp->done(sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];
1887 1888 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1889 if (!sp) 1890 return; 1891 1892 lio = &sp->u.iocb_cmd; 1893 type = sp->name; 1894 fcport = sp->fcport; 1895 data = lio->u.logio.data; 1896 1897 data[0] = MBS_COMMAND_ERROR; 1898 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1899 QLA_LOGIO_LOGIN_RETRIED : 0; 1900 if (logio->entry_status) { 1901 ql_log(ql_log_warn, fcport->vha, 0x5034, 1902 "Async-%s error entry - %8phC hdl=%x " 1903 "portid=%02x%02x%02x entry-status=%x.\n", 1904 type, fcport->port_name, sp->handle, fcport->d_id.b.domain, 1905 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1906 logio->entry_status); 1907 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 1908 logio, sizeof(*logio)); 1909 1910 goto logio_done; 1911 } 1912 1913 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1914 ql_dbg(ql_dbg_async, sp->vha, 0x5036, 1915 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n", 1916 type, sp->handle, fcport->d_id.b24, fcport->port_name, 1917 le32_to_cpu(logio->io_parameter[0])); 1918 1919 vha->hw->exch_starvation = 0; 1920 data[0] = MBS_COMMAND_COMPLETE; 1921 1922 if (sp->type == SRB_PRLI_CMD) { 1923 lio->u.logio.iop[0] = 1924 le32_to_cpu(logio->io_parameter[0]); 1925 lio->u.logio.iop[1] = 1926 le32_to_cpu(logio->io_parameter[1]); 1927 goto logio_done; 1928 } 1929 1930 if (sp->type != SRB_LOGIN_CMD) 1931 goto logio_done; 1932 1933 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1934 if (iop[0] & BIT_4) { 1935 fcport->port_type = FCT_TARGET; 1936 if (iop[0] & BIT_8) 1937 fcport->flags |= FCF_FCP2_DEVICE; 1938 } else if (iop[0] & BIT_5) 1939 fcport->port_type = FCT_INITIATOR; 1940 1941 if (iop[0] & BIT_7) 1942 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1943 1944 if (logio->io_parameter[7] || logio->io_parameter[8]) 1945 fcport->supported_classes |= FC_COS_CLASS2; 1946 if (logio->io_parameter[9] || logio->io_parameter[10]) 1947 fcport->supported_classes |= FC_COS_CLASS3; 1948 1949 goto logio_done; 1950 } 1951 1952 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1953 iop[1] = le32_to_cpu(logio->io_parameter[1]); 1954 lio->u.logio.iop[0] = iop[0]; 1955 lio->u.logio.iop[1] = iop[1]; 1956 switch (iop[0]) { 1957 case LSC_SCODE_PORTID_USED: 1958 data[0] = MBS_PORT_ID_USED; 1959 data[1] = LSW(iop[1]); 1960 break; 1961 case LSC_SCODE_NPORT_USED: 1962 data[0] = MBS_LOOP_ID_USED; 1963 break; 1964 case LSC_SCODE_CMD_FAILED: 1965 if (iop[1] == 0x0606) { 1966 /* 1967 * PLOGI/PRLI completed: we must have received a PLOGI/PRLI 1968 * and the target side ACKed it. 1969 */ 1970 data[0] = MBS_COMMAND_COMPLETE; 1971 goto logio_done; 1972 } 1973 data[0] = MBS_COMMAND_ERROR; 1974 break; 1975 case LSC_SCODE_NOXCB: 1976 vha->hw->exch_starvation++; 1977 if (vha->hw->exch_starvation > 5) { 1978 ql_log(ql_log_warn, vha, 0xd046, 1979 "Exchange starvation. 
Resetting RISC\n"); 1980 1981 vha->hw->exch_starvation = 0; 1982 1983 if (IS_P3P_TYPE(vha->hw)) 1984 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1985 else 1986 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1987 qla2xxx_wake_dpc(vha); 1988 } 1989 /* fall through */ 1990 default: 1991 data[0] = MBS_COMMAND_ERROR; 1992 break; 1993 } 1994 1995 ql_dbg(ql_dbg_async, sp->vha, 0x5037, 1996 "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", 1997 type, sp->handle, fcport->d_id.b24, fcport->port_name, 1998 le16_to_cpu(logio->comp_status), 1999 le32_to_cpu(logio->io_parameter[0]), 2000 le32_to_cpu(logio->io_parameter[1])); 2001 2002 logio_done: 2003 sp->done(sp, 0); 2004 } 2005 2006 static void 2007 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 2008 { 2009 const char func[] = "TMF-IOCB"; 2010 const char *type; 2011 fc_port_t *fcport; 2012 srb_t *sp; 2013 struct srb_iocb *iocb; 2014 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2015 2016 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 2017 if (!sp) 2018 return; 2019 2020 iocb = &sp->u.iocb_cmd; 2021 type = sp->name; 2022 fcport = sp->fcport; 2023 iocb->u.tmf.data = QLA_SUCCESS; 2024 2025 if (sts->entry_status) { 2026 ql_log(ql_log_warn, fcport->vha, 0x5038, 2027 "Async-%s error - hdl=%x entry-status(%x).\n", 2028 type, sp->handle, sts->entry_status); 2029 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2030 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 2031 ql_log(ql_log_warn, fcport->vha, 0x5039, 2032 "Async-%s error - hdl=%x completion status(%x).\n", 2033 type, sp->handle, sts->comp_status); 2034 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2035 } else if ((le16_to_cpu(sts->scsi_status) & 2036 SS_RESPONSE_INFO_LEN_VALID)) { 2037 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2038 ql_log(ql_log_warn, fcport->vha, 0x503b, 2039 "Async-%s error - hdl=%x not enough response(%d).\n", 2040 type, sp->handle, sts->rsp_data_len); 2041 } else if (sts->data[3]) { 2042 ql_log(ql_log_warn, fcport->vha, 0x503c, 2043 "Async-%s error - hdl=%x response(%x).\n", 2044 type, sp->handle, sts->data[3]); 2045 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2046 } 2047 } 2048 2049 if (iocb->u.tmf.data != QLA_SUCCESS) 2050 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, 2051 sts, sizeof(*sts)); 2052 2053 sp->done(sp, 0); 2054 } 2055 2056 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2057 void *tsk, srb_t *sp) 2058 { 2059 fc_port_t *fcport; 2060 struct srb_iocb *iocb; 2061 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2062 uint16_t state_flags; 2063 struct nvmefc_fcp_req *fd; 2064 uint16_t ret = QLA_SUCCESS; 2065 uint16_t comp_status = le16_to_cpu(sts->comp_status); 2066 int logit = 0; 2067 2068 iocb = &sp->u.iocb_cmd; 2069 fcport = sp->fcport; 2070 iocb->u.nvme.comp_status = comp_status; 2071 state_flags = le16_to_cpu(sts->state_flags); 2072 fd = iocb->u.nvme.desc; 2073 2074 if (unlikely(iocb->u.nvme.aen_op)) 2075 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); 2076 2077 if (unlikely(comp_status != CS_COMPLETE)) 2078 logit = 1; 2079 2080 fd->transferred_length = fd->payload_length - 2081 le32_to_cpu(sts->residual_len); 2082 2083 /* 2084 * State flags: Bit 6 (SF_NVME_ERSP) and Bit 0 (SF_FCP_RSP_DMA). 2085 * If Bit 0 is set, the response was DMA'd to the host buffer, 2086 * regardless of Bit 6. 2087 * If both bits are clear, this is the good-path case. 2088 * If Bit 6 is set and Bit 0 is clear, the response data must be 2089 * copied from the status IOCB to the response buffer. 
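 *
 * As a truth table (a sketch; the bit assignments follow the
 * SF_FCP_RSP_DMA/SF_NVME_ERSP names used in the code below, assumed
 * here to be BIT_0 and BIT_6 respectively):
 *
 *   BIT_0  BIT_6  handling
 *     0      0    good path, no response payload to copy
 *     1      1    ERSP already DMA'd to fd->rspaddr
 *     1      0    unexpected NVMe_RSP IU contents, fail the I/O
 *     0      1    copy the ERSP out of the status IOCB, word-swapping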
2090 */ 2091 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { 2092 iocb->u.nvme.rsp_pyld_len = 0; 2093 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == 2094 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { 2095 /* Response already DMA'd to fd->rspaddr. */ 2096 iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); 2097 } else if ((state_flags & SF_FCP_RSP_DMA)) { 2098 /* 2099 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this 2100 * as an error. 2101 */ 2102 iocb->u.nvme.rsp_pyld_len = 0; 2103 fd->transferred_length = 0; 2104 ql_dbg(ql_dbg_io, fcport->vha, 0x307a, 2105 "Unexpected values in NVMe_RSP IU.\n"); 2106 logit = 1; 2107 } else if (state_flags & SF_NVME_ERSP) { 2108 uint32_t *inbuf, *outbuf; 2109 uint16_t iter; 2110 2111 inbuf = (uint32_t *)&sts->nvme_ersp_data; 2112 outbuf = (uint32_t *)fd->rspaddr; 2113 iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); 2114 if (unlikely(iocb->u.nvme.rsp_pyld_len > 2115 sizeof(struct nvme_fc_ersp_iu))) { 2116 if (ql_mask_match(ql_dbg_io)) { 2117 WARN_ONCE(1, "Unexpected response payload length %u.\n", 2118 iocb->u.nvme.rsp_pyld_len); 2119 ql_log(ql_log_warn, fcport->vha, 0x5100, 2120 "Unexpected response payload length %u.\n", 2121 iocb->u.nvme.rsp_pyld_len); 2122 } 2123 iocb->u.nvme.rsp_pyld_len = 2124 sizeof(struct nvme_fc_ersp_iu); 2125 } 2126 iter = iocb->u.nvme.rsp_pyld_len >> 2; 2127 for (; iter; iter--) 2128 *outbuf++ = swab32(*inbuf++); 2129 } 2130 2131 if (state_flags & SF_NVME_ERSP) { 2132 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr; 2133 u32 tgt_xfer_len; 2134 2135 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len); 2136 if (fd->transferred_length != tgt_xfer_len) { 2137 ql_dbg(ql_dbg_io, fcport->vha, 0x3079, 2138 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", 2139 tgt_xfer_len, fd->transferred_length); 2140 logit = 1; 2141 } else if (comp_status == CS_DATA_UNDERRUN) { 2142 /* 2143 * Do not log if this is just an underflow and there 2144 * is no data loss. 2145 */ 2146 logit = 0; 2147 } 2148 } 2149 2150 if (unlikely(logit)) 2151 ql_log(ql_log_warn, fcport->vha, 0x5060, 2152 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", 2153 sp->name, sp->handle, comp_status, 2154 fd->transferred_length, le32_to_cpu(sts->residual_len), 2155 sts->ox_id); 2156 2157 /* 2158 * If transport error then Failure (HBA rejects request) 2159 * otherwise transport will handle. 
2160 */ 2161 switch (comp_status) { 2162 case CS_COMPLETE: 2163 break; 2164 2165 case CS_RESET: 2166 case CS_PORT_UNAVAILABLE: 2167 case CS_PORT_LOGGED_OUT: 2168 fcport->nvme_flag |= NVME_FLAG_RESETTING; 2169 /* fall through */ 2170 case CS_ABORTED: 2171 case CS_PORT_BUSY: 2172 fd->transferred_length = 0; 2173 iocb->u.nvme.rsp_pyld_len = 0; 2174 ret = QLA_ABORTED; 2175 break; 2176 case CS_DATA_UNDERRUN: 2177 break; 2178 default: 2179 ret = QLA_FUNCTION_FAILED; 2180 break; 2181 } 2182 sp->done(sp, ret); 2183 } 2184 2185 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, 2186 struct vp_ctrl_entry_24xx *vce) 2187 { 2188 const char func[] = "CTRLVP-IOCB"; 2189 srb_t *sp; 2190 int rval = QLA_SUCCESS; 2191 2192 sp = qla2x00_get_sp_from_handle(vha, func, req, vce); 2193 if (!sp) 2194 return; 2195 2196 if (vce->entry_status != 0) { 2197 ql_dbg(ql_dbg_vport, vha, 0x10c4, 2198 "%s: Failed to complete IOCB -- error status (%x)\n", 2199 sp->name, vce->entry_status); 2200 rval = QLA_FUNCTION_FAILED; 2201 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { 2202 ql_dbg(ql_dbg_vport, vha, 0x10c5, 2203 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", 2204 sp->name, le16_to_cpu(vce->comp_status), 2205 le16_to_cpu(vce->vp_idx_failed)); 2206 rval = QLA_FUNCTION_FAILED; 2207 } else { 2208 ql_dbg(ql_dbg_vport, vha, 0x10c6, 2209 "Done %s.\n", __func__); 2210 } 2211 2212 sp->rc = rval; 2213 sp->done(sp, rval); 2214 } 2215 2216 /* Process a single response queue entry. */ 2217 static void qla2x00_process_response_entry(struct scsi_qla_host *vha, 2218 struct rsp_que *rsp, 2219 sts_entry_t *pkt) 2220 { 2221 sts21_entry_t *sts21_entry; 2222 sts22_entry_t *sts22_entry; 2223 uint16_t handle_cnt; 2224 uint16_t cnt; 2225 2226 switch (pkt->entry_type) { 2227 case STATUS_TYPE: 2228 qla2x00_status_entry(vha, rsp, pkt); 2229 break; 2230 case STATUS_TYPE_21: 2231 sts21_entry = (sts21_entry_t *)pkt; 2232 handle_cnt = sts21_entry->handle_count; 2233 for (cnt = 0; cnt < handle_cnt; cnt++) 2234 qla2x00_process_completed_request(vha, rsp->req, 2235 sts21_entry->handle[cnt]); 2236 break; 2237 case STATUS_TYPE_22: 2238 sts22_entry = (sts22_entry_t *)pkt; 2239 handle_cnt = sts22_entry->handle_count; 2240 for (cnt = 0; cnt < handle_cnt; cnt++) 2241 qla2x00_process_completed_request(vha, rsp->req, 2242 sts22_entry->handle[cnt]); 2243 break; 2244 case STATUS_CONT_TYPE: 2245 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2246 break; 2247 case MBX_IOCB_TYPE: 2248 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); 2249 break; 2250 case CT_IOCB_TYPE: 2251 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2252 break; 2253 default: 2254 /* Type Not Supported. */ 2255 ql_log(ql_log_warn, vha, 0x504a, 2256 "Received unknown response pkt type %x entry status=%x.\n", 2257 pkt->entry_type, pkt->entry_status); 2258 break; 2259 } 2260 } 2261 2262 /** 2263 * qla2x00_process_response_queue() - Process response queue entries. 
2264 * @rsp: response queue 2265 */ 2266 void 2267 qla2x00_process_response_queue(struct rsp_que *rsp) 2268 { 2269 struct scsi_qla_host *vha; 2270 struct qla_hw_data *ha = rsp->hw; 2271 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2272 sts_entry_t *pkt; 2273 2274 vha = pci_get_drvdata(ha->pdev); 2275 2276 if (!vha->flags.online) 2277 return; 2278 2279 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2280 pkt = (sts_entry_t *)rsp->ring_ptr; 2281 2282 rsp->ring_index++; 2283 if (rsp->ring_index == rsp->length) { 2284 rsp->ring_index = 0; 2285 rsp->ring_ptr = rsp->ring; 2286 } else { 2287 rsp->ring_ptr++; 2288 } 2289 2290 if (pkt->entry_status != 0) { 2291 qla2x00_error_entry(vha, rsp, pkt); 2292 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2293 wmb(); 2294 continue; 2295 } 2296 2297 qla2x00_process_response_entry(vha, rsp, pkt); 2298 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2299 wmb(); 2300 } 2301 2302 /* Adjust ring index */ 2303 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 2304 } 2305 2306 static inline void 2307 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 2308 uint32_t sense_len, struct rsp_que *rsp, int res) 2309 { 2310 struct scsi_qla_host *vha = sp->vha; 2311 struct scsi_cmnd *cp = GET_CMD_SP(sp); 2312 uint32_t track_sense_len; 2313 2314 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 2315 sense_len = SCSI_SENSE_BUFFERSIZE; 2316 2317 SET_CMD_SENSE_LEN(sp, sense_len); 2318 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 2319 track_sense_len = sense_len; 2320 2321 if (sense_len > par_sense_len) 2322 sense_len = par_sense_len; 2323 2324 memcpy(cp->sense_buffer, sense_data, sense_len); 2325 2326 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 2327 track_sense_len -= sense_len; 2328 SET_CMD_SENSE_LEN(sp, track_sense_len); 2329 2330 if (track_sense_len != 0) { 2331 rsp->status_srb = sp; 2332 cp->result = res; 2333 } 2334 2335 if (sense_len) { 2336 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 2337 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", 2338 sp->vha->host_no, cp->device->id, cp->device->lun, 2339 cp); 2340 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 2341 cp->sense_buffer, sense_len); 2342 } 2343 } 2344 2345 struct scsi_dif_tuple { 2346 __be16 guard; /* Checksum */ 2347 __be16 app_tag; /* APPL identifier */ 2348 __be32 ref_tag; /* Target LBA or indirect LBA */ 2349 }; 2350 2351 /* 2352 * Checks the guard or meta-data for the type of error 2353 * detected by the HBA. In case of errors, we set the 2354 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 2355 * to indicate to the kernel that the HBA detected error. 
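 * The checks below use ASC 0x10 with ASCQ 0x1 for a guard tag
 * mismatch, 0x3 for a reference tag mismatch and 0x2 for an
 * application tag mismatch.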
2356 */ 2357 static inline int 2358 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 2359 { 2360 struct scsi_qla_host *vha = sp->vha; 2361 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2362 uint8_t *ap = &sts24->data[12]; 2363 uint8_t *ep = &sts24->data[20]; 2364 uint32_t e_ref_tag, a_ref_tag; 2365 uint16_t e_app_tag, a_app_tag; 2366 uint16_t e_guard, a_guard; 2367 2368 /* 2369 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 2370 * would make guard field appear at offset 2 2371 */ 2372 a_guard = get_unaligned_le16(ap + 2); 2373 a_app_tag = get_unaligned_le16(ap + 0); 2374 a_ref_tag = get_unaligned_le32(ap + 4); 2375 e_guard = get_unaligned_le16(ep + 2); 2376 e_app_tag = get_unaligned_le16(ep + 0); 2377 e_ref_tag = get_unaligned_le32(ep + 4); 2378 2379 ql_dbg(ql_dbg_io, vha, 0x3023, 2380 "iocb(s) %p Returned STATUS.\n", sts24); 2381 2382 ql_dbg(ql_dbg_io, vha, 0x3024, 2383 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 2384 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 2385 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 2386 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 2387 a_app_tag, e_app_tag, a_guard, e_guard); 2388 2389 /* 2390 * Ignore sector if: 2391 * For type 3: ref & app tag is all 'f's 2392 * For type 0,1,2: app tag is all 'f's 2393 */ 2394 if ((a_app_tag == T10_PI_APP_ESCAPE) && 2395 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 2396 (a_ref_tag == T10_PI_REF_ESCAPE))) { 2397 uint32_t blocks_done, resid; 2398 sector_t lba_s = scsi_get_lba(cmd); 2399 2400 /* 2TB boundary case covered automatically with this */ 2401 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 2402 2403 resid = scsi_bufflen(cmd) - (blocks_done * 2404 cmd->device->sector_size); 2405 2406 scsi_set_resid(cmd, resid); 2407 cmd->result = DID_OK << 16; 2408 2409 /* Update protection tag */ 2410 if (scsi_prot_sg_count(cmd)) { 2411 uint32_t i, j = 0, k = 0, num_ent; 2412 struct scatterlist *sg; 2413 struct t10_pi_tuple *spt; 2414 2415 /* Patch the corresponding protection tags */ 2416 scsi_for_each_prot_sg(cmd, sg, 2417 scsi_prot_sg_count(cmd), i) { 2418 num_ent = sg_dma_len(sg) / 8; 2419 if (k + num_ent < blocks_done) { 2420 k += num_ent; 2421 continue; 2422 } 2423 j = blocks_done - k - 1; 2424 k = blocks_done; 2425 break; 2426 } 2427 2428 if (k != blocks_done) { 2429 ql_log(ql_log_warn, vha, 0x302f, 2430 "unexpected tag values tag:lba=%x:%llx)\n", 2431 e_ref_tag, (unsigned long long)lba_s); 2432 return 1; 2433 } 2434 2435 spt = page_address(sg_page(sg)) + sg->offset; 2436 spt += j; 2437 2438 spt->app_tag = T10_PI_APP_ESCAPE; 2439 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 2440 spt->ref_tag = T10_PI_REF_ESCAPE; 2441 } 2442 2443 return 0; 2444 } 2445 2446 /* check guard */ 2447 if (e_guard != a_guard) { 2448 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2449 0x10, 0x1); 2450 set_driver_byte(cmd, DRIVER_SENSE); 2451 set_host_byte(cmd, DID_ABORT); 2452 cmd->result |= SAM_STAT_CHECK_CONDITION; 2453 return 1; 2454 } 2455 2456 /* check ref tag */ 2457 if (e_ref_tag != a_ref_tag) { 2458 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2459 0x10, 0x3); 2460 set_driver_byte(cmd, DRIVER_SENSE); 2461 set_host_byte(cmd, DID_ABORT); 2462 cmd->result |= SAM_STAT_CHECK_CONDITION; 2463 return 1; 2464 } 2465 2466 /* check appl tag */ 2467 if (e_app_tag != a_app_tag) { 2468 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2469 0x10, 0x2); 2470 set_driver_byte(cmd, DRIVER_SENSE); 2471 set_host_byte(cmd, DID_ABORT); 2472 
cmd->result |= SAM_STAT_CHECK_CONDITION; 2473 return 1; 2474 } 2475 2476 return 1; 2477 } 2478 2479 static void 2480 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 2481 struct req_que *req, uint32_t index) 2482 { 2483 struct qla_hw_data *ha = vha->hw; 2484 srb_t *sp; 2485 uint16_t comp_status; 2486 uint16_t scsi_status; 2487 uint16_t thread_id; 2488 uint32_t rval = EXT_STATUS_OK; 2489 struct bsg_job *bsg_job = NULL; 2490 struct fc_bsg_request *bsg_request; 2491 struct fc_bsg_reply *bsg_reply; 2492 sts_entry_t *sts = pkt; 2493 struct sts_entry_24xx *sts24 = pkt; 2494 2495 /* Validate handle. */ 2496 if (index >= req->num_outstanding_cmds) { 2497 ql_log(ql_log_warn, vha, 0x70af, 2498 "Invalid SCSI completion handle 0x%x.\n", index); 2499 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2500 return; 2501 } 2502 2503 sp = req->outstanding_cmds[index]; 2504 if (!sp) { 2505 ql_log(ql_log_warn, vha, 0x70b0, 2506 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 2507 req->id, index); 2508 2509 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2510 return; 2511 } 2512 2513 /* Free outstanding command slot. */ 2514 req->outstanding_cmds[index] = NULL; 2515 bsg_job = sp->u.bsg_job; 2516 bsg_request = bsg_job->request; 2517 bsg_reply = bsg_job->reply; 2518 2519 if (IS_FWI2_CAPABLE(ha)) { 2520 comp_status = le16_to_cpu(sts24->comp_status); 2521 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2522 } else { 2523 comp_status = le16_to_cpu(sts->comp_status); 2524 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2525 } 2526 2527 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 2528 switch (comp_status) { 2529 case CS_COMPLETE: 2530 if (scsi_status == 0) { 2531 bsg_reply->reply_payload_rcv_len = 2532 bsg_job->reply_payload.payload_len; 2533 vha->qla_stats.input_bytes += 2534 bsg_reply->reply_payload_rcv_len; 2535 vha->qla_stats.input_requests++; 2536 rval = EXT_STATUS_OK; 2537 } 2538 goto done; 2539 2540 case CS_DATA_OVERRUN: 2541 ql_dbg(ql_dbg_user, vha, 0x70b1, 2542 "Command completed with data overrun thread_id=%d\n", 2543 thread_id); 2544 rval = EXT_STATUS_DATA_OVERRUN; 2545 break; 2546 2547 case CS_DATA_UNDERRUN: 2548 ql_dbg(ql_dbg_user, vha, 0x70b2, 2549 "Command completed with data underrun thread_id=%d\n", 2550 thread_id); 2551 rval = EXT_STATUS_DATA_UNDERRUN; 2552 break; 2553 case CS_BIDIR_RD_OVERRUN: 2554 ql_dbg(ql_dbg_user, vha, 0x70b3, 2555 "Command completed with read data overrun thread_id=%d\n", 2556 thread_id); 2557 rval = EXT_STATUS_DATA_OVERRUN; 2558 break; 2559 2560 case CS_BIDIR_RD_WR_OVERRUN: 2561 ql_dbg(ql_dbg_user, vha, 0x70b4, 2562 "Command completed with read and write data overrun " 2563 "thread_id=%d\n", thread_id); 2564 rval = EXT_STATUS_DATA_OVERRUN; 2565 break; 2566 2567 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 2568 ql_dbg(ql_dbg_user, vha, 0x70b5, 2569 "Command completed with read data over and write data " 2570 "underrun thread_id=%d\n", thread_id); 2571 rval = EXT_STATUS_DATA_OVERRUN; 2572 break; 2573 2574 case CS_BIDIR_RD_UNDERRUN: 2575 ql_dbg(ql_dbg_user, vha, 0x70b6, 2576 "Command completed with read data underrun " 2577 "thread_id=%d\n", thread_id); 2578 rval = EXT_STATUS_DATA_UNDERRUN; 2579 break; 2580 2581 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 2582 ql_dbg(ql_dbg_user, vha, 0x70b7, 2583 "Command completed with read data under and write data " 2584 "overrun thread_id=%d\n", thread_id); 2585 rval = EXT_STATUS_DATA_UNDERRUN; 2586 break; 2587 2588 case CS_BIDIR_RD_WR_UNDERRUN: 2589 ql_dbg(ql_dbg_user, vha, 0x70b8, 2590 "Command completed 
with read and write data underrun " 2591 "thread_id=%d\n", thread_id); 2592 rval = EXT_STATUS_DATA_UNDERRUN; 2593 break; 2594 2595 case CS_BIDIR_DMA: 2596 ql_dbg(ql_dbg_user, vha, 0x70b9, 2597 "Command completed with data DMA error thread_id=%d\n", 2598 thread_id); 2599 rval = EXT_STATUS_DMA_ERR; 2600 break; 2601 2602 case CS_TIMEOUT: 2603 ql_dbg(ql_dbg_user, vha, 0x70ba, 2604 "Command completed with timeout thread_id=%d\n", 2605 thread_id); 2606 rval = EXT_STATUS_TIMEOUT; 2607 break; 2608 default: 2609 ql_dbg(ql_dbg_user, vha, 0x70bb, 2610 "Command completed with completion status=0x%x " 2611 "thread_id=%d\n", comp_status, thread_id); 2612 rval = EXT_STATUS_ERR; 2613 break; 2614 } 2615 bsg_reply->reply_payload_rcv_len = 0; 2616 2617 done: 2618 /* Return the vendor specific reply to API */ 2619 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 2620 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2621 /* Always return DID_OK, bsg will send the vendor specific response 2622 * in this case only */ 2623 sp->done(sp, DID_OK << 16); 2624 2625 } 2626 2627 /** 2628 * qla2x00_status_entry() - Process a Status IOCB entry. 2629 * @vha: SCSI driver HA context 2630 * @rsp: response queue 2631 * @pkt: Entry pointer 2632 */ 2633 static void 2634 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 2635 { 2636 srb_t *sp; 2637 fc_port_t *fcport; 2638 struct scsi_cmnd *cp; 2639 sts_entry_t *sts = pkt; 2640 struct sts_entry_24xx *sts24 = pkt; 2641 uint16_t comp_status; 2642 uint16_t scsi_status; 2643 uint16_t ox_id; 2644 uint8_t lscsi_status; 2645 int32_t resid; 2646 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 2647 fw_resid_len; 2648 uint8_t *rsp_info, *sense_data; 2649 struct qla_hw_data *ha = vha->hw; 2650 uint32_t handle; 2651 uint16_t que; 2652 struct req_que *req; 2653 int logit = 1; 2654 int res = 0; 2655 uint16_t state_flags = 0; 2656 uint16_t retry_delay = 0; 2657 2658 if (IS_FWI2_CAPABLE(ha)) { 2659 comp_status = le16_to_cpu(sts24->comp_status); 2660 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2661 state_flags = le16_to_cpu(sts24->state_flags); 2662 } else { 2663 comp_status = le16_to_cpu(sts->comp_status); 2664 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2665 } 2666 handle = (uint32_t) LSW(sts->handle); 2667 que = MSW(sts->handle); 2668 req = ha->req_q_map[que]; 2669 2670 /* Check for invalid queue pointer */ 2671 if (req == NULL || 2672 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 2673 ql_dbg(ql_dbg_io, vha, 0x3059, 2674 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 2675 "que=%u.\n", sts->handle, req, que); 2676 return; 2677 } 2678 2679 /* Validate handle. 
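 * The 32-bit status handle packs the request queue id in its MSW and
 * the outstanding-command index in its LSW, hence the LSW/MSW split
 * above.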
*/ 2680 if (handle < req->num_outstanding_cmds) { 2681 sp = req->outstanding_cmds[handle]; 2682 if (!sp) { 2683 ql_dbg(ql_dbg_io, vha, 0x3075, 2684 "%s(%ld): Already returned command for status handle (0x%x).\n", 2685 __func__, vha->host_no, sts->handle); 2686 return; 2687 } 2688 } else { 2689 ql_dbg(ql_dbg_io, vha, 0x3017, 2690 "Invalid status handle, out of range (0x%x).\n", 2691 sts->handle); 2692 2693 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 2694 if (IS_P3P_TYPE(ha)) 2695 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2696 else 2697 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2698 qla2xxx_wake_dpc(vha); 2699 } 2700 return; 2701 } 2702 2703 if (sp->cmd_type != TYPE_SRB) { 2704 req->outstanding_cmds[handle] = NULL; 2705 ql_dbg(ql_dbg_io, vha, 0x3015, 2706 "Unknown sp->cmd_type %x %p).\n", 2707 sp->cmd_type, sp); 2708 return; 2709 } 2710 2711 /* NVME completion. */ 2712 if (sp->type == SRB_NVME_CMD) { 2713 req->outstanding_cmds[handle] = NULL; 2714 qla24xx_nvme_iocb_entry(vha, req, pkt, sp); 2715 return; 2716 } 2717 2718 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2719 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2720 return; 2721 } 2722 2723 /* Task Management completion. */ 2724 if (sp->type == SRB_TM_CMD) { 2725 qla24xx_tm_iocb_entry(vha, req, pkt); 2726 return; 2727 } 2728 2729 /* Fast path completion. */ 2730 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2731 qla2x00_process_completed_request(vha, req, handle); 2732 2733 return; 2734 } 2735 2736 req->outstanding_cmds[handle] = NULL; 2737 cp = GET_CMD_SP(sp); 2738 if (cp == NULL) { 2739 ql_dbg(ql_dbg_io, vha, 0x3018, 2740 "Command already returned (0x%x/%p).\n", 2741 sts->handle, sp); 2742 2743 return; 2744 } 2745 2746 lscsi_status = scsi_status & STATUS_MASK; 2747 2748 fcport = sp->fcport; 2749 2750 ox_id = 0; 2751 sense_len = par_sense_len = rsp_info_len = resid_len = 2752 fw_resid_len = 0; 2753 if (IS_FWI2_CAPABLE(ha)) { 2754 if (scsi_status & SS_SENSE_LEN_VALID) 2755 sense_len = le32_to_cpu(sts24->sense_len); 2756 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2757 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 2758 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 2759 resid_len = le32_to_cpu(sts24->rsp_residual_count); 2760 if (comp_status == CS_DATA_UNDERRUN) 2761 fw_resid_len = le32_to_cpu(sts24->residual_len); 2762 rsp_info = sts24->data; 2763 sense_data = sts24->data; 2764 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 2765 ox_id = le16_to_cpu(sts24->ox_id); 2766 par_sense_len = sizeof(sts24->data); 2767 /* Valid values of the retry delay timer are 0x1-0xffef */ 2768 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) { 2769 retry_delay = sts24->retry_delay & 0x3fff; 2770 ql_dbg(ql_dbg_io, sp->vha, 0x3033, 2771 "%s: scope=%#x retry_delay=%#x\n", __func__, 2772 sts24->retry_delay >> 14, retry_delay); 2773 } 2774 } else { 2775 if (scsi_status & SS_SENSE_LEN_VALID) 2776 sense_len = le16_to_cpu(sts->req_sense_length); 2777 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2778 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 2779 resid_len = le32_to_cpu(sts->residual_length); 2780 rsp_info = sts->rsp_info; 2781 sense_data = sts->req_sense_data; 2782 par_sense_len = sizeof(sts->req_sense_data); 2783 } 2784 2785 /* Check for any FCP transport errors. */ 2786 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 2787 /* Sense data lies beyond any FCP RESPONSE data. 
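 * For FWI2 firmware both share sts24->data[], so the sense pointer is
 * advanced past rsp_info_len bytes below before any sense handling.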
*/ 2788 if (IS_FWI2_CAPABLE(ha)) { 2789 sense_data += rsp_info_len; 2790 par_sense_len -= rsp_info_len; 2791 } 2792 if (rsp_info_len > 3 && rsp_info[3]) { 2793 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 2794 "FCP I/O protocol failure (0x%x/0x%x).\n", 2795 rsp_info_len, rsp_info[3]); 2796 2797 res = DID_BUS_BUSY << 16; 2798 goto out; 2799 } 2800 } 2801 2802 /* Check for overrun. */ 2803 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 2804 scsi_status & SS_RESIDUAL_OVER) 2805 comp_status = CS_DATA_OVERRUN; 2806 2807 /* 2808 * Check retry_delay_timer value if we receive a busy or 2809 * queue full. 2810 */ 2811 if (lscsi_status == SAM_STAT_TASK_SET_FULL || 2812 lscsi_status == SAM_STAT_BUSY) 2813 qla2x00_set_retry_delay_timestamp(fcport, retry_delay); 2814 2815 /* 2816 * Based on Host and scsi status generate status code for Linux 2817 */ 2818 switch (comp_status) { 2819 case CS_COMPLETE: 2820 case CS_QUEUE_FULL: 2821 if (scsi_status == 0) { 2822 res = DID_OK << 16; 2823 break; 2824 } 2825 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 2826 resid = resid_len; 2827 scsi_set_resid(cp, resid); 2828 2829 if (!lscsi_status && 2830 ((unsigned)(scsi_bufflen(cp) - resid) < 2831 cp->underflow)) { 2832 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 2833 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 2834 resid, scsi_bufflen(cp)); 2835 2836 res = DID_ERROR << 16; 2837 break; 2838 } 2839 } 2840 res = DID_OK << 16 | lscsi_status; 2841 2842 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2843 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 2844 "QUEUE FULL detected.\n"); 2845 break; 2846 } 2847 logit = 0; 2848 if (lscsi_status != SS_CHECK_CONDITION) 2849 break; 2850 2851 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2852 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2853 break; 2854 2855 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 2856 rsp, res); 2857 break; 2858 2859 case CS_DATA_UNDERRUN: 2860 /* Use F/W calculated residual length. */ 2861 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 2862 scsi_set_resid(cp, resid); 2863 if (scsi_status & SS_RESIDUAL_UNDER) { 2864 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 2865 ql_dbg(ql_dbg_io, fcport->vha, 0x301d, 2866 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 2867 resid, scsi_bufflen(cp)); 2868 2869 res = DID_ERROR << 16 | lscsi_status; 2870 goto check_scsi_status; 2871 } 2872 2873 if (!lscsi_status && 2874 ((unsigned)(scsi_bufflen(cp) - resid) < 2875 cp->underflow)) { 2876 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 2877 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 2878 resid, scsi_bufflen(cp)); 2879 2880 res = DID_ERROR << 16; 2881 break; 2882 } 2883 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 2884 lscsi_status != SAM_STAT_BUSY) { 2885 /* 2886 * scsi status of task set and busy are considered to be 2887 * task not completed. 2888 */ 2889 2890 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 2891 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 2892 resid, scsi_bufflen(cp)); 2893 2894 res = DID_ERROR << 16 | lscsi_status; 2895 goto check_scsi_status; 2896 } else { 2897 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 2898 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 2899 scsi_status, lscsi_status); 2900 } 2901 2902 res = DID_OK << 16 | lscsi_status; 2903 logit = 0; 2904 2905 check_scsi_status: 2906 /* 2907 * Check to see if SCSI Status is non zero. If so report SCSI 2908 * Status. 
2909 */ 2910 if (lscsi_status != 0) { 2911 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2912 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 2913 "QUEUE FULL detected.\n"); 2914 logit = 1; 2915 break; 2916 } 2917 if (lscsi_status != SS_CHECK_CONDITION) 2918 break; 2919 2920 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2921 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2922 break; 2923 2924 qla2x00_handle_sense(sp, sense_data, par_sense_len, 2925 sense_len, rsp, res); 2926 } 2927 break; 2928 2929 case CS_PORT_LOGGED_OUT: 2930 case CS_PORT_CONFIG_CHG: 2931 case CS_PORT_BUSY: 2932 case CS_INCOMPLETE: 2933 case CS_PORT_UNAVAILABLE: 2934 case CS_TIMEOUT: 2935 case CS_RESET: 2936 2937 /* 2938 * We are going to have the fc class block the rport 2939 * while we try to recover so instruct the mid layer 2940 * to requeue until the class decides how to handle this. 2941 */ 2942 res = DID_TRANSPORT_DISRUPTED << 16; 2943 2944 if (comp_status == CS_TIMEOUT) { 2945 if (IS_FWI2_CAPABLE(ha)) 2946 break; 2947 else if ((le16_to_cpu(sts->status_flags) & 2948 SF_LOGOUT_SENT) == 0) 2949 break; 2950 } 2951 2952 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2953 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 2954 "Port to be marked lost on fcport=%02x%02x%02x, current " 2955 "port state= %s comp_status %x.\n", fcport->d_id.b.domain, 2956 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2957 port_state_str[FCS_ONLINE], 2958 comp_status); 2959 2960 qlt_schedule_sess_for_deletion(fcport); 2961 } 2962 2963 break; 2964 2965 case CS_ABORTED: 2966 res = DID_RESET << 16; 2967 break; 2968 2969 case CS_DIF_ERROR: 2970 logit = qla2x00_handle_dif_error(sp, sts24); 2971 res = cp->result; 2972 break; 2973 2974 case CS_TRANSPORT: 2975 res = DID_ERROR << 16; 2976 2977 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 2978 break; 2979 2980 if (state_flags & BIT_4) 2981 scmd_printk(KERN_WARNING, cp, 2982 "Unsupported device '%s' found.\n", 2983 cp->device->vendor); 2984 break; 2985 2986 case CS_DMA: 2987 ql_log(ql_log_info, fcport->vha, 0x3022, 2988 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 2989 comp_status, scsi_status, res, vha->host_no, 2990 cp->device->id, cp->device->lun, fcport->d_id.b24, 2991 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, 2992 resid_len, fw_resid_len, sp, cp); 2993 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, 2994 pkt, sizeof(*sts24)); 2995 res = DID_ERROR << 16; 2996 break; 2997 default: 2998 res = DID_ERROR << 16; 2999 break; 3000 } 3001 3002 out: 3003 if (logit) 3004 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 3005 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " 3006 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " 3007 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3008 comp_status, scsi_status, res, vha->host_no, 3009 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 3010 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 3011 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3012 resid_len, fw_resid_len, sp, cp); 3013 3014 if (rsp->status_srb == NULL) 3015 sp->done(sp, res); 3016 } 3017 3018 /** 3019 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 3020 * @rsp: response queue 3021 * @pkt: Entry pointer 3022 * 3023 * Extended sense data. 
3024 */ 3025 static void 3026 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 3027 { 3028 uint8_t sense_sz = 0; 3029 struct qla_hw_data *ha = rsp->hw; 3030 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 3031 srb_t *sp = rsp->status_srb; 3032 struct scsi_cmnd *cp; 3033 uint32_t sense_len; 3034 uint8_t *sense_ptr; 3035 3036 if (!sp || !GET_CMD_SENSE_LEN(sp)) 3037 return; 3038 3039 sense_len = GET_CMD_SENSE_LEN(sp); 3040 sense_ptr = GET_CMD_SENSE_PTR(sp); 3041 3042 cp = GET_CMD_SP(sp); 3043 if (cp == NULL) { 3044 ql_log(ql_log_warn, vha, 0x3025, 3045 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 3046 3047 rsp->status_srb = NULL; 3048 return; 3049 } 3050 3051 if (sense_len > sizeof(pkt->data)) 3052 sense_sz = sizeof(pkt->data); 3053 else 3054 sense_sz = sense_len; 3055 3056 /* Move sense data. */ 3057 if (IS_FWI2_CAPABLE(ha)) 3058 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 3059 memcpy(sense_ptr, pkt->data, sense_sz); 3060 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 3061 sense_ptr, sense_sz); 3062 3063 sense_len -= sense_sz; 3064 sense_ptr += sense_sz; 3065 3066 SET_CMD_SENSE_PTR(sp, sense_ptr); 3067 SET_CMD_SENSE_LEN(sp, sense_len); 3068 3069 /* Place command on done queue. */ 3070 if (sense_len == 0) { 3071 rsp->status_srb = NULL; 3072 sp->done(sp, cp->result); 3073 } 3074 } 3075 3076 /** 3077 * qla2x00_error_entry() - Process an error entry. 3078 * @vha: SCSI driver HA context 3079 * @rsp: response queue 3080 * @pkt: Entry pointer 3081 * return : 1=allow further error analysis. 0=no additional error analysis. 3082 */ 3083 static int 3084 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 3085 { 3086 srb_t *sp; 3087 struct qla_hw_data *ha = vha->hw; 3088 const char func[] = "ERROR-IOCB"; 3089 uint16_t que = MSW(pkt->handle); 3090 struct req_que *req = NULL; 3091 int res = DID_ERROR << 16; 3092 3093 ql_dbg(ql_dbg_async, vha, 0x502a, 3094 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", 3095 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); 3096 3097 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 3098 goto fatal; 3099 3100 req = ha->req_q_map[que]; 3101 3102 if (pkt->entry_status & RF_BUSY) 3103 res = DID_BUS_BUSY << 16; 3104 3105 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) 3106 return 0; 3107 3108 switch (pkt->entry_type) { 3109 case NOTIFY_ACK_TYPE: 3110 case STATUS_TYPE: 3111 case STATUS_CONT_TYPE: 3112 case LOGINOUT_PORT_IOCB_TYPE: 3113 case CT_IOCB_TYPE: 3114 case ELS_IOCB_TYPE: 3115 case ABORT_IOCB_TYPE: 3116 case MBX_IOCB_TYPE: 3117 default: 3118 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3119 if (sp) { 3120 sp->done(sp, res); 3121 return 0; 3122 } 3123 break; 3124 3125 case ABTS_RESP_24XX: 3126 case CTIO_TYPE7: 3127 case CTIO_CRC2: 3128 return 1; 3129 } 3130 fatal: 3131 ql_log(ql_log_warn, vha, 0x5030, 3132 "Error entry - invalid handle/queue (%04x).\n", que); 3133 return 0; 3134 } 3135 3136 /** 3137 * qla24xx_mbx_completion() - Process mailbox command completions. 3138 * @vha: SCSI driver HA context 3139 * @mb0: Mailbox0 register 3140 */ 3141 static void 3142 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 3143 { 3144 uint16_t cnt; 3145 uint32_t mboxes; 3146 uint16_t __iomem *wptr; 3147 struct qla_hw_data *ha = vha->hw; 3148 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3149 3150 /* Read all mbox registers? 
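 * No: mboxes is a bitmask (mcp->in_mb when a caller is waiting,
 * otherwise one bit per supported register), and only registers whose
 * bit is set are read into ha->mailbox_out[] below.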
*/ 3151 WARN_ON_ONCE(ha->mbx_count > 32); 3152 mboxes = (1ULL << ha->mbx_count) - 1; 3153 if (!ha->mcp) 3154 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 3155 else 3156 mboxes = ha->mcp->in_mb; 3157 3158 /* Load return mailbox registers. */ 3159 ha->flags.mbox_int = 1; 3160 ha->mailbox_out[0] = mb0; 3161 mboxes >>= 1; 3162 wptr = (uint16_t __iomem *)&reg->mailbox1; 3163 3164 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 3165 if (mboxes & BIT_0) 3166 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 3167 3168 mboxes >>= 1; 3169 wptr++; 3170 } 3171 } 3172 3173 static void 3174 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 3175 struct abort_entry_24xx *pkt) 3176 { 3177 const char func[] = "ABT_IOCB"; 3178 srb_t *sp; 3179 struct srb_iocb *abt; 3180 3181 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3182 if (!sp) 3183 return; 3184 3185 abt = &sp->u.iocb_cmd; 3186 abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle); 3187 sp->done(sp, 0); 3188 } 3189 3190 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, 3191 struct pt_ls4_request *pkt, struct req_que *req) 3192 { 3193 srb_t *sp; 3194 const char func[] = "LS4_IOCB"; 3195 uint16_t comp_status; 3196 3197 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3198 if (!sp) 3199 return; 3200 3201 comp_status = le16_to_cpu(pkt->status); 3202 sp->done(sp, comp_status); 3203 } 3204 3205 /** 3206 * qla24xx_process_response_queue() - Process response queue entries. 3207 * @vha: SCSI driver HA context 3208 * @rsp: response queue 3209 */ 3210 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 3211 struct rsp_que *rsp) 3212 { 3213 struct sts_entry_24xx *pkt; 3214 struct qla_hw_data *ha = vha->hw; 3215 3216 if (!ha->flags.fw_started) 3217 return; 3218 3219 if (rsp->qpair->cpuid != smp_processor_id()) 3220 qla_cpu_update(rsp->qpair, smp_processor_id()); 3221 3222 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 3223 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 3224 3225 rsp->ring_index++; 3226 if (rsp->ring_index == rsp->length) { 3227 rsp->ring_index = 0; 3228 rsp->ring_ptr = rsp->ring; 3229 } else { 3230 rsp->ring_ptr++; 3231 } 3232 3233 if (pkt->entry_status != 0) { 3234 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt)) 3235 goto process_err; 3236 3237 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3238 wmb(); 3239 continue; 3240 } 3241 process_err: 3242 3243 switch (pkt->entry_type) { 3244 case STATUS_TYPE: 3245 qla2x00_status_entry(vha, rsp, pkt); 3246 break; 3247 case STATUS_CONT_TYPE: 3248 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 3249 break; 3250 case VP_RPT_ID_IOCB_TYPE: 3251 qla24xx_report_id_acquisition(vha, 3252 (struct vp_rpt_id_entry_24xx *)pkt); 3253 break; 3254 case LOGINOUT_PORT_IOCB_TYPE: 3255 qla24xx_logio_entry(vha, rsp->req, 3256 (struct logio_entry_24xx *)pkt); 3257 break; 3258 case CT_IOCB_TYPE: 3259 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 3260 break; 3261 case ELS_IOCB_TYPE: 3262 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 3263 break; 3264 case ABTS_RECV_24XX: 3265 if (qla_ini_mode_enabled(vha)) { 3266 qla24xx_purex_iocb(vha, pkt, 3267 qla24xx_process_abts); 3268 break; 3269 } 3270 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3271 IS_QLA28XX(ha)) { 3272 /* ensure that the ATIO queue is empty */ 3273 qlt_handle_abts_recv(vha, rsp, 3274 (response_t *)pkt); 3275 break; 3276 } else { 3277 qlt_24xx_process_atio_queue(vha, 1); 3278 } 3279 /* fall through */ 3280 case ABTS_RESP_24XX: 3281 case CTIO_TYPE7: 3282 case CTIO_CRC2: 3283 
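			/* Remaining target-mode IOCBs go to the target core. */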
qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); 3284 break; 3285 case PT_LS4_REQUEST: 3286 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt, 3287 rsp->req); 3288 break; 3289 case NOTIFY_ACK_TYPE: 3290 if (pkt->handle == QLA_TGT_SKIP_HANDLE) 3291 qlt_response_pkt_all_vps(vha, rsp, 3292 (response_t *)pkt); 3293 else 3294 qla24xxx_nack_iocb_entry(vha, rsp->req, 3295 (struct nack_to_isp *)pkt); 3296 break; 3297 case MARKER_TYPE: 3298 /* Do nothing in this case, this check is to prevent it 3299 * from falling into default case 3300 */ 3301 break; 3302 case ABORT_IOCB_TYPE: 3303 qla24xx_abort_iocb_entry(vha, rsp->req, 3304 (struct abort_entry_24xx *)pkt); 3305 break; 3306 case MBX_IOCB_TYPE: 3307 qla24xx_mbx_iocb_entry(vha, rsp->req, 3308 (struct mbx_24xx_entry *)pkt); 3309 break; 3310 case VP_CTRL_IOCB_TYPE: 3311 qla_ctrlvp_completed(vha, rsp->req, 3312 (struct vp_ctrl_entry_24xx *)pkt); 3313 break; 3314 case PUREX_IOCB_TYPE: 3315 { 3316 struct purex_entry_24xx *purex = (void *)pkt; 3317 3318 if (purex->els_frame_payload[3] != ELS_COMMAND_RDP) { 3319 ql_dbg(ql_dbg_init, vha, 0x5091, 3320 "Discarding ELS Request opcode %#x...\n", 3321 purex->els_frame_payload[3]); 3322 break; 3323 } 3324 qla24xx_purex_iocb(vha, pkt, qla24xx_process_purex_rdp); 3325 break; 3326 } 3327 default: 3328 /* Type Not Supported. */ 3329 ql_dbg(ql_dbg_async, vha, 0x5042, 3330 "Received unknown response pkt type %x " 3331 "entry status=%x.\n", 3332 pkt->entry_type, pkt->entry_status); 3333 break; 3334 } 3335 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3336 wmb(); 3337 } 3338 3339 /* Adjust ring index */ 3340 if (IS_P3P_TYPE(ha)) { 3341 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 3342 3343 WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index); 3344 } else { 3345 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 3346 } 3347 } 3348 3349 static void 3350 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 3351 { 3352 int rval; 3353 uint32_t cnt; 3354 struct qla_hw_data *ha = vha->hw; 3355 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3356 3357 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 3358 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 3359 return; 3360 3361 rval = QLA_SUCCESS; 3362 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00); 3363 RD_REG_DWORD(&reg->iobase_addr); 3364 WRT_REG_DWORD(&reg->iobase_window, 0x0001); 3365 for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && 3366 rval == QLA_SUCCESS; cnt--) { 3367 if (cnt) { 3368 WRT_REG_DWORD(&reg->iobase_window, 0x0001); 3369 udelay(10); 3370 } else 3371 rval = QLA_FUNCTION_TIMEOUT; 3372 } 3373 if (rval == QLA_SUCCESS) 3374 goto next_test; 3375 3376 rval = QLA_SUCCESS; 3377 WRT_REG_DWORD(&reg->iobase_window, 0x0003); 3378 for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 && 3379 rval == QLA_SUCCESS; cnt--) { 3380 if (cnt) { 3381 WRT_REG_DWORD(&reg->iobase_window, 0x0003); 3382 udelay(10); 3383 } else 3384 rval = QLA_FUNCTION_TIMEOUT; 3385 } 3386 if (rval != QLA_SUCCESS) 3387 goto done; 3388 3389 next_test: 3390 if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3) 3391 ql_log(ql_log_info, vha, 0x504c, 3392 "Additional code -- 0x55AA.\n"); 3393 3394 done: 3395 WRT_REG_DWORD(&reg->iobase_window, 0x0000); 3396 RD_REG_DWORD(&reg->iobase_window); 3397 } 3398 3399 /** 3400 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 3401 * @irq: interrupt number 3402 * @dev_id: SCSI driver HA context 3403 * 3404 * Called by system whenever the host adapter generates an interrupt. 3405 * 3406 * Returns handled flag. 
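 * (IRQ_HANDLED in the normal case; IRQ_NONE only when dev_id does not
 * point to a response queue.)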
3407 */ 3408 irqreturn_t 3409 qla24xx_intr_handler(int irq, void *dev_id) 3410 { 3411 scsi_qla_host_t *vha; 3412 struct qla_hw_data *ha; 3413 struct device_reg_24xx __iomem *reg; 3414 int status; 3415 unsigned long iter; 3416 uint32_t stat; 3417 uint32_t hccr; 3418 uint16_t mb[8]; 3419 struct rsp_que *rsp; 3420 unsigned long flags; 3421 bool process_atio = false; 3422 3423 rsp = (struct rsp_que *) dev_id; 3424 if (!rsp) { 3425 ql_log(ql_log_info, NULL, 0x5059, 3426 "%s: NULL response queue pointer.\n", __func__); 3427 return IRQ_NONE; 3428 } 3429 3430 ha = rsp->hw; 3431 reg = &ha->iobase->isp24; 3432 status = 0; 3433 3434 if (unlikely(pci_channel_offline(ha->pdev))) 3435 return IRQ_HANDLED; 3436 3437 spin_lock_irqsave(&ha->hardware_lock, flags); 3438 vha = pci_get_drvdata(ha->pdev); 3439 for (iter = 50; iter--; ) { 3440 stat = RD_REG_DWORD(®->host_status); 3441 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3442 break; 3443 if (stat & HSRX_RISC_PAUSED) { 3444 if (unlikely(pci_channel_offline(ha->pdev))) 3445 break; 3446 3447 hccr = RD_REG_DWORD(®->hccr); 3448 3449 ql_log(ql_log_warn, vha, 0x504b, 3450 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3451 hccr); 3452 3453 qla2xxx_check_risc_status(vha); 3454 3455 ha->isp_ops->fw_dump(vha, 1); 3456 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3457 break; 3458 } else if ((stat & HSRX_RISC_INT) == 0) 3459 break; 3460 3461 switch (stat & 0xff) { 3462 case INTR_ROM_MB_SUCCESS: 3463 case INTR_ROM_MB_FAILED: 3464 case INTR_MB_SUCCESS: 3465 case INTR_MB_FAILED: 3466 qla24xx_mbx_completion(vha, MSW(stat)); 3467 status |= MBX_INTERRUPT; 3468 3469 break; 3470 case INTR_ASYNC_EVENT: 3471 mb[0] = MSW(stat); 3472 mb[1] = RD_REG_WORD(®->mailbox1); 3473 mb[2] = RD_REG_WORD(®->mailbox2); 3474 mb[3] = RD_REG_WORD(®->mailbox3); 3475 qla2x00_async_event(vha, rsp, mb); 3476 break; 3477 case INTR_RSP_QUE_UPDATE: 3478 case INTR_RSP_QUE_UPDATE_83XX: 3479 qla24xx_process_response_queue(vha, rsp); 3480 break; 3481 case INTR_ATIO_QUE_UPDATE_27XX: 3482 case INTR_ATIO_QUE_UPDATE: 3483 process_atio = true; 3484 break; 3485 case INTR_ATIO_RSP_QUE_UPDATE: 3486 process_atio = true; 3487 qla24xx_process_response_queue(vha, rsp); 3488 break; 3489 default: 3490 ql_dbg(ql_dbg_async, vha, 0x504f, 3491 "Unrecognized interrupt type (%d).\n", stat * 0xff); 3492 break; 3493 } 3494 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 3495 RD_REG_DWORD_RELAXED(®->hccr); 3496 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 3497 ndelay(3500); 3498 } 3499 qla2x00_handle_mbx_completion(ha, status); 3500 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3501 3502 if (process_atio) { 3503 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3504 qlt_24xx_process_atio_queue(vha, 0); 3505 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3506 } 3507 3508 return IRQ_HANDLED; 3509 } 3510 3511 static irqreturn_t 3512 qla24xx_msix_rsp_q(int irq, void *dev_id) 3513 { 3514 struct qla_hw_data *ha; 3515 struct rsp_que *rsp; 3516 struct device_reg_24xx __iomem *reg; 3517 struct scsi_qla_host *vha; 3518 unsigned long flags; 3519 3520 rsp = (struct rsp_que *) dev_id; 3521 if (!rsp) { 3522 ql_log(ql_log_info, NULL, 0x505a, 3523 "%s: NULL response queue pointer.\n", __func__); 3524 return IRQ_NONE; 3525 } 3526 ha = rsp->hw; 3527 reg = &ha->iobase->isp24; 3528 3529 spin_lock_irqsave(&ha->hardware_lock, flags); 3530 3531 vha = pci_get_drvdata(ha->pdev); 3532 qla24xx_process_response_queue(vha, rsp); 3533 if (!ha->flags.disable_msix_handshake) { 3534 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 3535 
RD_REG_DWORD_RELAXED(&reg->hccr); 3536 } 3537 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3538 3539 return IRQ_HANDLED; 3540 } 3541 3542 static irqreturn_t 3543 qla24xx_msix_default(int irq, void *dev_id) 3544 { 3545 scsi_qla_host_t *vha; 3546 struct qla_hw_data *ha; 3547 struct rsp_que *rsp; 3548 struct device_reg_24xx __iomem *reg; 3549 int status; 3550 uint32_t stat; 3551 uint32_t hccr; 3552 uint16_t mb[8]; 3553 unsigned long flags; 3554 bool process_atio = false; 3555 3556 rsp = (struct rsp_que *) dev_id; 3557 if (!rsp) { 3558 ql_log(ql_log_info, NULL, 0x505c, 3559 "%s: NULL response queue pointer.\n", __func__); 3560 return IRQ_NONE; 3561 } 3562 ha = rsp->hw; 3563 reg = &ha->iobase->isp24; 3564 status = 0; 3565 3566 spin_lock_irqsave(&ha->hardware_lock, flags); 3567 vha = pci_get_drvdata(ha->pdev); 3568 do { 3569 stat = RD_REG_DWORD(&reg->host_status); 3570 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3571 break; 3572 if (stat & HSRX_RISC_PAUSED) { 3573 if (unlikely(pci_channel_offline(ha->pdev))) 3574 break; 3575 3576 hccr = RD_REG_DWORD(&reg->hccr); 3577 3578 ql_log(ql_log_info, vha, 0x5050, 3579 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3580 hccr); 3581 3582 qla2xxx_check_risc_status(vha); 3583 3584 ha->isp_ops->fw_dump(vha, 1); 3585 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3586 break; 3587 } else if ((stat & HSRX_RISC_INT) == 0) 3588 break; 3589 3590 switch (stat & 0xff) { 3591 case INTR_ROM_MB_SUCCESS: 3592 case INTR_ROM_MB_FAILED: 3593 case INTR_MB_SUCCESS: 3594 case INTR_MB_FAILED: 3595 qla24xx_mbx_completion(vha, MSW(stat)); 3596 status |= MBX_INTERRUPT; 3597 3598 break; 3599 case INTR_ASYNC_EVENT: 3600 mb[0] = MSW(stat); 3601 mb[1] = RD_REG_WORD(&reg->mailbox1); 3602 mb[2] = RD_REG_WORD(&reg->mailbox2); 3603 mb[3] = RD_REG_WORD(&reg->mailbox3); 3604 qla2x00_async_event(vha, rsp, mb); 3605 break; 3606 case INTR_RSP_QUE_UPDATE: 3607 case INTR_RSP_QUE_UPDATE_83XX: 3608 qla24xx_process_response_queue(vha, rsp); 3609 break; 3610 case INTR_ATIO_QUE_UPDATE_27XX: 3611 case INTR_ATIO_QUE_UPDATE: 3612 process_atio = true; 3613 break; 3614 case INTR_ATIO_RSP_QUE_UPDATE: 3615 process_atio = true; 3616 qla24xx_process_response_queue(vha, rsp); 3617 break; 3618 default: 3619 ql_dbg(ql_dbg_async, vha, 0x5051, 3620 "Unrecognized interrupt type (%d).\n", stat & 0xff); 3621 break; 3622 } 3623 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 3624 } while (0); 3625 qla2x00_handle_mbx_completion(ha, status); 3626 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3627 3628 if (process_atio) { 3629 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3630 qlt_24xx_process_atio_queue(vha, 0); 3631 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3632 } 3633 3634 return IRQ_HANDLED; 3635 } 3636 3637 irqreturn_t 3638 qla2xxx_msix_rsp_q(int irq, void *dev_id) 3639 { 3640 struct qla_hw_data *ha; 3641 struct qla_qpair *qpair; 3642 3643 qpair = dev_id; 3644 if (!qpair) { 3645 ql_log(ql_log_info, NULL, 0x505b, 3646 "%s: NULL response queue pointer.\n", __func__); 3647 return IRQ_NONE; 3648 } 3649 ha = qpair->hw; 3650 3651 queue_work(ha->wq, &qpair->q_work); 3652 3653 return IRQ_HANDLED; 3654 } 3655 3656 irqreturn_t 3657 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) 3658 { 3659 struct qla_hw_data *ha; 3660 struct qla_qpair *qpair; 3661 struct device_reg_24xx __iomem *reg; 3662 unsigned long flags; 3663 3664 qpair = dev_id; 3665 if (!qpair) { 3666 ql_log(ql_log_info, NULL, 0x505b, 3667 "%s: NULL response queue pointer.\n", __func__); 3668 return IRQ_NONE; 3669 } 3670 ha = qpair->hw; 3671 3672 reg = 
&ha->iobase->isp24; 3673 spin_lock_irqsave(&ha->hardware_lock, flags); 3674 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 3675 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3676 3677 queue_work(ha->wq, &qpair->q_work); 3678 3679 return IRQ_HANDLED; 3680 } 3681 3682 /* Interrupt handling helpers. */ 3683 3684 struct qla_init_msix_entry { 3685 const char *name; 3686 irq_handler_t handler; 3687 }; 3688 3689 static const struct qla_init_msix_entry msix_entries[] = { 3690 { "default", qla24xx_msix_default }, 3691 { "rsp_q", qla24xx_msix_rsp_q }, 3692 { "atio_q", qla83xx_msix_atio_q }, 3693 { "qpair_multiq", qla2xxx_msix_rsp_q }, 3694 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs }, 3695 }; 3696 3697 static const struct qla_init_msix_entry qla82xx_msix_entries[] = { 3698 { "qla2xxx (default)", qla82xx_msix_default }, 3699 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 3700 }; 3701 3702 static int 3703 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3704 { 3705 int i, ret; 3706 struct qla_msix_entry *qentry; 3707 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3708 int min_vecs = QLA_BASE_VECTORS; 3709 struct irq_affinity desc = { 3710 .pre_vectors = QLA_BASE_VECTORS, 3711 }; 3712 3713 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 3714 IS_ATIO_MSIX_CAPABLE(ha)) { 3715 desc.pre_vectors++; 3716 min_vecs++; 3717 } 3718 3719 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { 3720 /* user wants to control IRQ setting for target mode */ 3721 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, 3722 ha->msix_count, PCI_IRQ_MSIX); 3723 } else 3724 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, 3725 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, 3726 &desc); 3727 3728 if (ret < 0) { 3729 ql_log(ql_log_fatal, vha, 0x00c7, 3730 "MSI-X: Failed to enable support, " 3731 "giving up -- %d/%d.\n", 3732 ha->msix_count, ret); 3733 goto msix_out; 3734 } else if (ret < ha->msix_count) { 3735 ql_log(ql_log_info, vha, 0x00c6, 3736 "MSI-X: Using %d vectors\n", ret); 3737 ha->msix_count = ret; 3738 /* Recalculate queue values */ 3739 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) { 3740 ha->max_req_queues = ha->msix_count - 1; 3741 3742 /* ATIOQ needs 1 vector. 
That's 1 less QPair */ 3743 if (QLA_TGT_MODE_ENABLED()) 3744 ha->max_req_queues--; 3745 3746 ha->max_rsp_queues = ha->max_req_queues; 3747 3748 ha->max_qpairs = ha->max_req_queues - 1; 3749 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, 3750 "Adjusted max number of queue pairs: %d.\n", ha->max_qpairs); 3751 } 3752 } 3753 vha->irq_offset = desc.pre_vectors; 3754 ha->msix_entries = kcalloc(ha->msix_count, 3755 sizeof(struct qla_msix_entry), 3756 GFP_KERNEL); 3757 if (!ha->msix_entries) { 3758 ql_log(ql_log_fatal, vha, 0x00c8, 3759 "Failed to allocate memory for ha->msix_entries.\n"); 3760 ret = -ENOMEM; 3761 goto free_irqs; 3762 } 3763 ha->flags.msix_enabled = 1; 3764 3765 for (i = 0; i < ha->msix_count; i++) { 3766 qentry = &ha->msix_entries[i]; 3767 qentry->vector = pci_irq_vector(ha->pdev, i); 3768 qentry->entry = i; 3769 qentry->have_irq = 0; 3770 qentry->in_use = 0; 3771 qentry->handle = NULL; 3772 } 3773 3774 /* Enable MSI-X vectors for the base queue */ 3775 for (i = 0; i < QLA_BASE_VECTORS; i++) { 3776 qentry = &ha->msix_entries[i]; 3777 qentry->handle = rsp; 3778 rsp->msix = qentry; 3779 scnprintf(qentry->name, sizeof(qentry->name), 3780 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name); 3781 if (IS_P3P_TYPE(ha)) 3782 ret = request_irq(qentry->vector, 3783 qla82xx_msix_entries[i].handler, 3784 0, qla82xx_msix_entries[i].name, rsp); 3785 else 3786 ret = request_irq(qentry->vector, 3787 msix_entries[i].handler, 3788 0, qentry->name, rsp); 3789 if (ret) 3790 goto msix_register_fail; 3791 qentry->have_irq = 1; 3792 qentry->in_use = 1; 3793 } 3794 3795 /* 3796 * If target mode is enabled, also request the vector for the ATIO 3797 * queue. 3798 */ 3799 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 3800 IS_ATIO_MSIX_CAPABLE(ha)) { 3801 qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; 3802 rsp->msix = qentry; 3803 qentry->handle = rsp; 3804 scnprintf(qentry->name, sizeof(qentry->name), 3805 "qla2xxx%lu_%s", vha->host_no, 3806 msix_entries[QLA_ATIO_VECTOR].name); 3807 qentry->in_use = 1; 3808 ret = request_irq(qentry->vector, 3809 msix_entries[QLA_ATIO_VECTOR].handler, 3810 0, qentry->name, rsp); 3811 qentry->have_irq = 1; 3812 } 3813 3814 msix_register_fail: 3815 if (ret) { 3816 ql_log(ql_log_fatal, vha, 0x00cb, 3817 "MSI-X: unable to register handler -- %x/%d.\n", 3818 qentry->vector, ret); 3819 qla2x00_free_irqs(vha); 3820 ha->mqenable = 0; 3821 goto msix_out; 3822 } 3823 3824 /* Enable MSI-X vector for response queue update for queue 0 */ 3825 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 3826 if (ha->msixbase && ha->mqiobase && 3827 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 3828 ql2xmqsupport)) 3829 ha->mqenable = 1; 3830 } else 3831 if (ha->mqiobase && 3832 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 3833 ql2xmqsupport)) 3834 ha->mqenable = 1; 3835 ql_dbg(ql_dbg_multiq, vha, 0xc005, 3836 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3837 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3838 ql_dbg(ql_dbg_init, vha, 0x0055, 3839 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3840 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3841 3842 msix_out: 3843 return ret; 3844 3845 free_irqs: 3846 pci_free_irq_vectors(ha->pdev); 3847 goto msix_out; 3848 } 3849 3850 int 3851 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 3852 { 3853 int ret = QLA_FUNCTION_FAILED; 3854 device_reg_t *reg = ha->iobase; 3855 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3856 3857 /* If possible, enable MSI-X. 
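 * The fallback order below is MSI-X, then MSI, then a (possibly
 * shared) INTx line; each step is skipped on ISPs that do not
 * support it.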
	/* If possible, enable MSI-X. */
	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
		goto skip_msi;

	if (ql2xenablemsix == 2)
		goto skip_msix;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- ret=%d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (ret > 0) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- ret=%d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d -- already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}
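
/*
 * Teardown counterpart to qla2x00_request_irqs(): frees each vector that
 * was requested (per-entry for MSI-X, the single line for MSI/INTx) and
 * then releases the PCI IRQ vectors. A typical probe/remove pairing might
 * look like the sketch below (illustrative only; error handling elided):
 *
 *	if (qla2x00_request_irqs(ha, rsp))
 *		goto probe_failed;
 *	...
 *	qla2x00_free_irqs(vha);
 */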
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;
	struct qla_msix_entry *qentry;
	int i;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are
	 * called from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		goto free_irqs;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled) {
		for (i = 0; i < ha->msix_count; i++) {
			qentry = &ha->msix_entries[i];
			if (qentry->have_irq) {
				irq_set_affinity_notifier(qentry->vector,
				    NULL);
				free_irq(pci_irq_vector(ha->pdev, i),
				    qentry->handle);
			}
		}
		kfree(ha->msix_entries);
		ha->msix_entries = NULL;
		ha->flags.msix_enabled = 0;
		ql_dbg(ql_dbg_init, vha, 0x0042,
		    "Disabled MSI-X.\n");
	} else {
		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
	}

free_irqs:
	pci_free_irq_vectors(ha->pdev);
}

int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	struct qla_msix_entry *msix, int vector_type)
{
	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	scnprintf(msix->name, sizeof(msix->name),
	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->handle = qpair;
	return ret;
}
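
/*
 * qla25xx_request_irq() binds an already-allocated MSI-X vector to a
 * queue pair; vector_type indexes msix_entries[] above. For example, a
 * qpair creation path might select the regular multiqueue handler with
 * something like the following (illustrative sketch; the vector_type
 * constant is assumed to match its msix_entries[] slot, and fail_irq is
 * a hypothetical error label):
 *
 *	ret = qla25xx_request_irq(ha, qpair, qpair->msix,
 *	    QLA_MSIX_QPAIR_MULTIQ_RSP_Q);
 *	if (ret)
 *		goto fail_irq;
 */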